Total Complexity | 1029
Total Lines      | 6476
Duplicated Lines | 78.8 %
Changes          | 0
Duplicated code is one of the most pungent code smells. A common rule of thumb (the "rule of three") is to restructure code once the same fragment is duplicated in three or more places.
Common duplication problems and their corresponding solutions are:
Complex classes such as those in cpplint often do many different things. To break such a class down, identify a cohesive component within it. A common way to find one is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined which fields belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate and is often faster. A minimal sketch of Extract Class follows below.
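For illustration, here is a minimal sketch of Extract Class applied to a shape similar to cpplint's `_CppLintState` (shown later in the listing), whose `_junit_errors` and `_junit_failures` fields share a common prefix. The class names and the trimmed-down fields here are hypothetical and only demonstrate the mechanics of the refactoring; this is not a drop-in change to cpplint.

```python
# Illustrative sketch of Extract Class (hypothetical names, not part of cpplint).
# Before: one state class mixes error counting with JUnit report building.
# The shared '_junit_' prefix on the fields hints at a cohesive component.
class CppLintState(object):
    def __init__(self):
        self.error_count = 0          # general lint state
        self._junit_errors = []       # JUnit-specific state
        self._junit_failures = []     # JUnit-specific state

    def AddJUnitFailure(self, failure):
        self._junit_failures.append(failure)


# After: the prefixed fields and the behavior that uses them move into
# their own class ...
class JUnitReport(object):
    """Collects errors and failures for a JUnit-style report."""

    def __init__(self):
        self.errors = []
        self.failures = []

    def AddFailure(self, failure):
        self.failures.append(failure)


# ... and the original class delegates to the extracted component.
class RefactoredCppLintState(object):
    def __init__(self):
        self.error_count = 0
        self.junit_report = JUnitReport()

    def AddJUnitFailure(self, failure):
        self.junit_report.AddFailure(failure)


if __name__ == '__main__':
    state = RefactoredCppLintState()
    state.AddJUnitFailure(('foo.cc', 42, 'Tab found', 'whitespace/tab', 5))
    print(len(state.junit_report.failures))  # -> 1
```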
1 | #!/usr/bin/env python |
||
2 | # |
||
3 | # Copyright (c) 2009 Google Inc. All rights reserved. |
||
4 | # |
||
5 | # Redistribution and use in source and binary forms, with or without |
||
6 | # modification, are permitted provided that the following conditions are |
||
7 | # met: |
||
8 | # |
||
9 | # * Redistributions of source code must retain the above copyright |
||
10 | # notice, this list of conditions and the following disclaimer. |
||
11 | # * Redistributions in binary form must reproduce the above |
||
12 | # copyright notice, this list of conditions and the following disclaimer |
||
13 | # in the documentation and/or other materials provided with the |
||
14 | # distribution. |
||
15 | # * Neither the name of Google Inc. nor the names of its |
||
16 | # contributors may be used to endorse or promote products derived from |
||
17 | # this software without specific prior written permission. |
||
18 | # |
||
19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
||
20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
||
21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
||
22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
||
23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
||
24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
||
25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
||
26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
||
27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||
28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
||
29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||
30 | |||
31 | """Does google-lint on c++ files. |
||
32 | |||
33 | The goal of this script is to identify places in the code that *may* |
||
34 | be in non-compliance with google style. It does not attempt to fix |
||
35 | up these problems -- the point is to educate. It also does not |
||
36 | attempt to find all problems, or to ensure that everything it does |
||
37 | find is legitimately a problem. |
||
38 | |||
39 | In particular, we can get very confused by /* and // inside strings! |
||
40 | We do a small hack, which is to ignore //'s with "'s after them on the |
||
41 | same line, but it is far from perfect (in either direction). |
||
42 | """ |
||
43 | |||
44 | import codecs |
||
45 | import copy |
||
46 | import getopt |
||
47 | import glob |
||
48 | import itertools |
||
49 | import math # for log |
||
50 | import os |
||
51 | import re |
||
52 | import sre_compile |
||
53 | import string |
||
54 | import sys |
||
55 | import unicodedata |
||
56 | import xml.etree.ElementTree |
||
57 | |||
58 | # if empty, use defaults |
||
59 | _header_extensions = set([]) |
||
60 | |||
61 | # if empty, use defaults |
||
62 | _valid_extensions = set([]) |
||
63 | |||
64 | |||
65 | # Files with any of these extensions are considered to be |
||
66 | # header files (and will undergo different style checks). |
||
67 | # This set can be extended by using the --headers |
||
68 | # option (also supported in CPPLINT.cfg) |
||
69 | def GetHeaderExtensions(): |
||
70 | if not _header_extensions: |
||
71 | return set(['h', 'hpp', 'hxx', 'h++', 'cuh']) |
||
72 | return _header_extensions |
||
73 | |||
74 | # The allowed extensions for file names |
||
75 | # This is set by --extensions flag |
||
76 | def GetAllExtensions(): |
||
77 | if not _valid_extensions: |
||
78 | return GetHeaderExtensions().union(set(['c', 'cc', 'cpp', 'cxx', 'c++', 'cu'])) |
||
79 | return _valid_extensions |
||
80 | |||
81 | def GetNonHeaderExtensions(): |
||
82 | return GetAllExtensions().difference(GetHeaderExtensions()) |
||
83 | |||
84 | |||
85 | _USAGE = """ |
||
86 | Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit] |
||
87 | [--filter=-x,+y,...] |
||
88 | [--counting=total|toplevel|detailed] [--repository=path] |
||
89 | [--root=subdir] [--linelength=digits] [--recursive] |
||
90 | [--exclude=path] |
||
91 | [--headers=ext1,ext2] |
||
92 | [--extensions=hpp,cpp,...] |
||
93 | <file> [file] ... |
||
94 | |||
95 | The style guidelines this tries to follow are those in |
||
96 | https://google.github.io/styleguide/cppguide.html |
||
97 | |||
98 | Every problem is given a confidence score from 1-5, with 5 meaning we are |
||
99 | certain of the problem, and 1 meaning it could be a legitimate construct. |
||
100 | This will miss some errors, and is not a substitute for a code review. |
||
101 | |||
102 | To suppress false-positive errors of a certain category, add a |
||
103 | 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) |
||
104 | suppresses errors of all categories on that line. |
||
105 | |||
106 | The files passed in will be linted; at least one file must be provided. |
||
107 | Default linted extensions are %s. |
||
108 | Other file types will be ignored. |
||
109 | Change the extensions with the --extensions flag. |
||
110 | |||
111 | Flags: |
||
112 | |||
113 | output=emacs|eclipse|vs7|junit |
||
114 | By default, the output is formatted to ease emacs parsing. Output |
||
115 | compatible with eclipse (eclipse), Visual Studio (vs7), and JUnit |
||
116 | XML parsers such as those used in Jenkins and Bamboo may also be |
||
117 | used. Other formats are unsupported. |
||
118 | |||
119 | verbose=# |
||
120 | Specify a number 0-5 to restrict errors to certain verbosity levels. |
||
121 | Errors with lower verbosity levels have lower confidence and are more |
||
122 | likely to be false positives. |
||
123 | |||
124 | quiet |
||
125 | Suppress output other than linting errors, such as information about |
||
126 | which files have been processed and excluded. |
||
127 | |||
128 | filter=-x,+y,... |
||
129 | Specify a comma-separated list of category-filters to apply: only |
||
130 | error messages whose category names pass the filters will be printed. |
||
131 | (Category names are printed with the message and look like |
||
132 | "[whitespace/indent]".) Filters are evaluated left to right. |
||
133 | "-FOO" and "FOO" means "do not print categories that start with FOO". |
||
134 | "+FOO" means "do print categories that start with FOO". |
||
135 | |||
136 | Examples: --filter=-whitespace,+whitespace/braces |
||
137 | --filter=whitespace,runtime/printf,+runtime/printf_format |
||
138 | --filter=-,+build/include_what_you_use |
||
139 | |||
140 | To see a list of all the categories used in cpplint, pass no arg: |
||
141 | --filter= |
||
142 | |||
143 | counting=total|toplevel|detailed |
||
144 | The total number of errors found is always printed. If |
||
145 | 'toplevel' is provided, then the count of errors in each of |
||
146 | the top-level categories like 'build' and 'whitespace' will |
||
147 | also be printed. If 'detailed' is provided, then a count |
||
148 | is provided for each category like 'build/class'. |
||
149 | |||
150 | repository=path |
||
151 | The top level directory of the repository, used to derive the header |
||
152 | guard CPP variable. By default, this is determined by searching for a |
||
153 | path that contains .git, .hg, or .svn. When this flag is specified, the |
||
154 | given path is used instead. This option allows the header guard CPP |
||
155 | variable to remain consistent even if members of a team have different |
||
156 | repository root directories (such as when checking out a subdirectory |
||
157 | with SVN). In addition, users of non-mainstream version control systems |
||
158 | can use this flag to ensure readable header guard CPP variables. |
||
159 | |||
160 | Examples: |
||
161 | Assuming that Alice checks out ProjectName and Bob checks out |
||
162 | ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then |
||
163 | with no --repository flag, the header guard CPP variable will be: |
||
164 | |||
165 | Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_ |
||
166 | Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_ |
||
167 | |||
168 | If Alice uses the --repository=trunk flag and Bob omits the flag or |
||
169 | uses --repository=. then the header guard CPP variable will be: |
||
170 | |||
171 | Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_ |
||
172 | Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_ |
||
173 | |||
174 | root=subdir |
||
175 | The root directory used for deriving header guard CPP variables. This |
||
176 | directory is relative to the top level directory of the repository which |
||
177 | by default is determined by searching for a directory that contains .git, |
||
178 | .hg, or .svn but can also be controlled with the --repository flag. If |
||
179 | the specified directory does not exist, this flag is ignored. |
||
180 | |||
181 | Examples: |
||
182 | Assuming that src is the top level directory of the repository, the |
||
183 | header guard CPP variables for src/chrome/browser/ui/browser.h are: |
||
184 | |||
185 | No flag => CHROME_BROWSER_UI_BROWSER_H_ |
||
186 | --root=chrome => BROWSER_UI_BROWSER_H_ |
||
187 | --root=chrome/browser => UI_BROWSER_H_ |
||
188 | |||
189 | linelength=digits |
||
190 | This is the allowed line length for the project. The default value is |
||
191 | 80 characters. |
||
192 | |||
193 | Examples: |
||
194 | --linelength=120 |
||
195 | |||
196 | recursive |
||
197 | Search for files to lint recursively. Each directory given in the list |
||
198 | of files to be linted is replaced by all files that descend from that |
||
199 | directory. Files with extensions not in the valid extensions list are |
||
200 | excluded. |
||
201 | |||
202 | exclude=path |
||
203 | Exclude the given path from the list of files to be linted. Relative |
||
204 | paths are evaluated relative to the current directory and shell globbing |
||
205 | is performed. This flag can be provided multiple times to exclude |
||
206 | multiple files. |
||
207 | |||
208 | Examples: |
||
209 | --exclude=one.cc |
||
210 | --exclude=src/*.cc |
||
211 | --exclude=src/*.cc --exclude=test/*.cc |
||
212 | |||
213 | extensions=extension,extension,... |
||
214 | The allowed file extensions that cpplint will check |
||
215 | |||
216 | Examples: |
||
217 | --extensions=%s |
||
218 | |||
219 | headers=extension,extension,... |
||
220 | The allowed header extensions that cpplint will consider to be header files |
||
221 | (by default, only files with extensions %s |
||
222 | will be assumed to be headers) |
||
223 | |||
224 | Examples: |
||
225 | --headers=%s |
||
226 | |||
227 | cpplint.py supports per-directory configurations specified in CPPLINT.cfg |
||
228 | files. A CPPLINT.cfg file can contain a number of key=value pairs. |
||
229 | Currently the following options are supported: |
||
230 | |||
231 | set noparent |
||
232 | filter=+filter1,-filter2,... |
||
233 | exclude_files=regex |
||
234 | linelength=80 |
||
235 | root=subdir |
||
236 | |||
237 | "set noparent" option prevents cpplint from traversing directory tree |
||
238 | upwards looking for more .cfg files in parent directories. This option |
||
239 | is usually placed in the top-level project directory. |
||
240 | |||
241 | The "filter" option is similar in function to --filter flag. It specifies |
||
242 | message filters in addition to the |_DEFAULT_FILTERS| and those specified |
||
243 | through --filter command-line flag. |
||
244 | |||
245 | "exclude_files" allows to specify a regular expression to be matched against |
||
246 | a file name. If the expression matches, the file is skipped and not run |
||
247 | through the linter. |
||
248 | |||
249 | "linelength" specifies the allowed line length for the project. |
||
250 | |||
251 | The "root" option is similar in function to the --root flag (see example |
||
252 | above). |
||
253 | |||
254 | CPPLINT.cfg has an effect on files in the same directory and all |
||
255 | subdirectories, unless overridden by a nested configuration file. |
||
256 | |||
257 | Example file: |
||
258 | filter=-build/include_order,+build/include_alpha |
||
259 | exclude_files=.*\\.cc |
||
260 | |||
261 | The above example disables the build/include_order warning, enables |
||
262 | build/include_alpha, and excludes all .cc files from being |
||
263 | processed by the linter, in the current directory (where the .cfg |
||
264 | file is located) and all subdirectories. |
||
265 | """ % (list(GetAllExtensions()), |
||
266 | ','.join(list(GetAllExtensions())), |
||
267 | GetHeaderExtensions(), |
||
268 | ','.join(GetHeaderExtensions())) |
||
269 | |||
270 | # We categorize each error message we print. Here are the categories. |
||
271 | # We want an explicit list so we can list them all in cpplint --filter=. |
||
272 | # If you add a new error message with a new category, add it to the list |
||
273 | # here! cpplint_unittest.py should tell you if you forget to do this. |
||
274 | _ERROR_CATEGORIES = [ |
||
275 | 'build/class', |
||
276 | 'build/c++11', |
||
277 | 'build/c++14', |
||
278 | 'build/c++tr1', |
||
279 | 'build/deprecated', |
||
280 | 'build/endif_comment', |
||
281 | 'build/explicit_make_pair', |
||
282 | 'build/forward_decl', |
||
283 | 'build/header_guard', |
||
284 | 'build/include', |
||
285 | 'build/include_subdir', |
||
286 | 'build/include_alpha', |
||
287 | 'build/include_order', |
||
288 | 'build/include_what_you_use', |
||
289 | 'build/namespaces_literals', |
||
290 | 'build/namespaces', |
||
291 | 'build/printf_format', |
||
292 | 'build/storage_class', |
||
293 | 'legal/copyright', |
||
294 | 'readability/alt_tokens', |
||
295 | 'readability/braces', |
||
296 | 'readability/casting', |
||
297 | 'readability/check', |
||
298 | 'readability/constructors', |
||
299 | 'readability/fn_size', |
||
300 | 'readability/inheritance', |
||
301 | 'readability/multiline_comment', |
||
302 | 'readability/multiline_string', |
||
303 | 'readability/namespace', |
||
304 | 'readability/nolint', |
||
305 | 'readability/nul', |
||
306 | 'readability/strings', |
||
307 | 'readability/todo', |
||
308 | 'readability/utf8', |
||
309 | 'runtime/arrays', |
||
310 | 'runtime/casting', |
||
311 | 'runtime/explicit', |
||
312 | 'runtime/int', |
||
313 | 'runtime/init', |
||
314 | 'runtime/invalid_increment', |
||
315 | 'runtime/member_string_references', |
||
316 | 'runtime/memset', |
||
317 | 'runtime/indentation_namespace', |
||
318 | 'runtime/operator', |
||
319 | 'runtime/printf', |
||
320 | 'runtime/printf_format', |
||
321 | 'runtime/references', |
||
322 | 'runtime/string', |
||
323 | 'runtime/threadsafe_fn', |
||
324 | 'runtime/vlog', |
||
325 | 'whitespace/blank_line', |
||
326 | 'whitespace/braces', |
||
327 | 'whitespace/comma', |
||
328 | 'whitespace/comments', |
||
329 | 'whitespace/empty_conditional_body', |
||
330 | 'whitespace/empty_if_body', |
||
331 | 'whitespace/empty_loop_body', |
||
332 | 'whitespace/end_of_line', |
||
333 | 'whitespace/ending_newline', |
||
334 | 'whitespace/forcolon', |
||
335 | 'whitespace/indent', |
||
336 | 'whitespace/line_length', |
||
337 | 'whitespace/newline', |
||
338 | 'whitespace/operators', |
||
339 | 'whitespace/parens', |
||
340 | 'whitespace/semicolon', |
||
341 | 'whitespace/tab', |
||
342 | 'whitespace/todo', |
||
343 | ] |
||
344 | |||
345 | # These error categories are no longer enforced by cpplint, but for backwards- |
||
346 | # compatibility they may still appear in NOLINT comments. |
||
347 | _LEGACY_ERROR_CATEGORIES = [ |
||
348 | 'readability/streams', |
||
349 | 'readability/function', |
||
350 | ] |
||
351 | |||
352 | # The default state of the category filter. This is overridden by the --filter= |
||
353 | # flag. By default all errors are on, so only add here categories that should be |
||
354 | # off by default (i.e., categories that must be enabled by the --filter= flags). |
||
355 | # All entries here should start with a '-' or '+', as in the --filter= flag. |
||
356 | _DEFAULT_FILTERS = ['-build/include_alpha'] |
||
357 | |||
358 | # The default list of categories suppressed for C (not C++) files. |
||
359 | _DEFAULT_C_SUPPRESSED_CATEGORIES = [ |
||
360 | 'readability/casting', |
||
361 | ] |
||
362 | |||
363 | # The default list of categories suppressed for Linux Kernel files. |
||
364 | _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [ |
||
365 | 'whitespace/tab', |
||
366 | ] |
||
367 | |||
368 | # We used to check for high-bit characters, but after much discussion we |
||
369 | # decided those were OK, as long as they were in UTF-8 and didn't represent |
||
370 | # hard-coded international strings, which belong in a separate i18n file. |
||
371 | |||
372 | # C++ headers |
||
373 | _CPP_HEADERS = frozenset([ |
||
374 | # Legacy |
||
375 | 'algobase.h', |
||
376 | 'algo.h', |
||
377 | 'alloc.h', |
||
378 | 'builtinbuf.h', |
||
379 | 'bvector.h', |
||
380 | 'complex.h', |
||
381 | 'defalloc.h', |
||
382 | 'deque.h', |
||
383 | 'editbuf.h', |
||
384 | 'fstream.h', |
||
385 | 'function.h', |
||
386 | 'hash_map', |
||
387 | 'hash_map.h', |
||
388 | 'hash_set', |
||
389 | 'hash_set.h', |
||
390 | 'hashtable.h', |
||
391 | 'heap.h', |
||
392 | 'indstream.h', |
||
393 | 'iomanip.h', |
||
394 | 'iostream.h', |
||
395 | 'istream.h', |
||
396 | 'iterator.h', |
||
397 | 'list.h', |
||
398 | 'map.h', |
||
399 | 'multimap.h', |
||
400 | 'multiset.h', |
||
401 | 'ostream.h', |
||
402 | 'pair.h', |
||
403 | 'parsestream.h', |
||
404 | 'pfstream.h', |
||
405 | 'procbuf.h', |
||
406 | 'pthread_alloc', |
||
407 | 'pthread_alloc.h', |
||
408 | 'rope', |
||
409 | 'rope.h', |
||
410 | 'ropeimpl.h', |
||
411 | 'set.h', |
||
412 | 'slist', |
||
413 | 'slist.h', |
||
414 | 'stack.h', |
||
415 | 'stdiostream.h', |
||
416 | 'stl_alloc.h', |
||
417 | 'stl_relops.h', |
||
418 | 'streambuf.h', |
||
419 | 'stream.h', |
||
420 | 'strfile.h', |
||
421 | 'strstream.h', |
||
422 | 'tempbuf.h', |
||
423 | 'tree.h', |
||
424 | 'type_traits.h', |
||
425 | 'vector.h', |
||
426 | # 17.6.1.2 C++ library headers |
||
427 | 'algorithm', |
||
428 | 'array', |
||
429 | 'atomic', |
||
430 | 'bitset', |
||
431 | 'chrono', |
||
432 | 'codecvt', |
||
433 | 'complex', |
||
434 | 'condition_variable', |
||
435 | 'deque', |
||
436 | 'exception', |
||
437 | 'forward_list', |
||
438 | 'fstream', |
||
439 | 'functional', |
||
440 | 'future', |
||
441 | 'initializer_list', |
||
442 | 'iomanip', |
||
443 | 'ios', |
||
444 | 'iosfwd', |
||
445 | 'iostream', |
||
446 | 'istream', |
||
447 | 'iterator', |
||
448 | 'limits', |
||
449 | 'list', |
||
450 | 'locale', |
||
451 | 'map', |
||
452 | 'memory', |
||
453 | 'mutex', |
||
454 | 'new', |
||
455 | 'numeric', |
||
456 | 'ostream', |
||
457 | 'queue', |
||
458 | 'random', |
||
459 | 'ratio', |
||
460 | 'regex', |
||
461 | 'scoped_allocator', |
||
462 | 'set', |
||
463 | 'sstream', |
||
464 | 'stack', |
||
465 | 'stdexcept', |
||
466 | 'streambuf', |
||
467 | 'string', |
||
468 | 'strstream', |
||
469 | 'system_error', |
||
470 | 'thread', |
||
471 | 'tuple', |
||
472 | 'typeindex', |
||
473 | 'typeinfo', |
||
474 | 'type_traits', |
||
475 | 'unordered_map', |
||
476 | 'unordered_set', |
||
477 | 'utility', |
||
478 | 'valarray', |
||
479 | 'vector', |
||
480 | # 17.6.1.2 C++ headers for C library facilities |
||
481 | 'cassert', |
||
482 | 'ccomplex', |
||
483 | 'cctype', |
||
484 | 'cerrno', |
||
485 | 'cfenv', |
||
486 | 'cfloat', |
||
487 | 'cinttypes', |
||
488 | 'ciso646', |
||
489 | 'climits', |
||
490 | 'clocale', |
||
491 | 'cmath', |
||
492 | 'csetjmp', |
||
493 | 'csignal', |
||
494 | 'cstdalign', |
||
495 | 'cstdarg', |
||
496 | 'cstdbool', |
||
497 | 'cstddef', |
||
498 | 'cstdint', |
||
499 | 'cstdio', |
||
500 | 'cstdlib', |
||
501 | 'cstring', |
||
502 | 'ctgmath', |
||
503 | 'ctime', |
||
504 | 'cuchar', |
||
505 | 'cwchar', |
||
506 | 'cwctype', |
||
507 | ]) |
||
508 | |||
509 | # Type names |
||
510 | _TYPES = re.compile( |
||
511 | r'^(?:' |
||
512 | # [dcl.type.simple] |
||
513 | r'(char(16_t|32_t)?)|wchar_t|' |
||
514 | r'bool|short|int|long|signed|unsigned|float|double|' |
||
515 | # [support.types] |
||
516 | r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|' |
||
517 | # [cstdint.syn] |
||
518 | r'(u?int(_fast|_least)?(8|16|32|64)_t)|' |
||
519 | r'(u?int(max|ptr)_t)|' |
||
520 | r')$') |
||
521 | |||
522 | |||
523 | # These headers are excluded from [build/include] and [build/include_order] |
||
524 | # checks: |
||
525 | # - Anything not following google file name conventions (containing an |
||
526 | # uppercase character, such as Python.h or nsStringAPI.h, for example). |
||
527 | # - Lua headers. |
||
528 | _THIRD_PARTY_HEADERS_PATTERN = re.compile( |
||
529 | r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$') |
||
530 | |||
531 | # Pattern for matching FileInfo.BaseName() against test file name |
||
532 | _test_suffixes = ['_test', '_regtest', '_unittest'] |
||
533 | _TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$' |
||
534 | |||
535 | # Pattern that matches only complete whitespace, possibly across multiple lines. |
||
536 | _EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL) |
||
537 | |||
538 | # Assertion macros. These are defined in base/logging.h and |
||
539 | # testing/base/public/gunit.h. |
||
540 | _CHECK_MACROS = [ |
||
541 | 'DCHECK', 'CHECK', |
||
542 | 'EXPECT_TRUE', 'ASSERT_TRUE', |
||
543 | 'EXPECT_FALSE', 'ASSERT_FALSE', |
||
544 | ] |
||
545 | |||
546 | # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE |
||
547 | _CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS]) |
||
548 | |||
549 | for op, replacement in [('==', 'EQ'), ('!=', 'NE'), |
||
550 | ('>=', 'GE'), ('>', 'GT'), |
||
551 | ('<=', 'LE'), ('<', 'LT')]: |
||
552 | _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement |
||
553 | _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement |
||
554 | _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement |
||
555 | _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement |
||
556 | |||
557 | for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), |
||
558 | ('>=', 'LT'), ('>', 'LE'), |
||
559 | ('<=', 'GT'), ('<', 'GE')]: |
||
560 | _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement |
||
561 | _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement |
||
562 | |||
563 | # Alternative tokens and their replacements. For full list, see section 2.5 |
||
564 | # Alternative tokens [lex.digraph] in the C++ standard. |
||
565 | # |
||
566 | # Digraphs (such as '%:') are not included here since it's a mess to |
||
567 | # match those on a word boundary. |
||
568 | _ALT_TOKEN_REPLACEMENT = { |
||
569 | 'and': '&&', |
||
570 | 'bitor': '|', |
||
571 | 'or': '||', |
||
572 | 'xor': '^', |
||
573 | 'compl': '~', |
||
574 | 'bitand': '&', |
||
575 | 'and_eq': '&=', |
||
576 | 'or_eq': '|=', |
||
577 | 'xor_eq': '^=', |
||
578 | 'not': '!', |
||
579 | 'not_eq': '!=' |
||
580 | } |
||
581 | |||
582 | # Compile regular expression that matches all the above keywords. The "[ =()]" |
||
583 | # bit is meant to avoid matching these keywords outside of boolean expressions. |
||
584 | # |
||
585 | # False positives include C-style multi-line comments and multi-line strings |
||
586 | # but those have always been troublesome for cpplint. |
||
587 | _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( |
||
588 | r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') |
||
589 | |||
590 | |||
591 | # These constants define types of headers for use with |
||
592 | # _IncludeState.CheckNextIncludeOrder(). |
||
593 | _C_SYS_HEADER = 1 |
||
594 | _CPP_SYS_HEADER = 2 |
||
595 | _LIKELY_MY_HEADER = 3 |
||
596 | _POSSIBLE_MY_HEADER = 4 |
||
597 | _OTHER_HEADER = 5 |
||
598 | |||
599 | # These constants define the current inline assembly state |
||
600 | _NO_ASM = 0 # Outside of inline assembly block |
||
601 | _INSIDE_ASM = 1 # Inside inline assembly block |
||
602 | _END_ASM = 2 # Last line of inline assembly block |
||
603 | _BLOCK_ASM = 3 # The whole block is an inline assembly block |
||
604 | |||
605 | # Match start of assembly blocks |
||
606 | _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' |
||
607 | r'(?:\s+(volatile|__volatile__))?' |
||
608 | r'\s*[{(]') |
||
609 | |||
610 | # Match strings that indicate we're working on a C (not C++) file. |
||
611 | _SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|' |
||
612 | r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))') |
||
613 | |||
614 | # Match string that indicates we're working on a Linux Kernel file. |
||
615 | _SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)') |
||
616 | |||
617 | _regexp_compile_cache = {} |
||
618 | |||
619 | # {str, set(int)}: a map from error categories to sets of linenumbers |
||
620 | # on which those errors are expected and should be suppressed. |
||
621 | _error_suppressions = {} |
||
622 | |||
623 | # The root directory used for deriving header guard CPP variable. |
||
624 | # This is set by --root flag. |
||
625 | _root = None |
||
626 | |||
627 | # The top level repository directory. If set, _root is calculated relative to |
||
628 | # this directory instead of the directory containing version control artifacts. |
||
629 | # This is set by the --repository flag. |
||
630 | _repository = None |
||
631 | |||
632 | # Files to exclude from linting. This is set by the --exclude flag. |
||
633 | _excludes = None |
||
634 | |||
635 | # Whether to suppress PrintInfo messages |
||
636 | _quiet = False |
||
637 | |||
638 | # The allowed line length of files. |
||
639 | # This is set by --linelength flag. |
||
640 | _line_length = 80 |
||
641 | |||
642 | try: |
||
643 | xrange(1, 0) |
||
644 | except NameError: |
||
645 | # -- pylint: disable=redefined-builtin |
||
646 | xrange = range |
||
647 | |||
648 | try: |
||
649 | unicode |
||
650 | except NameError: |
||
651 | # -- pylint: disable=redefined-builtin |
||
652 | basestring = unicode = str |
||
653 | |||
654 | try: |
||
655 | long(2) |
||
656 | except NameError: |
||
657 | # -- pylint: disable=redefined-builtin |
||
658 | long = int |
||
659 | |||
660 | if sys.version_info < (3,): |
||
661 | # -- pylint: disable=no-member |
||
662 | # BINARY_TYPE = str |
||
663 | itervalues = dict.itervalues |
||
664 | iteritems = dict.iteritems |
||
665 | else: |
||
666 | # BINARY_TYPE = bytes |
||
667 | itervalues = dict.values |
||
668 | iteritems = dict.items |
||
669 | |||
670 | def unicode_escape_decode(x): |
||
671 | if sys.version_info < (3,): |
||
672 | return codecs.unicode_escape_decode(x)[0] |
||
673 | else: |
||
674 | return x |
||
675 | |||
676 | # {str, bool}: a map from error categories to booleans which indicate if the |
||
677 | # category should be suppressed for every line. |
||
678 | _global_error_suppressions = {} |
||
679 | |||
680 | |||
681 | |||
682 | |||
683 | def ParseNolintSuppressions(filename, raw_line, linenum, error): |
||
684 | """Updates the global list of line error-suppressions. |
||
685 | |||
686 | Parses any NOLINT comments on the current line, updating the global |
||
687 | error_suppressions store. Reports an error if the NOLINT comment |
||
688 | was malformed. |
||
689 | |||
690 | Args: |
||
691 | filename: str, the name of the input file. |
||
692 | raw_line: str, the line of input text, with comments. |
||
693 | linenum: int, the number of the current line. |
||
694 | error: function, an error handler. |
||
695 | """ |
||
696 | matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line) |
||
697 | if matched: |
||
698 | if matched.group(1): |
||
699 | suppressed_line = linenum + 1 |
||
700 | else: |
||
701 | suppressed_line = linenum |
||
702 | category = matched.group(2) |
||
703 | if category in (None, '(*)'): # => "suppress all" |
||
704 | _error_suppressions.setdefault(None, set()).add(suppressed_line) |
||
705 | else: |
||
706 | if category.startswith('(') and category.endswith(')'): |
||
707 | category = category[1:-1] |
||
708 | if category in _ERROR_CATEGORIES: |
||
709 | _error_suppressions.setdefault(category, set()).add(suppressed_line) |
||
710 | elif category not in _LEGACY_ERROR_CATEGORIES: |
||
711 | error(filename, linenum, 'readability/nolint', 5, |
||
712 | 'Unknown NOLINT error category: %s' % category) |
||
713 | |||
714 | |||
715 | def ProcessGlobalSuppresions(lines): |
||
716 | """Updates the list of global error suppressions. |
||
717 | |||
718 | Parses any lint directives in the file that have global effect. |
||
719 | |||
720 | Args: |
||
721 | lines: An array of strings, each representing a line of the file, with the |
||
722 | last element being empty if the file is terminated with a newline. |
||
723 | """ |
||
724 | for line in lines: |
||
725 | if _SEARCH_C_FILE.search(line): |
||
726 | for category in _DEFAULT_C_SUPPRESSED_CATEGORIES: |
||
727 | _global_error_suppressions[category] = True |
||
728 | if _SEARCH_KERNEL_FILE.search(line): |
||
729 | for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES: |
||
730 | _global_error_suppressions[category] = True |
||
731 | |||
732 | |||
733 | def ResetNolintSuppressions(): |
||
734 | """Resets the set of NOLINT suppressions to empty.""" |
||
735 | _error_suppressions.clear() |
||
736 | _global_error_suppressions.clear() |
||
737 | |||
738 | |||
739 | def IsErrorSuppressedByNolint(category, linenum): |
||
740 | """Returns true if the specified error category is suppressed on this line. |
||
741 | |||
742 | Consults the global error_suppressions map populated by |
||
743 | ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions. |
||
744 | |||
745 | Args: |
||
746 | category: str, the category of the error. |
||
747 | linenum: int, the current line number. |
||
748 | Returns: |
||
749 | bool, True iff the error should be suppressed due to a NOLINT comment or |
||
750 | global suppression. |
||
751 | """ |
||
752 | return (_global_error_suppressions.get(category, False) or |
||
753 | linenum in _error_suppressions.get(category, set()) or |
||
754 | linenum in _error_suppressions.get(None, set())) |
||
755 | |||
756 | |||
757 | def Match(pattern, s): |
||
758 | """Matches the string with the pattern, caching the compiled regexp.""" |
||
759 | # The regexp compilation caching is inlined in both Match and Search for |
||
760 | # performance reasons; factoring it out into a separate function turns out |
||
761 | # to be noticeably expensive. |
||
762 | if pattern not in _regexp_compile_cache: |
||
763 | _regexp_compile_cache[pattern] = sre_compile.compile(pattern) |
||
764 | return _regexp_compile_cache[pattern].match(s) |
||
765 | |||
766 | |||
767 | def ReplaceAll(pattern, rep, s): |
||
768 | """Replaces instances of pattern in a string with a replacement. |
||
769 | |||
770 | The compiled regex is kept in a cache shared by Match and Search. |
||
771 | |||
772 | Args: |
||
773 | pattern: regex pattern |
||
774 | rep: replacement text |
||
775 | s: search string |
||
776 | |||
777 | Returns: |
||
778 | string with replacements made (or original string if no replacements) |
||
779 | """ |
||
780 | if pattern not in _regexp_compile_cache: |
||
781 | _regexp_compile_cache[pattern] = sre_compile.compile(pattern) |
||
782 | return _regexp_compile_cache[pattern].sub(rep, s) |
||
783 | |||
784 | |||
785 | def Search(pattern, s): |
||
786 | """Searches the string for the pattern, caching the compiled regexp.""" |
||
787 | if pattern not in _regexp_compile_cache: |
||
788 | _regexp_compile_cache[pattern] = sre_compile.compile(pattern) |
||
789 | return _regexp_compile_cache[pattern].search(s) |
||
790 | |||
791 | |||
792 | def _IsSourceExtension(s): |
||
793 | """File extension (excluding dot) matches a source file extension.""" |
||
794 | return s in GetNonHeaderExtensions() |
||
795 | |||
796 | |||
797 | class _IncludeState(object): |
||
798 | """Tracks line numbers for includes, and the order in which includes appear. |
||
799 | |||
800 | include_list contains list of lists of (header, line number) pairs. |
||
801 | It's a lists of lists rather than just one flat list to make it |
||
802 | easier to update across preprocessor boundaries. |
||
803 | |||
804 | Call CheckNextIncludeOrder() once for each header in the file, passing |
||
805 | in the type constants defined above. Calls in an illegal order will |
||
806 | raise an _IncludeError with an appropriate error message. |
||
807 | |||
808 | """ |
||
809 | # self._section will move monotonically through this set. If it ever |
||
810 | # needs to move backwards, CheckNextIncludeOrder will raise an error. |
||
811 | _INITIAL_SECTION = 0 |
||
812 | _MY_H_SECTION = 1 |
||
813 | _C_SECTION = 2 |
||
814 | _CPP_SECTION = 3 |
||
815 | _OTHER_H_SECTION = 4 |
||
816 | |||
817 | _TYPE_NAMES = { |
||
818 | _C_SYS_HEADER: 'C system header', |
||
819 | _CPP_SYS_HEADER: 'C++ system header', |
||
820 | _LIKELY_MY_HEADER: 'header this file implements', |
||
821 | _POSSIBLE_MY_HEADER: 'header this file may implement', |
||
822 | _OTHER_HEADER: 'other header', |
||
823 | } |
||
824 | _SECTION_NAMES = { |
||
825 | _INITIAL_SECTION: "... nothing. (This can't be an error.)", |
||
826 | _MY_H_SECTION: 'a header this file implements', |
||
827 | _C_SECTION: 'C system header', |
||
828 | _CPP_SECTION: 'C++ system header', |
||
829 | _OTHER_H_SECTION: 'other header', |
||
830 | } |
||
831 | |||
832 | def __init__(self): |
||
833 | self.include_list = [[]] |
||
834 | self._section = None |
||
835 | self._last_header = None |
||
836 | self.ResetSection('') |
||
837 | |||
838 | def FindHeader(self, header): |
||
839 | """Check if a header has already been included. |
||
840 | |||
841 | Args: |
||
842 | header: header to check. |
||
843 | Returns: |
||
844 | Line number of previous occurrence, or -1 if the header has not |
||
845 | been seen before. |
||
846 | """ |
||
847 | for section_list in self.include_list: |
||
848 | for f in section_list: |
||
849 | if f[0] == header: |
||
850 | return f[1] |
||
851 | return -1 |
||
852 | |||
853 | def ResetSection(self, directive): |
||
854 | """Reset section checking for preprocessor directive. |
||
855 | |||
856 | Args: |
||
857 | directive: preprocessor directive (e.g. "if", "else"). |
||
858 | """ |
||
859 | # The name of the current section. |
||
860 | self._section = self._INITIAL_SECTION |
||
861 | # The path of last found header. |
||
862 | self._last_header = '' |
||
863 | |||
864 | # Update list of includes. Note that we never pop from the |
||
865 | # include list. |
||
866 | if directive in ('if', 'ifdef', 'ifndef'): |
||
867 | self.include_list.append([]) |
||
868 | elif directive in ('else', 'elif'): |
||
869 | self.include_list[-1] = [] |
||
870 | |||
871 | def SetLastHeader(self, header_path): |
||
872 | self._last_header = header_path |
||
873 | |||
874 | def CanonicalizeAlphabeticalOrder(self, header_path): |
||
875 | """Returns a path canonicalized for alphabetical comparison. |
||
876 | |||
877 | - replaces "-" with "_" so they both cmp the same. |
||
878 | - removes '-inl' since we don't require them to be after the main header. |
||
879 | - lowercase everything, just in case. |
||
880 | |||
881 | Args: |
||
882 | header_path: Path to be canonicalized. |
||
883 | |||
884 | Returns: |
||
885 | Canonicalized path. |
||
886 | """ |
||
887 | return header_path.replace('-inl.h', '.h').replace('-', '_').lower() |
||
888 | |||
889 | def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): |
||
890 | """Check if a header is in alphabetical order with the previous header. |
||
891 | |||
892 | Args: |
||
893 | clean_lines: A CleansedLines instance containing the file. |
||
894 | linenum: The number of the line to check. |
||
895 | header_path: Canonicalized header to be checked. |
||
896 | |||
897 | Returns: |
||
898 | Returns true if the header is in alphabetical order. |
||
899 | """ |
||
900 | # If previous section is different from current section, _last_header will |
||
901 | # be reset to empty string, so it's always less than current header. |
||
902 | # |
||
903 | # If previous line was a blank line, assume that the headers are |
||
904 | # intentionally sorted the way they are. |
||
905 | if (self._last_header > header_path and |
||
906 | Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])): |
||
907 | return False |
||
908 | return True |
||
909 | |||
910 | def CheckNextIncludeOrder(self, header_type): |
||
911 | """Returns a non-empty error message if the next header is out of order. |
||
912 | |||
913 | This function also updates the internal state to be ready to check |
||
914 | the next include. |
||
915 | |||
916 | Args: |
||
917 | header_type: One of the _XXX_HEADER constants defined above. |
||
918 | |||
919 | Returns: |
||
920 | The empty string if the header is in the right order, or an |
||
921 | error message describing what's wrong. |
||
922 | |||
923 | """ |
||
924 | error_message = ('Found %s after %s' % |
||
925 | (self._TYPE_NAMES[header_type], |
||
926 | self._SECTION_NAMES[self._section])) |
||
927 | |||
928 | last_section = self._section |
||
929 | |||
930 | if header_type == _C_SYS_HEADER: |
||
931 | if self._section <= self._C_SECTION: |
||
932 | self._section = self._C_SECTION |
||
933 | else: |
||
934 | self._last_header = '' |
||
935 | return error_message |
||
936 | elif header_type == _CPP_SYS_HEADER: |
||
937 | if self._section <= self._CPP_SECTION: |
||
938 | self._section = self._CPP_SECTION |
||
939 | else: |
||
940 | self._last_header = '' |
||
941 | return error_message |
||
942 | elif header_type == _LIKELY_MY_HEADER: |
||
943 | if self._section <= self._MY_H_SECTION: |
||
944 | self._section = self._MY_H_SECTION |
||
945 | else: |
||
946 | self._section = self._OTHER_H_SECTION |
||
947 | elif header_type == _POSSIBLE_MY_HEADER: |
||
948 | if self._section <= self._MY_H_SECTION: |
||
949 | self._section = self._MY_H_SECTION |
||
950 | else: |
||
951 | # This will always be the fallback because we're not sure |
||
952 | # enough that the header is associated with this file. |
||
953 | self._section = self._OTHER_H_SECTION |
||
954 | else: |
||
955 | assert header_type == _OTHER_HEADER |
||
956 | self._section = self._OTHER_H_SECTION |
||
957 | |||
958 | if last_section != self._section: |
||
959 | self._last_header = '' |
||
960 | |||
961 | return '' |
||
962 | |||
963 | |||
964 | class _CppLintState(object): |
||
965 | """Maintains module-wide state..""" |
||
966 | |||
967 | def __init__(self): |
||
968 | self.verbose_level = 1 # global setting. |
||
969 | self.error_count = 0 # global count of reported errors |
||
970 | # filters to apply when emitting error messages |
||
971 | self.filters = _DEFAULT_FILTERS[:] |
||
972 | # backup of filter list. Used to restore the state after each file. |
||
973 | self._filters_backup = self.filters[:] |
||
974 | self.counting = 'total' # In what way are we counting errors? |
||
975 | self.errors_by_category = {} # string to int dict storing error counts |
||
976 | |||
977 | # output format: |
||
978 | # "emacs" - format that emacs can parse (default) |
||
979 | # "eclipse" - format that eclipse can parse |
||
980 | # "vs7" - format that Microsoft Visual Studio 7 can parse |
||
981 | # "junit" - format that Jenkins, Bamboo, etc can parse |
||
982 | self.output_format = 'emacs' |
||
983 | |||
984 | # For JUnit output, save errors and failures until the end so that they |
||
985 | # can be written into the XML |
||
986 | self._junit_errors = [] |
||
987 | self._junit_failures = [] |
||
988 | |||
989 | def SetOutputFormat(self, output_format): |
||
990 | """Sets the output format for errors.""" |
||
991 | self.output_format = output_format |
||
992 | |||
993 | def SetVerboseLevel(self, level): |
||
994 | """Sets the module's verbosity, and returns the previous setting.""" |
||
995 | last_verbose_level = self.verbose_level |
||
996 | self.verbose_level = level |
||
997 | return last_verbose_level |
||
998 | |||
999 | def SetCountingStyle(self, counting_style): |
||
1000 | """Sets the module's counting options.""" |
||
1001 | self.counting = counting_style |
||
1002 | |||
1003 | def SetFilters(self, filters): |
||
1004 | """Sets the error-message filters. |
||
1005 | |||
1006 | These filters are applied when deciding whether to emit a given |
||
1007 | error message. |
||
1008 | |||
1009 | Args: |
||
1010 | filters: A string of comma-separated filters (eg "+whitespace/indent"). |
||
1011 | Each filter should start with + or -; else we die. |
||
1012 | |||
1013 | Raises: |
||
1014 | ValueError: The comma-separated filters did not all start with '+' or '-'. |
||
1015 | E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" |
||
1016 | """ |
||
1017 | # Default filters always have less priority than the flag ones. |
||
1018 | self.filters = _DEFAULT_FILTERS[:] |
||
1019 | self.AddFilters(filters) |
||
1020 | |||
1021 | def AddFilters(self, filters): |
||
1022 | """ Adds more filters to the existing list of error-message filters. """ |
||
1023 | for filt in filters.split(','): |
||
1024 | clean_filt = filt.strip() |
||
1025 | if clean_filt: |
||
1026 | self.filters.append(clean_filt) |
||
1027 | for filt in self.filters: |
||
1028 | if not (filt.startswith('+') or filt.startswith('-')): |
||
1029 | raise ValueError('Every filter in --filters must start with + or -' |
||
1030 | ' (%s does not)' % filt) |
||
1031 | |||
1032 | def BackupFilters(self): |
||
1033 | """ Saves the current filter list to backup storage.""" |
||
1034 | self._filters_backup = self.filters[:] |
||
1035 | |||
1036 | def RestoreFilters(self): |
||
1037 | """ Restores filters previously backed up.""" |
||
1038 | self.filters = self._filters_backup[:] |
||
1039 | |||
1040 | def ResetErrorCounts(self): |
||
1041 | """Sets the module's error statistic back to zero.""" |
||
1042 | self.error_count = 0 |
||
1043 | self.errors_by_category = {} |
||
1044 | |||
1045 | def IncrementErrorCount(self, category): |
||
1046 | """Bumps the module's error statistic.""" |
||
1047 | self.error_count += 1 |
||
1048 | if self.counting in ('toplevel', 'detailed'): |
||
1049 | if self.counting != 'detailed': |
||
1050 | category = category.split('/')[0] |
||
1051 | if category not in self.errors_by_category: |
||
1052 | self.errors_by_category[category] = 0 |
||
1053 | self.errors_by_category[category] += 1 |
||
1054 | |||
1055 | def PrintErrorCounts(self): |
||
1056 | """Print a summary of errors by category, and the total.""" |
||
1057 | for category, count in sorted(iteritems(self.errors_by_category)): |
||
1058 | self.PrintInfo('Category \'%s\' errors found: %d\n' % |
||
1059 | (category, count)) |
||
1060 | if self.error_count > 0: |
||
1061 | self.PrintInfo('Total errors found: %d\n' % self.error_count) |
||
1062 | |||
1063 | def PrintInfo(self, message): |
||
1064 | if not _quiet and self.output_format != 'junit': |
||
1065 | sys.stderr.write(message) |
||
1066 | |||
1067 | def PrintError(self, message): |
||
1068 | if self.output_format == 'junit': |
||
1069 | self._junit_errors.append(message) |
||
1070 | else: |
||
1071 | sys.stderr.write(message) |
||
1072 | |||
1073 | def AddJUnitFailure(self, filename, linenum, message, category, confidence): |
||
1074 | self._junit_failures.append((filename, linenum, message, category, |
||
1075 | confidence)) |
||
1076 | |||
1077 | def FormatJUnitXML(self): |
||
1078 | num_errors = len(self._junit_errors) |
||
1079 | num_failures = len(self._junit_failures) |
||
1080 | |||
1081 | testsuite = xml.etree.ElementTree.Element('testsuite') |
||
1082 | testsuite.attrib['name'] = 'cpplint' |
||
1083 | testsuite.attrib['errors'] = str(num_errors) |
||
1084 | testsuite.attrib['failures'] = str(num_failures) |
||
1085 | |||
1086 | if num_errors == 0 and num_failures == 0: |
||
1087 | testsuite.attrib['tests'] = str(1) |
||
1088 | xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed') |
||
1089 | |||
1090 | else: |
||
1091 | testsuite.attrib['tests'] = str(num_errors + num_failures) |
||
1092 | if num_errors > 0: |
||
1093 | testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase') |
||
1094 | testcase.attrib['name'] = 'errors' |
||
1095 | error = xml.etree.ElementTree.SubElement(testcase, 'error') |
||
1096 | error.text = '\n'.join(self._junit_errors) |
||
1097 | if num_failures > 0: |
||
1098 | # Group failures by file |
||
1099 | failed_file_order = [] |
||
1100 | failures_by_file = {} |
||
1101 | for failure in self._junit_failures: |
||
1102 | failed_file = failure[0] |
||
1103 | if failed_file not in failed_file_order: |
||
1104 | failed_file_order.append(failed_file) |
||
1105 | failures_by_file[failed_file] = [] |
||
1106 | failures_by_file[failed_file].append(failure) |
||
1107 | # Create a testcase for each file |
||
1108 | for failed_file in failed_file_order: |
||
1109 | failures = failures_by_file[failed_file] |
||
1110 | testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase') |
||
1111 | testcase.attrib['name'] = failed_file |
||
1112 | failure = xml.etree.ElementTree.SubElement(testcase, 'failure') |
||
1113 | template = '{0}: {1} [{2}] [{3}]' |
||
1114 | texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures] |
||
1115 | failure.text = '\n'.join(texts) |
||
1116 | |||
1117 | xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n' |
||
1118 | return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8') |
||
1119 | |||
1120 | |||
1121 | _cpplint_state = _CppLintState() |
||
1122 | |||
1123 | |||
1124 | def _OutputFormat(): |
||
1125 | """Gets the module's output format.""" |
||
1126 | return _cpplint_state.output_format |
||
1127 | |||
1128 | |||
1129 | def _SetOutputFormat(output_format): |
||
1130 | """Sets the module's output format.""" |
||
1131 | _cpplint_state.SetOutputFormat(output_format) |
||
1132 | |||
1133 | |||
1134 | def _VerboseLevel(): |
||
1135 | """Returns the module's verbosity setting.""" |
||
1136 | return _cpplint_state.verbose_level |
||
1137 | |||
1138 | |||
1139 | def _SetVerboseLevel(level): |
||
1140 | """Sets the module's verbosity, and returns the previous setting.""" |
||
1141 | return _cpplint_state.SetVerboseLevel(level) |
||
1142 | |||
1143 | |||
1144 | def _SetCountingStyle(level): |
||
1145 | """Sets the module's counting options.""" |
||
1146 | _cpplint_state.SetCountingStyle(level) |
||
1147 | |||
1148 | |||
1149 | def _Filters(): |
||
1150 | """Returns the module's list of output filters, as a list.""" |
||
1151 | return _cpplint_state.filters |
||
1152 | |||
1153 | |||
1154 | def _SetFilters(filters): |
||
1155 | """Sets the module's error-message filters. |
||
1156 | |||
1157 | These filters are applied when deciding whether to emit a given |
||
1158 | error message. |
||
1159 | |||
1160 | Args: |
||
1161 | filters: A string of comma-separated filters (eg "whitespace/indent"). |
||
1162 | Each filter should start with + or -; else we die. |
||
1163 | """ |
||
1164 | _cpplint_state.SetFilters(filters) |
||
1165 | |||
1166 | def _AddFilters(filters): |
||
1167 | """Adds more filter overrides. |
||
1168 | |||
1169 | Unlike _SetFilters, this function does not reset the current list of filters |
||
1170 | available. |
||
1171 | |||
1172 | Args: |
||
1173 | filters: A string of comma-separated filters (eg "whitespace/indent"). |
||
1174 | Each filter should start with + or -; else we die. |
||
1175 | """ |
||
1176 | _cpplint_state.AddFilters(filters) |
||
1177 | |||
1178 | def _BackupFilters(): |
||
1179 | """ Saves the current filter list to backup storage.""" |
||
1180 | _cpplint_state.BackupFilters() |
||
1181 | |||
1182 | def _RestoreFilters(): |
||
1183 | """ Restores filters previously backed up.""" |
||
1184 | _cpplint_state.RestoreFilters() |
||
1185 | |||
1186 | class _FunctionState(object): |
||
1187 | """Tracks current function name and the number of lines in its body.""" |
||
1188 | |||
1189 | _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. |
||
1190 | _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. |
||
1191 | |||
1192 | def __init__(self): |
||
1193 | self.in_a_function = False |
||
1194 | self.lines_in_function = 0 |
||
1195 | self.current_function = '' |
||
1196 | |||
1197 | def Begin(self, function_name): |
||
1198 | """Start analyzing function body. |
||
1199 | |||
1200 | Args: |
||
1201 | function_name: The name of the function being tracked. |
||
1202 | """ |
||
1203 | self.in_a_function = True |
||
1204 | self.lines_in_function = 0 |
||
1205 | self.current_function = function_name |
||
1206 | |||
1207 | def Count(self): |
||
1208 | """Count line in current function body.""" |
||
1209 | if self.in_a_function: |
||
1210 | self.lines_in_function += 1 |
||
1211 | |||
1212 | def Check(self, error, filename, linenum): |
||
1213 | """Report if too many lines in function body. |
||
1214 | |||
1215 | Args: |
||
1216 | error: The function to call with any errors found. |
||
1217 | filename: The name of the current file. |
||
1218 | linenum: The number of the line to check. |
||
1219 | """ |
||
1220 | if not self.in_a_function: |
||
1221 | return |
||
1222 | |||
1223 | if Match(r'T(EST|est)', self.current_function): |
||
1224 | base_trigger = self._TEST_TRIGGER |
||
1225 | else: |
||
1226 | base_trigger = self._NORMAL_TRIGGER |
||
1227 | trigger = base_trigger * 2**_VerboseLevel() |
||
1228 | |||
1229 | if self.lines_in_function > trigger: |
||
1230 | error_level = int(math.log(self.lines_in_function / base_trigger, 2)) |
||
1231 | # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... |
||
1232 | if error_level > 5: |
||
1233 | error_level = 5 |
||
1234 | error(filename, linenum, 'readability/fn_size', error_level, |
||
1235 | 'Small and focused functions are preferred:' |
||
1236 | ' %s has %d non-comment lines' |
||
1237 | ' (error triggered by exceeding %d lines).' % ( |
||
1238 | self.current_function, self.lines_in_function, trigger)) |
||
1239 | |||
1240 | def End(self): |
||
1241 | """Stop analyzing function body.""" |
||
1242 | self.in_a_function = False |
||
1243 | |||
1244 | |||
1245 | class _IncludeError(Exception): |
||
1246 | """Indicates a problem with the include order in a file.""" |
||
1247 | pass |
||
1248 | |||
1249 | |||
1250 | class FileInfo(object): |
||
1251 | """Provides utility functions for filenames. |
||
1252 | |||
1253 | FileInfo provides easy access to the components of a file's path |
||
1254 | relative to the project root. |
||
1255 | """ |
||
1256 | |||
1257 | def __init__(self, filename): |
||
1258 | self._filename = filename |
||
1259 | |||
1260 | def FullName(self): |
||
1261 | """Make Windows paths like Unix.""" |
||
1262 | return os.path.abspath(self._filename).replace('\\', '/') |
||
1263 | |||
1264 | def RepositoryName(self): |
||
1265 | r"""FullName after removing the local path to the repository. |
||
1266 | |||
1267 | If we have a real absolute path name here we can try to do something smart: |
||
1268 | detecting the root of the checkout and truncating /path/to/checkout from |
||
1269 | the name so that we get header guards that don't include things like |
||
1270 | "C:\Documents and Settings\..." or "/home/username/..." in them and thus |
||
1271 | people on different computers who have checked the source out to different |
||
1272 | locations won't see bogus errors. |
||
1273 | """ |
||
1274 | fullname = self.FullName() |
||
1275 | |||
1276 | if os.path.exists(fullname): |
||
1277 | project_dir = os.path.dirname(fullname) |
||
1278 | |||
1279 | # If the user specified a repository path, it exists, and the file is |
||
1280 | # contained in it, use the specified repository path |
||
1281 | if _repository: |
||
1282 | repo = FileInfo(_repository).FullName() |
||
1283 | root_dir = project_dir |
||
1284 | while os.path.exists(root_dir): |
||
1285 | # allow case insensitive compare on Windows |
||
1286 | if os.path.normcase(root_dir) == os.path.normcase(repo): |
||
1287 | return os.path.relpath(fullname, root_dir).replace('\\', '/') |
||
1288 | one_up_dir = os.path.dirname(root_dir) |
||
1289 | if one_up_dir == root_dir: |
||
1290 | break |
||
1291 | root_dir = one_up_dir |
||
1292 | |||
1293 | if os.path.exists(os.path.join(project_dir, ".svn")): |
||
1294 | # If there's a .svn file in the current directory, we recursively look |
||
1295 | # up the directory tree for the top of the SVN checkout |
||
1296 | root_dir = project_dir |
||
1297 | one_up_dir = os.path.dirname(root_dir) |
||
1298 | while os.path.exists(os.path.join(one_up_dir, ".svn")): |
||
1299 | root_dir = os.path.dirname(root_dir) |
||
1300 | one_up_dir = os.path.dirname(one_up_dir) |
||
1301 | |||
1302 | prefix = os.path.commonprefix([root_dir, project_dir]) |
||
1303 | return fullname[len(prefix) + 1:] |
||
1304 | |||
1305 | # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by |
||
1306 | # searching up from the current path. |
||
1307 | root_dir = current_dir = os.path.dirname(fullname) |
||
1308 | while current_dir != os.path.dirname(current_dir): |
||
1309 | if (os.path.exists(os.path.join(current_dir, ".git")) or |
||
1310 | os.path.exists(os.path.join(current_dir, ".hg")) or |
||
1311 | os.path.exists(os.path.join(current_dir, ".svn"))): |
||
1312 | root_dir = current_dir |
||
1313 | current_dir = os.path.dirname(current_dir) |
||
1314 | |||
1315 | if (os.path.exists(os.path.join(root_dir, ".git")) or |
||
1316 | os.path.exists(os.path.join(root_dir, ".hg")) or |
||
1317 | os.path.exists(os.path.join(root_dir, ".svn"))): |
||
1318 | prefix = os.path.commonprefix([root_dir, project_dir]) |
||
1319 | return fullname[len(prefix) + 1:] |
||
1320 | |||
1321 | # Don't know what to do; header guard warnings may be wrong... |
||
1322 | return fullname |
||
1323 | |||
1324 | def Split(self): |
||
1325 | """Splits the file into the directory, basename, and extension. |
||
1326 | |||
1327 | For 'chrome/browser/browser.cc', Split() would |
||
1328 | return ('chrome/browser', 'browser', '.cc') |
||
1329 | |||
1330 | Returns: |
||
1331 | A tuple of (directory, basename, extension). |
||
1332 | """ |
||
1333 | |||
1334 | googlename = self.RepositoryName() |
||
1335 | project, rest = os.path.split(googlename) |
||
1336 | return (project,) + os.path.splitext(rest) |
||
1337 | |||
1338 | def BaseName(self): |
||
1339 | """File base name - text after the final slash, before the final period.""" |
||
1340 | return self.Split()[1] |
||
1341 | |||
1342 | def Extension(self): |
||
1343 | """File extension - text following the final period, includes that period.""" |
||
1344 | return self.Split()[2] |
||
1345 | |||
1346 | def NoExtension(self): |
||
1347 | """File has no source file extension.""" |
||
1348 | return '/'.join(self.Split()[0:2]) |
||
1349 | |||
1350 | def IsSource(self): |
||
1351 | """File has a source file extension.""" |
||
1352 | return _IsSourceExtension(self.Extension()[1:]) |
||
1353 | |||
1354 | |||
1355 | def _ShouldPrintError(category, confidence, linenum): |
||
1356 | """If confidence >= verbose, category passes filter and is not suppressed.""" |
||
1357 | |||
1358 | # There are three ways we might decide not to print an error message: |
||
1359 | # a "NOLINT(category)" comment appears in the source, |
||
1360 | # the verbosity level isn't high enough, or the filters filter it out. |
||
1361 | if IsErrorSuppressedByNolint(category, linenum): |
||
1362 | return False |
||
1363 | |||
1364 | if confidence < _cpplint_state.verbose_level: |
||
1365 | return False |
||
1366 | |||
1367 | is_filtered = False |
||
1368 | for one_filter in _Filters(): |
||
1369 | if one_filter.startswith('-'): |
||
1370 | if category.startswith(one_filter[1:]): |
||
1371 | is_filtered = True |
||
1372 | elif one_filter.startswith('+'): |
||
1373 | if category.startswith(one_filter[1:]): |
||
1374 | is_filtered = False |
||
1375 | else: |
||
1376 | assert False # should have been checked for in SetFilter. |
||
1377 | if is_filtered: |
||
1378 | return False |
||
1379 | |||
1380 | return True |
||
1381 | |||
1382 | |||
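The filter loop above applies filter entries in order, so the last matching `+`/`-` prefix decides whether a category is suppressed. A minimal standalone sketch of that decision logic (the `is_filtered` helper and the sample filter list are illustrative, not part of cpplint):

```python
def is_filtered(category, filters):
  """Mirrors the loop in _ShouldPrintError: later matches override earlier ones."""
  filtered = False
  for one_filter in filters:
    if one_filter.startswith('-') and category.startswith(one_filter[1:]):
      filtered = True    # a '-' filter suppresses the category
    elif one_filter.startswith('+') and category.startswith(one_filter[1:]):
      filtered = False   # a later '+' filter re-enables it
  return filtered

# '-whitespace,+whitespace/braces' silences whitespace checks except the braces subcategory.
filters = ['-whitespace', '+whitespace/braces']
print(is_filtered('whitespace/tab', filters))     # True  -> not printed
print(is_filtered('whitespace/braces', filters))  # False -> printed
```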
1383 | View Code Duplication | def Error(filename, linenum, category, confidence, message): |
|
1384 | """Logs the fact we've found a lint error. |
||
1385 | |||
1386 | We log where the error was found, and also our confidence in the error, |
||
1387 | that is, how certain we are this is a legitimate style regression, and |
||
1388 | not a misidentification or a use that's sometimes justified. |
||
1389 | |||
1390 | False positives can be suppressed by the use of |
||
1391 | "cpplint(category)" comments on the offending line. These are |
||
1392 | parsed into _error_suppressions. |
||
1393 | |||
1394 | Args: |
||
1395 | filename: The name of the file containing the error. |
||
1396 | linenum: The number of the line containing the error. |
||
1397 | category: A string used to describe the "category" this bug |
||
1398 | falls under: "whitespace", say, or "runtime". Categories |
||
1399 | may have a hierarchy separated by slashes: "whitespace/indent". |
||
1400 | confidence: A number from 1-5 representing a confidence score for |
||
1401 | the error, with 5 meaning that we are certain of the problem, |
||
1402 | and 1 meaning that it could be a legitimate construct. |
||
1403 | message: The error message. |
||
1404 | """ |
||
1405 | if _ShouldPrintError(category, confidence, linenum): |
||
1406 | _cpplint_state.IncrementErrorCount(category) |
||
1407 | if _cpplint_state.output_format == 'vs7': |
||
1408 | _cpplint_state.PrintError('%s(%s): warning: %s [%s] [%d]\n' % ( |
||
1409 | filename, linenum, message, category, confidence)) |
||
1410 | elif _cpplint_state.output_format == 'eclipse': |
||
1411 | sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( |
||
1412 | filename, linenum, message, category, confidence)) |
||
1413 | elif _cpplint_state.output_format == 'junit': |
||
1414 | _cpplint_state.AddJUnitFailure(filename, linenum, message, category, |
||
1415 | confidence) |
||
1416 | else: |
||
1417 | final_message = '%s:%s: %s [%s] [%d]\n' % ( |
||
1418 | filename, linenum, message, category, confidence) |
||
1419 | sys.stderr.write(final_message) |
||
1420 | |||
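For reference, these are the format strings the branches above produce, shown with illustrative values; the default branch is the emacs-style `path:line: message [category] [confidence]` form:

```python
filename, linenum = 'src/foo.cc', 42            # illustrative values
message, category, confidence = 'Missing space before {', 'whitespace/braces', 5

# default output format
print('%s:%s: %s [%s] [%d]' % (filename, linenum, message, category, confidence))
# 'vs7' output format
print('%s(%s): warning: %s [%s] [%d]' % (filename, linenum, message, category, confidence))
# 'eclipse' output format
print('%s:%s: warning: %s [%s] [%d]' % (filename, linenum, message, category, confidence))
```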
1421 | # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. |
||
1422 | _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( |
||
1423 | r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') |
||
1424 | # Match a single C style comment on the same line. |
||
1425 | _RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/' |
||
1426 | # Matches multi-line C style comments. |
||
1427 | # This RE is a little bit more complicated than one might expect, because we |
||
1428 | # have to take care of removing surrounding spaces so we can handle comments inside |
||
1429 | # statements better. |
||
1430 | # The current rule is: We only clear spaces from both sides when we're at the |
||
1431 | # end of the line. Otherwise, we try to remove spaces from the right side, |
||
1432 | # if that doesn't work, we try the left side, but only if there's a non-word |
||
1433 | # character on the right. |
||
1434 | _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( |
||
1435 | r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' + |
||
1436 | _RE_PATTERN_C_COMMENTS + r'\s+|' + |
||
1437 | r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' + |
||
1438 | _RE_PATTERN_C_COMMENTS + r')') |
||
1439 | |||
1440 | |||
1441 | def IsCppString(line): |
||
1442 | """Does line terminate so, that the next symbol is in string constant. |
||
1443 | |||
1444 | This function does not consider single-line nor multi-line comments. |
||
1445 | |||
1446 | Args: |
||
1447 | line: a partial line of code, from position 0 to n. |
||
1448 | |||
1449 | Returns: |
||
1450 | True, if next character appended to 'line' is inside a |
||
1451 | string constant. |
||
1452 | """ |
||
1453 | |||
1454 | line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" |
||
1455 | return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 |
||
1456 | |||
1457 | |||
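A small usage sketch, assuming this file is importable as the `cpplint` module; the parity check returns True exactly when a line leaves a double-quoted string open:

```python
import cpplint

print(cpplint.IsCppString('printf("hello'))   # True  -- one unmatched quote
print(cpplint.IsCppString('x = "a" + "b";'))  # False -- quotes are balanced
print(cpplint.IsCppString(r'x = "a\"'))       # True  -- the escaped quote doesn't close it
```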
1458 | View Code Duplication | def CleanseRawStrings(raw_lines): |
|
1459 | """Removes C++11 raw strings from lines. |
||
1460 | |||
1461 | Before: |
||
1462 | static const char kData[] = R"( |
||
1463 | multi-line string |
||
1464 | )"; |
||
1465 | |||
1466 | After: |
||
1467 | static const char kData[] = "" |
||
1468 | (inner lines replaced by "") |
||
1469 | ""; |
||
1470 | |||
1471 | Args: |
||
1472 | raw_lines: list of raw lines. |
||
1473 | |||
1474 | Returns: |
||
1475 | list of lines with C++11 raw strings replaced by empty strings. |
||
1476 | """ |
||
1477 | |||
1478 | delimiter = None |
||
1479 | lines_without_raw_strings = [] |
||
1480 | for line in raw_lines: |
||
1481 | if delimiter: |
||
1482 | # Inside a raw string, look for the end |
||
1483 | end = line.find(delimiter) |
||
1484 | if end >= 0: |
||
1485 | # Found the end of the string, match leading space for this |
||
1486 | # line and resume copying the original lines, and also insert |
||
1487 | # a "" on the last line. |
||
1488 | leading_space = Match(r'^(\s*)\S', line) |
||
1489 | line = leading_space.group(1) + '""' + line[end + len(delimiter):] |
||
1490 | delimiter = None |
||
1491 | else: |
||
1492 | # Haven't found the end yet, append a blank line. |
||
1493 | line = '""' |
||
1494 | |||
1495 | # Look for beginnings of raw strings, and replace them with |
||
1496 | # empty strings. This is done in a loop to handle multiple raw |
||
1497 | # strings on the same line. |
||
1498 | while delimiter is None: |
||
1499 | # Look for beginning of a raw string. |
||
1500 | # See 2.14.15 [lex.string] for syntax. |
||
1501 | # |
||
1502 | # Once we have matched a raw string, we check the prefix of the |
||
1503 | # line to make sure that the line is not part of a single line |
||
1504 | # comment. It's done this way because we remove raw strings |
||
1505 | # before removing comments as opposed to removing comments |
||
1506 | # before removing raw strings. This is because there are some |
||
1507 | # cpplint checks that require the comments to be preserved, but |
||
1508 | # we don't want to check comments that are inside raw strings. |
||
1509 | matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) |
||
1510 | if (matched and |
||
1511 | not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//', |
||
1512 | matched.group(1))): |
||
1513 | delimiter = ')' + matched.group(2) + '"' |
||
1514 | |||
1515 | end = matched.group(3).find(delimiter) |
||
1516 | if end >= 0: |
||
1517 | # Raw string ended on same line |
||
1518 | line = (matched.group(1) + '""' + |
||
1519 | matched.group(3)[end + len(delimiter):]) |
||
1520 | delimiter = None |
||
1521 | else: |
||
1522 | # Start of a multi-line raw string |
||
1523 | line = matched.group(1) + '""' |
||
1524 | else: |
||
1525 | break |
||
1526 | |||
1527 | lines_without_raw_strings.append(line) |
||
1528 | |||
1529 | # TODO(unknown): if delimiter is not None here, we might want to |
||
1530 | # emit a warning for unterminated string. |
||
1531 | return lines_without_raw_strings |
||
1532 | |||
1533 | |||
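A hedged usage sketch (assuming `cpplint` is importable as a module) showing how a raw string collapses to `""` placeholders so later checks are not confused by its contents:

```python
import cpplint

raw = ['static const char kData[] = R"(',
       '  multi-line payload',
       ')";']
print(cpplint.CleanseRawStrings(raw))
# ['static const char kData[] = ""', '""', '"";']
```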
1534 | def FindNextMultiLineCommentStart(lines, lineix): |
||
1535 | """Find the beginning marker for a multiline comment.""" |
||
1536 | while lineix < len(lines): |
||
1537 | if lines[lineix].strip().startswith('/*'): |
||
1538 | # Only return this marker if the comment goes beyond this line |
||
1539 | if lines[lineix].strip().find('*/', 2) < 0: |
||
1540 | return lineix |
||
1541 | lineix += 1 |
||
1542 | return len(lines) |
||
1543 | |||
1544 | |||
1545 | def FindNextMultiLineCommentEnd(lines, lineix): |
||
1546 | """We are inside a comment, find the end marker.""" |
||
1547 | while lineix < len(lines): |
||
1548 | if lines[lineix].strip().endswith('*/'): |
||
1549 | return lineix |
||
1550 | lineix += 1 |
||
1551 | return len(lines) |
||
1552 | |||
1553 | |||
1554 | def RemoveMultiLineCommentsFromRange(lines, begin, end): |
||
1555 | """Clears a range of lines for multi-line comments.""" |
||
1556 | # Having /**/ dummy comments makes the lines non-empty, so we will not get |
||
1557 | # unnecessary blank line warnings later in the code. |
||
1558 | for i in range(begin, end): |
||
1559 | lines[i] = '/**/' |
||
1560 | |||
1561 | |||
1562 | View Code Duplication | def RemoveMultiLineComments(filename, lines, error): |
|
1563 | """Removes multiline (c-style) comments from lines.""" |
||
1564 | lineix = 0 |
||
1565 | while lineix < len(lines): |
||
1566 | lineix_begin = FindNextMultiLineCommentStart(lines, lineix) |
||
1567 | if lineix_begin >= len(lines): |
||
1568 | return |
||
1569 | lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) |
||
1570 | if lineix_end >= len(lines): |
||
1571 | error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, |
||
1572 | 'Could not find end of multi-line comment') |
||
1573 | return |
||
1574 | RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) |
||
1575 | lineix = lineix_end + 1 |
||
1576 | |||
1577 | |||
1578 | View Code Duplication | def CleanseComments(line): |
|
1579 | """Removes //-comments and single-line C-style /* */ comments. |
||
1580 | |||
1581 | Args: |
||
1582 | line: A line of C++ source. |
||
1583 | |||
1584 | Returns: |
||
1585 | The line with single-line comments removed. |
||
1586 | """ |
||
1587 | commentpos = line.find('//') |
||
1588 | if commentpos != -1 and not IsCppString(line[:commentpos]): |
||
1589 | line = line[:commentpos].rstrip() |
||
1590 | # get rid of /* ... */ |
||
1591 | return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) |
||
1592 | |||
1593 | |||
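Assuming `cpplint` is importable, the two cases handled here look like this: a trailing `//` comment is stripped unless the `//` is inside a string, and a single-line `/* */` comment is removed by the regex above:

```python
import cpplint

print(cpplint.CleanseComments('int x = 1;  // trailing note'))  # 'int x = 1;'
print(cpplint.CleanseComments('Foo(a /* arg */, b);'))          # 'Foo(a, b);'
print(cpplint.CleanseComments('s = "http://example.com";'))     # unchanged: // is inside a string
```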
1594 | View Code Duplication | class CleansedLines(object): |
|
1595 | """Holds 4 copies of all lines with different preprocessing applied to them. |
||
1596 | |||
1597 | 1) elided member contains lines without strings and comments. |
||
1598 | 2) lines member contains lines without comments. |
||
1599 | 3) raw_lines member contains all the lines without processing. |
||
1600 | 4) lines_without_raw_strings member is the same as raw_lines, but with C++11 raw |
||
1601 | strings removed. |
||
1602 | All these members are of <type 'list'>, and of the same length. |
||
1603 | """ |
||
1604 | |||
1605 | def __init__(self, lines): |
||
1606 | self.elided = [] |
||
1607 | self.lines = [] |
||
1608 | self.raw_lines = lines |
||
1609 | self.num_lines = len(lines) |
||
1610 | self.lines_without_raw_strings = CleanseRawStrings(lines) |
||
1611 | for linenum in range(len(self.lines_without_raw_strings)): |
||
1612 | self.lines.append(CleanseComments( |
||
1613 | self.lines_without_raw_strings[linenum])) |
||
1614 | elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) |
||
1615 | self.elided.append(CleanseComments(elided)) |
||
1616 | |||
1617 | def NumLines(self): |
||
1618 | """Returns the number of lines represented.""" |
||
1619 | return self.num_lines |
||
1620 | |||
1621 | @staticmethod |
||
1622 | def _CollapseStrings(elided): |
||
1623 | """Collapses strings and chars on a line to simple "" or '' blocks. |
||
1624 | |||
1625 | We nix strings first so we're not fooled by text like '"http://"' |
||
1626 | |||
1627 | Args: |
||
1628 | elided: The line being processed. |
||
1629 | |||
1630 | Returns: |
||
1631 | The line with collapsed strings. |
||
1632 | """ |
||
1633 | if _RE_PATTERN_INCLUDE.match(elided): |
||
1634 | return elided |
||
1635 | |||
1636 | # Remove escaped characters first to make quote/single quote collapsing |
||
1637 | # basic. Things that look like escaped characters shouldn't occur |
||
1638 | # outside of strings and chars. |
||
1639 | elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) |
||
1640 | |||
1641 | # Replace quoted strings and digit separators. Both single quotes |
||
1642 | # and double quotes are processed in the same loop, otherwise |
||
1643 | # nested quotes wouldn't work. |
||
1644 | collapsed = '' |
||
1645 | while True: |
||
1646 | # Find the first quote character |
||
1647 | match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) |
||
1648 | if not match: |
||
1649 | collapsed += elided |
||
1650 | break |
||
1651 | head, quote, tail = match.groups() |
||
1652 | |||
1653 | if quote == '"': |
||
1654 | # Collapse double quoted strings |
||
1655 | second_quote = tail.find('"') |
||
1656 | if second_quote >= 0: |
||
1657 | collapsed += head + '""' |
||
1658 | elided = tail[second_quote + 1:] |
||
1659 | else: |
||
1660 | # Unmatched double quote, don't bother processing the rest |
||
1661 | # of the line since this is probably a multiline string. |
||
1662 | collapsed += elided |
||
1663 | break |
||
1664 | else: |
||
1665 | # Found single quote, check nearby text to eliminate digit separators. |
||
1666 | # |
||
1667 | # There is no special handling for floating point here, because |
||
1668 | # the integer/fractional/exponent parts would all be parsed |
||
1669 | # correctly as long as there are digits on both sides of the |
||
1670 | # separator. So we are fine as long as we don't see something |
||
1671 | # like "0.'3" (gcc 4.9.0 will not allow this literal). |
||
1672 | if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): |
||
1673 | match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) |
||
1674 | collapsed += head + match_literal.group(1).replace("'", '') |
||
1675 | elided = match_literal.group(2) |
||
1676 | else: |
||
1677 | second_quote = tail.find('\'') |
||
1678 | if second_quote >= 0: |
||
1679 | collapsed += head + "''" |
||
1680 | elided = tail[second_quote + 1:] |
||
1681 | else: |
||
1682 | # Unmatched single quote |
||
1683 | collapsed += elided |
||
1684 | break |
||
1685 | |||
1686 | return collapsed |
||
1687 | |||
1688 | |||
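To make the four views concrete, here is a small sketch (again assuming the module is importable); note how `elided` collapses both the string literal and the digit separators, while `lines` only drops the comment:

```python
import cpplint

clean = cpplint.CleansedLines(['const char* s = "hi";  // greeting',
                               "int n = 1'000'000;"])
print(clean.raw_lines[0])  # const char* s = "hi";  // greeting
print(clean.lines[0])      # const char* s = "hi";
print(clean.elided[0])     # const char* s = "";
print(clean.elided[1])     # int n = 1000000;
```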
1689 | View Code Duplication | def FindEndOfExpressionInLine(line, startpos, stack): |
|
1690 | """Find the position just after the end of current parenthesized expression. |
||
1691 | |||
1692 | Args: |
||
1693 | line: a CleansedLines line. |
||
1694 | startpos: start searching at this position. |
||
1695 | stack: nesting stack at startpos. |
||
1696 | |||
1697 | Returns: |
||
1698 | On finding matching end: (index just after matching end, None) |
||
1699 | On finding an unclosed expression: (-1, None) |
||
1700 | Otherwise: (-1, new stack at end of this line) |
||
1701 | """ |
||
1702 | for i in xrange(startpos, len(line)): |
||
1703 | char = line[i] |
||
1704 | if char in '([{': |
||
1705 | # Found start of parenthesized expression, push to expression stack |
||
1706 | stack.append(char) |
||
1707 | elif char == '<': |
||
1708 | # Found potential start of template argument list |
||
1709 | if i > 0 and line[i - 1] == '<': |
||
1710 | # Left shift operator |
||
1711 | if stack and stack[-1] == '<': |
||
1712 | stack.pop() |
||
1713 | if not stack: |
||
1714 | return (-1, None) |
||
1715 | elif i > 0 and Search(r'\boperator\s*$', line[0:i]): |
||
1716 | # operator<, don't add to stack |
||
1717 | continue |
||
1718 | else: |
||
1719 | # Tentative start of template argument list |
||
1720 | stack.append('<') |
||
1721 | elif char in ')]}': |
||
1722 | # Found end of parenthesized expression. |
||
1723 | # |
||
1724 | # If we are currently expecting a matching '>', the pending '<' |
||
1725 | # must have been an operator. Remove them from expression stack. |
||
1726 | while stack and stack[-1] == '<': |
||
1727 | stack.pop() |
||
1728 | if not stack: |
||
1729 | return (-1, None) |
||
1730 | if ((stack[-1] == '(' and char == ')') or |
||
1731 | (stack[-1] == '[' and char == ']') or |
||
1732 | (stack[-1] == '{' and char == '}')): |
||
1733 | stack.pop() |
||
1734 | if not stack: |
||
1735 | return (i + 1, None) |
||
1736 | else: |
||
1737 | # Mismatched parentheses |
||
1738 | return (-1, None) |
||
1739 | elif char == '>': |
||
1740 | # Found potential end of template argument list. |
||
1741 | |||
1742 | # Ignore "->" and operator functions |
||
1743 | if (i > 0 and |
||
1744 | (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))): |
||
1745 | continue |
||
1746 | |||
1747 | # Pop the stack if there is a matching '<'. Otherwise, ignore |
||
1748 | # this '>' since it must be an operator. |
||
1749 | if stack: |
||
1750 | if stack[-1] == '<': |
||
1751 | stack.pop() |
||
1752 | if not stack: |
||
1753 | return (i + 1, None) |
||
1754 | elif char == ';': |
||
1755 | # Found something that looks like the end of a statement. If we are currently |
||
1756 | # expecting a '>', the matching '<' must have been an operator, since |
||
1757 | # a template argument list should not contain statements. |
||
1758 | while stack and stack[-1] == '<': |
||
1759 | stack.pop() |
||
1760 | if not stack: |
||
1761 | return (-1, None) |
||
1762 | |||
1763 | # Did not find end of expression or unbalanced parentheses on this line |
||
1764 | return (-1, stack) |
||
1765 | |||
1766 | |||
1767 | View Code Duplication | def CloseExpression(clean_lines, linenum, pos): |
|
1768 | """If input points to ( or { or [ or <, finds the position that closes it. |
||
1769 | |||
1770 | If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the |
||
1771 | linenum/pos that correspond to the closing of the expression. |
||
1772 | |||
1773 | TODO(unknown): cpplint spends a fair bit of time matching parentheses. |
||
1774 | Ideally we would want to index all opening and closing parentheses once |
||
1775 | and have CloseExpression be just a simple lookup, but due to preprocessor |
||
1776 | tricks, this is not so easy. |
||
1777 | |||
1778 | Args: |
||
1779 | clean_lines: A CleansedLines instance containing the file. |
||
1780 | linenum: The number of the line to check. |
||
1781 | pos: A position on the line. |
||
1782 | |||
1783 | Returns: |
||
1784 | A tuple (line, linenum, pos) pointer *past* the closing brace, or |
||
1785 | (line, len(lines), -1) if we never find a close. Note we ignore |
||
1786 | strings and comments when matching; and the line we return is the |
||
1787 | 'cleansed' line at linenum. |
||
1788 | """ |
||
1789 | |||
1790 | line = clean_lines.elided[linenum] |
||
1791 | if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]): |
||
1792 | return (line, clean_lines.NumLines(), -1) |
||
1793 | |||
1794 | # Check first line |
||
1795 | (end_pos, stack) = FindEndOfExpressionInLine(line, pos, []) |
||
1796 | if end_pos > -1: |
||
1797 | return (line, linenum, end_pos) |
||
1798 | |||
1799 | # Continue scanning forward |
||
1800 | while stack and linenum < clean_lines.NumLines() - 1: |
||
1801 | linenum += 1 |
||
1802 | line = clean_lines.elided[linenum] |
||
1803 | (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack) |
||
1804 | if end_pos > -1: |
||
1805 | return (line, linenum, end_pos) |
||
1806 | |||
1807 | # Did not find end of expression before end of file, give up |
||
1808 | return (line, clean_lines.NumLines(), -1) |
||
1809 | |||
1810 | |||
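A usage sketch of the two functions above, assuming the module is importable; the returned position is one past the closing character, which may be on a later line:

```python
import cpplint

clean = cpplint.CleansedLines(['Foo(bar,', '    baz);'])
line, linenum, pos = cpplint.CloseExpression(clean, 0, 3)  # '(' is at column 3
print(linenum, pos)  # 1 8 -- just past the ')' on the second line
```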
1811 | View Code Duplication | def FindStartOfExpressionInLine(line, endpos, stack): |
|
1812 | """Find position at the matching start of current expression. |
||
1813 | |||
1814 | This is almost the reverse of FindEndOfExpressionInLine, but note |
||
1815 | that the input position and returned position differs by 1. |
||
1816 | |||
1817 | Args: |
||
1818 | line: a CleansedLines line. |
||
1819 | endpos: start searching at this position. |
||
1820 | stack: nesting stack at endpos. |
||
1821 | |||
1822 | Returns: |
||
1823 | On finding matching start: (index at matching start, None) |
||
1824 | On finding an unclosed expression: (-1, None) |
||
1825 | Otherwise: (-1, new stack at beginning of this line) |
||
1826 | """ |
||
1827 | i = endpos |
||
1828 | while i >= 0: |
||
1829 | char = line[i] |
||
1830 | if char in ')]}': |
||
1831 | # Found end of expression, push to expression stack |
||
1832 | stack.append(char) |
||
1833 | elif char == '>': |
||
1834 | # Found potential end of template argument list. |
||
1835 | # |
||
1836 | # Ignore it if it's a "->" or ">=" or "operator>" |
||
1837 | if (i > 0 and |
||
1838 | (line[i - 1] == '-' or |
||
1839 | Match(r'\s>=\s', line[i - 1:]) or |
||
1840 | Search(r'\boperator\s*$', line[0:i]))): |
||
1841 | i -= 1 |
||
1842 | else: |
||
1843 | stack.append('>') |
||
1844 | elif char == '<': |
||
1845 | # Found potential start of template argument list |
||
1846 | if i > 0 and line[i - 1] == '<': |
||
1847 | # Left shift operator |
||
1848 | i -= 1 |
||
1849 | else: |
||
1850 | # If there is a matching '>', we can pop the expression stack. |
||
1851 | # Otherwise, ignore this '<' since it must be an operator. |
||
1852 | if stack and stack[-1] == '>': |
||
1853 | stack.pop() |
||
1854 | if not stack: |
||
1855 | return (i, None) |
||
1856 | elif char in '([{': |
||
1857 | # Found start of expression. |
||
1858 | # |
||
1859 | # If there are any unmatched '>' on the stack, they must be |
||
1860 | # operators. Remove those. |
||
1861 | while stack and stack[-1] == '>': |
||
1862 | stack.pop() |
||
1863 | if not stack: |
||
1864 | return (-1, None) |
||
1865 | if ((char == '(' and stack[-1] == ')') or |
||
1866 | (char == '[' and stack[-1] == ']') or |
||
1867 | (char == '{' and stack[-1] == '}')): |
||
1868 | stack.pop() |
||
1869 | if not stack: |
||
1870 | return (i, None) |
||
1871 | else: |
||
1872 | # Mismatched parentheses |
||
1873 | return (-1, None) |
||
1874 | elif char == ';': |
||
1875 | # Found something that looks like the end of a statement. If we are currently |
||
1876 | # expecting a '<', the matching '>' must have been an operator, since |
||
1877 | # a template argument list should not contain statements. |
||
1878 | while stack and stack[-1] == '>': |
||
1879 | stack.pop() |
||
1880 | if not stack: |
||
1881 | return (-1, None) |
||
1882 | |||
1883 | i -= 1 |
||
1884 | |||
1885 | return (-1, stack) |
||
1886 | |||
1887 | |||
1888 | View Code Duplication | def ReverseCloseExpression(clean_lines, linenum, pos): |
|
1889 | """If input points to ) or } or ] or >, finds the position that opens it. |
||
1890 | |||
1891 | If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the |
||
1892 | linenum/pos that correspond to the opening of the expression. |
||
1893 | |||
1894 | Args: |
||
1895 | clean_lines: A CleansedLines instance containing the file. |
||
1896 | linenum: The number of the line to check. |
||
1897 | pos: A position on the line. |
||
1898 | |||
1899 | Returns: |
||
1900 | A tuple (line, linenum, pos) pointer *at* the opening brace, or |
||
1901 | (line, 0, -1) if we never find the matching opening brace. Note |
||
1902 | we ignore strings and comments when matching; and the line we |
||
1903 | return is the 'cleansed' line at linenum. |
||
1904 | """ |
||
1905 | line = clean_lines.elided[linenum] |
||
1906 | if line[pos] not in ')}]>': |
||
1907 | return (line, 0, -1) |
||
1908 | |||
1909 | # Check last line |
||
1910 | (start_pos, stack) = FindStartOfExpressionInLine(line, pos, []) |
||
1911 | if start_pos > -1: |
||
1912 | return (line, linenum, start_pos) |
||
1913 | |||
1914 | # Continue scanning backward |
||
1915 | while stack and linenum > 0: |
||
1916 | linenum -= 1 |
||
1917 | line = clean_lines.elided[linenum] |
||
1918 | (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack) |
||
1919 | if start_pos > -1: |
||
1920 | return (line, linenum, start_pos) |
||
1921 | |||
1922 | # Did not find start of expression before beginning of file, give up |
||
1923 | return (line, 0, -1) |
||
1924 | |||
1925 | |||
1926 | View Code Duplication | def CheckForCopyright(filename, lines, error): |
|
1927 | """Logs an error if no Copyright message appears at the top of the file.""" |
||
1928 | |||
1929 | # We'll say it should occur by line 10. Don't forget there's a |
||
1930 | # dummy line at the front. |
||
1931 | for line in range(1, min(len(lines), 11)): |
||
1932 | if re.search(r'Copyright', lines[line], re.I): break |
||
1933 | else: # means no copyright line was found |
||
1934 | error(filename, 0, 'legal/copyright', 5, |
||
1935 | 'No copyright message found. ' |
||
1936 | 'You should have a line: "Copyright [year] <Copyright Owner>"') |
||
1937 | |||
1938 | |||
1939 | def GetIndentLevel(line): |
||
1940 | """Return the number of leading spaces in line. |
||
1941 | |||
1942 | Args: |
||
1943 | line: A string to check. |
||
1944 | |||
1945 | Returns: |
||
1946 | An integer count of leading spaces, possibly zero. |
||
1947 | """ |
||
1948 | indent = Match(r'^( *)\S', line) |
||
1949 | if indent: |
||
1950 | return len(indent.group(1)) |
||
1951 | else: |
||
1952 | return 0 |
||
1953 | |||
1954 | |||
1955 | View Code Duplication | def GetHeaderGuardCPPVariable(filename): |
|
1956 | """Returns the CPP variable that should be used as a header guard. |
||
1957 | |||
1958 | Args: |
||
1959 | filename: The name of a C++ header file. |
||
1960 | |||
1961 | Returns: |
||
1962 | The CPP variable that should be used as a header guard in the |
||
1963 | named file. |
||
1964 | |||
1965 | """ |
||
1966 | |||
1967 | # Restores the original filename in case cpplint is invoked from Emacs's |
||
1968 | # flymake. |
||
1969 | filename = re.sub(r'_flymake\.h$', '.h', filename) |
||
1970 | filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) |
||
1971 | # Replace 'c++' with 'cpp'. |
||
1972 | filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') |
||
1973 | |||
1974 | fileinfo = FileInfo(filename) |
||
1975 | file_path_from_root = fileinfo.RepositoryName() |
||
1976 | if _root: |
||
1977 | suffix = os.sep |
||
1978 | # On Windows, using the directory separator will leave us with a |
||
1979 | # "bogus escape error" unless we properly escape regex. |
||
1980 | if suffix == '\\': |
||
1981 | suffix += '\\' |
||
1982 | file_path_from_root = re.sub('^' + _root + suffix, '', file_path_from_root) |
||
1983 | return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_' |
||
1984 | |||
1985 | |||
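The final transformation above is easy to reproduce in isolation. This sketch applies it to an already repository-relative path (what `RepositoryName()` would return for a file in a hypothetical `chrome/` checkout); the real function's result also depends on the `--root` setting:

```python
import re

path_from_root = 'chrome/browser/browser.h'   # hypothetical repository-relative path
guard = re.sub(r'[^a-zA-Z0-9]', '_', path_from_root).upper() + '_'
print(guard)  # CHROME_BROWSER_BROWSER_H_
```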
1986 | View Code Duplication | def CheckForHeaderGuard(filename, clean_lines, error): |
|
1987 | """Checks that the file contains a header guard. |
||
1988 | |||
1989 | Logs an error if no #ifndef header guard is present. For other |
||
1990 | headers, checks that the full pathname is used. |
||
1991 | |||
1992 | Args: |
||
1993 | filename: The name of the C++ header file. |
||
1994 | clean_lines: A CleansedLines instance containing the file. |
||
1995 | error: The function to call with any errors found. |
||
1996 | """ |
||
1997 | |||
1998 | # Don't check for header guards if there are error suppression |
||
1999 | # comments somewhere in this file. |
||
2000 | # |
||
2001 | # Because this is silencing a warning for a nonexistent line, we |
||
2002 | # only support the very specific NOLINT(build/header_guard) syntax, |
||
2003 | # and not the general NOLINT or NOLINT(*) syntax. |
||
2004 | raw_lines = clean_lines.lines_without_raw_strings |
||
2005 | for i in raw_lines: |
||
2006 | if Search(r'//\s*NOLINT\(build/header_guard\)', i): |
||
2007 | return |
||
2008 | |||
2009 | # Allow pragma once instead of header guards |
||
2010 | for i in raw_lines: |
||
2011 | if Search(r'^\s*#pragma\s+once', i): |
||
2012 | return |
||
2013 | |||
2014 | cppvar = GetHeaderGuardCPPVariable(filename) |
||
2015 | |||
2016 | ifndef = '' |
||
2017 | ifndef_linenum = 0 |
||
2018 | define = '' |
||
2019 | endif = '' |
||
2020 | endif_linenum = 0 |
||
2021 | for linenum, line in enumerate(raw_lines): |
||
2022 | linesplit = line.split() |
||
2023 | if len(linesplit) >= 2: |
||
2024 | # find the first occurrence of #ifndef and #define, save arg |
||
2025 | if not ifndef and linesplit[0] == '#ifndef': |
||
2026 | # set ifndef to the header guard presented on the #ifndef line. |
||
2027 | ifndef = linesplit[1] |
||
2028 | ifndef_linenum = linenum |
||
2029 | if not define and linesplit[0] == '#define': |
||
2030 | define = linesplit[1] |
||
2031 | # find the last occurrence of #endif, save entire line |
||
2032 | if line.startswith('#endif'): |
||
2033 | endif = line |
||
2034 | endif_linenum = linenum |
||
2035 | |||
2036 | if not ifndef or not define or ifndef != define: |
||
2037 | error(filename, 0, 'build/header_guard', 5, |
||
2038 | 'No #ifndef header guard found, suggested CPP variable is: %s' % |
||
2039 | cppvar) |
||
2040 | return |
||
2041 | |||
2042 | # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ |
||
2043 | # for backward compatibility. |
||
2044 | if ifndef != cppvar: |
||
2045 | error_level = 0 |
||
2046 | if ifndef != cppvar + '_': |
||
2047 | error_level = 5 |
||
2048 | |||
2049 | ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum, |
||
2050 | error) |
||
2051 | error(filename, ifndef_linenum, 'build/header_guard', error_level, |
||
2052 | '#ifndef header guard has wrong style, please use: %s' % cppvar) |
||
2053 | |||
2054 | # Check for "//" comments on endif line. |
||
2055 | ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum, |
||
2056 | error) |
||
2057 | match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif) |
||
2058 | if match: |
||
2059 | if match.group(1) == '_': |
||
2060 | # Issue low severity warning for deprecated double trailing underscore |
||
2061 | error(filename, endif_linenum, 'build/header_guard', 0, |
||
2062 | '#endif line should be "#endif // %s"' % cppvar) |
||
2063 | return |
||
2064 | |||
2065 | # Didn't find the corresponding "//" comment. If this file does not |
||
2066 | # contain any "//" comments at all, it could be that the compiler |
||
2067 | # only wants "/**/" comments, look for those instead. |
||
2068 | no_single_line_comments = True |
||
2069 | for i in xrange(1, len(raw_lines) - 1): |
||
2070 | line = raw_lines[i] |
||
2071 | if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line): |
||
2072 | no_single_line_comments = False |
||
2073 | break |
||
2074 | |||
2075 | if no_single_line_comments: |
||
2076 | match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif) |
||
2077 | if match: |
||
2078 | if match.group(1) == '_': |
||
2079 | # Low severity warning for double trailing underscore |
||
2080 | error(filename, endif_linenum, 'build/header_guard', 0, |
||
2081 | '#endif line should be "#endif /* %s */"' % cppvar) |
||
2082 | return |
||
2083 | |||
2084 | # Didn't find anything |
||
2085 | error(filename, endif_linenum, 'build/header_guard', 5, |
||
2086 | '#endif line should be "#endif // %s"' % cppvar) |
||
2087 | |||
2088 | |||
2089 | View Code Duplication | def CheckHeaderFileIncluded(filename, include_state, error): |
|
2090 | """Logs an error if a source file does not include its header.""" |
||
2091 | |||
2092 | # Do not check test files |
||
2093 | fileinfo = FileInfo(filename) |
||
2094 | if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()): |
||
2095 | return |
||
2096 | |||
2097 | for ext in GetHeaderExtensions(): |
||
2098 | basefilename = filename[0:len(filename) - len(fileinfo.Extension())] |
||
2099 | headerfile = basefilename + '.' + ext |
||
2100 | if not os.path.exists(headerfile): |
||
2101 | continue |
||
2102 | headername = FileInfo(headerfile).RepositoryName() |
||
2103 | first_include = None |
||
2104 | for section_list in include_state.include_list: |
||
2105 | for f in section_list: |
||
2106 | if headername in f[0] or f[0] in headername: |
||
2107 | return |
||
2108 | if not first_include: |
||
2109 | first_include = f[1] |
||
2110 | |||
2111 | error(filename, first_include, 'build/include', 5, |
||
2112 | '%s should include its header file %s' % (fileinfo.RepositoryName(), |
||
2113 | headername)) |
||
2114 | |||
2115 | |||
2116 | View Code Duplication | def CheckForBadCharacters(filename, lines, error): |
|
2117 | """Logs an error for each line containing bad characters. |
||
2118 | |||
2119 | Two kinds of bad characters: |
||
2120 | |||
2121 | 1. Unicode replacement characters: These indicate that either the file |
||
2122 | contained invalid UTF-8 (likely) or Unicode replacement characters (which |
||
2123 | it shouldn't). Note that it's possible for this to throw off line |
||
2124 | numbering if the invalid UTF-8 occurred adjacent to a newline. |
||
2125 | |||
2126 | 2. NUL bytes. These are problematic for some tools. |
||
2127 | |||
2128 | Args: |
||
2129 | filename: The name of the current file. |
||
2130 | lines: An array of strings, each representing a line of the file. |
||
2131 | error: The function to call with any errors found. |
||
2132 | """ |
||
2133 | for linenum, line in enumerate(lines): |
||
2134 | if unicode_escape_decode('\ufffd') in line: |
||
2135 | error(filename, linenum, 'readability/utf8', 5, |
||
2136 | 'Line contains invalid UTF-8 (or Unicode replacement character).') |
||
2137 | if '\0' in line: |
||
2138 | error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') |
||
2139 | |||
2140 | |||
2141 | View Code Duplication | def CheckForNewlineAtEOF(filename, lines, error): |
|
2142 | """Logs an error if there is no newline char at the end of the file. |
||
2143 | |||
2144 | Args: |
||
2145 | filename: The name of the current file. |
||
2146 | lines: An array of strings, each representing a line of the file. |
||
2147 | error: The function to call with any errors found. |
||
2148 | """ |
||
2149 | |||
2150 | # The array lines() was created by adding two newlines to the |
||
2151 | # original file (go figure), then splitting on \n. |
||
2152 | # To verify that the file ends in \n, we just have to make sure the |
||
2153 | # last-but-two element of lines() exists and is empty. |
||
2154 | if len(lines) < 3 or lines[-2]: |
||
2155 | error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, |
||
2156 | 'Could not find a newline character at the end of the file.') |
||
2157 | |||
2158 | |||
2159 | View Code Duplication | def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): |
|
2160 | """Logs an error if we see /* ... */ or "..." that extend past one line. |
||
2161 | |||
2162 | /* ... */ comments are legit inside macros, for one line. |
||
2163 | Otherwise, we prefer // comments, so it's ok to warn about the |
||
2164 | other. Likewise, it's ok for strings to extend across multiple |
||
2165 | lines, as long as a line continuation character (backslash) |
||
2166 | terminates each line. Although not currently prohibited by the C++ |
||
2167 | style guide, it's ugly and unnecessary. We don't do well with either |
||
2168 | in this lint program, so we warn about both. |
||
2169 | |||
2170 | Args: |
||
2171 | filename: The name of the current file. |
||
2172 | clean_lines: A CleansedLines instance containing the file. |
||
2173 | linenum: The number of the line to check. |
||
2174 | error: The function to call with any errors found. |
||
2175 | """ |
||
2176 | line = clean_lines.elided[linenum] |
||
2177 | |||
2178 | # Remove all \\ (escaped backslashes) from the line. They are OK, and the |
||
2179 | # second (escaped) slash may trigger later \" detection erroneously. |
||
2180 | line = line.replace('\\\\', '') |
||
2181 | |||
2182 | if line.count('/*') > line.count('*/'): |
||
2183 | error(filename, linenum, 'readability/multiline_comment', 5, |
||
2184 | 'Complex multi-line /*...*/-style comment found. ' |
||
2185 | 'Lint may give bogus warnings. ' |
||
2186 | 'Consider replacing these with //-style comments, ' |
||
2187 | 'with #if 0...#endif, ' |
||
2188 | 'or with more clearly structured multi-line comments.') |
||
2189 | |||
2190 | if (line.count('"') - line.count('\\"')) % 2: |
||
2191 | error(filename, linenum, 'readability/multiline_string', 5, |
||
2192 | 'Multi-line string ("...") found. This lint script doesn\'t ' |
||
2193 | 'do well with such strings, and may give bogus warnings. ' |
||
2194 | 'Use C++11 raw strings or concatenation instead.') |
||
2195 | |||
2196 | |||
2197 | # (non-threadsafe name, thread-safe alternative, validation pattern) |
||
2198 | # |
||
2199 | # The validation pattern is used to eliminate false positives such as: |
||
2200 | # _rand(); // false positive due to substring match. |
||
2201 | # ->rand(); // some member function rand(). |
||
2202 | # ACMRandom rand(seed); // some variable named rand. |
||
2203 | # ISAACRandom rand(); // another variable named rand. |
||
2204 | # |
||
2205 | # Basically we require the return value of these functions to be used |
||
2206 | # in some expression context on the same line by matching on some |
||
2207 | # operator before the function name. This eliminates constructors and |
||
2208 | # member function calls. |
||
2209 | _UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)' |
||
2210 | _THREADING_LIST = ( |
||
2211 | ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'), |
||
2212 | ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'), |
||
2213 | ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'), |
||
2214 | ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'), |
||
2215 | ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'), |
||
2216 | ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'), |
||
2217 | ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'), |
||
2218 | ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'), |
||
2219 | ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'), |
||
2220 | ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'), |
||
2221 | ('strtok(', 'strtok_r(', |
||
2222 | _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'), |
||
2223 | ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'), |
||
2224 | ) |
||
2225 | |||
2226 | |||
2227 | View Code Duplication | def CheckPosixThreading(filename, clean_lines, linenum, error): |
|
2228 | """Checks for calls to thread-unsafe functions. |
||
2229 | |||
2230 | Much code was originally written without consideration for |
||
2231 | multi-threading. Engineers also tend to rely on their old experience; |
||
2232 | they learned POSIX before the threading extensions were added. These |
||
2233 | tests guide the engineers to use thread-safe functions (when using |
||
2234 | posix directly). |
||
2235 | |||
2236 | Args: |
||
2237 | filename: The name of the current file. |
||
2238 | clean_lines: A CleansedLines instance containing the file. |
||
2239 | linenum: The number of the line to check. |
||
2240 | error: The function to call with any errors found. |
||
2241 | """ |
||
2242 | line = clean_lines.elided[linenum] |
||
2243 | for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST: |
||
2244 | # Additional pattern matching check to confirm that this is the |
||
2245 | # function we are looking for |
||
2246 | if Search(pattern, line): |
||
2247 | error(filename, linenum, 'runtime/threadsafe_fn', 2, |
||
2248 | 'Consider using ' + multithread_safe_func + |
||
2249 | '...) instead of ' + single_thread_func + |
||
2250 | '...) for improved thread safety.') |
||
2251 | |||
2252 | |||
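The prefix requirement is what keeps variables and member functions named `rand` from being flagged. A quick check of the pattern for the `rand()` entry (the sample lines are illustrative):

```python
import re

_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
rand_pattern = _UNSAFE_FUNC_PREFIX + r'rand\(\)'

print(bool(re.search(rand_pattern, 'int x = rand();')))        # True  -- flagged
print(bool(re.search(rand_pattern, 'ACMRandom rand(seed);')))  # False -- variable named rand
print(bool(re.search(rand_pattern, 'obj->rand();')))           # False -- member function call
```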
2253 | def CheckVlogArguments(filename, clean_lines, linenum, error): |
||
2254 | """Checks that VLOG() is only used for defining a logging level. |
||
2255 | |||
2256 | For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and |
||
2257 | VLOG(FATAL) are not. |
||
2258 | |||
2259 | Args: |
||
2260 | filename: The name of the current file. |
||
2261 | clean_lines: A CleansedLines instance containing the file. |
||
2262 | linenum: The number of the line to check. |
||
2263 | error: The function to call with any errors found. |
||
2264 | """ |
||
2265 | line = clean_lines.elided[linenum] |
||
2266 | if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): |
||
2267 | error(filename, linenum, 'runtime/vlog', 5, |
||
2268 | 'VLOG() should be used with numeric verbosity level. ' |
||
2269 | 'Use LOG() if you want symbolic severity levels.') |
||
2270 | |||
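The check boils down to the single regex below; only symbolic severities are flagged, while numeric verbosity levels pass (sample lines are illustrative):

```python
import re

vlog = re.compile(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)')
print(bool(vlog.search('VLOG(ERROR) << "oops";')))  # True  -- flagged
print(bool(vlog.search('VLOG(2) << "detail";')))    # False -- numeric level is fine
```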
2271 | # Matches invalid increment: *count++, which moves the pointer instead of |
||
2272 | # incrementing a value. |
||
2273 | _RE_PATTERN_INVALID_INCREMENT = re.compile( |
||
2274 | r'^\s*\*\w+(\+\+|--);') |
||
2275 | |||
2276 | |||
2277 | def CheckInvalidIncrement(filename, clean_lines, linenum, error): |
||
2278 | """Checks for invalid increment *count++. |
||
2279 | |||
2280 | For example following function: |
||
2281 | void increment_counter(int* count) { |
||
2282 | *count++; |
||
2283 | } |
||
2284 | is invalid because it effectively does count++ (moving the pointer) and should |
||
2285 | be replaced with ++*count, (*count)++ or *count += 1. |
||
2286 | |||
2287 | Args: |
||
2288 | filename: The name of the current file. |
||
2289 | clean_lines: A CleansedLines instance containing the file. |
||
2290 | linenum: The number of the line to check. |
||
2291 | error: The function to call with any errors found. |
||
2292 | """ |
||
2293 | line = clean_lines.elided[linenum] |
||
2294 | if _RE_PATTERN_INVALID_INCREMENT.match(line): |
||
2295 | error(filename, linenum, 'runtime/invalid_increment', 5, |
||
2296 | 'Changing pointer instead of value (or unused value of operator*).') |
||
2297 | |||
2298 | |||
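A quick demonstration of `_RE_PATTERN_INVALID_INCREMENT` on the three spellings discussed above (illustrative lines):

```python
import re

invalid_increment = re.compile(r'^\s*\*\w+(\+\+|--);')
print(bool(invalid_increment.match('  *count++;')))     # True  -- moves the pointer
print(bool(invalid_increment.match('  (*count)++;')))   # False -- increments the value
print(bool(invalid_increment.match('  *count += 1;')))  # False
```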
2299 | def IsMacroDefinition(clean_lines, linenum): |
||
2300 | if Search(r'^#define', clean_lines[linenum]): |
||
2301 | return True |
||
2302 | |||
2303 | if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]): |
||
2304 | return True |
||
2305 | |||
2306 | return False |
||
2307 | |||
2308 | |||
2309 | def IsForwardClassDeclaration(clean_lines, linenum): |
||
2310 | return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum]) |
||
2311 | |||
2312 | |||
2313 | View Code Duplication | class _BlockInfo(object): |
|
2314 | """Stores information about a generic block of code.""" |
||
2315 | |||
2316 | def __init__(self, linenum, seen_open_brace): |
||
2317 | self.starting_linenum = linenum |
||
2318 | self.seen_open_brace = seen_open_brace |
||
2319 | self.open_parentheses = 0 |
||
2320 | self.inline_asm = _NO_ASM |
||
2321 | self.check_namespace_indentation = False |
||
2322 | |||
2323 | def CheckBegin(self, filename, clean_lines, linenum, error): |
||
2324 | """Run checks that applies to text up to the opening brace. |
||
2325 | |||
2326 | This is mostly for checking the text after the class identifier |
||
2327 | and the "{", usually where the base class is specified. For other |
||
2328 | blocks, there isn't much to check, so we always pass. |
||
2329 | |||
2330 | Args: |
||
2331 | filename: The name of the current file. |
||
2332 | clean_lines: A CleansedLines instance containing the file. |
||
2333 | linenum: The number of the line to check. |
||
2334 | error: The function to call with any errors found. |
||
2335 | """ |
||
2336 | pass |
||
2337 | |||
2338 | def CheckEnd(self, filename, clean_lines, linenum, error): |
||
2339 | """Run checks that applies to text after the closing brace. |
||
2340 | |||
2341 | This is mostly used for checking end of namespace comments. |
||
2342 | |||
2343 | Args: |
||
2344 | filename: The name of the current file. |
||
2345 | clean_lines: A CleansedLines instance containing the file. |
||
2346 | linenum: The number of the line to check. |
||
2347 | error: The function to call with any errors found. |
||
2348 | """ |
||
2349 | pass |
||
2350 | |||
2351 | def IsBlockInfo(self): |
||
2352 | """Returns true if this block is a _BlockInfo. |
||
2353 | |||
2354 | This is convenient for verifying that an object is an instance of |
||
2355 | a _BlockInfo, but not an instance of any of the derived classes. |
||
2356 | |||
2357 | Returns: |
||
2358 | True for this class, False for derived classes. |
||
2359 | """ |
||
2360 | return self.__class__ == _BlockInfo |
||
2361 | |||
2362 | |||
2363 | class _ExternCInfo(_BlockInfo): |
||
2364 | """Stores information about an 'extern "C"' block.""" |
||
2365 | |||
2366 | def __init__(self, linenum): |
||
2367 | _BlockInfo.__init__(self, linenum, True) |
||
2368 | |||
2369 | |||
2370 | View Code Duplication | class _ClassInfo(_BlockInfo): |
|
2371 | """Stores information about a class.""" |
||
2372 | |||
2373 | def __init__(self, name, class_or_struct, clean_lines, linenum): |
||
2374 | _BlockInfo.__init__(self, linenum, False) |
||
2375 | self.name = name |
||
2376 | self.is_derived = False |
||
2377 | self.check_namespace_indentation = True |
||
2378 | if class_or_struct == 'struct': |
||
2379 | self.access = 'public' |
||
2380 | self.is_struct = True |
||
2381 | else: |
||
2382 | self.access = 'private' |
||
2383 | self.is_struct = False |
||
2384 | |||
2385 | # Remember initial indentation level for this class. Using raw_lines here |
||
2386 | # instead of elided to account for leading comments. |
||
2387 | self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum]) |
||
2388 | |||
2389 | # Try to find the end of the class. This will be confused by things like: |
||
2390 | # class A { |
||
2391 | # } *x = { ... |
||
2392 | # |
||
2393 | # But it's still good enough for CheckSectionSpacing. |
||
2394 | self.last_line = 0 |
||
2395 | depth = 0 |
||
2396 | for i in range(linenum, clean_lines.NumLines()): |
||
2397 | line = clean_lines.elided[i] |
||
2398 | depth += line.count('{') - line.count('}') |
||
2399 | if not depth: |
||
2400 | self.last_line = i |
||
2401 | break |
||
2402 | |||
2403 | def CheckBegin(self, filename, clean_lines, linenum, error): |
||
2404 | # Look for a bare ':' |
||
2405 | if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): |
||
2406 | self.is_derived = True |
||
2407 | |||
2408 | def CheckEnd(self, filename, clean_lines, linenum, error): |
||
2409 | # If there is a DISALLOW macro, it should appear near the end of |
||
2410 | # the class. |
||
2411 | seen_last_thing_in_class = False |
||
2412 | for i in xrange(linenum - 1, self.starting_linenum, -1): |
||
2413 | match = Search( |
||
2414 | r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' + |
||
2415 | self.name + r'\)', |
||
2416 | clean_lines.elided[i]) |
||
2417 | if match: |
||
2418 | if seen_last_thing_in_class: |
||
2419 | error(filename, i, 'readability/constructors', 3, |
||
2420 | match.group(1) + ' should be the last thing in the class') |
||
2421 | break |
||
2422 | |||
2423 | if not Match(r'^\s*$', clean_lines.elided[i]): |
||
2424 | seen_last_thing_in_class = True |
||
2425 | |||
2426 | # Check that closing brace is aligned with beginning of the class. |
||
2427 | # Only do this if the closing brace is indented by only whitespaces. |
||
2428 | # This means we will not check single-line class definitions. |
||
2429 | indent = Match(r'^( *)\}', clean_lines.elided[linenum]) |
||
2430 | if indent and len(indent.group(1)) != self.class_indent: |
||
2431 | if self.is_struct: |
||
2432 | parent = 'struct ' + self.name |
||
2433 | else: |
||
2434 | parent = 'class ' + self.name |
||
2435 | error(filename, linenum, 'whitespace/indent', 3, |
||
2436 | 'Closing brace should be aligned with beginning of %s' % parent) |
||
2437 | |||
2438 | |||
2439 | View Code Duplication | class _NamespaceInfo(_BlockInfo): |
|
2440 | """Stores information about a namespace.""" |
||
2441 | |||
2442 | def __init__(self, name, linenum): |
||
2443 | _BlockInfo.__init__(self, linenum, False) |
||
2444 | self.name = name or '' |
||
2445 | self.check_namespace_indentation = True |
||
2446 | |||
2447 | def CheckEnd(self, filename, clean_lines, linenum, error): |
||
2448 | """Check end of namespace comments.""" |
||
2449 | line = clean_lines.raw_lines[linenum] |
||
2450 | |||
2451 | # Check how many lines is enclosed in this namespace. Don't issue |
||
2452 | # warning for missing namespace comments if there aren't enough |
||
2453 | # lines. However, do apply checks if there is already an end of |
||
2454 | # namespace comment and it's incorrect. |
||
2455 | # |
||
2456 | # TODO(unknown): We always want to check end of namespace comments |
||
2457 | # if a namespace is large, but sometimes we also want to apply the |
||
2458 | # check if a short namespace contained nontrivial things (something |
||
2459 | # other than forward declarations). There is currently no logic on |
||
2460 | # deciding what these nontrivial things are, so this check is |
||
2461 | # triggered by namespace size only, which works most of the time. |
||
2462 | if (linenum - self.starting_linenum < 10 |
||
2463 | and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)): |
||
2464 | return |
||
2465 | |||
2466 | # Look for matching comment at end of namespace. |
||
2467 | # |
||
2468 | # Note that we accept C style "/* */" comments for terminating |
||
2469 | # namespaces, so that code that terminate namespaces inside |
||
2470 | # preprocessor macros can be cpplint clean. |
||
2471 | # |
||
2472 | # We also accept stuff like "// end of namespace <name>." with the |
||
2473 | # period at the end. |
||
2474 | # |
||
2475 | # Besides these, we don't accept anything else, otherwise we might |
||
2476 | # get false negatives when existing comment is a substring of the |
||
2477 | # expected namespace. |
||
2478 | if self.name: |
||
2479 | # Named namespace |
||
2480 | if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' + |
||
2481 | re.escape(self.name) + r'[\*/\.\\\s]*$'), |
||
2482 | line): |
||
2483 | error(filename, linenum, 'readability/namespace', 5, |
||
2484 | 'Namespace should be terminated with "// namespace %s"' % |
||
2485 | self.name) |
||
2486 | else: |
||
2487 | # Anonymous namespace |
||
2488 | if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): |
||
2489 | # If "// namespace anonymous" or "// anonymous namespace (more text)", |
||
2490 | # mention "// anonymous namespace" as an acceptable form |
||
2491 | if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line): |
||
2492 | error(filename, linenum, 'readability/namespace', 5, |
||
2493 | 'Anonymous namespace should be terminated with "// namespace"' |
||
2494 | ' or "// anonymous namespace"') |
||
2495 | else: |
||
2496 | error(filename, linenum, 'readability/namespace', 5, |
||
2497 | 'Anonymous namespace should be terminated with "// namespace"') |
||
2498 | |||
2499 | |||
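The accepted closing-comment forms for a named namespace come from the regex in CheckEnd. A standalone check (the namespace name `myns` is hypothetical):

```python
import re

name = 'myns'
end_comment = re.compile(r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
                         re.escape(name) + r'[\*/\.\\\s]*$')
print(bool(end_comment.match('}  // namespace myns')))     # True  -- accepted
print(bool(end_comment.match('}  /* namespace myns */')))  # True  -- C-style accepted
print(bool(end_comment.match('}  // myns')))               # False -- flagged
```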
2500 | class _PreprocessorInfo(object): |
||
2501 | """Stores checkpoints of nesting stacks when #if/#else is seen.""" |
||
2502 | |||
2503 | def __init__(self, stack_before_if): |
||
2504 | # The entire nesting stack before #if |
||
2505 | self.stack_before_if = stack_before_if |
||
2506 | |||
2507 | # The entire nesting stack up to #else |
||
2508 | self.stack_before_else = [] |
||
2509 | |||
2510 | # Whether we have already seen #else or #elif |
||
2511 | self.seen_else = False |
||
2512 | |||
2513 | |||
2514 | View Code Duplication | class NestingState(object): |
|
2515 | """Holds states related to parsing braces.""" |
||
2516 | |||
2517 | def __init__(self): |
||
2518 | # Stack for tracking all braces. An object is pushed whenever we |
||
2519 | # see a "{", and popped when we see a "}". Only 3 types of |
||
2520 | # objects are possible: |
||
2521 | # - _ClassInfo: a class or struct. |
||
2522 | # - _NamespaceInfo: a namespace. |
||
2523 | # - _BlockInfo: some other type of block. |
||
2524 | self.stack = [] |
||
2525 | |||
2526 | # Top of the previous stack before each Update(). |
||
2527 | # |
||
2528 | # Because the nesting_stack is updated at the end of each line, we |
||
2529 | # had to do some convoluted checks to find out what the current |
||
2530 | # scope is at the beginning of the line. This check is simplified by |
||
2531 | # saving the previous top of nesting stack. |
||
2532 | # |
||
2533 | # We could save the full stack, but we only need the top. Copying |
||
2534 | # the full nesting stack would slow down cpplint by ~10%. |
||
2535 | self.previous_stack_top = [] |
||
2536 | |||
2537 | # Stack of _PreprocessorInfo objects. |
||
2538 | self.pp_stack = [] |
||
2539 | |||
2540 | def SeenOpenBrace(self): |
||
2541 | """Check if we have seen the opening brace for the innermost block. |
||
2542 | |||
2543 | Returns: |
||
2544 | True if we have seen the opening brace, False if the innermost |
||
2545 | block is still expecting an opening brace. |
||
2546 | """ |
||
2547 | return (not self.stack) or self.stack[-1].seen_open_brace |
||
2548 | |||
2549 | def InNamespaceBody(self): |
||
2550 | """Check if we are currently one level inside a namespace body. |
||
2551 | |||
2552 | Returns: |
||
2553 | True if top of the stack is a namespace block, False otherwise. |
||
2554 | """ |
||
2555 | return self.stack and isinstance(self.stack[-1], _NamespaceInfo) |
||
2556 | |||
2557 | def InExternC(self): |
||
2558 | """Check if we are currently one level inside an 'extern "C"' block. |
||
2559 | |||
2560 | Returns: |
||
2561 | True if top of the stack is an extern block, False otherwise. |
||
2562 | """ |
||
2563 | return self.stack and isinstance(self.stack[-1], _ExternCInfo) |
||
2564 | |||
2565 | def InClassDeclaration(self): |
||
2566 | """Check if we are currently one level inside a class or struct declaration. |
||
2567 | |||
2568 | Returns: |
||
2569 | True if top of the stack is a class/struct, False otherwise. |
||
2570 | """ |
||
2571 | return self.stack and isinstance(self.stack[-1], _ClassInfo) |
||
2572 | |||
2573 | def InAsmBlock(self): |
||
2574 | """Check if we are currently one level inside an inline ASM block. |
||
2575 | |||
2576 | Returns: |
||
2577 | True if the top of the stack is a block containing inline ASM. |
||
2578 | """ |
||
2579 | return self.stack and self.stack[-1].inline_asm != _NO_ASM |
||
2580 | |||
2581 | def InTemplateArgumentList(self, clean_lines, linenum, pos): |
||
2582 | """Check if current position is inside template argument list. |
||
2583 | |||
2584 | Args: |
||
2585 | clean_lines: A CleansedLines instance containing the file. |
||
2586 | linenum: The number of the line to check. |
||
2587 | pos: position just after the suspected template argument. |
||
2588 | Returns: |
||
2589 | True if (linenum, pos) is inside template arguments. |
||
2590 | """ |
||
2591 | while linenum < clean_lines.NumLines(): |
||
2592 | # Find the earliest character that might indicate a template argument |
||
2593 | line = clean_lines.elided[linenum] |
||
2594 | match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:]) |
||
2595 | if not match: |
||
2596 | linenum += 1 |
||
2597 | pos = 0 |
||
2598 | continue |
||
2599 | token = match.group(1) |
||
2600 | pos += len(match.group(0)) |
||
2601 | |||
2602 | # These things do not look like template argument list: |
||
2603 | # class Suspect { |
||
2604 | # class Suspect x; } |
||
2605 | if token in ('{', '}', ';'): return False |
||
2606 | |||
2607 | # These things look like template argument list: |
||
2608 | # template <class Suspect> |
||
2609 | # template <class Suspect = default_value> |
||
2610 | # template <class Suspect[]> |
||
2611 | # template <class Suspect...> |
||
2612 | if token in ('>', '=', '[', ']', '.'): return True |
||
2613 | |||
2614 | # Check if token is an unmatched '<'. |
||
2615 | # If not, move on to the next character. |
||
2616 | if token != '<': |
||
2617 | pos += 1 |
||
2618 | if pos >= len(line): |
||
2619 | linenum += 1 |
||
2620 | pos = 0 |
||
2621 | continue |
||
2622 | |||
2623 | # We can't be sure if we just find a single '<', and need to |
||
2624 | # find the matching '>'. |
||
2625 | (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1) |
||
2626 | if end_pos < 0: |
||
2627 | # Not sure if template argument list or syntax error in file |
||
2628 | return False |
||
2629 | linenum = end_line |
||
2630 | pos = end_pos |
||
2631 | return False |
||
2632 | |||
2633 | def UpdatePreprocessor(self, line): |
||
2634 | """Update preprocessor stack. |
||
2635 | |||
2636 | We need to handle preprocessors due to classes like this: |
||
2637 | #ifdef SWIG |
||
2638 | struct ResultDetailsPageElementExtensionPoint { |
||
2639 | #else |
||
2640 | struct ResultDetailsPageElementExtensionPoint : public Extension { |
||
2641 | #endif |
||
2642 | |||
2643 | We make the following assumptions (good enough for most files): |
||
2644 | - Preprocessor condition evaluates to true from #if up to first |
||
2645 | #else/#elif/#endif. |
||
2646 | |||
2647 | - Preprocessor condition evaluates to false from #else/#elif up |
||
2648 | to #endif. We still perform lint checks on these lines, but |
||
2649 | these do not affect nesting stack. |
||
2650 | |||
2651 | Args: |
||
2652 | line: current line to check. |
||
2653 | """ |
||
2654 | if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): |
||
2655 | # Beginning of #if block, save the nesting stack here. The saved |
||
2656 | # stack will allow us to restore the parsing state in the #else case. |
||
2657 | self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) |
||
2658 | elif Match(r'^\s*#\s*(else|elif)\b', line): |
||
2659 | # Beginning of #else block |
||
2660 | if self.pp_stack: |
||
2661 | if not self.pp_stack[-1].seen_else: |
||
2662 | # This is the first #else or #elif block. Remember the |
||
2663 | # whole nesting stack up to this point. This is what we |
||
2664 | # keep after the #endif. |
||
2665 | self.pp_stack[-1].seen_else = True |
||
2666 | self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) |
||
2667 | |||
2668 | # Restore the stack to how it was before the #if |
||
2669 | self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) |
||
2670 | else: |
||
2671 | # TODO(unknown): unexpected #else, issue warning? |
||
2672 | pass |
||
2673 | elif Match(r'^\s*#\s*endif\b', line): |
||
2674 | # End of #if or #else blocks. |
||
2675 | if self.pp_stack: |
||
2676 | # If we saw an #else, we will need to restore the nesting |
||
2677 | # stack to its former state before the #else, otherwise we |
||
2678 | # will just continue from where we left off. |
||
2679 | if self.pp_stack[-1].seen_else: |
||
2680 | # Here we can just use a shallow copy since we are the last |
||
2681 | # reference to it. |
||
2682 | self.stack = self.pp_stack[-1].stack_before_else |
||
2683 | # Drop the corresponding #if |
||
2684 | self.pp_stack.pop() |
||
2685 | else: |
||
2686 | # TODO(unknown): unexpected #endif, issue warning? |
||
2687 | pass |
||
2688 | |||
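The save/restore bookkeeping in UpdatePreprocessor above can be demonstrated with a small standalone sketch. The function below is hypothetical (it is not cpplint's API) and tracks only a toy stack of block header strings, but it applies the same save-at-#if, snapshot-at-#else, restore-at-#endif logic, so only the #if branch ends up affecting the nesting stack.

import copy
import re

def track_preprocessor(lines):
    stack = []   # simplified nesting stack: just block header strings
    pp = []      # one entry per open #if: {'before_if': ..., 'before_else': ...}
    for line in lines:
        if re.match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
            pp.append({'before_if': copy.deepcopy(stack), 'before_else': None})
        elif re.match(r'^\s*#\s*(else|elif)\b', line) and pp:
            if pp[-1]['before_else'] is None:
                pp[-1]['before_else'] = copy.deepcopy(stack)  # keep the #if branch's result
            stack = copy.deepcopy(pp[-1]['before_if'])        # parse the #else branch from a clean slate
        elif re.match(r'^\s*#\s*endif\b', line) and pp:
            top = pp.pop()
            if top['before_else'] is not None:
                stack = top['before_else']                    # discard the #else branch's effect
        elif line.strip().endswith('{'):
            stack.append(line.strip().rstrip('{').strip())
        elif line.strip() == '}':
            if stack:
                stack.pop()
    return stack

print(track_preprocessor([
    '#ifdef SWIG',
    'struct ResultDetailsPageElementExtensionPoint {',
    '#else',
    'struct ResultDetailsPageElementExtensionPoint : public Extension {',
    '#endif',
]))
# ['struct ResultDetailsPageElementExtensionPoint'], i.e. the #if branch wins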
2689 | # TODO(unknown): Update() is too long, but we will refactor later. |
||
2690 | def Update(self, filename, clean_lines, linenum, error): |
||
2691 | """Update nesting state with current line. |
||
2692 | |||
2693 | Args: |
||
2694 | filename: The name of the current file. |
||
2695 | clean_lines: A CleansedLines instance containing the file. |
||
2696 | linenum: The number of the line to check. |
||
2697 | error: The function to call with any errors found. |
||
2698 | """ |
||
2699 | line = clean_lines.elided[linenum] |
||
2700 | |||
2701 | # Remember top of the previous nesting stack. |
||
2702 | # |
||
2703 | # The stack is always pushed/popped and not modified in place, so |
||
2704 | # we can just do a shallow copy instead of copy.deepcopy. Using |
||
2705 | # deepcopy would slow down cpplint by ~28%. |
||
2706 | if self.stack: |
||
2707 | self.previous_stack_top = self.stack[-1] |
||
2708 | else: |
||
2709 | self.previous_stack_top = None |
||
2710 | |||
2711 | # Update pp_stack |
||
2712 | self.UpdatePreprocessor(line) |
||
2713 | |||
2714 | # Count parentheses. This is to avoid adding struct arguments to |
||
2715 | # the nesting stack. |
||
2716 | if self.stack: |
||
2717 | inner_block = self.stack[-1] |
||
2718 | depth_change = line.count('(') - line.count(')') |
||
2719 | inner_block.open_parentheses += depth_change |
||
2720 | |||
2721 | # Also check if we are starting or ending an inline assembly block. |
||
2722 | if inner_block.inline_asm in (_NO_ASM, _END_ASM): |
||
2723 | if (depth_change != 0 and |
||
2724 | inner_block.open_parentheses == 1 and |
||
2725 | _MATCH_ASM.match(line)): |
||
2726 | # Enter assembly block |
||
2727 | inner_block.inline_asm = _INSIDE_ASM |
||
2728 | else: |
||
2729 | # Not entering assembly block. If previous line was _END_ASM, |
||
2730 | # we will now shift to _NO_ASM state. |
||
2731 | inner_block.inline_asm = _NO_ASM |
||
2732 | elif (inner_block.inline_asm == _INSIDE_ASM and |
||
2733 | inner_block.open_parentheses == 0): |
||
2734 | # Exit assembly block |
||
2735 | inner_block.inline_asm = _END_ASM |
||
2736 | |||
2737 | # Consume namespace declaration at the beginning of the line. Do |
||
2738 | # this in a loop so that we catch same line declarations like this: |
||
2739 | # namespace proto2 { namespace bridge { class MessageSet; } } |
||
2740 | while True: |
||
2741 | # Match start of namespace. The "\b\s*" below catches namespace |
||
2742 | # declarations even if they aren't followed by whitespace; this |
||
2743 | # is so that we don't confuse our namespace checker. The |
||
2744 | # missing spaces will be flagged by CheckSpacing. |
||
2745 | namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) |
||
2746 | if not namespace_decl_match: |
||
2747 | break |
||
2748 | |||
2749 | new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) |
||
2750 | self.stack.append(new_namespace) |
||
2751 | |||
2752 | line = namespace_decl_match.group(2) |
||
2753 | if line.find('{') != -1: |
||
2754 | new_namespace.seen_open_brace = True |
||
2755 | line = line[line.find('{') + 1:] |
||
2756 | |||
2757 | # Look for a class declaration in whatever is left of the line |
||
2758 | # after parsing namespaces. The regexp accounts for decorated classes |
||
2759 | # such as in: |
||
2760 | # class LOCKABLE API Object { |
||
2761 | # }; |
||
2762 | class_decl_match = Match( |
||
2763 | r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?' |
||
2764 | r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))' |
||
2765 | r'(.*)$', line) |
||
2766 | if (class_decl_match and |
||
2767 | (not self.stack or self.stack[-1].open_parentheses == 0)): |
||
2768 | # We do not want to accept classes that are actually template arguments: |
||
2769 | # template <class Ignore1, |
||
2770 | # class Ignore2 = Default<Args>, |
||
2771 | # template <Args> class Ignore3> |
||
2772 | # void Function() {}; |
||
2773 | # |
||
2774 | # To avoid template argument cases, we scan forward and look for |
||
2775 | # an unmatched '>'. If we see one, assume we are inside a |
||
2776 | # template argument list. |
||
2777 | end_declaration = len(class_decl_match.group(1)) |
||
2778 | if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration): |
||
2779 | self.stack.append(_ClassInfo( |
||
2780 | class_decl_match.group(3), class_decl_match.group(2), |
||
2781 | clean_lines, linenum)) |
||
2782 | line = class_decl_match.group(4) |
||
2783 | |||
2784 | # If we have not yet seen the opening brace for the innermost block, |
||
2785 | # run checks here. |
||
2786 | if not self.SeenOpenBrace(): |
||
2787 | self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) |
||
2788 | |||
2789 | # Update access control if we are inside a class/struct |
||
2790 | if self.stack and isinstance(self.stack[-1], _ClassInfo): |
||
2791 | classinfo = self.stack[-1] |
||
2792 | access_match = Match( |
||
2793 | r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' |
||
2794 | r':(?:[^:]|$)', |
||
2795 | line) |
||
2796 | if access_match: |
||
2797 | classinfo.access = access_match.group(2) |
||
2798 | |||
2799 | # Check that access keywords are indented +1 space. Skip this |
||
2800 | # check if the keywords are not preceded by whitespaces. |
||
2801 | indent = access_match.group(1) |
||
2802 | if (len(indent) != classinfo.class_indent + 1 and |
||
2803 | Match(r'^\s*$', indent)): |
||
2804 | if classinfo.is_struct: |
||
2805 | parent = 'struct ' + classinfo.name |
||
2806 | else: |
||
2807 | parent = 'class ' + classinfo.name |
||
2808 | slots = '' |
||
2809 | if access_match.group(3): |
||
2810 | slots = access_match.group(3) |
||
2811 | error(filename, linenum, 'whitespace/indent', 3, |
||
2812 | '%s%s: should be indented +1 space inside %s' % ( |
||
2813 | access_match.group(2), slots, parent)) |
||
2814 | |||
2815 | # Consume braces or semicolons from what's left of the line |
||
2816 | while True: |
||
2817 | # Match first brace, semicolon, or closed parenthesis. |
||
2818 | matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) |
||
2819 | if not matched: |
||
2820 | break |
||
2821 | |||
2822 | token = matched.group(1) |
||
2823 | if token == '{': |
||
2824 | # If namespace or class hasn't seen an opening brace yet, mark |
||
2825 | # namespace/class head as complete. Push a new block onto the |
||
2826 | # stack otherwise. |
||
2827 | if not self.SeenOpenBrace(): |
||
2828 | self.stack[-1].seen_open_brace = True |
||
2829 | elif Match(r'^extern\s*"[^"]*"\s*\{', line): |
||
2830 | self.stack.append(_ExternCInfo(linenum)) |
||
2831 | else: |
||
2832 | self.stack.append(_BlockInfo(linenum, True)) |
||
2833 | if _MATCH_ASM.match(line): |
||
2834 | self.stack[-1].inline_asm = _BLOCK_ASM |
||
2835 | |||
2836 | elif token == ';' or token == ')': |
||
2837 | # If we haven't seen an opening brace yet, but we already saw |
||
2838 | # a semicolon, this is probably a forward declaration. Pop |
||
2839 | # the stack for these. |
||
2840 | # |
||
2841 | # Similarly, if we haven't seen an opening brace yet, but we |
||
2842 | # already saw a closing parenthesis, then these are probably |
||
2843 | # function arguments with extra "class" or "struct" keywords. |
||
2844 | # Also pop the stack for these. |
||
2845 | if not self.SeenOpenBrace(): |
||
2846 | self.stack.pop() |
||
2847 | else: # token == '}' |
||
2848 | # Perform end of block checks and pop the stack. |
||
2849 | if self.stack: |
||
2850 | self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) |
||
2851 | self.stack.pop() |
||
2852 | line = matched.group(2) |
||
2853 | |||
2854 | def InnermostClass(self): |
||
2855 | """Get class info on the top of the stack. |
||
2856 | |||
2857 | Returns: |
||
2858 | A _ClassInfo object if we are inside a class, or None otherwise. |
||
2859 | """ |
||
2860 | for i in range(len(self.stack), 0, -1): |
||
2861 | classinfo = self.stack[i - 1] |
||
2862 | if isinstance(classinfo, _ClassInfo): |
||
2863 | return classinfo |
||
2864 | return None |
||
2865 | |||
2866 | def CheckCompletedBlocks(self, filename, error): |
||
2867 | """Checks that all classes and namespaces have been completely parsed. |
||
2868 | |||
2869 | Call this when all lines in a file have been processed. |
||
2870 | Args: |
||
2871 | filename: The name of the current file. |
||
2872 | error: The function to call with any errors found. |
||
2873 | """ |
||
2874 | # Note: This test can result in false positives if #ifdef constructs |
||
2875 | # get in the way of brace matching. See the testBuildClass test in |
||
2876 | # cpplint_unittest.py for an example of this. |
||
2877 | for obj in self.stack: |
||
2878 | if isinstance(obj, _ClassInfo): |
||
2879 | error(filename, obj.starting_linenum, 'build/class', 5, |
||
2880 | 'Failed to find complete declaration of class %s' % |
||
2881 | obj.name) |
||
2882 | elif isinstance(obj, _NamespaceInfo): |
||
2883 | error(filename, obj.starting_linenum, 'build/namespaces', 5, |
||
2884 | 'Failed to find complete declaration of namespace %s' % |
||
2885 | obj.name) |
||
2886 | |||
2887 | |||
2888 | def CheckForNonStandardConstructs(filename, clean_lines, linenum, |
|
2889 | nesting_state, error): |
||
2890 | r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. |
||
2891 | |||
2892 | Complain about several constructs which gcc-2 accepts, but which are |
||
2893 | not standard C++. Warning about these in lint is one way to ease the |
||
2894 | transition to new compilers. |
||
2895 | - put storage class first (e.g. "static const" instead of "const static"). |
||
2896 | - "%lld" instead of %qd" in printf-type functions. |
||
2897 | - "%1$d" is non-standard in printf-type functions. |
||
2898 | - "\%" is an undefined character escape sequence. |
||
2899 | - text after #endif is not allowed. |
||
2900 | - invalid inner-style forward declaration. |
||
2901 | - >? and <? operators, and their >?= and <?= cousins. |
||
2902 | |||
2903 | Additionally, check for constructor/destructor style violations and reference |
||
2904 | members, as it is very convenient to do so while checking for |
||
2905 | gcc-2 compliance. |
||
2906 | |||
2907 | Args: |
||
2908 | filename: The name of the current file. |
||
2909 | clean_lines: A CleansedLines instance containing the file. |
||
2910 | linenum: The number of the line to check. |
||
2911 | nesting_state: A NestingState instance which maintains information about |
||
2912 | the current stack of nested blocks being parsed. |
||
2913 | error: A callable to which errors are reported, which takes 4 arguments: |
||
2914 | filename, line number, error level, and message |
||
2915 | """ |
||
2916 | |||
2917 | # Remove comments from the line, but leave in strings for now. |
||
2918 | line = clean_lines.lines[linenum] |
||
2919 | |||
2920 | if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): |
||
2921 | error(filename, linenum, 'runtime/printf_format', 3, |
||
2922 | '%q in format strings is deprecated. Use %ll instead.') |
||
2923 | |||
2924 | if Search(r'printf\s*\(.*".*%\d+\$', line): |
||
2925 | error(filename, linenum, 'runtime/printf_format', 2, |
||
2926 | '%N$ formats are unconventional. Try rewriting to avoid them.') |
||
2927 | |||
2928 | # Remove escaped backslashes before looking for undefined escapes. |
||
2929 | line = line.replace('\\\\', '') |
||
2930 | |||
2931 | if Search(r'("|\').*\\(%|\[|\(|{)', line): |
||
2932 | error(filename, linenum, 'build/printf_format', 3, |
||
2933 | '%, [, (, and { are undefined character escapes. Unescape them.') |
||
2934 | |||
2935 | # For the rest, work with both comments and strings removed. |
||
2936 | line = clean_lines.elided[linenum] |
||
2937 | |||
2938 | if Search(r'\b(const|volatile|void|char|short|int|long' |
||
2939 | r'|float|double|signed|unsigned' |
||
2940 | r'|schar|u?int8|u?int16|u?int32|u?int64)' |
||
2941 | r'\s+(register|static|extern|typedef)\b', |
||
2942 | line): |
||
2943 | error(filename, linenum, 'build/storage_class', 5, |
||
2944 | 'Storage-class specifier (static, extern, typedef, etc) should be ' |
||
2945 | 'at the beginning of the declaration.') |
||
2946 | |||
2947 | if Match(r'\s*#\s*endif\s*[^/\s]+', line): |
||
2948 | error(filename, linenum, 'build/endif_comment', 5, |
||
2949 | 'Uncommented text after #endif is non-standard. Use a comment.') |
||
2950 | |||
2951 | if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): |
||
2952 | error(filename, linenum, 'build/forward_decl', 5, |
||
2953 | 'Inner-style forward declarations are invalid. Remove this line.') |
||
2954 | |||
2955 | if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', |
||
2956 | line): |
||
2957 | error(filename, linenum, 'build/deprecated', 3, |
||
2958 | '>? and <? (max and min) operators are non-standard and deprecated.') |
||
2959 | |||
2960 | if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line): |
||
2961 | # TODO(unknown): Could it be expanded safely to arbitrary references, |
||
2962 | # without triggering too many false positives? The first |
||
2963 | # attempt triggered 5 warnings for mostly benign code in the regtest, hence |
||
2964 | # the restriction. |
||
2965 | # Here's the original regexp, for the reference: |
||
2966 | # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?' |
||
2967 | # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' |
||
2968 | error(filename, linenum, 'runtime/member_string_references', 2, |
||
2969 | 'const string& members are dangerous. It is much better to use ' |
||
2970 | 'alternatives, such as pointers or simple constants.') |
||
2971 | |||
2972 | # Everything else in this function operates on class declarations. |
||
2973 | # Return early if the top of the nesting stack is not a class, or if |
||
2974 | # the class head is not completed yet. |
||
2975 | classinfo = nesting_state.InnermostClass() |
||
2976 | if not classinfo or not classinfo.seen_open_brace: |
||
2977 | return |
||
2978 | |||
2979 | # The class may have been declared with namespace or classname qualifiers. |
||
2980 | # The constructor and destructor will not have those qualifiers. |
||
2981 | base_classname = classinfo.name.split('::')[-1] |
||
2982 | |||
2983 | # Look for single-argument constructors that aren't marked explicit. |
||
2984 | # Technically a valid construct, but against style. |
||
2985 | explicit_constructor_match = Match( |
||
2986 | r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*' |
||
2987 | r'\(((?:[^()]|\([^()]*\))*)\)' |
||
2988 | % re.escape(base_classname), |
||
2989 | line) |
||
2990 | |||
2991 | if explicit_constructor_match: |
||
2992 | is_marked_explicit = explicit_constructor_match.group(1) |
||
2993 | |||
2994 | if not explicit_constructor_match.group(2): |
||
2995 | constructor_args = [] |
||
2996 | else: |
||
2997 | constructor_args = explicit_constructor_match.group(2).split(',') |
||
2998 | |||
2999 | # collapse arguments so that commas in template parameter lists and function |
||
3000 | # argument parameter lists don't split arguments in two |
||
3001 | i = 0 |
||
3002 | while i < len(constructor_args): |
||
3003 | constructor_arg = constructor_args[i] |
||
3004 | while (constructor_arg.count('<') > constructor_arg.count('>') or |
||
3005 | constructor_arg.count('(') > constructor_arg.count(')')): |
||
3006 | constructor_arg += ',' + constructor_args[i + 1] |
||
3007 | del constructor_args[i + 1] |
||
3008 | constructor_args[i] = constructor_arg |
||
3009 | i += 1 |
||
3010 | |||
3011 | variadic_args = [arg for arg in constructor_args if '&&...' in arg] |
||
3012 | defaulted_args = [arg for arg in constructor_args if '=' in arg] |
||
3013 | noarg_constructor = (not constructor_args or # empty arg list |
||
3014 | # 'void' arg specifier |
||
3015 | (len(constructor_args) == 1 and |
||
3016 | constructor_args[0].strip() == 'void')) |
||
3017 | onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg |
||
3018 | not noarg_constructor) or |
||
3019 | # all but at most one arg defaulted |
||
3020 | (len(constructor_args) >= 1 and |
||
3021 | not noarg_constructor and |
||
3022 | len(defaulted_args) >= len(constructor_args) - 1) or |
||
3023 | # variadic arguments with zero or one argument |
||
3024 | (len(constructor_args) <= 2 and |
||
3025 | len(variadic_args) >= 1)) |
||
3026 | initializer_list_constructor = bool( |
||
3027 | onearg_constructor and |
||
3028 | Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0])) |
||
3029 | copy_constructor = bool( |
||
3030 | onearg_constructor and |
||
3031 | Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' |
||
3032 | % re.escape(base_classname), constructor_args[0].strip())) |
||
3033 | |||
3034 | if (not is_marked_explicit and |
||
3035 | onearg_constructor and |
||
3036 | not initializer_list_constructor and |
||
3037 | not copy_constructor): |
||
3038 | if defaulted_args or variadic_args: |
||
3039 | error(filename, linenum, 'runtime/explicit', 5, |
||
3040 | 'Constructors callable with one argument ' |
||
3041 | 'should be marked explicit.') |
||
3042 | else: |
||
3043 | error(filename, linenum, 'runtime/explicit', 5, |
||
3044 | 'Single-parameter constructors should be marked explicit.') |
||
3045 | elif is_marked_explicit and not onearg_constructor: |
||
3046 | if noarg_constructor: |
||
3047 | error(filename, linenum, 'runtime/explicit', 5, |
||
3048 | 'Zero-parameter constructors should not be marked explicit.') |
||
3049 | |||
3050 | |||
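The comma-collapsing step inside CheckForNonStandardConstructs above (re-joining pieces whose '<'/'>' or '('/')' counts do not balance) is easy to see in isolation. The helper below is a hypothetical extraction of that loop, not a function cpplint exposes.

def collapse_arguments(arg_text):
    # A naive split(',') breaks arguments such as 'std::map<int, int> m',
    # so adjacent pieces are re-joined until the brackets balance again.
    args = arg_text.split(',')
    i = 0
    while i < len(args):
        arg = args[i]
        while (arg.count('<') > arg.count('>') or
               arg.count('(') > arg.count(')')):
            arg += ',' + args[i + 1]
            del args[i + 1]
        args[i] = arg
        i += 1
    return args

print(collapse_arguments('std::map<int, int> m, int x = f(1, 2)'))
# ['std::map<int, int> m', ' int x = f(1, 2)']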
3051 | def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): |
|
3052 | """Checks for the correctness of various spacing around function calls. |
||
3053 | |||
3054 | Args: |
||
3055 | filename: The name of the current file. |
||
3056 | clean_lines: A CleansedLines instance containing the file. |
||
3057 | linenum: The number of the line to check. |
||
3058 | error: The function to call with any errors found. |
||
3059 | """ |
||
3060 | line = clean_lines.elided[linenum] |
||
3061 | |||
3062 | # Since function calls often occur inside if/for/while/switch |
||
3063 | # expressions - which have their own, more liberal conventions - we |
||
3064 | # first see if we should be looking inside such an expression for a |
||
3065 | # function call, to which we can apply more strict standards. |
||
3066 | fncall = line # if there's no control flow construct, look at whole line |
||
3067 | for pattern in (r'\bif\s*\((.*)\)\s*{', |
||
3068 | r'\bfor\s*\((.*)\)\s*{', |
||
3069 | r'\bwhile\s*\((.*)\)\s*[{;]', |
||
3070 | r'\bswitch\s*\((.*)\)\s*{'): |
||
3071 | match = Search(pattern, line) |
||
3072 | if match: |
||
3073 | fncall = match.group(1) # look inside the parens for function calls |
||
3074 | break |
||
3075 | |||
3076 | # Except in if/for/while/switch, there should never be space |
||
3077 | # immediately inside parens (eg "f( 3, 4 )"). We make an exception |
||
3078 | # for nested parens ( (a+b) + c ). Likewise, there should never be |
||
3079 | # a space before a ( when it's a function argument. I assume it's a |
||
3080 | # function argument when the char before the whitespace is legal in |
||
3081 | # a function name (alnum + _) and we're not starting a macro. Also ignore |
||
3082 | # pointers and references to arrays and functions because they're too tricky: |
||
3083 | # we use a very simple way to recognize these: |
||
3084 | # " (something)(maybe-something)" or |
||
3085 | # " (something)(maybe-something," or |
||
3086 | # " (something)[something]" |
||
3087 | # Note that we assume the contents of [] to be short enough that |
||
3088 | # they'll never need to wrap. |
||
3089 | if ( # Ignore control structures. |
||
3090 | not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b', |
||
3091 | fncall) and |
||
3092 | # Ignore pointers/references to functions. |
||
3093 | not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and |
||
3094 | # Ignore pointers/references to arrays. |
||
3095 | not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): |
||
3096 | if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call |
||
3097 | error(filename, linenum, 'whitespace/parens', 4, |
||
3098 | 'Extra space after ( in function call') |
||
3099 | elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): |
||
3100 | error(filename, linenum, 'whitespace/parens', 2, |
||
3101 | 'Extra space after (') |
||
3102 | if (Search(r'\w\s+\(', fncall) and |
||
3103 | not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and |
||
3104 | not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and |
||
3105 | not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and |
||
3106 | not Search(r'\bcase\s+\(', fncall)): |
||
3107 | # TODO(unknown): Space after an operator function seems to be a common |
||
3108 | # error, silence those for now by restricting them to highest verbosity. |
||
3109 | if Search(r'\boperator_*\b', line): |
||
3110 | error(filename, linenum, 'whitespace/parens', 0, |
||
3111 | 'Extra space before ( in function call') |
||
3112 | else: |
||
3113 | error(filename, linenum, 'whitespace/parens', 4, |
||
3114 | 'Extra space before ( in function call') |
||
3115 | # If the ) is followed only by a newline or a { + newline, assume it's |
||
3116 | # part of a control statement (if/while/etc), and don't complain |
||
3117 | if Search(r'[^)]\s+\)\s*[^{\s]', fncall): |
||
3118 | # If the closing parenthesis is preceded by only whitespaces, |
||
3119 | # try to give a more descriptive error message. |
||
3120 | if Search(r'^\s+\)', fncall): |
||
3121 | error(filename, linenum, 'whitespace/parens', 2, |
||
3122 | 'Closing ) should be moved to the previous line') |
||
3123 | else: |
||
3124 | error(filename, linenum, 'whitespace/parens', 2, |
||
3125 | 'Extra space before )') |
||
3126 | |||
3127 | |||
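To see which lines the paren-spacing patterns in CheckSpacingForFunctionCall above would flag, here is a short demo that applies two of the raw regular expressions directly with re.search (cpplint's Search wrapper behaves the same way); the sample strings are assumed to be already comment- and string-stripped.

import re

samples = [
    ('f( 3, 4)',  'extra space after ( in a call'),
    ('f(3, 4)',   'clean'),
    ('foo (bar)', 'extra space before ( in a call'),
]
for code, expected in samples:
    space_after = bool(re.search(r'\w\s*\(\s(?!\s*\\$)', code))
    space_before = bool(re.search(r'\w\s+\(', code))
    print('%-12s after=%-5s before=%-5s (%s)' % (code, space_after, space_before, expected))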
3128 | def IsBlankLine(line): |
||
3129 | """Returns true if the given line is blank. |
||
3130 | |||
3131 | We consider a line to be blank if the line is empty or consists of |
||
3132 | only white spaces. |
||
3133 | |||
3134 | Args: |
||
3135 | line: A line of a string. |
||
3136 | |||
3137 | Returns: |
||
3138 | True, if the given line is blank. |
||
3139 | """ |
||
3140 | return not line or line.isspace() |
||
3141 | |||
3142 | |||
3143 | def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, |
|
3144 | error): |
||
3145 | is_namespace_indent_item = ( |
||
3146 | len(nesting_state.stack) > 1 and |
||
3147 | nesting_state.stack[-1].check_namespace_indentation and |
||
3148 | isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and |
||
3149 | nesting_state.previous_stack_top == nesting_state.stack[-2]) |
||
3150 | |||
3151 | if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, |
||
3152 | clean_lines.elided, line): |
||
3153 | CheckItemIndentationInNamespace(filename, clean_lines.elided, |
||
3154 | line, error) |
||
3155 | |||
3156 | |||
3157 | def CheckForFunctionLengths(filename, clean_lines, linenum, |
|
3158 | function_state, error): |
||
3159 | """Reports for long function bodies. |
||
3160 | |||
3161 | For an overview of why this is done, see: |
||
3162 | https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions |
||
3163 | |||
3164 | Uses a simplistic algorithm assuming other style guidelines |
||
3165 | (especially spacing) are followed. |
||
3166 | Only checks unindented functions, so class members are unchecked. |
||
3167 | Trivial bodies are unchecked, so constructors with huge initializer lists |
||
3168 | may be missed. |
||
3169 | Blank/comment lines are not counted so as to avoid encouraging the removal |
||
3170 | of vertical space and comments just to get through a lint check. |
||
3171 | NOLINT *on the last line of a function* disables this check. |
||
3172 | |||
3173 | Args: |
||
3174 | filename: The name of the current file. |
||
3175 | clean_lines: A CleansedLines instance containing the file. |
||
3176 | linenum: The number of the line to check. |
||
3177 | function_state: Current function name and lines in body so far. |
||
3178 | error: The function to call with any errors found. |
||
3179 | """ |
||
3180 | lines = clean_lines.lines |
||
3181 | line = lines[linenum] |
||
3182 | joined_line = '' |
||
3183 | |||
3184 | starting_func = False |
||
3185 | regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... |
||
3186 | match_result = Match(regexp, line) |
||
3187 | if match_result: |
||
3188 | # If the name is all caps and underscores, figure it's a macro and |
||
3189 | # ignore it, unless it's TEST or TEST_F. |
||
3190 | function_name = match_result.group(1).split()[-1] |
||
3191 | if function_name == 'TEST' or function_name == 'TEST_F' or ( |
||
3192 | not Match(r'[A-Z_]+$', function_name)): |
||
3193 | starting_func = True |
||
3194 | |||
3195 | if starting_func: |
||
3196 | body_found = False |
||
3197 | for start_linenum in range(linenum, clean_lines.NumLines()): |
||
3198 | start_line = lines[start_linenum] |
||
3199 | joined_line += ' ' + start_line.lstrip() |
||
3200 | if Search(r'(;|})', start_line): # Declarations and trivial functions |
||
3201 | body_found = True |
||
3202 | break # ... ignore |
||
3203 | elif Search(r'{', start_line): |
||
3204 | body_found = True |
||
3205 | function = Search(r'((\w|:)*)\(', line).group(1) |
||
3206 | if Match(r'TEST', function): # Handle TEST... macros |
||
3207 | parameter_regexp = Search(r'(\(.*\))', joined_line) |
||
3208 | if parameter_regexp: # Ignore bad syntax |
||
3209 | function += parameter_regexp.group(1) |
||
3210 | else: |
||
3211 | function += '()' |
||
3212 | function_state.Begin(function) |
||
3213 | break |
||
3214 | if not body_found: |
||
3215 | # No body for the function (or evidence of a non-function) was found. |
||
3216 | error(filename, linenum, 'readability/fn_size', 5, |
||
3217 | 'Lint failed to find start of function body.') |
||
3218 | elif Match(r'^\}\s*$', line): # function end |
||
3219 | function_state.Check(error, filename, linenum) |
||
3220 | function_state.End() |
||
3221 | elif not Match(r'^\s*$', line): |
||
3222 | function_state.Count() # Count non-blank/non-comment lines. |
||
3223 | |||
3224 | |||
3225 | _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') |
||
3226 | |||
3227 | |||
3228 | def CheckComment(line, filename, linenum, next_line_start, error): |
|
3229 | """Checks for common mistakes in comments. |
||
3230 | |||
3231 | Args: |
||
3232 | line: The line in question. |
||
3233 | filename: The name of the current file. |
||
3234 | linenum: The number of the line to check. |
||
3235 | next_line_start: The first non-whitespace column of the next line. |
||
3236 | error: The function to call with any errors found. |
||
3237 | """ |
||
3238 | commentpos = line.find('//') |
||
3239 | if commentpos != -1: |
||
3240 | # Check if the // may be in quotes. If so, ignore it |
||
3241 | if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0: |
||
3242 | # Allow one space for new scopes, two spaces otherwise: |
||
3243 | if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and |
||
3244 | ((commentpos >= 1 and |
||
3245 | line[commentpos-1] not in string.whitespace) or |
||
3246 | (commentpos >= 2 and |
||
3247 | line[commentpos-2] not in string.whitespace))): |
||
3248 | error(filename, linenum, 'whitespace/comments', 2, |
||
3249 | 'At least two spaces is best between code and comments') |
||
3250 | |||
3251 | # Checks for common mistakes in TODO comments. |
||
3252 | comment = line[commentpos:] |
||
3253 | match = _RE_PATTERN_TODO.match(comment) |
||
3254 | if match: |
||
3255 | # One whitespace is correct; zero whitespace is handled elsewhere. |
||
3256 | leading_whitespace = match.group(1) |
||
3257 | if len(leading_whitespace) > 1: |
||
3258 | error(filename, linenum, 'whitespace/todo', 2, |
||
3259 | 'Too many spaces before TODO') |
||
3260 | |||
3261 | username = match.group(2) |
||
3262 | if not username: |
||
3263 | error(filename, linenum, 'readability/todo', 2, |
||
3264 | 'Missing username in TODO; it should look like ' |
||
3265 | '"// TODO(my_username): Stuff."') |
||
3266 | |||
3267 | middle_whitespace = match.group(3) |
||
3268 | # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison |
||
3269 | if middle_whitespace != ' ' and middle_whitespace != '': |
||
3270 | error(filename, linenum, 'whitespace/todo', 2, |
||
3271 | 'TODO(my_username) should be followed by a space') |
||
3272 | |||
3273 | # If the comment contains an alphanumeric character, there |
||
3274 | # should be a space somewhere between it and the // unless |
||
3275 | # it's a /// or //! Doxygen comment. |
||
3276 | if (Match(r'//[^ ]*\w', comment) and |
||
3277 | not Match(r'(///|//\!)(\s+|$)', comment)): |
||
3278 | error(filename, linenum, 'whitespace/comments', 4, |
||
3279 | 'Should have a space between // and comment') |
||
3280 | |||
3281 | |||
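The TODO pattern defined above drives the username and spacing checks in CheckComment; a few sample comments show which groups each check inspects. This is plain re usage, not part of cpplint itself.

import re

todo_re = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
for comment in ('// TODO(alice): Fix this.',
                '// TODO: Fix this.',
                '//   TODO(bob): too much space'):
    m = todo_re.match(comment)
    print('%-32s -> %s' % (comment, m.group(1, 2, 3) if m else 'no match'))
# // TODO(alice): Fix this.        -> (' ', '(alice)', ' ')   well-formed
# // TODO: Fix this.               -> (' ', None, ' ')        missing username
# //   TODO(bob): too much space   -> ('   ', '(bob)', ' ')   too many spaces before TODO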
3282 | def CheckAccess(filename, clean_lines, linenum, nesting_state, error): |
|
3283 | """Checks for improper use of DISALLOW* macros. |
||
3284 | |||
3285 | Args: |
||
3286 | filename: The name of the current file. |
||
3287 | clean_lines: A CleansedLines instance containing the file. |
||
3288 | linenum: The number of the line to check. |
||
3289 | nesting_state: A NestingState instance which maintains information about |
||
3290 | the current stack of nested blocks being parsed. |
||
3291 | error: The function to call with any errors found. |
||
3292 | """ |
||
3293 | line = clean_lines.elided[linenum] # get rid of comments and strings |
||
3294 | |||
3295 | matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' |
||
3296 | r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) |
||
3297 | if not matched: |
||
3298 | return |
||
3299 | if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): |
||
3300 | if nesting_state.stack[-1].access != 'private': |
||
3301 | error(filename, linenum, 'readability/constructors', 3, |
||
3302 | '%s must be in the private: section' % matched.group(1)) |
||
3303 | |||
3304 | else: |
||
3305 | # Found DISALLOW* macro outside a class declaration, or perhaps it |
||
3306 | # was used inside a function when it should have been part of the |
||
3307 | # class declaration. We could issue a warning here, but it |
||
3308 | # probably resulted in a compiler error already. |
||
3309 | pass |
||
3310 | |||
3311 | |||
3312 | def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): |
|
3313 | """Checks for the correctness of various spacing issues in the code. |
||
3314 | |||
3315 | Things we check for: spaces around operators, spaces after |
||
3316 | if/for/while/switch, no spaces around parens in function calls, two |
||
3317 | spaces between code and comment, don't start a block with a blank |
||
3318 | line, don't end a function with a blank line, don't add a blank line |
||
3319 | after public/protected/private, don't have too many blank lines in a row. |
||
3320 | |||
3321 | Args: |
||
3322 | filename: The name of the current file. |
||
3323 | clean_lines: A CleansedLines instance containing the file. |
||
3324 | linenum: The number of the line to check. |
||
3325 | nesting_state: A NestingState instance which maintains information about |
||
3326 | the current stack of nested blocks being parsed. |
||
3327 | error: The function to call with any errors found. |
||
3328 | """ |
||
3329 | |||
3330 | # Don't use "elided" lines here, otherwise we can't check commented lines. |
||
3331 | # Don't want to use "raw" either, because we don't want to check inside C++11 |
||
3332 | # raw strings. |
||
3333 | raw = clean_lines.lines_without_raw_strings |
||
3334 | line = raw[linenum] |
||
3335 | |||
3336 | # Before nixing comments, check if the line is blank for no good |
||
3337 | # reason. This includes the first line after a block is opened, and |
||
3338 | # blank lines at the end of a function (ie, right before a line like '}' |
||
3339 | # |
||
3340 | # Skip all the blank line checks if we are immediately inside a |
||
3341 | # namespace body. In other words, don't issue blank line warnings |
||
3342 | # for this block: |
||
3343 | # namespace { |
||
3344 | # |
||
3345 | # } |
||
3346 | # |
||
3347 | # A warning about missing end of namespace comments will be issued instead. |
||
3348 | # |
||
3349 | # Also skip blank line checks for 'extern "C"' blocks, which are formatted |
||
3350 | # like namespaces. |
||
3351 | if (IsBlankLine(line) and |
||
3352 | not nesting_state.InNamespaceBody() and |
||
3353 | not nesting_state.InExternC()): |
||
3354 | elided = clean_lines.elided |
||
3355 | prev_line = elided[linenum - 1] |
||
3356 | prevbrace = prev_line.rfind('{') |
||
3357 | # TODO(unknown): Don't complain if line before blank line, and line after, |
||
3358 | # both start with alnums and are indented the same amount. |
||
3359 | # This ignores whitespace at the start of a namespace block |
||
3360 | # because those are not usually indented. |
||
3361 | if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: |
||
3362 | # OK, we have a blank line at the start of a code block. Before we |
||
3363 | # complain, we check if it is an exception to the rule: The previous |
||
3364 | # non-empty line has the parameters of a function header that are indented |
||
3365 | # 4 spaces (because they did not fit in an 80-column line when placed on |
||
3366 | # the same line as the function name). We also check for the case where |
||
3367 | # the previous line is indented 6 spaces, which may happen when the |
||
3368 | # initializers of a constructor do not fit into an 80-column line. |
||
3369 | exception = False |
||
3370 | if Match(r' {6}\w', prev_line): # Initializer list? |
||
3371 | # We are looking for the opening column of initializer list, which |
||
3372 | # should be indented 4 spaces to cause 6 space indentation afterwards. |
||
3373 | search_position = linenum-2 |
||
3374 | while (search_position >= 0 |
||
3375 | and Match(r' {6}\w', elided[search_position])): |
||
3376 | search_position -= 1 |
||
3377 | exception = (search_position >= 0 |
||
3378 | and elided[search_position][:5] == ' :') |
||
3379 | else: |
||
3380 | # Search for the function arguments or an initializer list. We use a |
||
3381 | # simple heuristic here: If the line is indented 4 spaces; and we have a |
||
3382 | # closing paren, without the opening paren, followed by an opening brace |
||
3383 | # or colon (for initializer lists) we assume that it is the last line of |
||
3384 | # a function header. If we have a colon indented 4 spaces, it is an |
||
3385 | # initializer list. |
||
3386 | exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', |
||
3387 | prev_line) |
||
3388 | or Match(r' {4}:', prev_line)) |
||
3389 | |||
3390 | if not exception: |
||
3391 | error(filename, linenum, 'whitespace/blank_line', 2, |
||
3392 | 'Redundant blank line at the start of a code block ' |
||
3393 | 'should be deleted.') |
||
3394 | # Ignore blank lines at the end of a block in a long if-else |
||
3395 | # chain, like this: |
||
3396 | # if (condition1) { |
||
3397 | # // Something followed by a blank line |
||
3398 | # |
||
3399 | # } else if (condition2) { |
||
3400 | # // Something else |
||
3401 | # } |
||
3402 | if linenum + 1 < clean_lines.NumLines(): |
||
3403 | next_line = raw[linenum + 1] |
||
3404 | if (next_line |
||
3405 | and Match(r'\s*}', next_line) |
||
3406 | and next_line.find('} else ') == -1): |
||
3407 | error(filename, linenum, 'whitespace/blank_line', 3, |
||
3408 | 'Redundant blank line at the end of a code block ' |
||
3409 | 'should be deleted.') |
||
3410 | |||
3411 | matched = Match(r'\s*(public|protected|private):', prev_line) |
||
3412 | if matched: |
||
3413 | error(filename, linenum, 'whitespace/blank_line', 3, |
||
3414 | 'Do not leave a blank line after "%s:"' % matched.group(1)) |
||
3415 | |||
3416 | # Next, check comments |
||
3417 | next_line_start = 0 |
||
3418 | if linenum + 1 < clean_lines.NumLines(): |
||
3419 | next_line = raw[linenum + 1] |
||
3420 | next_line_start = len(next_line) - len(next_line.lstrip()) |
||
3421 | CheckComment(line, filename, linenum, next_line_start, error) |
||
3422 | |||
3423 | # get rid of comments and strings |
||
3424 | line = clean_lines.elided[linenum] |
||
3425 | |||
3426 | # You shouldn't have spaces before your brackets, except maybe after |
||
3427 | # 'delete []' or 'return []() {};' |
||
3428 | if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line): |
||
3429 | error(filename, linenum, 'whitespace/braces', 5, |
||
3430 | 'Extra space before [') |
||
3431 | |||
3432 | # In range-based for, we wanted spaces before and after the colon, but |
||
3433 | # not around "::" tokens that might appear. |
||
3434 | if (Search(r'for *\(.*[^:]:[^: ]', line) or |
||
3435 | Search(r'for *\(.*[^: ]:[^:]', line)): |
||
3436 | error(filename, linenum, 'whitespace/forcolon', 2, |
||
3437 | 'Missing space around colon in range-based for loop') |
||
3438 | |||
3439 | |||
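The two range-based-for patterns at the end of CheckSpacing above are meant to flag a ':' hugging either neighbour while leaving '::' qualifiers alone. A direct re.search demo on a few already-elided sample lines:

import re

for code in ('for (auto x : xs) {',
             'for (auto x: xs) {',
             'for (auto x : ns::xs) {'):
    bad = (re.search(r'for *\(.*[^:]:[^: ]', code) or
           re.search(r'for *\(.*[^: ]:[^:]', code))
    print('%-26s %s' % (code, 'missing space around :' if bad else 'ok'))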
3440 | def CheckOperatorSpacing(filename, clean_lines, linenum, error): |
|
3441 | """Checks for horizontal spacing around operators. |
||
3442 | |||
3443 | Args: |
||
3444 | filename: The name of the current file. |
||
3445 | clean_lines: A CleansedLines instance containing the file. |
||
3446 | linenum: The number of the line to check. |
||
3447 | error: The function to call with any errors found. |
||
3448 | """ |
||
3449 | line = clean_lines.elided[linenum] |
||
3450 | |||
3451 | # Don't try to do spacing checks for operator methods. Do this by |
||
3452 | # replacing the troublesome characters with something else, |
||
3453 | # preserving column position for all other characters. |
||
3454 | # |
||
3455 | # The replacement is done repeatedly to avoid false positives from |
||
3456 | # operators that call operators. |
||
3457 | while True: |
||
3458 | match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) |
||
3459 | if match: |
||
3460 | line = match.group(1) + ('_' * len(match.group(2))) + match.group(3) |
||
3461 | else: |
||
3462 | break |
||
3463 | |||
3464 | # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". |
||
3465 | # Otherwise not. Note we only check for non-spaces on *both* sides; |
||
3466 | # sometimes people put non-spaces on one side when aligning ='s among |
||
3467 | # many lines (not that this is behavior that I approve of...) |
||
3468 | if ((Search(r'[\w.]=', line) or |
||
3469 | Search(r'=[\w.]', line)) |
||
3470 | and not Search(r'\b(if|while|for) ', line) |
||
3471 | # Operators taken from [lex.operators] in C++11 standard. |
||
3472 | and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line) |
||
3473 | and not Search(r'operator=', line)): |
||
3474 | error(filename, linenum, 'whitespace/operators', 4, |
||
3475 | 'Missing spaces around =') |
||
3476 | |||
3477 | # It's ok not to have spaces around binary operators like + - * /, but if |
||
3478 | # there's too little whitespace, we get concerned. It's hard to tell, |
||
3479 | # though, so we punt on this one for now. TODO. |
||
3480 | |||
3481 | # You should always have whitespace around binary operators. |
||
3482 | # |
||
3483 | # Check <= and >= first to avoid false positives with < and >, then |
||
3484 | # check non-include lines for spacing around < and >. |
||
3485 | # |
||
3486 | # If the operator is followed by a comma, assume it's being used in a |
||
3487 | # macro context and don't do any checks. This avoids false |
||
3488 | # positives. |
||
3489 | # |
||
3490 | # Note that && is not included here. This is because there are too |
||
3491 | # many false positives due to RValue references. |
||
3492 | match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line) |
||
3493 | if match: |
||
3494 | error(filename, linenum, 'whitespace/operators', 3, |
||
3495 | 'Missing spaces around %s' % match.group(1)) |
||
3496 | elif not Match(r'#.*include', line): |
||
3497 | # Look for < that is not surrounded by spaces. This is only |
||
3498 | # triggered if both sides are missing spaces, even though |
||
3499 | # technically we should flag it if at least one side is missing a |
||
3500 | # space. This is done to avoid some false positives with shifts. |
||
3501 | match = Match(r'^(.*[^\s<])<[^\s=<,]', line) |
||
3502 | if match: |
||
3503 | (_, _, end_pos) = CloseExpression( |
||
3504 | clean_lines, linenum, len(match.group(1))) |
||
3505 | if end_pos <= -1: |
||
3506 | error(filename, linenum, 'whitespace/operators', 3, |
||
3507 | 'Missing spaces around <') |
||
3508 | |||
3509 | # Look for > that is not surrounded by spaces. Similar to the |
||
3510 | # above, we only trigger if both sides are missing spaces to avoid |
||
3511 | # false positives with shifts. |
||
3512 | match = Match(r'^(.*[^-\s>])>[^\s=>,]', line) |
||
3513 | if match: |
||
3514 | (_, _, start_pos) = ReverseCloseExpression( |
||
3515 | clean_lines, linenum, len(match.group(1))) |
||
3516 | if start_pos <= -1: |
||
3517 | error(filename, linenum, 'whitespace/operators', 3, |
||
3518 | 'Missing spaces around >') |
||
3519 | |||
3520 | # We allow no-spaces around << when used like this: 10<<20, but |
||
3521 | # not otherwise (particularly, not when used as streams) |
||
3522 | # |
||
3523 | # We also allow operators following an opening parenthesis, since |
||
3524 | # those tend to be macros that deal with operators. |
||
3525 | match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line) |
||
3526 | if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and |
||
3527 | not (match.group(1) == 'operator' and match.group(2) == ';')): |
||
3528 | error(filename, linenum, 'whitespace/operators', 3, |
||
3529 | 'Missing spaces around <<') |
||
3530 | |||
3531 | # We allow no-spaces around >> for almost anything. This is because |
||
3532 | # C++11 allows ">>" to close nested templates, which accounts for |
||
3533 | # most cases when ">>" is not followed by a space. |
||
3534 | # |
||
3535 | # We still warn on ">>" followed by alpha character, because that is |
||
3536 | # likely due to ">>" being used for right shifts, e.g.: |
||
3537 | # value >> alpha |
||
3538 | # |
||
3539 | # When ">>" is used to close templates, the alphanumeric letter that |
||
3540 | # follows would be part of an identifier, and there should still be |
||
3541 | # a space separating the template type and the identifier. |
||
3542 | # type<type<type>> alpha |
||
3543 | match = Search(r'>>[a-zA-Z_]', line) |
||
3544 | if match: |
||
3545 | error(filename, linenum, 'whitespace/operators', 3, |
||
3546 | 'Missing spaces around >>') |
||
3547 | |||
3548 | # There shouldn't be space around unary operators |
||
3549 | match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) |
||
3550 | if match: |
||
3551 | error(filename, linenum, 'whitespace/operators', 4, |
||
3552 | 'Extra space for operator %s' % match.group(1)) |
||
3553 | |||
3554 | |||
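The masking loop at the top of CheckOperatorSpacing above is a small but useful trick: the token after 'operator' is overwritten with underscores of the same length, so later column-based checks see a plain identifier instead of something like 'operator<<('. Below is a standalone sketch of just that step; the helper name is hypothetical.

import re

def mask_operator_tokens(line):
    while True:
        match = re.match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
        if not match:
            return line
        # Preserve column positions: same length, just underscores.
        line = match.group(1) + '_' * len(match.group(2)) + match.group(3)

print(mask_operator_tokens('ostream& operator<<(ostream& os, const Foo& f);'))
# ostream& operator__(ostream& os, const Foo& f);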
3555 | def CheckParenthesisSpacing(filename, clean_lines, linenum, error): |
|
3556 | """Checks for horizontal spacing around parentheses. |
||
3557 | |||
3558 | Args: |
||
3559 | filename: The name of the current file. |
||
3560 | clean_lines: A CleansedLines instance containing the file. |
||
3561 | linenum: The number of the line to check. |
||
3562 | error: The function to call with any errors found. |
||
3563 | """ |
||
3564 | line = clean_lines.elided[linenum] |
||
3565 | |||
3566 | # No spaces after an if, while, switch, or for |
||
3567 | match = Search(r' (if\(|for\(|while\(|switch\()', line) |
||
3568 | if match: |
||
3569 | error(filename, linenum, 'whitespace/parens', 5, |
||
3570 | 'Missing space before ( in %s' % match.group(1)) |
||
3571 | |||
3572 | # For if/for/while/switch, the left and right parens should be |
||
3573 | # consistent about how many spaces are inside the parens, and |
||
3574 | # there should either be zero or one spaces inside the parens. |
||
3575 | # We don't want: "if ( foo)" or "if ( foo )". |
||
3576 | # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. |
||
3577 | match = Search(r'\b(if|for|while|switch)\s*' |
||
3578 | r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', |
||
3579 | line) |
||
3580 | if match: |
||
3581 | if len(match.group(2)) != len(match.group(4)): |
||
3582 | if not (match.group(3) == ';' and |
||
3583 | len(match.group(2)) == 1 + len(match.group(4)) or |
||
3584 | not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)): |
||
3585 | error(filename, linenum, 'whitespace/parens', 5, |
||
3586 | 'Mismatching spaces inside () in %s' % match.group(1)) |
||
3587 | if len(match.group(2)) not in [0, 1]: |
||
3588 | error(filename, linenum, 'whitespace/parens', 5, |
||
3589 | 'Should have zero or one spaces inside ( and ) in %s' % |
||
3590 | match.group(1)) |
||
3591 | |||
3592 | |||
3593 | def CheckCommaSpacing(filename, clean_lines, linenum, error): |
|
3594 | """Checks for horizontal spacing near commas and semicolons. |
||
3595 | |||
3596 | Args: |
||
3597 | filename: The name of the current file. |
||
3598 | clean_lines: A CleansedLines instance containing the file. |
||
3599 | linenum: The number of the line to check. |
||
3600 | error: The function to call with any errors found. |
||
3601 | """ |
||
3602 | raw = clean_lines.lines_without_raw_strings |
||
3603 | line = clean_lines.elided[linenum] |
||
3604 | |||
3605 | # You should always have a space after a comma (either as fn arg or operator) |
||
3606 | # |
||
3607 | # This does not apply when the non-space character following the |
||
3608 | # comma is another comma, since the only time when that happens is |
||
3609 | # for empty macro arguments. |
||
3610 | # |
||
3611 | # We run this check in two passes: first pass on elided lines to |
||
3612 | # verify that lines contain missing whitespaces, second pass on raw |
||
3613 | # lines to confirm that those missing whitespaces are not due to |
||
3614 | # elided comments. |
||
3615 | if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and |
||
3616 | Search(r',[^,\s]', raw[linenum])): |
||
3617 | error(filename, linenum, 'whitespace/comma', 3, |
||
3618 | 'Missing space after ,') |
||
3619 | |||
3620 | # You should always have a space after a semicolon |
||
3621 | # except for few corner cases |
||
3622 | # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more |
||
3623 | # space after ; |
||
3624 | if Search(r';[^\s};\\)/]', line): |
||
3625 | error(filename, linenum, 'whitespace/semicolon', 3, |
||
3626 | 'Missing space after ;') |
||
3627 | |||
3628 | |||
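A quick look at the comma and semicolon patterns used in CheckCommaSpacing above, applied with plain re.search to already-elided sample lines (the real check additionally re-tests the raw line to rule out whitespace lost to elided comments):

import re

for code in ('f(a,b);', 'f(a, b);', 'for (i = 0;i < n; ++i)'):
    comma_bad = bool(re.search(r',[^,\s]', code))
    semi_bad = bool(re.search(r';[^\s};\\)/]', code))
    print('%-26s comma_bad=%-5s semi_bad=%s' % (code, comma_bad, semi_bad))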
3629 | def _IsType(clean_lines, nesting_state, expr): |
|
3630 | """Check if expression looks like a type name, returns true if so. |
||
3631 | |||
3632 | Args: |
||
3633 | clean_lines: A CleansedLines instance containing the file. |
||
3634 | nesting_state: A NestingState instance which maintains information about |
||
3635 | the current stack of nested blocks being parsed. |
||
3636 | expr: The expression to check. |
||
3637 | Returns: |
||
3638 | True, if token looks like a type. |
||
3639 | """ |
||
3640 | # Keep only the last token in the expression |
||
3641 | last_word = Match(r'^.*(\b\S+)$', expr) |
||
3642 | if last_word: |
||
3643 | token = last_word.group(1) |
||
3644 | else: |
||
3645 | token = expr |
||
3646 | |||
3647 | # Match native types and stdint types |
||
3648 | if _TYPES.match(token): |
||
3649 | return True |
||
3650 | |||
3651 | # Try a bit harder to match templated types. Walk up the nesting |
||
3652 | # stack until we find something that resembles a typename |
||
3653 | # declaration for what we are looking for. |
||
3654 | typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) + |
||
3655 | r'\b') |
||
3656 | block_index = len(nesting_state.stack) - 1 |
||
3657 | while block_index >= 0: |
||
3658 | if isinstance(nesting_state.stack[block_index], _NamespaceInfo): |
||
3659 | return False |
||
3660 | |||
3661 | # Found where the opening brace is. We want to scan from this |
||
3662 | # line up to the beginning of the function, minus a few lines. |
||
3663 | # template <typename Type1, // stop scanning here |
||
3664 | # ...> |
||
3665 | # class C |
||
3666 | # : public ... { // start scanning here |
||
3667 | last_line = nesting_state.stack[block_index].starting_linenum |
||
3668 | |||
3669 | next_block_start = 0 |
||
3670 | if block_index > 0: |
||
3671 | next_block_start = nesting_state.stack[block_index - 1].starting_linenum |
||
3672 | first_line = last_line |
||
3673 | while first_line >= next_block_start: |
||
3674 | if clean_lines.elided[first_line].find('template') >= 0: |
||
3675 | break |
||
3676 | first_line -= 1 |
||
3677 | if first_line < next_block_start: |
||
3678 | # Didn't find any "template" keyword before reaching the next block, |
||
3679 | # there are probably no template things to check for this block |
||
3680 | block_index -= 1 |
||
3681 | continue |
||
3682 | |||
3683 | # Look for typename in the specified range |
||
3684 | for i in xrange(first_line, last_line + 1, 1): |
||
3685 | if Search(typename_pattern, clean_lines.elided[i]): |
||
3686 | return True |
||
3687 | block_index -= 1 |
||
3688 | |||
3689 | return False |
||
3690 | |||
3691 | |||
3692 | def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error): |
|
3693 | """Checks for horizontal spacing near commas. |
||
3694 | |||
3695 | Args: |
||
3696 | filename: The name of the current file. |
||
3697 | clean_lines: A CleansedLines instance containing the file. |
||
3698 | linenum: The number of the line to check. |
||
3699 | nesting_state: A NestingState instance which maintains information about |
||
3700 | the current stack of nested blocks being parsed. |
||
3701 | error: The function to call with any errors found. |
||
3702 | """ |
||
3703 | line = clean_lines.elided[linenum] |
||
3704 | |||
3705 | # Except after an opening paren, or after another opening brace (in case of |
||
3706 | # an initializer list, for instance), you should have spaces before your |
||
3707 | # braces when they are delimiting blocks, classes, namespaces etc. |
||
3708 | # And since you should never have braces at the beginning of a line, |
||
3709 | # this is an easy test. Except that braces used for initialization don't |
||
3710 | # follow the same rule; we often don't want spaces before those. |
||
3711 | match = Match(r'^(.*[^ ({>]){', line) |
||
3712 | |||
3713 | if match: |
||
3714 | # Try a bit harder to check for brace initialization. This |
||
3715 | # happens in one of the following forms: |
||
3716 | # Constructor() : initializer_list_{} { ... } |
||
3717 | # Constructor{}.MemberFunction() |
||
3718 | # Type variable{}; |
||
3719 | # FunctionCall(type{}, ...); |
||
3720 | # LastArgument(..., type{}); |
||
3721 | # LOG(INFO) << type{} << " ..."; |
||
3722 | # map_of_type[{...}] = ...; |
||
3723 | # ternary = expr ? new type{} : nullptr; |
||
3724 | # OuterTemplate<InnerTemplateConstructor<Type>{}> |
||
3725 | # |
||
3726 | # We check for the character following the closing brace, and |
||
3727 | # silence the warning if it's one of those listed above, i.e. |
||
3728 | # "{.;,)<>]:". |
||
3729 | # |
||
3730 | # To account for nested initializer list, we allow any number of |
||
3731 | # closing braces up to "{;,)<". We can't simply silence the |
||
3732 | # warning on first sight of closing brace, because that would |
||
3733 | # cause false negatives for things that are not initializer lists. |
||
3734 | # Silence this: But not this: |
||
3735 | # Outer{ if (...) { |
||
3736 | # Inner{...} if (...){ // Missing space before { |
||
3737 | # }; } |
||
3738 | # |
||
3739 | # There is a false negative with this approach if people inserted |
||
3740 | # spurious semicolons, e.g. "if (cond){};", but we will catch the |
||
3741 | # spurious semicolon with a separate check. |
||
3742 | leading_text = match.group(1) |
||
3743 | (endline, endlinenum, endpos) = CloseExpression( |
||
3744 | clean_lines, linenum, len(match.group(1))) |
||
3745 | trailing_text = '' |
||
3746 | if endpos > -1: |
||
3747 | trailing_text = endline[endpos:] |
||
3748 | for offset in xrange(endlinenum + 1, |
||
3749 | min(endlinenum + 3, clean_lines.NumLines() - 1)): |
||
3750 | trailing_text += clean_lines.elided[offset] |
||
3751 | # We also suppress warnings for `uint64_t{expression}` etc., as the style |
||
3752 | # guide recommends brace initialization for integral types to avoid |
||
3753 | # overflow/truncation. |
||
3754 | if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text) |
||
3755 | and not _IsType(clean_lines, nesting_state, leading_text)): |
||
3756 | error(filename, linenum, 'whitespace/braces', 5, |
||
3757 | 'Missing space before {') |
||
3758 | |||
3759 | # Make sure '} else {' has spaces. |
||
3760 | if Search(r'}else', line): |
||
3761 | error(filename, linenum, 'whitespace/braces', 5, |
||
3762 | 'Missing space before else') |
||
3763 | |||
3764 | # You shouldn't have a space before a semicolon at the end of the line. |
||
3765 | # There's a special case for "for" since the style guide allows space before |
||
3766 | # the semicolon there. |
||
3767 | if Search(r':\s*;\s*$', line): |
||
3768 | error(filename, linenum, 'whitespace/semicolon', 5, |
||
3769 | 'Semicolon defining empty statement. Use {} instead.') |
||
3770 | elif Search(r'^\s*;\s*$', line): |
||
3771 | error(filename, linenum, 'whitespace/semicolon', 5, |
||
3772 | 'Line contains only semicolon. If this should be an empty statement, ' |
||
3773 | 'use {} instead.') |
||
3774 | elif (Search(r'\s+;\s*$', line) and |
||
3775 | not Search(r'\bfor\b', line)): |
||
3776 | error(filename, linenum, 'whitespace/semicolon', 5, |
||
3777 | 'Extra space before last semicolon. If this should be an empty ' |
||
3778 | 'statement, use {} instead.') |
||
3779 | |||
3780 | |||
3781 | def IsDecltype(clean_lines, linenum, column): |
|
3782 | """Check if the token ending on (linenum, column) is decltype(). |
||
3783 | |||
3784 | Args: |
||
3785 | clean_lines: A CleansedLines instance containing the file. |
||
3786 | linenum: the number of the line to check. |
||
3787 | column: end column of the token to check. |
||
3788 | Returns: |
||
3789 | True if this token is decltype() expression, False otherwise. |
||
3790 | """ |
||
3791 | (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column) |
||
3792 | if start_col < 0: |
||
3793 | return False |
||
3794 | if Search(r'\bdecltype\s*$', text[0:start_col]): |
||
3795 | return True |
||
3796 | return False |
||
3797 | |||
3798 | def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): |
|
3799 | """Checks for additional blank line issues related to sections. |
||
3800 | |||
3801 | Currently the only thing checked here is blank line before protected/private. |
||
3802 | |||
3803 | Args: |
||
3804 | filename: The name of the current file. |
||
3805 | clean_lines: A CleansedLines instance containing the file. |
||
3806 | class_info: A _ClassInfo objects. |
||
3807 | linenum: The number of the line to check. |
||
3808 | error: The function to call with any errors found. |
||
3809 | """ |
||
3810 | # Skip checks if the class is small, where small means 25 lines or less. |
||
3811 | # 25 lines seems like a good cutoff since that's the usual height of |
||
3812 | # terminals, and any class that can't fit in one screen can't really |
||
3813 | # be considered "small". |
||
3814 | # |
||
3815 | # Also skip checks if we are on the first line. This accounts for |
||
3816 | # classes that look like |
||
3817 | # class Foo { public: ... }; |
||
3818 | # |
||
3819 | # If we didn't find the end of the class, last_line would be zero, |
||
3820 | # and the check will be skipped by the first condition. |
||
3821 | if (class_info.last_line - class_info.starting_linenum <= 24 or |
||
3822 | linenum <= class_info.starting_linenum): |
||
3823 | return |
||
3824 | |||
3825 | matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) |
||
3826 | if matched: |
||
3827 | # Issue warning if the line before public/protected/private was |
||
3828 | # not a blank line, but don't do this if the previous line contains |
||
3829 | # "class" or "struct". This can happen two ways: |
||
3830 | # - We are at the beginning of the class. |
||
3831 | # - We are forward-declaring an inner class that is semantically |
||
3832 | # private, but needed to be public for implementation reasons. |
||
3833 | # Also ignores cases where the previous line ends with a backslash as can be |
||
3834 | # common when defining classes in C macros. |
||
3835 | prev_line = clean_lines.lines[linenum - 1] |
||
3836 | if (not IsBlankLine(prev_line) and |
||
3837 | not Search(r'\b(class|struct)\b', prev_line) and |
||
3838 | not Search(r'\\$', prev_line)): |
||
3839 | # Try a bit harder to find the beginning of the class. This is to |
||
3840 | # account for multi-line base-specifier lists, e.g.: |
||
3841 | # class Derived |
||
3842 | # : public Base { |
||
3843 | end_class_head = class_info.starting_linenum |
||
3844 | for i in range(class_info.starting_linenum, linenum): |
||
3845 | if Search(r'\{\s*$', clean_lines.lines[i]): |
||
3846 | end_class_head = i |
||
3847 | break |
||
3848 | if end_class_head < linenum - 1: |
||
3849 | error(filename, linenum, 'whitespace/blank_line', 3, |
||
3850 | '"%s:" should be preceded by a blank line' % matched.group(1)) |
||
3851 | |||
3852 | |||
3853 | def GetPreviousNonBlankLine(clean_lines, linenum): |
||
3854 | """Return the most recent non-blank line and its line number. |
||
3855 | |||
3856 | Args: |
||
3857 | clean_lines: A CleansedLines instance containing the file contents. |
||
3858 | linenum: The number of the line to check. |
||
3859 | |||
3860 | Returns: |
||
3861 | A tuple with two elements. The first element is the contents of the last |
||
3862 | non-blank line before the current line, or the empty string if this is the |
||
3863 | first non-blank line. The second is the line number of that line, or -1 |
||
3864 | if this is the first non-blank line. |
||
3865 | """ |
||
3866 | |||
3867 | prevlinenum = linenum - 1 |
||
3868 | while prevlinenum >= 0: |
||
3869 | prevline = clean_lines.elided[prevlinenum] |
||
3870 | if not IsBlankLine(prevline): # if not a blank line... |
||
3871 | return (prevline, prevlinenum) |
||
3872 | prevlinenum -= 1 |
||
3873 | return ('', -1) |
||
3874 | |||
3875 | |||
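A minimal hypothetical sketch, assuming a CleansedLines instance built directly from a short list of lines: blank and whitespace-only lines are skipped, and ('', -1) is returned when nothing precedes the given line.

    >>> GetPreviousNonBlankLine(CleansedLines(['int a = 1;', '', '   ', 'int b = a;']), 3)
    ('int a = 1;', 0)
    >>> GetPreviousNonBlankLine(CleansedLines(['int a = 1;']), 0)
    ('', -1)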
3876 | def CheckBraces(filename, clean_lines, linenum, error): |
||
3877 | """Looks for misplaced braces (e.g. at the end of line). |
||
3878 | |||
3879 | Args: |
||
3880 | filename: The name of the current file. |
||
3881 | clean_lines: A CleansedLines instance containing the file. |
||
3882 | linenum: The number of the line to check. |
||
3883 | error: The function to call with any errors found. |
||
3884 | """ |
||
3885 | |||
3886 | line = clean_lines.elided[linenum] # get rid of comments and strings |
||
3887 | |||
3888 | if Match(r'\s*{\s*$', line): |
||
3889 | # We allow an open brace to start a line in the case where someone is using |
||
3890 | # braces in a block to explicitly create a new scope, which is commonly used |
||
3891 | # to control the lifetime of stack-allocated variables. Braces are also |
||
3892 | # used for brace initializers inside function calls. We don't detect this |
||
3893 | # perfectly: we just don't complain if the last non-whitespace character on |
||
3894 | # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the |
||
3895 | # previous line starts a preprocessor block. We also allow a brace on the |
||
3896 | # following line if it is part of an array initialization and would not fit |
||
3897 | # within the 80 character limit of the preceding line. |
||
3898 | prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] |
||
3899 | if (not Search(r'[,;:}{(]\s*$', prevline) and |
||
3900 | not Match(r'\s*#', prevline) and |
||
3901 | not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)): |
||
3902 | error(filename, linenum, 'whitespace/braces', 4, |
||
3903 | '{ should almost always be at the end of the previous line') |
||
3904 | |||
3905 | # An else clause should be on the same line as the preceding closing brace. |
||
3906 | if Match(r'\s*else\b\s*(?:if\b|\{|$)', line): |
||
3907 | prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] |
||
3908 | if Match(r'\s*}\s*$', prevline): |
||
3909 | error(filename, linenum, 'whitespace/newline', 4, |
||
3910 | 'An else should appear on the same line as the preceding }') |
||
3911 | |||
3912 | # If braces come on one side of an else, they should be on both. |
||
3913 | # However, we have to worry about "else if" that spans multiple lines! |
||
3914 | if Search(r'else if\s*\(', line): # could be multi-line if |
||
3915 | brace_on_left = bool(Search(r'}\s*else if\s*\(', line)) |
||
3916 | # find the ( after the if |
||
3917 | pos = line.find('else if') |
||
3918 | pos = line.find('(', pos) |
||
3919 | if pos > 0: |
||
3920 | (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) |
||
3921 | brace_on_right = endline[endpos:].find('{') != -1 |
||
3922 | if brace_on_left != brace_on_right: # must be brace after if |
||
3923 | error(filename, linenum, 'readability/braces', 5, |
||
3924 | 'If an else has a brace on one side, it should have it on both') |
||
3925 | elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): |
||
3926 | error(filename, linenum, 'readability/braces', 5, |
||
3927 | 'If an else has a brace on one side, it should have it on both') |
||
3928 | |||
3929 | # Likewise, an else should never have the else clause on the same line |
||
3930 | if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): |
||
3931 | error(filename, linenum, 'whitespace/newline', 4, |
||
3932 | 'Else clause should never be on same line as else (use 2 lines)') |
||
3933 | |||
3934 | # In the same way, a do/while should never be on one line |
||
3935 | if Match(r'\s*do [^\s{]', line): |
||
3936 | error(filename, linenum, 'whitespace/newline', 4, |
||
3937 | 'do/while clauses should not be on a single line') |
||
3938 | |||
3939 | # Check single-line if/else bodies. The style guide says 'curly braces are not |
||
3940 | # required for single-line statements'. We additionally allow multi-line, |
||
3941 | # single statements, but we reject anything with more than one semicolon in |
||
3942 | # it. This means that the first semicolon after the if should be at the end of |
||
3943 | # its line, and the line after that should have an indent level equal to or |
||
3944 | # lower than the if. We also check for ambiguous if/else nesting without |
||
3945 | # braces. |
||
3946 | if_else_match = Search(r'\b(if\s*\(|else\b)', line) |
||
3947 | if if_else_match and not Match(r'\s*#', line): |
||
3948 | if_indent = GetIndentLevel(line) |
||
3949 | endline, endlinenum, endpos = line, linenum, if_else_match.end() |
||
3950 | if_match = Search(r'\bif\s*\(', line) |
||
3951 | if if_match: |
||
3952 | # This could be a multiline if condition, so find the end first. |
||
3953 | pos = if_match.end() - 1 |
||
3954 | (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) |
||
3955 | # Check for an opening brace, either directly after the if or on the next |
||
3956 | # line. If found, this isn't a single-statement conditional. |
||
3957 | if (not Match(r'\s*{', endline[endpos:]) |
||
3958 | and not (Match(r'\s*$', endline[endpos:]) |
||
3959 | and endlinenum < (len(clean_lines.elided) - 1) |
||
3960 | and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))): |
||
3961 | while (endlinenum < len(clean_lines.elided) |
||
3962 | and ';' not in clean_lines.elided[endlinenum][endpos:]): |
||
3963 | endlinenum += 1 |
||
3964 | endpos = 0 |
||
3965 | if endlinenum < len(clean_lines.elided): |
||
3966 | endline = clean_lines.elided[endlinenum] |
||
3967 | # We allow a mix of whitespace and closing braces (e.g. for one-liner |
||
3968 | # methods) and a single \ after the semicolon (for macros) |
||
3969 | endpos = endline.find(';') |
||
3970 | if not Match(r';[\s}]*(\\?)$', endline[endpos:]): |
||
3971 | # Semicolon isn't the last character, there's something trailing. |
||
3972 | # Output a warning if the semicolon is not contained inside |
||
3973 | # a lambda expression. |
||
3974 | if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$', |
||
3975 | endline): |
||
3976 | error(filename, linenum, 'readability/braces', 4, |
||
3977 | 'If/else bodies with multiple statements require braces') |
||
3978 | elif endlinenum < len(clean_lines.elided) - 1: |
||
3979 | # Make sure the next line is dedented |
||
3980 | next_line = clean_lines.elided[endlinenum + 1] |
||
3981 | next_indent = GetIndentLevel(next_line) |
||
3982 | # With ambiguous nested if statements, this will error out on the |
||
3983 | # if that *doesn't* match the else, regardless of whether it's the |
||
3984 | # inner one or outer one. |
||
3985 | if (if_match and Match(r'\s*else\b', next_line) |
||
3986 | and next_indent != if_indent): |
||
3987 | error(filename, linenum, 'readability/braces', 4, |
||
3988 | 'Else clause should be indented at the same level as if. ' |
||
3989 | 'Ambiguous nested if/else chains require braces.') |
||
3990 | elif next_indent > if_indent: |
||
3991 | error(filename, linenum, 'readability/braces', 4, |
||
3992 | 'If/else bodies with multiple statements require braces') |
||
3993 | |||
3994 | |||
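A rough usage sketch (the file name, the error callback, and the `caught` list are hypothetical, not taken from cpplint's own tests): an opening brace on its own line after a complete condition should be flagged.

    caught = []
    CheckBraces('demo.cc', CleansedLines(['if (lock.TryAcquire())', '{']), 1,
                lambda fname, num, category, confidence, msg: caught.append(msg))
    # caught should now contain the warning that '{' should almost always be
    # at the end of the previous line.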
3995 | def CheckTrailingSemicolon(filename, clean_lines, linenum, error): |
||
3996 | """Looks for redundant trailing semicolon. |
||
3997 | |||
3998 | Args: |
||
3999 | filename: The name of the current file. |
||
4000 | clean_lines: A CleansedLines instance containing the file. |
||
4001 | linenum: The number of the line to check. |
||
4002 | error: The function to call with any errors found. |
||
4003 | """ |
||
4004 | |||
4005 | line = clean_lines.elided[linenum] |
||
4006 | |||
4007 | # Block bodies should not be followed by a semicolon. Due to C++11 |
||
4008 | # brace initialization, there are more places where semicolons are |
||
4009 | # required than not, so we use a whitelist approach to check these |
||
4010 | # rather than a blacklist. These are the places where "};" should |
||
4011 | # be replaced by just "}": |
||
4012 | # 1. Some flavor of block following closing parenthesis: |
||
4013 | # for (;;) {}; |
||
4014 | # while (...) {}; |
||
4015 | # switch (...) {}; |
||
4016 | # Function(...) {}; |
||
4017 | # if (...) {}; |
||
4018 | # if (...) else if (...) {}; |
||
4019 | # |
||
4020 | # 2. else block: |
||
4021 | # if (...) else {}; |
||
4022 | # |
||
4023 | # 3. const member function: |
||
4024 | # Function(...) const {}; |
||
4025 | # |
||
4026 | # 4. Block following some statement: |
||
4027 | # x = 42; |
||
4028 | # {}; |
||
4029 | # |
||
4030 | # 5. Block at the beginning of a function: |
||
4031 | # Function(...) { |
||
4032 | # {}; |
||
4033 | # } |
||
4034 | # |
||
4035 | # Note that naively checking for the preceding "{" will also match |
||
4036 | # braces inside multi-dimensional arrays, but this is fine since |
||
4037 | # that expression will not contain semicolons. |
||
4038 | # |
||
4039 | # 6. Block following another block: |
||
4040 | # while (true) {} |
||
4041 | # {}; |
||
4042 | # |
||
4043 | # 7. End of namespaces: |
||
4044 | # namespace {}; |
||
4045 | # |
||
4046 | # These semicolons seem far more common than other kinds of |
||
4047 | # redundant semicolons, possibly due to people converting classes |
||
4048 | # to namespaces. For now we do not warn for this case. |
||
4049 | # |
||
4050 | # Try matching case 1 first. |
||
4051 | match = Match(r'^(.*\)\s*)\{', line) |
||
4052 | if match: |
||
4053 | # Matched closing parenthesis (case 1). Check the token before the |
||
4054 | # matching opening parenthesis, and don't warn if it looks like a |
||
4055 | # macro. This avoids these false positives: |
||
4056 | # - macro that defines a base class |
||
4057 | # - multi-line macro that defines a base class |
||
4058 | # - macro that defines the whole class-head |
||
4059 | # |
||
4060 | # But we still issue warnings for macros that we know are safe to |
||
4061 | # warn, specifically: |
||
4062 | # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P |
||
4063 | # - TYPED_TEST |
||
4064 | # - INTERFACE_DEF |
||
4065 | # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: |
||
4066 | # |
||
4067 | # We implement a whitelist of safe macros instead of a blacklist of |
||
4068 | # unsafe macros, even though the latter appears less frequently in |
||
4069 | # google code and would have been easier to implement. This is because |
||
4070 | # the downside for getting the whitelist wrong means some extra |
||
4071 | # semicolons, while the downside for getting the blacklist wrong |
||
4072 | # would result in compile errors. |
||
4073 | # |
||
4074 | # In addition to macros, we also don't want to warn on |
||
4075 | # - Compound literals |
||
4076 | # - Lambdas |
||
4077 | # - alignas specifier with anonymous structs |
||
4078 | # - decltype |
||
4079 | closing_brace_pos = match.group(1).rfind(')') |
||
4080 | opening_parenthesis = ReverseCloseExpression( |
||
4081 | clean_lines, linenum, closing_brace_pos) |
||
4082 | if opening_parenthesis[2] > -1: |
||
4083 | line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] |
||
4084 | macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix) |
||
4085 | func = Match(r'^(.*\])\s*$', line_prefix) |
||
4086 | if ((macro and |
||
4087 | macro.group(1) not in ( |
||
4088 | 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', |
||
4089 | 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', |
||
4090 | 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or |
||
4091 | (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or |
||
4092 | Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or |
||
4093 | Search(r'\bdecltype$', line_prefix) or |
||
4094 | Search(r'\s+=\s*$', line_prefix)): |
||
4095 | match = None |
||
4096 | if (match and |
||
4097 | opening_parenthesis[1] > 1 and |
||
4098 | Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): |
||
4099 | # Multi-line lambda-expression |
||
4100 | match = None |
||
4101 | |||
4102 | else: |
||
4103 | # Try matching cases 2-3. |
||
4104 | match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) |
||
4105 | if not match: |
||
4106 | # Try matching cases 4-6. These are always matched on separate lines. |
||
4107 | # |
||
4108 | # Note that we can't simply concatenate the previous line to the |
||
4109 | # current line and do a single match, otherwise we may output |
||
4110 | # duplicate warnings for the blank line case: |
||
4111 | # if (cond) { |
||
4112 | # // blank line |
||
4113 | # } |
||
4114 | prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] |
||
4115 | if prevline and Search(r'[;{}]\s*$', prevline): |
||
4116 | match = Match(r'^(\s*)\{', line) |
||
4117 | |||
4118 | # Check matching closing brace |
||
4119 | if match: |
||
4120 | (endline, endlinenum, endpos) = CloseExpression( |
||
4121 | clean_lines, linenum, len(match.group(1))) |
||
4122 | if endpos > -1 and Match(r'^\s*;', endline[endpos:]): |
||
4123 | # Current {} pair is eligible for semicolon check, and we have found |
||
4124 | # the redundant semicolon, output warning here. |
||
4125 | # |
||
4126 | # Note: because we are scanning forward for opening braces, and |
||
4127 | # outputting warnings for the matching closing brace, if there are |
||
4128 | # nested blocks with trailing semicolons, we will get the error |
||
4129 | # messages in reversed order. |
||
4130 | |||
4131 | # We need to check the line forward for NOLINT |
||
4132 | raw_lines = clean_lines.raw_lines |
||
4133 | ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1, |
||
4134 | error) |
||
4135 | ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum, |
||
4136 | error) |
||
4137 | |||
4138 | error(filename, endlinenum, 'readability/braces', 4, |
||
4139 | "You don't need a ; after a }") |
||
4140 | |||
4141 | |||
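A rough usage sketch (hypothetical file name and callback) exercising case 1 from the list above, a block following a closing parenthesis:

    caught = []
    CheckTrailingSemicolon('demo.cc', CleansedLines(['for (;;) {};']), 0,
                           lambda fname, num, category, confidence, msg: caught.append(msg))
    # caught should now contain "You don't need a ; after a }".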
4142 | def CheckEmptyBlockBody(filename, clean_lines, linenum, error): |
||
4143 | """Look for empty loop/conditional body with only a single semicolon. |
||
4144 | |||
4145 | Args: |
||
4146 | filename: The name of the current file. |
||
4147 | clean_lines: A CleansedLines instance containing the file. |
||
4148 | linenum: The number of the line to check. |
||
4149 | error: The function to call with any errors found. |
||
4150 | """ |
||
4151 | |||
4152 | # Search for loop keywords at the beginning of the line. Because only |
||
4153 | # whitespaces are allowed before the keywords, this will also ignore most |
||
4154 | # do-while-loops, since those lines should start with closing brace. |
||
4155 | # |
||
4156 | # We also check "if" blocks here, since an empty conditional block |
||
4157 | # is likely an error. |
||
4158 | line = clean_lines.elided[linenum] |
||
4159 | matched = Match(r'\s*(for|while|if)\s*\(', line) |
||
4160 | if matched: |
||
4161 | # Find the end of the conditional expression. |
||
4162 | (end_line, end_linenum, end_pos) = CloseExpression( |
||
4163 | clean_lines, linenum, line.find('(')) |
||
4164 | |||
4165 | # Output warning if what follows the condition expression is a semicolon. |
||
4166 | # No warning for all other cases, including whitespace or newline, since we |
||
4167 | # have a separate check for semicolons preceded by whitespace. |
||
4168 | if end_pos >= 0 and Match(r';', end_line[end_pos:]): |
||
4169 | if matched.group(1) == 'if': |
||
4170 | error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, |
||
4171 | 'Empty conditional bodies should use {}') |
||
4172 | else: |
||
4173 | error(filename, end_linenum, 'whitespace/empty_loop_body', 5, |
||
4174 | 'Empty loop bodies should use {} or continue') |
||
4175 | |||
4176 | # Check for if statements that have completely empty bodies (no comments) |
||
4177 | # and no else clauses. |
||
4178 | if end_pos >= 0 and matched.group(1) == 'if': |
||
4179 | # Find the position of the opening { for the if statement. |
||
4180 | # Return without logging an error if it has no brackets. |
||
4181 | opening_linenum = end_linenum |
||
4182 | opening_line_fragment = end_line[end_pos:] |
||
4183 | # Loop until EOF or find anything that's not whitespace or opening {. |
||
4184 | while not Search(r'^\s*\{', opening_line_fragment): |
||
4185 | if Search(r'^(?!\s*$)', opening_line_fragment): |
||
4186 | # Conditional has no brackets. |
||
4187 | return |
||
4188 | opening_linenum += 1 |
||
4189 | if opening_linenum == len(clean_lines.elided): |
||
4190 | # Couldn't find conditional's opening { or any code before EOF. |
||
4191 | return |
||
4192 | opening_line_fragment = clean_lines.elided[opening_linenum] |
||
4193 | # Set opening_line (opening_line_fragment may not be entire opening line). |
||
4194 | opening_line = clean_lines.elided[opening_linenum] |
||
4195 | |||
4196 | # Find the position of the closing }. |
||
4197 | opening_pos = opening_line_fragment.find('{') |
||
4198 | if opening_linenum == end_linenum: |
||
4199 | # We need to make opening_pos relative to the start of the entire line. |
||
4200 | opening_pos += end_pos |
||
4201 | (closing_line, closing_linenum, closing_pos) = CloseExpression( |
||
4202 | clean_lines, opening_linenum, opening_pos) |
||
4203 | if closing_pos < 0: |
||
4204 | return |
||
4205 | |||
4206 | # Now construct the body of the conditional. This consists of the portion |
||
4207 | # of the opening line after the {, all lines until the closing line, |
||
4208 | # and the portion of the closing line before the }. |
||
4209 | if (clean_lines.raw_lines[opening_linenum] != |
||
4210 | CleanseComments(clean_lines.raw_lines[opening_linenum])): |
||
4211 | # Opening line ends with a comment, so conditional isn't empty. |
||
4212 | return |
||
4213 | if closing_linenum > opening_linenum: |
||
4214 | # Opening line after the {. Ignore comments here since we checked above. |
||
4215 | bodylist = list(opening_line[opening_pos+1:]) |
||
4216 | # All lines until closing line, excluding closing line, with comments. |
||
4217 | bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum]) |
||
4218 | # Closing line before the }. Won't (and can't) have comments. |
||
4219 | bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1]) |
||
4220 | body = '\n'.join(bodylist) |
||
4221 | else: |
||
4222 | # If statement has brackets and fits on a single line. |
||
4223 | body = opening_line[opening_pos+1:closing_pos-1] |
||
4224 | |||
4225 | # Check if the body is empty |
||
4226 | if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body): |
||
4227 | return |
||
4228 | # The body is empty. Now make sure there's not an else clause. |
||
4229 | current_linenum = closing_linenum |
||
4230 | current_line_fragment = closing_line[closing_pos:] |
||
4231 | # Loop until EOF or find anything that's not whitespace or else clause. |
||
4232 | while Search(r'^\s*$|^(?=\s*else)', current_line_fragment): |
||
4233 | if Search(r'^(?=\s*else)', current_line_fragment): |
||
4234 | # Found an else clause, so don't log an error. |
||
4235 | return |
||
4236 | current_linenum += 1 |
||
4237 | if current_linenum == len(clean_lines.elided): |
||
4238 | break |
||
4239 | current_line_fragment = clean_lines.elided[current_linenum] |
||
4240 | |||
4241 | # The body is empty and there's no else clause until EOF or other code. |
||
4242 | error(filename, end_linenum, 'whitespace/empty_if_body', 4, |
||
4243 | ('If statement had no body and no else clause')) |
||
4244 | |||
4245 | |||
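A rough usage sketch (hypothetical file name and callback): a loop whose entire body is a single semicolon should be reported.

    caught = []
    CheckEmptyBlockBody('demo.cc', CleansedLines(['while (pending());']), 0,
                        lambda fname, num, category, confidence, msg: caught.append(msg))
    # caught should now mention that empty loop bodies should use {} or continue.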
4246 | def FindCheckMacro(line): |
||
4247 | """Find a replaceable CHECK-like macro. |
||
4248 | |||
4249 | Args: |
||
4250 | line: line to search on. |
||
4251 | Returns: |
||
4252 | (macro name, start position), or (None, -1) if no replaceable |
||
4253 | macro is found. |
||
4254 | """ |
||
4255 | for macro in _CHECK_MACROS: |
||
4256 | i = line.find(macro) |
||
4257 | if i >= 0: |
||
4258 | # Find opening parenthesis. Do a regular expression match here |
||
4259 | # to make sure that we are matching the expected CHECK macro, as |
||
4260 | # opposed to some other macro that happens to contain the CHECK |
||
4261 | # substring. |
||
4262 | matched = Match(r'^(.*\b' + macro + r'\s*)\(', line) |
||
4263 | if not matched: |
||
4264 | continue |
||
4265 | return (macro, len(matched.group(1))) |
||
4266 | return (None, -1) |
||
4267 | |||
4268 | |||
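A hypothetical doctest-style illustration: the position returned is the column of the opening parenthesis, and a macro that merely contains "CHECK" as a substring is not treated as replaceable.

    >>> FindCheckMacro('  CHECK(x == 42);')
    ('CHECK', 7)
    >>> FindCheckMacro('RUNTIME_CHECK(x);')
    (None, -1)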
4269 | def CheckCheck(filename, clean_lines, linenum, error): |
||
4270 | """Checks the use of CHECK and EXPECT macros. |
||
4271 | |||
4272 | Args: |
||
4273 | filename: The name of the current file. |
||
4274 | clean_lines: A CleansedLines instance containing the file. |
||
4275 | linenum: The number of the line to check. |
||
4276 | error: The function to call with any errors found. |
||
4277 | """ |
||
4278 | |||
4279 | # Decide the set of replacement macros that should be suggested |
||
4280 | lines = clean_lines.elided |
||
4281 | (check_macro, start_pos) = FindCheckMacro(lines[linenum]) |
||
4282 | if not check_macro: |
||
4283 | return |
||
4284 | |||
4285 | # Find end of the boolean expression by matching parentheses |
||
4286 | (last_line, end_line, end_pos) = CloseExpression( |
||
4287 | clean_lines, linenum, start_pos) |
||
4288 | if end_pos < 0: |
||
4289 | return |
||
4290 | |||
4291 | # If the check macro is followed by something other than a |
||
4292 | # semicolon, assume users will log their own custom error messages |
||
4293 | # and don't suggest any replacements. |
||
4294 | if not Match(r'\s*;', last_line[end_pos:]): |
||
4295 | return |
||
4296 | |||
4297 | if linenum == end_line: |
||
4298 | expression = lines[linenum][start_pos + 1:end_pos - 1] |
||
4299 | else: |
||
4300 | expression = lines[linenum][start_pos + 1:] |
||
4301 | for i in xrange(linenum + 1, end_line): |
||
4302 | expression += lines[i] |
||
4303 | expression += last_line[0:end_pos - 1] |
||
4304 | |||
4305 | # Parse expression so that we can take parentheses into account. |
||
4306 | # This avoids false positives for inputs like "CHECK((a < 4) == b)", |
||
4307 | # which is not replaceable by CHECK_LE. |
||
4308 | lhs = '' |
||
4309 | rhs = '' |
||
4310 | operator = None |
||
4311 | while expression: |
||
4312 | matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' |
||
4313 | r'==|!=|>=|>|<=|<|\()(.*)$', expression) |
||
4314 | if matched: |
||
4315 | token = matched.group(1) |
||
4316 | if token == '(': |
||
4317 | # Parenthesized operand |
||
4318 | expression = matched.group(2) |
||
4319 | (end, _) = FindEndOfExpressionInLine(expression, 0, ['(']) |
||
4320 | if end < 0: |
||
4321 | return # Unmatched parenthesis |
||
4322 | lhs += '(' + expression[0:end] |
||
4323 | expression = expression[end:] |
||
4324 | elif token in ('&&', '||'): |
||
4325 | # Logical and/or operators. This means the expression |
||
4326 | # contains more than one term, for example: |
||
4327 | # CHECK(42 < a && a < b); |
||
4328 | # |
||
4329 | # These are not replaceable with CHECK_LE, so bail out early. |
||
4330 | return |
||
4331 | elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): |
||
4332 | # Non-relational operator |
||
4333 | lhs += token |
||
4334 | expression = matched.group(2) |
||
4335 | else: |
||
4336 | # Relational operator |
||
4337 | operator = token |
||
4338 | rhs = matched.group(2) |
||
4339 | break |
||
4340 | else: |
||
4341 | # Unparenthesized operand. Instead of appending to lhs one character |
||
4342 | # at a time, we do another regular expression match to consume several |
||
4343 | # characters at once if possible. Trivial benchmark shows that this |
||
4344 | # is more efficient when the operands are longer than a single |
||
4345 | # character, which is generally the case. |
||
4346 | matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) |
||
4347 | if not matched: |
||
4348 | matched = Match(r'^(\s*\S)(.*)$', expression) |
||
4349 | if not matched: |
||
4350 | break |
||
4351 | lhs += matched.group(1) |
||
4352 | expression = matched.group(2) |
||
4353 | |||
4354 | # Only apply checks if we got all parts of the boolean expression |
||
4355 | if not (lhs and operator and rhs): |
||
4356 | return |
||
4357 | |||
4358 | # Check that rhs does not contain logical operators. We already know |
||
4359 | # that lhs is fine since the loop above parses out && and ||. |
||
4360 | if rhs.find('&&') > -1 or rhs.find('||') > -1: |
||
4361 | return |
||
4362 | |||
4363 | # At least one of the operands must be a constant literal. This is |
||
4364 | # to avoid suggesting replacements for unprintable things like |
||
4365 | # CHECK(variable != iterator) |
||
4366 | # |
||
4367 | # The following pattern matches decimal, hex integers, strings, and |
||
4368 | # characters (in that order). |
||
4369 | lhs = lhs.strip() |
||
4370 | rhs = rhs.strip() |
||
4371 | match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' |
||
4372 | if Match(match_constant, lhs) or Match(match_constant, rhs): |
||
4373 | # Note: since we know both lhs and rhs, we can provide a more |
||
4374 | # descriptive error message like: |
||
4375 | # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) |
||
4376 | # Instead of: |
||
4377 | # Consider using CHECK_EQ instead of CHECK(a == b) |
||
4378 | # |
||
4379 | # We are still keeping the less descriptive message because if lhs |
||
4380 | # or rhs gets long, the error message might become unreadable. |
||
4381 | error(filename, linenum, 'readability/check', 2, |
||
4382 | 'Consider using %s instead of %s(a %s b)' % ( |
||
4383 | _CHECK_REPLACEMENT[check_macro][operator], |
||
4384 | check_macro, operator)) |
||
4385 | |||
4386 | |||
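A rough usage sketch (hypothetical file name and callback) of the suggestion produced when one operand is a constant literal:

    caught = []
    CheckCheck('demo.cc', CleansedLines(['CHECK(x == 42);']), 0,
               lambda fname, num, category, confidence, msg: caught.append(msg))
    # caught should now suggest the dedicated comparison macro, e.g.
    # 'Consider using CHECK_EQ instead of CHECK(a == b)'.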
4387 | def CheckAltTokens(filename, clean_lines, linenum, error): |
||
4388 | """Check alternative keywords being used in boolean expressions. |
||
4389 | |||
4390 | Args: |
||
4391 | filename: The name of the current file. |
||
4392 | clean_lines: A CleansedLines instance containing the file. |
||
4393 | linenum: The number of the line to check. |
||
4394 | error: The function to call with any errors found. |
||
4395 | """ |
||
4396 | line = clean_lines.elided[linenum] |
||
4397 | |||
4398 | # Avoid preprocessor lines |
||
4399 | if Match(r'^\s*#', line): |
||
4400 | return |
||
4401 | |||
4402 | # Last ditch effort to avoid multi-line comments. This will not help |
||
4403 | # if the comment started before the current line or ended after the |
||
4404 | # current line, but it catches most of the false positives. At least, |
||
4405 | # it provides a way to workaround this warning for people who use |
||
4406 | # multi-line comments in preprocessor macros. |
||
4407 | # |
||
4408 | # TODO(unknown): remove this once cpplint has better support for |
||
4409 | # multi-line comments. |
||
4410 | if line.find('/*') >= 0 or line.find('*/') >= 0: |
||
4411 | return |
||
4412 | |||
4413 | for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): |
||
4414 | error(filename, linenum, 'readability/alt_tokens', 2, |
||
4415 | 'Use operator %s instead of %s' % ( |
||
4416 | _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) |
||
4417 | |||
4418 | |||
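A rough usage sketch (hypothetical file name and callback):

    caught = []
    CheckAltTokens('demo.cc', CleansedLines(['if (a and not b) return;']), 0,
                   lambda fname, num, category, confidence, msg: caught.append(msg))
    # caught should now recommend '&&' in place of 'and' and '!' in place of 'not'.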
4419 | def GetLineWidth(line): |
||
4420 | """Determines the width of the line in column positions. |
||
4421 | |||
4422 | Args: |
||
4423 | line: A string, which may be a Unicode string. |
||
4424 | |||
4425 | Returns: |
||
4426 | The width of the line in column positions, accounting for Unicode |
||
4427 | combining characters and wide characters. |
||
4428 | """ |
||
4429 | if isinstance(line, unicode): |
||
4430 | width = 0 |
||
4431 | for uc in unicodedata.normalize('NFC', line): |
||
4432 | if unicodedata.east_asian_width(uc) in ('W', 'F'): |
||
4433 | width += 2 |
||
4434 | elif not unicodedata.combining(uc): |
||
4435 | width += 1 |
||
4436 | return width |
||
4437 | else: |
||
4438 | return len(line) |
||
4439 | |||
4440 | |||
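A doctest-style illustration (assuming Python 2, where the `unicode` type used above exists): full-width CJK characters count as two columns, and plain byte strings fall back to len().

    >>> GetLineWidth(u'foo')
    3
    >>> GetLineWidth(u'\u4e2d\u6587')
    4
    >>> GetLineWidth('plain ascii')
    11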
4441 | def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, |
||
4442 | error): |
||
4443 | """Checks rules from the 'C++ style rules' section of cppguide.html. |
||
4444 | |||
4445 | Most of these rules are hard to test (naming, comment style), but we |
||
4446 | do what we can. In particular we check for 2-space indents, line lengths, |
||
4447 | tab usage, spaces inside code, etc. |
||
4448 | |||
4449 | Args: |
||
4450 | filename: The name of the current file. |
||
4451 | clean_lines: A CleansedLines instance containing the file. |
||
4452 | linenum: The number of the line to check. |
||
4453 | file_extension: The extension (without the dot) of the filename. |
||
4454 | nesting_state: A NestingState instance which maintains information about |
||
4455 | the current stack of nested blocks being parsed. |
||
4456 | error: The function to call with any errors found. |
||
4457 | """ |
||
4458 | |||
4459 | # Don't use "elided" lines here, otherwise we can't check commented lines. |
||
4460 | # Don't want to use "raw" either, because we don't want to check inside C++11 |
||
4461 | # raw strings. |
||
4462 | raw_lines = clean_lines.lines_without_raw_strings |
||
4463 | line = raw_lines[linenum] |
||
4464 | prev = raw_lines[linenum - 1] if linenum > 0 else '' |
||
4465 | |||
4466 | if line.find('\t') != -1: |
||
4467 | error(filename, linenum, 'whitespace/tab', 1, |
||
4468 | 'Tab found; better to use spaces') |
||
4469 | |||
4470 | # One or three blank spaces at the beginning of the line is weird; it's |
||
4471 | # hard to reconcile that with 2-space indents. |
||
4472 | # NOTE: here are the conditions rob pike used for his tests. Mine aren't |
||
4473 | # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces |
||
4474 | # if(RLENGTH > 20) complain = 0; |
||
4475 | # if(match($0, " +(error|private|public|protected):")) complain = 0; |
||
4476 | # if(match(prev, "&& *$")) complain = 0; |
||
4477 | # if(match(prev, "\\|\\| *$")) complain = 0; |
||
4478 | # if(match(prev, "[\",=><] *$")) complain = 0; |
||
4479 | # if(match($0, " <<")) complain = 0; |
||
4480 | # if(match(prev, " +for \\(")) complain = 0; |
||
4481 | # if(prevodd && match(prevprev, " +for \\(")) complain = 0; |
||
4482 | scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$' |
||
4483 | classinfo = nesting_state.InnermostClass() |
||
4484 | initial_spaces = 0 |
||
4485 | cleansed_line = clean_lines.elided[linenum] |
||
4486 | while initial_spaces < len(line) and line[initial_spaces] == ' ': |
||
4487 | initial_spaces += 1 |
||
4488 | # There are certain situations we allow one space, notably for |
||
4489 | # section labels, and also lines containing multi-line raw strings. |
||
4490 | # We also don't check for lines that look like continuation lines |
||
4491 | # (of lines ending in double quotes, commas, equals, or angle brackets) |
||
4492 | # because the rules for how to indent those are non-trivial. |
||
4493 | if (not Search(r'[",=><] *$', prev) and |
||
4494 | (initial_spaces == 1 or initial_spaces == 3) and |
||
4495 | not Match(scope_or_label_pattern, cleansed_line) and |
||
4496 | not (clean_lines.raw_lines[linenum] != line and |
||
4497 | Match(r'^\s*""', line))): |
||
4498 | error(filename, linenum, 'whitespace/indent', 3, |
||
4499 | 'Weird number of spaces at line-start. ' |
||
4500 | 'Are you using a 2-space indent?') |
||
4501 | |||
4502 | if line and line[-1].isspace(): |
||
4503 | error(filename, linenum, 'whitespace/end_of_line', 4, |
||
4504 | 'Line ends in whitespace. Consider deleting these extra spaces.') |
||
4505 | |||
4506 | # Check if the line is a header guard. |
||
4507 | is_header_guard = False |
||
4508 | if file_extension in GetHeaderExtensions(): |
||
4509 | cppvar = GetHeaderGuardCPPVariable(filename) |
||
4510 | if (line.startswith('#ifndef %s' % cppvar) or |
||
4511 | line.startswith('#define %s' % cppvar) or |
||
4512 | line.startswith('#endif // %s' % cppvar)): |
||
4513 | is_header_guard = True |
||
4514 | # #include lines and header guards can be long, since there's no clean way to |
||
4515 | # split them. |
||
4516 | # |
||
4517 | # URLs can be long too. It's possible to split these, but it makes them |
||
4518 | # harder to cut&paste. |
||
4519 | # |
||
4520 | # The "$Id:...$" comment may also get very long without it being the |
||
4521 | # developer's fault. |
||
4522 | # |
||
4523 | # Doxygen documentation copying can get pretty long when using an overloaded |
||
4524 | # function declaration |
||
4525 | if (not line.startswith('#include') and not is_header_guard and |
||
4526 | not Match(r'^\s*//.*http(s?)://\S*$', line) and |
||
4527 | not Match(r'^\s*//\s*[^\s]*$', line) and |
||
4528 | not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and |
||
4529 | not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)): |
||
4530 | line_width = GetLineWidth(line) |
||
4531 | if line_width > _line_length: |
||
4532 | error(filename, linenum, 'whitespace/line_length', 2, |
||
4533 | 'Lines should be <= %i characters long' % _line_length) |
||
4534 | |||
4535 | if (cleansed_line.count(';') > 1 and |
||
4536 | # allow simple single line lambdas |
||
4537 | not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}', |
||
4538 | line) and |
||
4539 | # for loops are allowed two ;'s (and may run over two lines). |
||
4540 | cleansed_line.find('for') == -1 and |
||
4541 | (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or |
||
4542 | GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and |
||
4543 | # It's ok to have many commands in a switch case that fits in 1 line |
||
4544 | not ((cleansed_line.find('case ') != -1 or |
||
4545 | cleansed_line.find('default:') != -1) and |
||
4546 | cleansed_line.find('break;') != -1)): |
||
4547 | error(filename, linenum, 'whitespace/newline', 0, |
||
4548 | 'More than one command on the same line') |
||
4549 | |||
4550 | # Some more style checks |
||
4551 | CheckBraces(filename, clean_lines, linenum, error) |
||
4552 | CheckTrailingSemicolon(filename, clean_lines, linenum, error) |
||
4553 | CheckEmptyBlockBody(filename, clean_lines, linenum, error) |
||
4554 | CheckAccess(filename, clean_lines, linenum, nesting_state, error) |
||
4555 | CheckSpacing(filename, clean_lines, linenum, nesting_state, error) |
||
4556 | CheckOperatorSpacing(filename, clean_lines, linenum, error) |
||
4557 | CheckParenthesisSpacing(filename, clean_lines, linenum, error) |
||
4558 | CheckCommaSpacing(filename, clean_lines, linenum, error) |
||
4559 | CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error) |
||
4560 | CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) |
||
4561 | CheckCheck(filename, clean_lines, linenum, error) |
||
4562 | CheckAltTokens(filename, clean_lines, linenum, error) |
||
4563 | classinfo = nesting_state.InnermostClass() |
||
4564 | if classinfo: |
||
4565 | CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) |
||
4566 | |||
4567 | |||
4568 | _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') |
||
4569 | # Matches the first component of a filename delimited by -s and _s. That is: |
||
4570 | # _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' |
||
4571 | # _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' |
||
4572 | # _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' |
||
4573 | # _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' |
||
4574 | _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') |
||
4575 | |||
4576 | |||
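A doctest-style illustration of the two patterns above (the trailing comment in the first example is arbitrary):

    >>> _RE_PATTERN_INCLUDE.match('#include <vector>  // for vector').groups()
    ('<', 'vector')
    >>> _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0)
    'foo'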
4577 | def _DropCommonSuffixes(filename): |
||
4578 | """Drops common suffixes like _test.cc or -inl.h from filename. |
||
4579 | |||
4580 | For example: |
||
4581 | >>> _DropCommonSuffixes('foo/foo-inl.h') |
||
4582 | 'foo/foo' |
||
4583 | >>> _DropCommonSuffixes('foo/bar/foo.cc') |
||
4584 | 'foo/bar/foo' |
||
4585 | >>> _DropCommonSuffixes('foo/foo_internal.h') |
||
4586 | 'foo/foo' |
||
4587 | >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') |
||
4588 | 'foo/foo_unusualinternal' |
||
4589 | |||
4590 | Args: |
||
4591 | filename: The input filename. |
||
4592 | |||
4593 | Returns: |
||
4594 | The filename with the common suffix removed. |
||
4595 | """ |
||
4596 | for suffix in itertools.chain( |
||
4597 | ('%s.%s' % (test_suffix.lstrip('_'), ext) |
||
4598 | for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())), |
||
4599 | ('%s.%s' % (suffix, ext) |
||
4600 | for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))): |
||
4601 | if (filename.endswith(suffix) and len(filename) > len(suffix) and |
||
4602 | filename[-len(suffix) - 1] in ('-', '_')): |
||
4603 | return filename[:-len(suffix) - 1] |
||
4604 | return os.path.splitext(filename)[0] |
||
4605 | |||
4606 | |||
4607 | def _ClassifyInclude(fileinfo, include, is_system): |
||
4608 | """Figures out what kind of header 'include' is. |
||
4609 | |||
4610 | Args: |
||
4611 | fileinfo: The current file cpplint is running over. A FileInfo instance. |
||
4612 | include: The path to a #included file. |
||
4613 | is_system: True if the #include used <> rather than "". |
||
4614 | |||
4615 | Returns: |
||
4616 | One of the _XXX_HEADER constants. |
||
4617 | |||
4618 | For example: |
||
4619 | >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) |
||
4620 | _C_SYS_HEADER |
||
4621 | >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) |
||
4622 | _CPP_SYS_HEADER |
||
4623 | >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) |
||
4624 | _LIKELY_MY_HEADER |
||
4625 | >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), |
||
4626 | ... 'bar/foo_other_ext.h', False) |
||
4627 | _POSSIBLE_MY_HEADER |
||
4628 | >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) |
||
4629 | _OTHER_HEADER |
||
4630 | """ |
||
4631 | # This is a list of all standard c++ header files, except |
||
4632 | # those already checked for above. |
||
4633 | is_cpp_h = include in _CPP_HEADERS |
||
4634 | |||
4635 | # Headers with C++ extensions shouldn't be considered C system headers |
||
4636 | if is_system and os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']: |
||
4637 | is_system = False |
||
4638 | |||
4639 | if is_system: |
||
4640 | if is_cpp_h: |
||
4641 | return _CPP_SYS_HEADER |
||
4642 | else: |
||
4643 | return _C_SYS_HEADER |
||
4644 | |||
4645 | # If the target file and the include we're checking share a |
||
4646 | # basename when we drop common extensions, and the include |
||
4647 | # lives in . , then it's likely to be owned by the target file. |
||
4648 | target_dir, target_base = ( |
||
4649 | os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) |
||
4650 | include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) |
||
4651 | target_dir_pub = os.path.normpath(target_dir + '/../public') |
||
4652 | target_dir_pub = target_dir_pub.replace('\\', '/') |
||
4653 | if target_base == include_base and ( |
||
4654 | include_dir == target_dir or |
||
4655 | include_dir == target_dir_pub): |
||
4656 | return _LIKELY_MY_HEADER |
||
4657 | |||
4658 | # If the target and include share some initial basename |
||
4659 | # component, it's possible the target is implementing the |
||
4660 | # include, so it's allowed to be first, but we'll never |
||
4661 | # complain if it's not there. |
||
4662 | target_first_component = _RE_FIRST_COMPONENT.match(target_base) |
||
4663 | include_first_component = _RE_FIRST_COMPONENT.match(include_base) |
||
4664 | if (target_first_component and include_first_component and |
||
4665 | target_first_component.group(0) == |
||
4666 | include_first_component.group(0)): |
||
4667 | return _POSSIBLE_MY_HEADER |
||
4668 | |||
4669 | return _OTHER_HEADER |
||
4670 | |||
4671 | |||
4672 | |||
4673 | def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): |
||
4674 | """Check rules that are applicable to #include lines. |
||
4675 | |||
4676 | Strings on #include lines are NOT removed from the elided line, to make |
||
4677 | certain tasks easier. However, to prevent false positives, checks |
||
4678 | applicable to #include lines in CheckLanguage must be put here. |
||
4679 | |||
4680 | Args: |
||
4681 | filename: The name of the current file. |
||
4682 | clean_lines: A CleansedLines instance containing the file. |
||
4683 | linenum: The number of the line to check. |
||
4684 | include_state: An _IncludeState instance in which the headers are inserted. |
||
4685 | error: The function to call with any errors found. |
||
4686 | """ |
||
4687 | fileinfo = FileInfo(filename) |
||
4688 | line = clean_lines.lines[linenum] |
||
4689 | |||
4690 | # "include" should use the new style "foo/bar.h" instead of just "bar.h" |
||
4691 | # Only do this check if the included header follows google naming |
||
4692 | # conventions. If not, assume that it's a 3rd party API that |
||
4693 | # requires special include conventions. |
||
4694 | # |
||
4695 | # We also make an exception for Lua headers, which follow google |
||
4696 | # naming convention but not the include convention. |
||
4697 | match = Match(r'#include\s*"([^/]+\.h)"', line) |
||
4698 | if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)): |
||
4699 | error(filename, linenum, 'build/include_subdir', 4, |
||
4700 | 'Include the directory when naming .h files') |
||
4701 | |||
4702 | # we shouldn't include a file more than once. actually, there are a |
||
4703 | # handful of instances where doing so is okay, but in general it's |
||
4704 | # not. |
||
4705 | match = _RE_PATTERN_INCLUDE.search(line) |
||
4706 | if match: |
||
4707 | include = match.group(2) |
||
4708 | is_system = (match.group(1) == '<') |
||
4709 | duplicate_line = include_state.FindHeader(include) |
||
4710 | if duplicate_line >= 0: |
||
4711 | error(filename, linenum, 'build/include', 4, |
||
4712 | '"%s" already included at %s:%s' % |
||
4713 | (include, filename, duplicate_line)) |
||
4714 | return |
||
4715 | |||
4716 | for extension in GetNonHeaderExtensions(): |
||
4717 | if (include.endswith('.' + extension) and |
||
4718 | os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)): |
||
4719 | error(filename, linenum, 'build/include', 4, |
||
4720 | 'Do not include .' + extension + ' files from other packages') |
||
4721 | return |
||
4722 | |||
4723 | if not _THIRD_PARTY_HEADERS_PATTERN.match(include): |
||
4724 | include_state.include_list[-1].append((include, linenum)) |
||
4725 | |||
4726 | # We want to ensure that headers appear in the right order: |
||
4727 | # 1) for foo.cc, foo.h (preferred location) |
||
4728 | # 2) c system files |
||
4729 | # 3) cpp system files |
||
4730 | # 4) for foo.cc, foo.h (deprecated location) |
||
4731 | # 5) other google headers |
||
4732 | # |
||
4733 | # We classify each include statement as one of those 5 types |
||
4734 | # using a number of techniques. The include_state object keeps |
||
4735 | # track of the highest type seen, and complains if we see a |
||
4736 | # lower type after that. |
||
4737 | error_message = include_state.CheckNextIncludeOrder( |
||
4738 | _ClassifyInclude(fileinfo, include, is_system)) |
||
4739 | if error_message: |
||
4740 | error(filename, linenum, 'build/include_order', 4, |
||
4741 | '%s. Should be: %s.h, c system, c++ system, other.' % |
||
4742 | (error_message, fileinfo.BaseName())) |
||
4743 | canonical_include = include_state.CanonicalizeAlphabeticalOrder(include) |
||
4744 | if not include_state.IsInAlphabeticalOrder( |
||
4745 | clean_lines, linenum, canonical_include): |
||
4746 | error(filename, linenum, 'build/include_alpha', 4, |
||
4747 | 'Include "%s" not in alphabetical order' % include) |
||
4748 | include_state.SetLastHeader(canonical_include) |
||
4749 | |||
4750 | |||
4751 | |||
4752 | def _GetTextInside(text, start_pattern): |
||
4753 | r"""Retrieves all the text between matching open and close parentheses. |
||
4754 | |||
4755 | Given a string of lines and a regular expression string, retrieve all the text |
||
4756 | following the expression and between opening punctuation symbols like |
||
4757 | (, [, or {, and the matching close-punctuation symbol. This properly handles |
||
4758 | nested occurrences of the punctuation, so for text like |
||
4759 | printf(a(), b(c())); |
||
4760 | a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'. |
||
4761 | start_pattern must match a string that has an open punctuation symbol at the end. |
||
4762 | |||
4763 | Args: |
||
4764 | text: The text to extract from. Its comments and strings must be elided. |
||
4765 | It can be a single line or span multiple lines. |
||
4766 | start_pattern: The regexp string indicating where to start extracting |
||
4767 | the text. |
||
4768 | Returns: |
||
4769 | The extracted text. |
||
4770 | None if either the opening string or ending punctuation could not be found. |
||
4771 | """ |
||
4772 | # TODO(unknown): Audit cpplint.py to see what places could be profitably |
||
4773 | # rewritten to use _GetTextInside (and use inferior regexp matching today). |
||
4774 | |||
4775 | # Map each opening punctuation to its matching close-punctuation. |
||
4776 | matching_punctuation = {'(': ')', '{': '}', '[': ']'} |
||
4777 | closing_punctuation = set(itervalues(matching_punctuation)) |
||
4778 | |||
4779 | # Find the position to start extracting text. |
||
4780 | match = re.search(start_pattern, text, re.M) |
||
4781 | if not match: # start_pattern not found in text. |
||
4782 | return None |
||
4783 | start_position = match.end(0) |
||
4784 | |||
4785 | assert start_position > 0, ( |
||
4786 | 'start_pattern must end with an opening punctuation.') |
||
4787 | assert text[start_position - 1] in matching_punctuation, ( |
||
4788 | 'start_pattern must end with an opening punctuation.') |
||
4789 | # Stack of closing punctuations we expect to have in text after position. |
||
4790 | punctuation_stack = [matching_punctuation[text[start_position - 1]]] |
||
4791 | position = start_position |
||
4792 | while punctuation_stack and position < len(text): |
||
4793 | if text[position] == punctuation_stack[-1]: |
||
4794 | punctuation_stack.pop() |
||
4795 | elif text[position] in closing_punctuation: |
||
4796 | # A closing punctuation without matching opening punctuations. |
||
4797 | return None |
||
4798 | elif text[position] in matching_punctuation: |
||
4799 | punctuation_stack.append(matching_punctuation[text[position]]) |
||
4800 | position += 1 |
||
4801 | if punctuation_stack: |
||
4802 | # Opening punctuations left without matching close-punctuations. |
||
4803 | return None |
||
4804 | # punctuations match. |
||
4805 | return text[start_position:position - 1] |
||
4806 | |||
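A doctest-style illustration, reusing the printf example from the docstring; unbalanced punctuation yields None:

    >>> _GetTextInside('printf(a(), b(c()));', r'printf\(')
    'a(), b(c())'
    >>> _GetTextInside('printf(a(, b);', r'printf\(') is None
    True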
4807 | |||
4808 | # Patterns for matching call-by-reference parameters. |
||
4809 | # |
||
4810 | # Supports nested templates up to 2 levels deep using this messy pattern: |
||
4811 | # < (?: < (?: < [^<>]* |
||
4812 | # > |
||
4813 | # | [^<>] )* |
||
4814 | # > |
||
4815 | # | [^<>] )* |
||
4816 | # > |
||
4817 | _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]* |
||
4818 | _RE_PATTERN_TYPE = ( |
||
4819 | r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?' |
||
4820 | r'(?:\w|' |
||
4821 | r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|' |
||
4822 | r'::)+') |
||
4823 | # A call-by-reference parameter ends with '& identifier'. |
||
4824 | _RE_PATTERN_REF_PARAM = re.compile( |
||
4825 | r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*' |
||
4826 | r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]') |
||
4827 | # A call-by-const-reference parameter either ends with 'const& identifier' |
||
4828 | # or looks like 'const type& identifier' when 'type' is atomic. |
||
4829 | _RE_PATTERN_CONST_REF_PARAM = ( |
||
4830 | r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT + |
||
4831 | r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')') |
||
4832 | # Stream types. |
||
4833 | _RE_PATTERN_REF_STREAM_PARAM = ( |
||
4834 | r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')') |
||
4835 | |||
4836 | |||
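A hypothetical doctest-style illustration of how these patterns see parameters (the calling check, not shown here, uses the const and stream variants to whitelist matches):

    >>> bool(_RE_PATTERN_REF_PARAM.search('void Sort(std::vector<int>& elements);'))
    True
    >>> bool(re.search(_RE_PATTERN_CONST_REF_PARAM, 'const string& name'))
    True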
4837 | def CheckLanguage(filename, clean_lines, linenum, file_extension, |
||
4838 | include_state, nesting_state, error): |
||
4839 | """Checks rules from the 'C++ language rules' section of cppguide.html. |
||
4840 | |||
4841 | Some of these rules are hard to test (function overloading, using |
||
4842 | uint32 inappropriately), but we do the best we can. |
||
4843 | |||
4844 | Args: |
||
4845 | filename: The name of the current file. |
||
4846 | clean_lines: A CleansedLines instance containing the file. |
||
4847 | linenum: The number of the line to check. |
||
4848 | file_extension: The extension (without the dot) of the filename. |
||
4849 | include_state: An _IncludeState instance in which the headers are inserted. |
||
4850 | nesting_state: A NestingState instance which maintains information about |
||
4851 | the current stack of nested blocks being parsed. |
||
4852 | error: The function to call with any errors found. |
||
4853 | """ |
||
4854 | # If the line is empty or consists of entirely a comment, no need to |
||
4855 | # check it. |
||
4856 | line = clean_lines.elided[linenum] |
||
4857 | if not line: |
||
4858 | return |
||
4859 | |||
4860 | match = _RE_PATTERN_INCLUDE.search(line) |
||
4861 | if match: |
||
4862 | CheckIncludeLine(filename, clean_lines, linenum, include_state, error) |
||
4863 | return |
||
4864 | |||
4865 | # Reset include state across preprocessor directives. This is meant |
||
4866 | # to silence warnings for conditional includes. |
||
4867 | match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line) |
||
4868 | if match: |
||
4869 | include_state.ResetSection(match.group(1)) |
||
4870 | |||
4871 | |||
4872 | # Perform other checks now that we are sure that this is not an include line |
||
4873 | CheckCasts(filename, clean_lines, linenum, error) |
||
4874 | CheckGlobalStatic(filename, clean_lines, linenum, error) |
||
4875 | CheckPrintf(filename, clean_lines, linenum, error) |
||
4876 | |||
4877 | if file_extension in GetHeaderExtensions(): |
||
4878 | # TODO(unknown): check that 1-arg constructors are explicit. |
||
4879 | # How to tell it's a constructor? |
||
4880 | # (handled in CheckForNonStandardConstructs for now) |
||
4881 | # TODO(unknown): check that classes declare or disable copy/assign |
||
4882 | # (level 1 error) |
||
4883 | pass |
||
4884 | |||
4885 | # Check if people are using the verboten C basic types. The only exception |
||
4886 | # we regularly allow is "unsigned short port" for port. |
||
4887 | if Search(r'\bshort port\b', line): |
||
4888 | if not Search(r'\bunsigned short port\b', line): |
||
4889 | error(filename, linenum, 'runtime/int', 4, |
||
4890 | 'Use "unsigned short" for ports, not "short"') |
||
4891 | else: |
||
4892 | match = Search(r'\b(short|long(?! +double)|long long)\b', line) |
||
4893 | if match: |
||
4894 | error(filename, linenum, 'runtime/int', 4, |
||
4895 | 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) |
||
4896 | |||
4897 | # Check if some verboten operator overloading is going on |
||
4898 | # TODO(unknown): catch out-of-line unary operator&: |
||
4899 | # class X {}; |
||
4900 | # int operator&(const X& x) { return 42; } // unary operator& |
||
4901 | # The trick is it's hard to tell apart from binary operator&: |
||
4902 | # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& |
||
4903 | if Search(r'\boperator\s*&\s*\(\s*\)', line): |
||
4904 | error(filename, linenum, 'runtime/operator', 4, |
||
4905 | 'Unary operator& is dangerous. Do not use it.') |
||
4906 | |||
4907 | # Check for suspicious usage of "if" like |
||
4908 | # } if (a == b) { |
||
4909 | if Search(r'\}\s*if\s*\(', line): |
||
4910 | error(filename, linenum, 'readability/braces', 4, |
||
4911 | 'Did you mean "else if"? If not, start a new line for "if".') |
||
4912 | |||
4913 | # Check for potential format string bugs like printf(foo). |
||
4914 | # We constrain the pattern not to pick things like DocidForPrintf(foo). |
||
4915 | # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) |
||
4916 | # TODO(unknown): Catch the following case. Need to change the calling |
||
4917 | # convention of the whole function to process multiple line to handle it. |
||
4918 | # printf( |
||
4919 | # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); |
||
4920 | printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') |
||
4921 | if printf_args: |
||
4922 | match = Match(r'([\w.\->()]+)$', printf_args) |
||
4923 | if match and match.group(1) != '__VA_ARGS__': |
||
4924 | function_name = re.search(r'\b((?:string)?printf)\s*\(', |
||
4925 | line, re.I).group(1) |
||
4926 | error(filename, linenum, 'runtime/printf', 4, |
||
4927 | 'Potential format string bug. Do %s("%%s", %s) instead.' |
||
4928 | % (function_name, match.group(1))) |
||
4929 | |||
4930 | # Check for potential memset bugs like memset(buf, sizeof(buf), 0). |
||
4931 | match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) |
||
4932 | if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): |
||
4933 | error(filename, linenum, 'runtime/memset', 4, |
||
4934 | 'Did you mean "memset(%s, 0, %s)"?' |
||
4935 | % (match.group(1), match.group(2))) |
||
4936 | |||
4937 | if Search(r'\busing namespace\b', line): |
||
4938 | if Search(r'\bliterals\b', line): |
||
4939 | error(filename, linenum, 'build/namespaces_literals', 5, |
||
4940 | 'Do not use namespace using-directives. ' |
||
4941 | 'Use using-declarations instead.') |
||
4942 | else: |
||
4943 | error(filename, linenum, 'build/namespaces', 5, |
||
4944 | 'Do not use namespace using-directives. ' |
||
4945 | 'Use using-declarations instead.') |
||
4946 | |||
4947 | # Detect variable-length arrays. |
||
4948 | match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) |
||
4949 | if (match and match.group(2) != 'return' and match.group(2) != 'delete' and |
||
4950 | match.group(3).find(']') == -1): |
||
4951 | # Split the size using space and arithmetic operators as delimiters. |
||
4952 | # If any of the resulting tokens are not compile time constants then |
||
4953 | # report the error. |
||
4954 | tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) |
||
4955 | is_const = True |
||
4956 | skip_next = False |
||
4957 | for tok in tokens: |
||
4958 | if skip_next: |
||
4959 | skip_next = False |
||
4960 | continue |
||
4961 | |||
4962 | if Search(r'sizeof\(.+\)', tok): continue |
||
4963 | if Search(r'arraysize\(\w+\)', tok): continue |
||
4964 | |||
4965 | tok = tok.lstrip('(') |
||
4966 | tok = tok.rstrip(')') |
||
4967 | if not tok: continue |
||
4968 | if Match(r'\d+', tok): continue |
||
4969 | if Match(r'0[xX][0-9a-fA-F]+', tok): continue |
||
4970 | if Match(r'k[A-Z0-9]\w*', tok): continue |
||
4971 | if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue |
||
4972 | if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue |
||
4973 | # A catch all for tricky sizeof cases, including 'sizeof expression', |
||
4974 | # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' |
||
4975 | # requires skipping the next token because we split on ' ' and '*'. |
||
4976 | if tok.startswith('sizeof'): |
||
4977 | skip_next = True |
||
4978 | continue |
||
4979 | is_const = False |
||
4980 | break |
||
4981 | if not is_const: |
||
4982 | error(filename, linenum, 'runtime/arrays', 1, |
||
4983 | 'Do not use variable-length arrays. Use an appropriately named ' |
||
4984 | "('k' followed by CamelCase) compile-time constant for the size.") |
||
4985 | |||
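To make the constant-size heuristic above concrete, the token patterns it accepts recognize numeric literals and 'k'-prefixed or ALL_CAPS constants, while a plain variable falls through and triggers runtime/arrays (a sketch with invented identifiers):

```python
import re

def looks_compile_time_constant(tok):
    # A subset of the accepted token forms from the loop above.
    return bool(re.match(r'\d+', tok) or
                re.match(r'0[xX][0-9a-fA-F]+', tok) or
                re.match(r'(.+::)?k[A-Z0-9]\w*', tok) or
                re.match(r'(.+::)?[A-Z][A-Z0-9_]*', tok))

for size in ('kMaxUsers', '0x40', 'MAX_PATH', 'n'):
    print(size, looks_compile_time_constant(size))
# kMaxUsers True, 0x40 True, MAX_PATH True, n False  (only 'n' would be flagged)
```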
4986 | # Check for use of unnamed namespaces in header files. Registration |
||
4987 | # macros are typically OK, so we allow use of "namespace {" on lines |
||
4988 | # that end with backslashes. |
||
4989 | if (file_extension in GetHeaderExtensions() |
||
4990 | and Search(r'\bnamespace\s*{', line) |
||
4991 | and line[-1] != '\\'): |
||
4992 | error(filename, linenum, 'build/namespaces', 4, |
||
4993 | 'Do not use unnamed namespaces in header files. See ' |
||
4994 | 'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' |
||
4995 | ' for more information.') |
||
4996 | |||
4997 | |||
4998 | def CheckGlobalStatic(filename, clean_lines, linenum, error): |
||
4999 | """Check for unsafe global or static objects. |
||
5000 | |||
5001 | Args: |
||
5002 | filename: The name of the current file. |
||
5003 | clean_lines: A CleansedLines instance containing the file. |
||
5004 | linenum: The number of the line to check. |
||
5005 | error: The function to call with any errors found. |
||
5006 | """ |
||
5007 | line = clean_lines.elided[linenum] |
||
5008 | |||
5009 | # Match two lines at a time to support multiline declarations |
||
5010 | if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): |
||
5011 | line += clean_lines.elided[linenum + 1].strip() |
||
5012 | |||
5013 | # Check for people declaring static/global STL strings at the top level. |
||
5014 | # This is dangerous because the C++ language does not guarantee that |
||
5015 | # globals with constructors are initialized before the first access, and |
||
5016 | # also because globals can be destroyed when some threads are still running. |
||
5017 | # TODO(unknown): Generalize this to also find static unique_ptr instances. |
||
5018 | # TODO(unknown): File bugs for clang-tidy to find these. |
||
5019 | match = Match( |
||
5020 | r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +' |
||
5021 | r'([a-zA-Z0-9_:]+)\b(.*)', |
||
5022 | line) |
||
5023 | |||
5024 | # Remove false positives: |
||
5025 | # - String pointers (as opposed to values). |
||
5026 | # string *pointer |
||
5027 | # const string *pointer |
||
5028 | # string const *pointer |
||
5029 | # string *const pointer |
||
5030 | # |
||
5031 | # - Functions and template specializations. |
||
5032 | # string Function<Type>(... |
||
5033 | # string Class<Type>::Method(... |
||
5034 | # |
||
5035 | # - Operators. These are matched separately because operator names |
||
5036 | # cross non-word boundaries, and trying to match both operators |
||
5037 | # and functions at the same time would decrease accuracy of |
||
5038 | # matching identifiers. |
||
5039 | # string Class::operator*() |
||
5040 | if (match and |
||
5041 | not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and |
||
5042 | not Search(r'\boperator\W', line) and |
||
5043 | not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))): |
||
5044 | if Search(r'\bconst\b', line): |
||
5045 | error(filename, linenum, 'runtime/string', 4, |
||
5046 | 'For a static/global string constant, use a C style string ' |
||
5047 | 'instead: "%schar%s %s[]".' % |
||
5048 | (match.group(1), match.group(2) or '', match.group(3))) |
||
5049 | else: |
||
5050 | error(filename, linenum, 'runtime/string', 4, |
||
5051 | 'Static/global string variables are not permitted.') |
||
5052 | |||
5053 | if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or |
||
5054 | Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)): |
||
5055 | error(filename, linenum, 'runtime/init', 4, |
||
5056 | 'You seem to be initializing a member variable with itself.') |
||
5057 | |||
5058 | |||
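CheckGlobalStatic can be exercised on its own when cpplint.py is importable as a module; a sketch with an invented file name and snippet (the error callback takes the five positional arguments used by the calls in this file):

```python
import cpplint

findings = []
def collect(filename, linenum, category, confidence, message):
    findings.append((category, message))

clean = cpplint.CleansedLines(['static string kGreeting = "hello";'])
cpplint.CheckGlobalStatic('demo.cc', clean, 0, collect)
print(findings)
# Expect ('runtime/string', 'Static/global string variables are not permitted.')
```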
5059 | def CheckPrintf(filename, clean_lines, linenum, error): |
||
5060 | """Check for printf related issues. |
||
5061 | |||
5062 | Args: |
||
5063 | filename: The name of the current file. |
||
5064 | clean_lines: A CleansedLines instance containing the file. |
||
5065 | linenum: The number of the line to check. |
||
5066 | error: The function to call with any errors found. |
||
5067 | """ |
||
5068 | line = clean_lines.elided[linenum] |
||
5069 | |||
5070 | # When snprintf is used, the second argument shouldn't be a literal. |
||
5071 | match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) |
||
5072 | if match and match.group(2) != '0': |
||
5073 | # If 2nd arg is zero, snprintf is used to calculate size. |
||
5074 | error(filename, linenum, 'runtime/printf', 3, |
||
5075 | 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' |
||
5076 | 'to snprintf.' % (match.group(1), match.group(2))) |
||
5077 | |||
5078 | # Check if some verboten C functions are being used. |
||
5079 | if Search(r'\bsprintf\s*\(', line): |
||
5080 | error(filename, linenum, 'runtime/printf', 5, |
||
5081 | 'Never use sprintf. Use snprintf instead.') |
||
5082 | match = Search(r'\b(strcpy|strcat)\s*\(', line) |
||
5083 | if match: |
||
5084 | error(filename, linenum, 'runtime/printf', 4, |
||
5085 | 'Almost always, snprintf is better than %s' % match.group(1)) |
||
5086 | |||
5087 | |||
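A short sketch of the snprintf size-argument pattern above (buffer and arguments are illustrative); only a non-zero numeric literal in the second position is reported:

```python
import re

snprintf_size = re.compile(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,')

for line in ('snprintf(buf, 16, "%s", name);',           # literal size: flagged
             'snprintf(buf, sizeof(buf), "%s", name);',  # sizeof: not matched
             'snprintf(NULL, 0, "%s", name);'):          # 0 = measuring only: allowed
    m = snprintf_size.search(line)
    print(line, '->', bool(m and m.group(2) != '0'))
```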
5088 | def IsDerivedFunction(clean_lines, linenum): |
||
5089 | """Check if current line contains an inherited function. |
||
5090 | |||
5091 | Args: |
||
5092 | clean_lines: A CleansedLines instance containing the file. |
||
5093 | linenum: The number of the line to check. |
||
5094 | Returns: |
||
5095 | True if current line contains a function with "override" |
||
5096 | virt-specifier. |
||
5097 | """ |
||
5098 | # Scan back a few lines for start of current function |
||
5099 | for i in xrange(linenum, max(-1, linenum - 10), -1): |
||
5100 | match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i]) |
||
5101 | if match: |
||
5102 | # Look for "override" after the matching closing parenthesis |
||
5103 | line, _, closing_paren = CloseExpression( |
||
5104 | clean_lines, i, len(match.group(1))) |
||
5105 | return (closing_paren >= 0 and |
||
5106 | Search(r'\boverride\b', line[closing_paren:])) |
||
5107 | return False |
||
5108 | |||
5109 | |||
5110 | def IsOutOfLineMethodDefinition(clean_lines, linenum): |
||
5111 | """Check if current line contains an out-of-line method definition. |
||
5112 | |||
5113 | Args: |
||
5114 | clean_lines: A CleansedLines instance containing the file. |
||
5115 | linenum: The number of the line to check. |
||
5116 | Returns: |
||
5117 | True if current line contains an out-of-line method definition. |
||
5118 | """ |
||
5119 | # Scan back a few lines for start of current function |
||
5120 | for i in xrange(linenum, max(-1, linenum - 10), -1): |
||
5121 | if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]): |
||
5122 | return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None |
||
5123 | return False |
||
5124 | |||
5125 | |||
5126 | def IsInitializerList(clean_lines, linenum): |
||
5127 | """Check if current line is inside constructor initializer list. |
||
5128 | |||
5129 | Args: |
||
5130 | clean_lines: A CleansedLines instance containing the file. |
||
5131 | linenum: The number of the line to check. |
||
5132 | Returns: |
||
5133 | True if current line appears to be inside constructor initializer |
||
5134 | list, False otherwise. |
||
5135 | """ |
||
5136 | for i in xrange(linenum, 1, -1): |
||
5137 | line = clean_lines.elided[i] |
||
5138 | if i == linenum: |
||
5139 | remove_function_body = Match(r'^(.*)\{\s*$', line) |
||
5140 | if remove_function_body: |
||
5141 | line = remove_function_body.group(1) |
||
5142 | |||
5143 | if Search(r'\s:\s*\w+[({]', line): |
||
5144 | # A lone colon tends to indicate the start of a constructor |
||
5145 | # initializer list. It could also be a ternary operator, which |
||
5146 | # also tends to appear in constructor initializer lists as |
||
5147 | # opposed to parameter lists. |
||
5148 | return True |
||
5149 | if Search(r'\}\s*,\s*$', line): |
||
5150 | # A closing brace followed by a comma is probably the end of a |
||
5151 | # brace-initialized member in constructor initializer list. |
||
5152 | return True |
||
5153 | if Search(r'[{};]\s*$', line): |
||
5154 | # Found one of the following: |
||
5155 | # - A closing brace or semicolon, probably the end of the previous |
||
5156 | # function. |
||
5157 | # - An opening brace, probably the start of current class or namespace. |
||
5158 | # |
||
5159 | # Current line is probably not inside an initializer list since |
||
5160 | # we saw one of those things without seeing the starting colon. |
||
5161 | return False |
||
5162 | |||
5163 | # Got to the beginning of the file without seeing the start of |
||
5164 | # constructor initializer list. |
||
5165 | return False |
||
5166 | |||
5167 | |||
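The two line-ending signals the scan above relies on can be seen on typical constructor lines (class and member names invented):

```python
import re

starts_init_list = re.compile(r'\s:\s*\w+[({]')   # lone colon starting the list
member_brace_end = re.compile(r'\}\s*,\s*$')      # brace-initialized member, list continues

print(bool(starts_init_list.search('Foo::Foo() : bar_(0),')))    # True
print(bool(member_brace_end.search('      baz_{1, 2},')))        # True
print(bool(starts_init_list.search('int Add(int a, int b) {')))  # False
```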
5168 | def CheckForNonConstReference(filename, clean_lines, linenum, |
||
5169 | nesting_state, error): |
||
5170 | """Check for non-const references. |
||
5171 | |||
5172 | Separate from CheckLanguage since it scans backwards from current |
||
5173 | line, instead of scanning forward. |
||
5174 | |||
5175 | Args: |
||
5176 | filename: The name of the current file. |
||
5177 | clean_lines: A CleansedLines instance containing the file. |
||
5178 | linenum: The number of the line to check. |
||
5179 | nesting_state: A NestingState instance which maintains information about |
||
5180 | the current stack of nested blocks being parsed. |
||
5181 | error: The function to call with any errors found. |
||
5182 | """ |
||
5183 | # Do nothing if there is no '&' on current line. |
||
5184 | line = clean_lines.elided[linenum] |
||
5185 | if '&' not in line: |
||
5186 | return |
||
5187 | |||
5188 | # If a function is inherited, current function doesn't have much of |
||
5189 | # a choice, so any non-const references should not be blamed on |
||
5190 | # derived function. |
||
5191 | if IsDerivedFunction(clean_lines, linenum): |
||
5192 | return |
||
5193 | |||
5194 | # Don't warn on out-of-line method definitions, as we would warn on the |
||
5195 | # in-line declaration, if it isn't marked with 'override'. |
||
5196 | if IsOutOfLineMethodDefinition(clean_lines, linenum): |
||
5197 | return |
||
5198 | |||
5199 | # Long type names may be broken across multiple lines, usually in one |
||
5200 | # of these forms: |
||
5201 | # LongType |
||
5202 | # ::LongTypeContinued &identifier |
||
5203 | # LongType:: |
||
5204 | # LongTypeContinued &identifier |
||
5205 | # LongType< |
||
5206 | # ...>::LongTypeContinued &identifier |
||
5207 | # |
||
5208 | # If we detected a type split across two lines, join the previous |
||
5209 | # line to current line so that we can match const references |
||
5210 | # accordingly. |
||
5211 | # |
||
5212 | # Note that this only scans back one line, since scanning back |
||
5213 | # arbitrary number of lines would be expensive. If you have a type |
||
5214 | # that spans more than 2 lines, please use a typedef. |
||
5215 | if linenum > 1: |
||
5216 | previous = None |
||
5217 | if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): |
||
5218 | # previous_line\n + ::current_line |
||
5219 | previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', |
||
5220 | clean_lines.elided[linenum - 1]) |
||
5221 | elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): |
||
5222 | # previous_line::\n + current_line |
||
5223 | previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', |
||
5224 | clean_lines.elided[linenum - 1]) |
||
5225 | if previous: |
||
5226 | line = previous.group(1) + line.lstrip() |
||
5227 | else: |
||
5228 | # Check for templated parameter that is split across multiple lines |
||
5229 | endpos = line.rfind('>') |
||
5230 | if endpos > -1: |
||
5231 | (_, startline, startpos) = ReverseCloseExpression( |
||
5232 | clean_lines, linenum, endpos) |
||
5233 | if startpos > -1 and startline < linenum: |
||
5234 | # Found the matching < on an earlier line, collect all |
||
5235 | # pieces up to current line. |
||
5236 | line = '' |
||
5237 | for i in xrange(startline, linenum + 1): |
||
5238 | line += clean_lines.elided[i].strip() |
||
5239 | |||
5240 | # Check for non-const references in function parameters. A single '&' may |
||
5241 | # be found in the following places: |
||
5242 | # inside expression: binary & for bitwise AND |
||
5243 | # inside expression: unary & for taking the address of something |
||
5244 | # inside declarators: reference parameter |
||
5245 | # We will exclude the first two cases by checking that we are not inside a |
||
5246 | # function body, including one that was just introduced by a trailing '{'. |
||
5247 | # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare]. |
||
5248 | if (nesting_state.previous_stack_top and |
||
5249 | not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or |
||
5250 | isinstance(nesting_state.previous_stack_top, _NamespaceInfo))): |
||
5251 | # Not at toplevel, not within a class, and not within a namespace |
||
5252 | return |
||
5253 | |||
5254 | # Avoid initializer lists. We only need to scan back from the |
||
5255 | # current line for something that starts with ':'. |
||
5256 | # |
||
5257 | # We don't need to check the current line, since the '&' would |
||
5258 | # appear inside the second set of parentheses on the current line as |
||
5259 | # opposed to the first set. |
||
5260 | if linenum > 0: |
||
5261 | for i in xrange(linenum - 1, max(0, linenum - 10), -1): |
||
5262 | previous_line = clean_lines.elided[i] |
||
5263 | if not Search(r'[),]\s*$', previous_line): |
||
5264 | break |
||
5265 | if Match(r'^\s*:\s+\S', previous_line): |
||
5266 | return |
||
5267 | |||
5268 | # Avoid preprocessors |
||
5269 | if Search(r'\\\s*$', line): |
||
5270 | return |
||
5271 | |||
5272 | # Avoid constructor initializer lists |
||
5273 | if IsInitializerList(clean_lines, linenum): |
||
5274 | return |
||
5275 | |||
5276 | # We allow non-const references in a few standard places, like functions |
||
5277 | # called "swap()" or iostream operators like "<<" or ">>". Do not check |
||
5278 | # those function parameters. |
||
5279 | # |
||
5280 | # We also accept & in static_assert, which looks like a function but |
||
5281 | # it's actually a declaration expression. |
||
5282 | whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' |
||
5283 | r'operator\s*[<>][<>]|' |
||
5284 | r'static_assert|COMPILE_ASSERT' |
||
5285 | r')\s*\(') |
||
5286 | if Search(whitelisted_functions, line): |
||
5287 | return |
||
5288 | elif not Search(r'\S+\([^)]*$', line): |
||
5289 | # Don't see a whitelisted function on this line. Actually we |
||
5290 | # didn't see any function name on this line, so this is likely a |
||
5291 | # multi-line parameter list. Try a bit harder to catch this case. |
||
5292 | for i in xrange(2): |
||
5293 | if (linenum > i and |
||
5294 | Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): |
||
5295 | return |
||
5296 | |||
5297 | decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body |
||
5298 | for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): |
||
5299 | if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and |
||
5300 | not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)): |
||
5301 | error(filename, linenum, 'runtime/references', 2, |
||
5302 | 'Is this a non-const reference? ' |
||
5303 | 'If so, make const or use a pointer: ' + |
||
5304 | ReplaceAll(' *<', '<', parameter)) |
||
5305 | |||
5306 | |||
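For reference, the whitelist regex above is what exempts the usual swap and stream-operator signatures from the non-const-reference warning (declarations invented):

```python
import re

whitelisted = re.compile(r'(?:[sS]wap(?:<\w:+>)?|'
                         r'operator\s*[<>][<>]|'
                         r'static_assert|COMPILE_ASSERT'
                         r')\s*\(')

print(bool(whitelisted.search('void swap(Foo& a, Foo& b);')))                       # True: exempt
print(bool(whitelisted.search('ostream& operator<<(ostream& os, const Foo& f);')))  # True: exempt
print(bool(whitelisted.search('void Frobnicate(Foo& target);')))                    # False: checked
```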
5307 | def CheckCasts(filename, clean_lines, linenum, error): |
||
5308 | """Various cast related checks. |
||
5309 | |||
5310 | Args: |
||
5311 | filename: The name of the current file. |
||
5312 | clean_lines: A CleansedLines instance containing the file. |
||
5313 | linenum: The number of the line to check. |
||
5314 | error: The function to call with any errors found. |
||
5315 | """ |
||
5316 | line = clean_lines.elided[linenum] |
||
5317 | |||
5318 | # Check to see if they're using a conversion function cast. |
||
5319 | # I just try to capture the most common basic types, though there are more. |
||
5320 | # Parameterless conversion functions, such as bool(), are allowed as they are |
||
5321 | # probably a member operator declaration or default constructor. |
||
5322 | match = Search( |
||
5323 | r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b' |
||
5324 | r'(int|float|double|bool|char|int32|uint32|int64|uint64)' |
||
5325 | r'(\([^)].*)', line) |
||
5326 | expecting_function = ExpectingFunctionArgs(clean_lines, linenum) |
||
5327 | if match and not expecting_function: |
||
5328 | matched_type = match.group(2) |
||
5329 | |||
5330 | # matched_new_or_template is used to silence two false positives: |
||
5331 | # - New operators |
||
5332 | # - Template arguments with function types |
||
5333 | # |
||
5334 | # For template arguments, we match on types immediately following |
||
5335 | # an opening bracket without any spaces. This is a fast way to |
||
5336 | # silence the common case where the function type is the first |
||
5337 | # template argument. False negative with less-than comparison is |
||
5338 | # avoided because those operators are usually followed by a space. |
||
5339 | # |
||
5340 | # function<double(double)> // bracket + no space = false positive |
||
5341 | # value < double(42) // bracket + space = true positive |
||
5342 | matched_new_or_template = match.group(1) |
||
5343 | |||
5344 | # Avoid arrays by looking for brackets that come after the closing |
||
5345 | # parenthesis. |
||
5346 | if Match(r'\([^()]+\)\s*\[', match.group(3)): |
||
5347 | return |
||
5348 | |||
5349 | # Other things to ignore: |
||
5350 | # - Function pointers |
||
5351 | # - Casts to pointer types |
||
5352 | # - Placement new |
||
5353 | # - Alias declarations |
||
5354 | matched_funcptr = match.group(3) |
||
5355 | if (matched_new_or_template is None and |
||
5356 | not (matched_funcptr and |
||
5357 | (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', |
||
5358 | matched_funcptr) or |
||
5359 | matched_funcptr.startswith('(*)'))) and |
||
5360 | not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and |
||
5361 | not Search(r'new\(\S+\)\s*' + matched_type, line)): |
||
5362 | error(filename, linenum, 'readability/casting', 4, |
||
5363 | 'Using deprecated casting style. ' |
||
5364 | 'Use static_cast<%s>(...) instead' % |
||
5365 | matched_type) |
||
5366 | |||
5367 | if not expecting_function: |
||
5368 | CheckCStyleCast(filename, clean_lines, linenum, 'static_cast', |
||
5369 | r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) |
||
5370 | |||
5371 | # This doesn't catch all cases. Consider (const char * const)"hello". |
||
5372 | # |
||
5373 | # (char *) "foo" should always be a const_cast (reinterpret_cast won't |
||
5374 | # compile). |
||
5375 | if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast', |
||
5376 | r'\((char\s?\*+\s?)\)\s*"', error): |
||
5377 | pass |
||
5378 | else: |
||
5379 | # Check pointer casts for other than string constants |
||
5380 | CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast', |
||
5381 | r'\((\w+\s?\*+\s?)\)', error) |
||
5382 | |||
5383 | # In addition, we look for people taking the address of a cast. This |
||
5384 | # is dangerous -- casts can assign to temporaries, so the pointer doesn't |
||
5385 | # point where you think. |
||
5386 | # |
||
5387 | # Some non-identifier character is required before the '&' for the |
||
5388 | # expression to be recognized as a cast. These are casts: |
||
5389 | # expression = &static_cast<int*>(temporary()); |
||
5390 | # function(&(int*)(temporary())); |
||
5391 | # |
||
5392 | # This is not a cast: |
||
5393 | # reference_type&(int* function_param); |
||
5394 | match = Search( |
||
5395 | r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|' |
||
5396 | r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line) |
||
5397 | if match: |
||
5398 | # Try a better error message when the & is bound to something |
||
5399 | # dereferenced by the casted pointer, as opposed to the casted |
||
5400 | # pointer itself. |
||
5401 | parenthesis_error = False |
||
5402 | match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line) |
||
5403 | if match: |
||
5404 | _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1))) |
||
5405 | if x1 >= 0 and clean_lines.elided[y1][x1] == '(': |
||
5406 | _, y2, x2 = CloseExpression(clean_lines, y1, x1) |
||
5407 | if x2 >= 0: |
||
5408 | extended_line = clean_lines.elided[y2][x2:] |
||
5409 | if y2 < clean_lines.NumLines() - 1: |
||
5410 | extended_line += clean_lines.elided[y2 + 1] |
||
5411 | if Match(r'\s*(?:->|\[)', extended_line): |
||
5412 | parenthesis_error = True |
||
5413 | |||
5414 | if parenthesis_error: |
||
5415 | error(filename, linenum, 'readability/casting', 4, |
||
5416 | ('Are you taking an address of something dereferenced ' |
||
5417 | 'from a cast? Wrapping the dereferenced expression in ' |
||
5418 | 'parentheses will make the binding more obvious')) |
||
5419 | else: |
||
5420 | error(filename, linenum, 'runtime/casting', 4, |
||
5421 | ('Are you taking an address of a cast? ' |
||
5422 | 'This is dangerous: could be a temp var. ' |
||
5423 | 'Take the address before doing the cast, rather than after')) |
||
5424 | |||
5425 | |||
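A rough sketch of how the conversion-function-cast pattern above separates a deprecated cast from operator new and from a template argument (lines invented); a non-empty group(1) silences the warning:

```python
import re

cast_like = re.compile(
    r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
    r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
    r'(\([^)].*)')

for line in ('int x = int(3.5);',              # deprecated cast style: flagged
             'int* p = new int(5);',           # new-expression: group(1) set, ignored
             'function<double(double)> f;'):   # template argument: group(1) set, ignored
    m = cast_like.search(line)
    print(line, '->', m.group(1, 2) if m else None)
```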
5426 | def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): |
||
5427 | """Checks for a C-style cast by looking for the pattern. |
||
5428 | |||
5429 | Args: |
||
5430 | filename: The name of the current file. |
||
5431 | clean_lines: A CleansedLines instance containing the file. |
||
5432 | linenum: The number of the line to check. |
||
5433 | cast_type: The string for the C++ cast to recommend. This is either |
||
5434 | reinterpret_cast, static_cast, or const_cast, depending. |
||
5435 | pattern: The regular expression used to find C-style casts. |
||
5436 | error: The function to call with any errors found. |
||
5437 | |||
5438 | Returns: |
||
5439 | True if an error was emitted. |
||
5440 | False otherwise. |
||
5441 | """ |
||
5442 | line = clean_lines.elided[linenum] |
||
5443 | match = Search(pattern, line) |
||
5444 | if not match: |
||
5445 | return False |
||
5446 | |||
5447 | # Exclude lines with keywords that tend to look like casts |
||
5448 | context = line[0:match.start(1) - 1] |
||
5449 | if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): |
||
5450 | return False |
||
5451 | |||
5452 | # Try expanding current context to see if we are one level of |
||
5453 | # parentheses inside a macro. |
||
5454 | if linenum > 0: |
||
5455 | for i in xrange(linenum - 1, max(0, linenum - 5), -1): |
||
5456 | context = clean_lines.elided[i] + context |
||
5457 | if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): |
||
5458 | return False |
||
5459 | |||
5460 | # operator++(int) and operator--(int) |
||
5461 | if context.endswith(' operator++') or context.endswith(' operator--'): |
||
5462 | return False |
||
5463 | |||
5464 | # A single unnamed argument for a function tends to look like old style cast. |
||
5465 | # If we see those, don't issue warnings for deprecated casts. |
||
5466 | remainder = line[match.end(0):] |
||
5467 | if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)', |
||
5468 | remainder): |
||
5469 | return False |
||
5470 | |||
5471 | # At this point, all that should be left is actual casts. |
||
5472 | error(filename, linenum, 'readability/casting', 4, |
||
5473 | 'Using C-style cast. Use %s<%s>(...) instead' % |
||
5474 | (cast_type, match.group(1))) |
||
5475 | |||
5476 | return True |
||
5477 | |||
5478 | |||
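When cpplint.py is importable, CheckCStyleCast can be driven directly with the same static_cast pattern CheckCasts passes in; a sketch with an invented file name, against the signature shown above:

```python
import cpplint

hits = []
def collect(filename, linenum, category, confidence, message):
    hits.append(message)

clean = cpplint.CleansedLines(['int x = (int)y;'])
cpplint.CheckCStyleCast('demo.cc', clean, 0, 'static_cast',
                        r'\((int|float|double|bool|char|u?int(16|32|64))\)', collect)
print(hits)  # expect: Using C-style cast. Use static_cast<int>(...) instead
```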
5479 | def ExpectingFunctionArgs(clean_lines, linenum): |
||
5480 | """Checks whether function type arguments are expected. |
||
5481 | |||
5482 | Args: |
||
5483 | clean_lines: A CleansedLines instance containing the file. |
||
5484 | linenum: The number of the line to check. |
||
5485 | |||
5486 | Returns: |
||
5487 | True if the line at 'linenum' is inside something that expects arguments |
||
5488 | of function types. |
||
5489 | """ |
||
5490 | line = clean_lines.elided[linenum] |
||
5491 | return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or |
||
5492 | (linenum >= 2 and |
||
5493 | (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', |
||
5494 | clean_lines.elided[linenum - 1]) or |
||
5495 | Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', |
||
5496 | clean_lines.elided[linenum - 2]) or |
||
5497 | Search(r'\bstd::m?function\s*\<\s*$', |
||
5498 | clean_lines.elided[linenum - 1])))) |
||
5499 | |||
5500 | |||
5501 | _HEADERS_CONTAINING_TEMPLATES = ( |
||
5502 | ('<deque>', ('deque',)), |
||
5503 | ('<functional>', ('unary_function', 'binary_function', |
||
5504 | 'plus', 'minus', 'multiplies', 'divides', 'modulus', |
||
5505 | 'negate', |
||
5506 | 'equal_to', 'not_equal_to', 'greater', 'less', |
||
5507 | 'greater_equal', 'less_equal', |
||
5508 | 'logical_and', 'logical_or', 'logical_not', |
||
5509 | 'unary_negate', 'not1', 'binary_negate', 'not2', |
||
5510 | 'bind1st', 'bind2nd', |
||
5511 | 'pointer_to_unary_function', |
||
5512 | 'pointer_to_binary_function', |
||
5513 | 'ptr_fun', |
||
5514 | 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t', |
||
5515 | 'mem_fun_ref_t', |
||
5516 | 'const_mem_fun_t', 'const_mem_fun1_t', |
||
5517 | 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t', |
||
5518 | 'mem_fun_ref', |
||
5519 | )), |
||
5520 | ('<limits>', ('numeric_limits',)), |
||
5521 | ('<list>', ('list',)), |
||
5522 | ('<map>', ('map', 'multimap',)), |
||
5523 | ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr', |
||
5524 | 'unique_ptr', 'weak_ptr')), |
||
5525 | ('<queue>', ('queue', 'priority_queue',)), |
||
5526 | ('<set>', ('set', 'multiset',)), |
||
5527 | ('<stack>', ('stack',)), |
||
5528 | ('<string>', ('char_traits', 'basic_string',)), |
||
5529 | ('<tuple>', ('tuple',)), |
||
5530 | ('<unordered_map>', ('unordered_map', 'unordered_multimap')), |
||
5531 | ('<unordered_set>', ('unordered_set', 'unordered_multiset')), |
||
5532 | ('<utility>', ('pair',)), |
||
5533 | ('<vector>', ('vector',)), |
||
5534 | |||
5535 | # gcc extensions. |
||
5536 | # Note: std::hash is their hash, ::hash is our hash |
||
5537 | ('<hash_map>', ('hash_map', 'hash_multimap',)), |
||
5538 | ('<hash_set>', ('hash_set', 'hash_multiset',)), |
||
5539 | ('<slist>', ('slist',)), |
||
5540 | ) |
||
5541 | |||
5542 | _HEADERS_MAYBE_TEMPLATES = ( |
||
5543 | ('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort', |
||
5544 | 'transform', |
||
5545 | )), |
||
5546 | ('<utility>', ('forward', 'make_pair', 'move', 'swap')), |
||
5547 | ) |
||
5548 | |||
5549 | _RE_PATTERN_STRING = re.compile(r'\bstring\b') |
||
5550 | |||
5551 | _re_pattern_headers_maybe_templates = [] |
||
5552 | for _header, _templates in _HEADERS_MAYBE_TEMPLATES: |
||
5553 | for _template in _templates: |
||
5554 | # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or |
||
5555 | # type::max(). |
||
5556 | _re_pattern_headers_maybe_templates.append( |
||
5557 | (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'), |
||
5558 | _template, |
||
5559 | _header)) |
||
5560 | |||
5561 | # Other scripts may reach in and modify this pattern. |
||
5562 | _re_pattern_templates = [] |
||
5563 | for _header, _templates in _HEADERS_CONTAINING_TEMPLATES: |
||
5564 | for _template in _templates: |
||
5565 | _re_pattern_templates.append( |
||
5566 | (re.compile(r'(\<|\b)' + _template + r'\s*\<'), |
||
5567 | _template + '<>', |
||
5568 | _header)) |
||
5569 | |||
5570 | |||
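As an illustration, one of the compiled template patterns above, rebuilt by hand, is what later ties a use of map<> to the <map> header:

```python
import re

# Rebuild of a single _re_pattern_templates entry, for 'map'.
pattern = re.compile(r'(\<|\b)' + 'map' + r'\s*\<')

print(bool(pattern.search('std::map<int, string> counts;')))  # True: <map> is required
print(bool(pattern.search('BitmapView view;')))               # False: 'map' is not followed by '<'
```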
5571 | def FilesBelongToSameModule(filename_cc, filename_h): |
||
5572 | """Check if these two filenames belong to the same module. |
||
5573 | |||
5574 | The concept of a 'module' here is as follows: |
||
5575 | foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the |
||
5576 | same 'module' if they are in the same directory. |
||
5577 | some/path/public/xyzzy and some/path/internal/xyzzy are also considered |
||
5578 | to belong to the same module here. |
||
5579 | |||
5580 | If the filename_cc contains a longer path than the filename_h, for example, |
||
5581 | '/absolute/path/to/base/sysinfo.cc', and this file would include |
||
5582 | 'base/sysinfo.h', this function also produces the prefix needed to open the |
||
5583 | header. This is used by the caller of this function to more robustly open the |
||
5584 | header file. We don't have access to the real include paths in this context, |
||
5585 | so we need this guesswork here. |
||
5586 | |||
5587 | Known bugs: tools/base/bar.cc and base/bar.h belong to the same module |
||
5588 | according to this implementation. Because of this, this function gives |
||
5589 | some false positives. This should be sufficiently rare in practice. |
||
5590 | |||
5591 | Args: |
||
5592 | filename_cc: is the path for the source (e.g. .cc) file |
||
5593 | filename_h: is the path for the header file |
||
5594 | |||
5595 | Returns: |
||
5596 | Tuple with a bool and a string: |
||
5597 | bool: True if filename_cc and filename_h belong to the same module. |
||
5598 | string: the additional prefix needed to open the header file. |
||
5599 | """ |
||
5600 | fileinfo_cc = FileInfo(filename_cc) |
||
5601 | if not fileinfo_cc.Extension().lstrip('.') in GetNonHeaderExtensions(): |
||
5602 | return (False, '') |
||
5603 | |||
5604 | fileinfo_h = FileInfo(filename_h) |
||
5605 | if not fileinfo_h.Extension().lstrip('.') in GetHeaderExtensions(): |
||
5606 | return (False, '') |
||
5607 | |||
5608 | filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))] |
||
5609 | matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName()) |
||
5610 | if matched_test_suffix: |
||
5611 | filename_cc = filename_cc[:-len(matched_test_suffix.group(1))] |
||
5612 | |||
5613 | filename_cc = filename_cc.replace('/public/', '/') |
||
5614 | filename_cc = filename_cc.replace('/internal/', '/') |
||
5615 | |||
5616 | filename_h = filename_h[:-(len(fileinfo_h.Extension()))] |
||
5617 | if filename_h.endswith('-inl'): |
||
5618 | filename_h = filename_h[:-len('-inl')] |
||
5619 | filename_h = filename_h.replace('/public/', '/') |
||
5620 | filename_h = filename_h.replace('/internal/', '/') |
||
5621 | |||
5622 | files_belong_to_same_module = filename_cc.endswith(filename_h) |
||
5623 | common_path = '' |
||
5624 | if files_belong_to_same_module: |
||
5625 | common_path = filename_cc[:-len(filename_h)] |
||
5626 | return files_belong_to_same_module, common_path |
||
5627 | |||
5628 | |||
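The docstring's own examples can be reproduced when cpplint.py is importable as a module (paths are the illustrative ones from the docstring above):

```python
import cpplint

print(cpplint.FilesBelongToSameModule(
    '/absolute/path/to/base/sysinfo.cc', 'base/sysinfo.h'))
# (True, '/absolute/path/to/')  -- the prefix needed to open the header

print(cpplint.FilesBelongToSameModule('tools/base/bar.cc', 'base/bar.h'))
# (True, 'tools/')  -- the known false positive mentioned in the docstring
```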
5629 | def UpdateIncludeState(filename, include_dict, io=codecs): |
||
5630 | """Fill up the include_dict with new includes found from the file. |
||
5631 | |||
5632 | Args: |
||
5633 | filename: the name of the header to read. |
||
5634 | include_dict: a dictionary in which the headers are inserted. |
||
5635 | io: The io factory to use to read the file. Provided for testability. |
||
5636 | |||
5637 | Returns: |
||
5638 | True if a header was successfully added. False otherwise. |
||
5639 | """ |
||
5640 | headerfile = None |
||
5641 | try: |
||
5642 | headerfile = io.open(filename, 'r', 'utf8', 'replace') |
||
5643 | except IOError: |
||
5644 | return False |
||
5645 | linenum = 0 |
||
5646 | for line in headerfile: |
||
5647 | linenum += 1 |
||
5648 | clean_line = CleanseComments(line) |
||
5649 | match = _RE_PATTERN_INCLUDE.search(clean_line) |
||
5650 | if match: |
||
5651 | include = match.group(2) |
||
5652 | include_dict.setdefault(include, linenum) |
||
5653 | return True |
||
5654 | |||
5655 | |||
5656 | def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, |
||
5657 | io=codecs): |
||
5658 | """Reports for missing stl includes. |
||
5659 | |||
5660 | This function will output warnings to make sure you are including the headers |
||
5661 | necessary for the stl containers and functions that you use. We only give one |
||
5662 | reason to include a header. For example, if you use both equal_to<> and |
||
5663 | less<> in a .h file, only one (the latter in the file) of these will be |
||
5664 | reported as a reason to include <functional>. |
||
5665 | |||
5666 | Args: |
||
5667 | filename: The name of the current file. |
||
5668 | clean_lines: A CleansedLines instance containing the file. |
||
5669 | include_state: An _IncludeState instance. |
||
5670 | error: The function to call with any errors found. |
||
5671 | io: The IO factory to use to read the header file. Provided for unittest |
||
5672 | injection. |
||
5673 | """ |
||
5674 | required = {} # A map of header name to linenumber and the template entity. |
||
5675 | # Example of required: { '<functional>': (1219, 'less<>') } |
||
5676 | |||
5677 | for linenum in range(clean_lines.NumLines()): |
||
5678 | line = clean_lines.elided[linenum] |
||
5679 | if not line or line[0] == '#': |
||
5680 | continue |
||
5681 | |||
5682 | # String is special -- it is a non-templatized type in STL. |
||
5683 | matched = _RE_PATTERN_STRING.search(line) |
||
5684 | if matched: |
||
5685 | # Don't warn about strings in non-STL namespaces: |
||
5686 | # (We check only the first match per line; good enough.) |
||
5687 | prefix = line[:matched.start()] |
||
5688 | if prefix.endswith('std::') or not prefix.endswith('::'): |
||
5689 | required['<string>'] = (linenum, 'string') |
||
5690 | |||
5691 | for pattern, template, header in _re_pattern_headers_maybe_templates: |
||
5692 | if pattern.search(line): |
||
5693 | required[header] = (linenum, template) |
||
5694 | |||
5695 | # The following function is just a speed up, no semantics are changed. |
||
5696 | if not '<' in line: # Reduces the cpu time usage by skipping lines. |
||
5697 | continue |
||
5698 | |||
5699 | for pattern, template, header in _re_pattern_templates: |
||
5700 | matched = pattern.search(line) |
||
5701 | if matched: |
||
5702 | # Don't warn about IWYU in non-STL namespaces: |
||
5703 | # (We check only the first match per line; good enough.) |
||
5704 | prefix = line[:matched.start()] |
||
5705 | if prefix.endswith('std::') or not prefix.endswith('::'): |
||
5706 | required[header] = (linenum, template) |
||
5707 | |||
5708 | # The policy is that if you #include something in foo.h you don't need to |
||
5709 | # include it again in foo.cc. Here, we will look at possible includes. |
||
5710 | # Let's flatten the include_state include_list and copy it into a dictionary. |
||
5711 | include_dict = dict([item for sublist in include_state.include_list |
||
5712 | for item in sublist]) |
||
5713 | |||
5714 | # Did we find the header for this file (if any) and successfully load it? |
||
5715 | header_found = False |
||
5716 | |||
5717 | # Use the absolute path so that matching works properly. |
||
5718 | abs_filename = FileInfo(filename).FullName() |
||
5719 | |||
5720 | # For Emacs's flymake. |
||
5721 | # If cpplint is invoked from Emacs's flymake, a temporary file is generated |
||
5722 | # by flymake and that file name might end with '_flymake.cc'. In that case, |
||
5723 | # restore original file name here so that the corresponding header file can be |
||
5724 | # found. |
||
5725 | # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h' |
||
5726 | # instead of 'foo_flymake.h' |
||
5727 | abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) |
||
5728 | |||
5729 | # include_dict is modified during iteration, so we iterate over a copy of |
||
5730 | # the keys. |
||
5731 | header_keys = list(include_dict.keys()) |
||
5732 | for header in header_keys: |
||
5733 | (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) |
||
5734 | fullpath = common_path + header |
||
5735 | if same_module and UpdateIncludeState(fullpath, include_dict, io): |
||
5736 | header_found = True |
||
5737 | |||
5738 | # If we can't find the header file for a .cc, assume it's because we don't |
||
5739 | # know where to look. In that case we'll give up as we're not sure they |
||
5740 | # didn't include it in the .h file. |
||
5741 | # TODO(unknown): Do a better job of finding .h files so we are confident that |
||
5742 | # not having the .h file means there isn't one. |
||
5743 | if not header_found: |
||
5744 | for extension in GetNonHeaderExtensions(): |
||
5745 | if filename.endswith('.' + extension): |
||
5746 | return |
||
5747 | |||
5748 | # All the lines have been processed, report the errors found. |
||
5749 | for required_header_unstripped in sorted(required, key=required.__getitem__): |
||
5750 | template = required[required_header_unstripped][1] |
||
5751 | if required_header_unstripped.strip('<>"') not in include_dict: |
||
5752 | error(filename, required[required_header_unstripped][0], |
||
5753 | 'build/include_what_you_use', 4, |
||
5754 | 'Add #include ' + required_header_unstripped + ' for ' + template) |
||
5755 | |||
5756 | |||
5757 | _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<') |
||
5758 | |||
5759 | |||
5760 | def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): |
||
5761 | """Check that make_pair's template arguments are deduced. |
||
5762 | |||
5763 | G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are |
||
5764 | specified explicitly, and such use isn't intended in any case. |
||
5765 | |||
5766 | Args: |
||
5767 | filename: The name of the current file. |
||
5768 | clean_lines: A CleansedLines instance containing the file. |
||
5769 | linenum: The number of the line to check. |
||
5770 | error: The function to call with any errors found. |
||
5771 | """ |
||
5772 | line = clean_lines.elided[linenum] |
||
5773 | match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) |
||
5774 | if match: |
||
5775 | error(filename, linenum, 'build/explicit_make_pair', |
||
5776 | 4, # 4 = high confidence |
||
5777 | 'For C++11-compatibility, omit template arguments from make_pair' |
||
5778 | ' OR use pair directly OR if appropriate, construct a pair directly') |
||
5779 | |||
5780 | |||
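A quick sketch of the pattern behind CheckMakePairUsesDeduction: only an explicit template argument list after make_pair is flagged:

```python
import re

explicit_make_pair = re.compile(r'\bmake_pair\s*<')

print(bool(explicit_make_pair.search('return make_pair<int, int>(a, b);')))  # True: flagged
print(bool(explicit_make_pair.search('return make_pair(a, b);')))            # False: deduced, fine
```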
5781 | def CheckRedundantVirtual(filename, clean_lines, linenum, error): |
||
5782 | """Check if line contains a redundant "virtual" function-specifier. |
||
5783 | |||
5784 | Args: |
||
5785 | filename: The name of the current file. |
||
5786 | clean_lines: A CleansedLines instance containing the file. |
||
5787 | linenum: The number of the line to check. |
||
5788 | error: The function to call with any errors found. |
||
5789 | """ |
||
5790 | # Look for "virtual" on current line. |
||
5791 | line = clean_lines.elided[linenum] |
||
5792 | virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line) |
||
5793 | if not virtual: return |
||
5794 | |||
5795 | # Ignore "virtual" keywords that are near access-specifiers. These |
||
5796 | # are only used in class base-specifier and do not apply to member |
||
5797 | # functions. |
||
5798 | if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or |
||
5799 | Match(r'^\s+(public|protected|private)\b', virtual.group(3))): |
||
5800 | return |
||
5801 | |||
5802 | # Ignore the "virtual" keyword from virtual base classes. Usually |
||
5803 | # there is a column on the same line in these cases (virtual base |
||
5804 | # classes are rare in google3 because multiple inheritance is rare). |
||
5805 | if Match(r'^.*[^:]:[^:].*$', line): return |
||
5806 | |||
5807 | # Look for the next opening parenthesis. This is the start of the |
||
5808 | # parameter list (possibly on the next line shortly after virtual). |
||
5809 | # TODO(unknown): doesn't work if there are virtual functions with |
||
5810 | # decltype() or other things that use parentheses, but csearch suggests |
||
5811 | # that this is rare. |
||
5812 | end_col = -1 |
||
5813 | end_line = -1 |
||
5814 | start_col = len(virtual.group(2)) |
||
5815 | for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())): |
||
5816 | line = clean_lines.elided[start_line][start_col:] |
||
5817 | parameter_list = Match(r'^([^(]*)\(', line) |
||
5818 | if parameter_list: |
||
5819 | # Match parentheses to find the end of the parameter list |
||
5820 | (_, end_line, end_col) = CloseExpression( |
||
5821 | clean_lines, start_line, start_col + len(parameter_list.group(1))) |
||
5822 | break |
||
5823 | start_col = 0 |
||
5824 | |||
5825 | if end_col < 0: |
||
5826 | return # Couldn't find end of parameter list, give up |
||
5827 | |||
5828 | # Look for "override" or "final" after the parameter list |
||
5829 | # (possibly on the next few lines). |
||
5830 | for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())): |
||
5831 | line = clean_lines.elided[i][end_col:] |
||
5832 | match = Search(r'\b(override|final)\b', line) |
||
5833 | if match: |
||
5834 | error(filename, linenum, 'readability/inheritance', 4, |
||
5835 | ('"virtual" is redundant since function is ' |
||
5836 | 'already declared as "%s"' % match.group(1))) |
||
5837 | |||
5838 | # Set end_col to check whole lines after we are done with the |
||
5839 | # first line. |
||
5840 | end_col = 0 |
||
5841 | if Search(r'[^\w]\s*$', line): |
||
5842 | break |
||
5843 | |||
5844 | |||
5845 | def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error): |
||
5846 | """Check if line contains a redundant "override" or "final" virt-specifier. |
||
5847 | |||
5848 | Args: |
||
5849 | filename: The name of the current file. |
||
5850 | clean_lines: A CleansedLines instance containing the file. |
||
5851 | linenum: The number of the line to check. |
||
5852 | error: The function to call with any errors found. |
||
5853 | """ |
||
5854 | # Look for closing parenthesis nearby. We need one to confirm where |
||
5855 | # the declarator ends and where the virt-specifier starts to avoid |
||
5856 | # false positives. |
||
5857 | line = clean_lines.elided[linenum] |
||
5858 | declarator_end = line.rfind(')') |
||
5859 | if declarator_end >= 0: |
||
5860 | fragment = line[declarator_end:] |
||
5861 | else: |
||
5862 | if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0: |
||
5863 | fragment = line |
||
5864 | else: |
||
5865 | return |
||
5866 | |||
5867 | # Check that at most one of "override" or "final" is present, not both |
||
5868 | if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment): |
||
5869 | error(filename, linenum, 'readability/inheritance', 4, |
||
5870 | ('"override" is redundant since function is ' |
||
5871 | 'already declared as "final"')) |
||
5872 | |||
5873 | |||
5874 | |||
5875 | |||
5876 | # Returns true if we are at a new block, and it is directly |
||
5877 | # inside of a namespace. |
||
5878 | def IsBlockInNameSpace(nesting_state, is_forward_declaration): |
||
5879 | """Checks that the new block is directly in a namespace. |
||
5880 | |||
5881 | Args: |
||
5882 | nesting_state: The _NestingState object that contains info about our state. |
||
5883 | is_forward_declaration: If the class is a forward declared class. |
||
5884 | Returns: |
||
5885 | Whether or not the new block is directly in a namespace. |
||
5886 | """ |
||
5887 | if is_forward_declaration: |
||
5888 | return len(nesting_state.stack) >= 1 and ( |
||
5889 | isinstance(nesting_state.stack[-1], _NamespaceInfo)) |
||
5890 | |||
5891 | |||
5892 | return (len(nesting_state.stack) > 1 and |
||
5893 | nesting_state.stack[-1].check_namespace_indentation and |
||
5894 | isinstance(nesting_state.stack[-2], _NamespaceInfo)) |
||
5895 | |||
5896 | |||
5897 | def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, |
||
5898 | raw_lines_no_comments, linenum): |
||
5899 | """This method determines if we should apply our namespace indentation check. |
||
5900 | |||
5901 | Args: |
||
5902 | nesting_state: The current nesting state. |
||
5903 | is_namespace_indent_item: If we just put a new class on the stack, True. |
||
5904 | If the top of the stack is not a class, or we did not recently |
||
5905 | add the class, False. |
||
5906 | raw_lines_no_comments: The lines without the comments. |
||
5907 | linenum: The current line number we are processing. |
||
5908 | |||
5909 | Returns: |
||
5910 | True if we should apply our namespace indentation check. Currently, it |
||
5911 | only works for classes and namespaces inside of a namespace. |
||
5912 | """ |
||
5913 | |||
5914 | is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments, |
||
5915 | linenum) |
||
5916 | |||
5917 | if not (is_namespace_indent_item or is_forward_declaration): |
||
5918 | return False |
||
5919 | |||
5920 | # If we are in a macro, we do not want to check the namespace indentation. |
||
5921 | if IsMacroDefinition(raw_lines_no_comments, linenum): |
||
5922 | return False |
||
5923 | |||
5924 | return IsBlockInNameSpace(nesting_state, is_forward_declaration) |
||
5925 | |||
5926 | |||
5927 | # Call this method if the line is directly inside of a namespace. |
||
5928 | # If the line above is blank (excluding comments) or the start of |
||
5929 | # an inner namespace, it cannot be indented. |
||
5930 | def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum, |
||
5931 | error): |
||
5932 | line = raw_lines_no_comments[linenum] |
||
5933 | if Match(r'^\s+', line): |
||
5934 | error(filename, linenum, 'runtime/indentation_namespace', 4, |
||
5935 | 'Do not indent within a namespace') |
||
5936 | |||
5937 | |||
5938 | def ProcessLine(filename, file_extension, clean_lines, line, |
||
5939 | include_state, function_state, nesting_state, error, |
||
5940 | extra_check_functions=None): |
||
5941 | """Processes a single line in the file. |
||
5942 | |||
5943 | Args: |
||
5944 | filename: Filename of the file that is being processed. |
||
5945 | file_extension: The extension (dot not included) of the file. |
||
5946 | clean_lines: An array of strings, each representing a line of the file, |
||
5947 | with comments stripped. |
||
5948 | line: Number of line being processed. |
||
5949 | include_state: An _IncludeState instance in which the headers are inserted. |
||
5950 | function_state: A _FunctionState instance which counts function lines, etc. |
||
5951 | nesting_state: A NestingState instance which maintains information about |
||
5952 | the current stack of nested blocks being parsed. |
||
5953 | error: A callable to which errors are reported, which takes 4 arguments: |
||
5954 | filename, line number, error level, and message |
||
5955 | extra_check_functions: An array of additional check functions that will be |
||
5956 | run on each source line. Each function takes 4 |
||
5957 | arguments: filename, clean_lines, line, error |
||
5958 | """ |
||
5959 | raw_lines = clean_lines.raw_lines |
||
5960 | ParseNolintSuppressions(filename, raw_lines[line], line, error) |
||
5961 | nesting_state.Update(filename, clean_lines, line, error) |
||
5962 | CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, |
||
5963 | error) |
||
5964 | if nesting_state.InAsmBlock(): return |
||
5965 | CheckForFunctionLengths(filename, clean_lines, line, function_state, error) |
||
5966 | CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) |
||
5967 | CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) |
||
5968 | CheckLanguage(filename, clean_lines, line, file_extension, include_state, |
||
5969 | nesting_state, error) |
||
5970 | CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) |
||
5971 | CheckForNonStandardConstructs(filename, clean_lines, line, |
||
5972 | nesting_state, error) |
||
5973 | CheckVlogArguments(filename, clean_lines, line, error) |
||
5974 | CheckPosixThreading(filename, clean_lines, line, error) |
||
5975 | CheckInvalidIncrement(filename, clean_lines, line, error) |
||
5976 | CheckMakePairUsesDeduction(filename, clean_lines, line, error) |
||
5977 | CheckRedundantVirtual(filename, clean_lines, line, error) |
||
5978 | CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) |
||
5979 | if extra_check_functions: |
||
5980 | for check_fn in extra_check_functions: |
||
5981 | check_fn(filename, clean_lines, line, error) |
||
5982 | |||
5983 | def FlagCxx11Features(filename, clean_lines, linenum, error): |
||
5984 | """Flag those c++11 features that we only allow in certain places. |
||
5985 | |||
5986 | Args: |
||
5987 | filename: The name of the current file. |
||
5988 | clean_lines: A CleansedLines instance containing the file. |
||
5989 | linenum: The number of the line to check. |
||
5990 | error: The function to call with any errors found. |
||
5991 | """ |
||
5992 | line = clean_lines.elided[linenum] |
||
5993 | |||
5994 | include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) |
||
5995 | |||
5996 | # Flag unapproved C++ TR1 headers. |
||
5997 | if include and include.group(1).startswith('tr1/'): |
||
5998 | error(filename, linenum, 'build/c++tr1', 5, |
||
5999 | ('C++ TR1 headers such as <%s> are unapproved.') % include.group(1)) |
||
6000 | |||
6001 | # Flag unapproved C++11 headers. |
||
6002 | if include and include.group(1) in ('cfenv', |
||
6003 | 'condition_variable', |
||
6004 | 'fenv.h', |
||
6005 | 'future', |
||
6006 | 'mutex', |
||
6007 | 'thread', |
||
6008 | 'chrono', |
||
6009 | 'ratio', |
||
6010 | 'regex', |
||
6011 | 'system_error', |
||
6012 | ): |
||
6013 | error(filename, linenum, 'build/c++11', 5, |
||
6014 | ('<%s> is an unapproved C++11 header.') % include.group(1)) |
||
6015 | |||
6016 | # The only place where we need to worry about C++11 keywords and library |
||
6017 | # features in preprocessor directives is in macro definitions. |
||
6018 | if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return |
||
6019 | |||
6020 | # These are classes and free functions. The classes are always |
||
6021 | # mentioned as std::*, but we only catch the free functions if |
||
6022 | # they're not found by ADL. They're alphabetical by header. |
||
6023 | for top_name in ( |
||
6024 | # type_traits |
||
6025 | 'alignment_of', |
||
6026 | 'aligned_union', |
||
6027 | ): |
||
6028 | if Search(r'\bstd::%s\b' % top_name, line): |
||
6029 | error(filename, linenum, 'build/c++11', 5, |
||
6030 | ('std::%s is an unapproved C++11 class or function. Send c-style ' |
||
6031 | 'an example of where it would make your code more readable, and ' |
||
6032 | 'they may let you use it.') % top_name) |
||
6033 | |||
6034 | |||
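A short sketch of the include matcher above together with the tr1/unapproved-header tests (header names taken from the lists above):

```python
import re

include_re = re.compile(r'\s*#\s*include\s+[<"]([^<"]+)[">]')
unapproved_cxx11 = ('cfenv', 'condition_variable', 'fenv.h', 'future', 'mutex',
                    'thread', 'chrono', 'ratio', 'regex', 'system_error')

for line in ('#include <tr1/memory>', '#include <thread>', '#include <vector>'):
    header = include_re.match(line).group(1)
    print(header, '->', header.startswith('tr1/') or header in unapproved_cxx11)
# tr1/memory -> True (build/c++tr1), thread -> True (build/c++11), vector -> False
```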
6035 | def FlagCxx14Features(filename, clean_lines, linenum, error): |
||
6036 | """Flag those C++14 features that we restrict. |
||
6037 | |||
6038 | Args: |
||
6039 | filename: The name of the current file. |
||
6040 | clean_lines: A CleansedLines instance containing the file. |
||
6041 | linenum: The number of the line to check. |
||
6042 | error: The function to call with any errors found. |
||
6043 | """ |
||
6044 | line = clean_lines.elided[linenum] |
||
6045 | |||
6046 | include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) |
||
6047 | |||
6048 | # Flag unapproved C++14 headers. |
||
6049 | if include and include.group(1) in ('scoped_allocator', 'shared_mutex'): |
||
6050 | error(filename, linenum, 'build/c++14', 5, |
||
6051 | ('<%s> is an unapproved C++14 header.') % include.group(1)) |
||
6052 | |||
6053 | |||
6054 | def ProcessFileData(filename, file_extension, lines, error, |
||
6055 | extra_check_functions=None): |
||
6056 | """Performs lint checks and reports any errors to the given error function. |
||
6057 | |||
6058 | Args: |
||
6059 | filename: Filename of the file that is being processed. |
||
6060 | file_extension: The extension (dot not included) of the file. |
||
6061 | lines: An array of strings, each representing a line of the file, with the |
||
6062 | last element being empty if the file is terminated with a newline. |
||
6063 | error: A callable to which errors are reported, which takes 4 arguments: |
||
6064 | filename, line number, error level, and message |
||
6065 | extra_check_functions: An array of additional check functions that will be |
||
6066 | run on each source line. Each function takes 4 |
||
6067 | arguments: filename, clean_lines, line, error |
||
6068 | """ |
||
6069 | lines = (['// marker so line numbers and indices both start at 1'] + lines + |
||
6070 | ['// marker so line numbers end in a known way']) |
||
6071 | |||
6072 | include_state = _IncludeState() |
||
6073 | function_state = _FunctionState() |
||
6074 | nesting_state = NestingState() |
||
6075 | |||
6076 | ResetNolintSuppressions() |
||
6077 | |||
6078 | CheckForCopyright(filename, lines, error) |
||
6079 | ProcessGlobalSuppresions(lines) |
||
6080 | RemoveMultiLineComments(filename, lines, error) |
||
6081 | clean_lines = CleansedLines(lines) |
||
6082 | |||
6083 | if file_extension in GetHeaderExtensions(): |
||
6084 | CheckForHeaderGuard(filename, clean_lines, error) |
||
6085 | |||
6086 | for line in range(clean_lines.NumLines()): |
||
6087 | ProcessLine(filename, file_extension, clean_lines, line, |
||
6088 | include_state, function_state, nesting_state, error, |
||
6089 | extra_check_functions) |
||
6090 | FlagCxx11Features(filename, clean_lines, line, error) |
||
6091 | nesting_state.CheckCompletedBlocks(filename, error) |
||
6092 | |||
6093 | CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) |
||
6094 | |||
6095 | # Check that the .cc file has included its header if it exists. |
||
6096 | if _IsSourceExtension(file_extension): |
||
6097 | CheckHeaderFileIncluded(filename, include_state, error) |
||
6098 | |||
6099 | # We check here rather than inside ProcessLine so that we see raw |
||
6100 | # lines rather than "cleaned" lines. |
||
6101 | CheckForBadCharacters(filename, lines, error) |
||
6102 | |||
6103 | CheckForNewlineAtEOF(filename, lines, error) |
||
6104 | |||
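Putting the pipeline above together, an end-to-end sketch for when cpplint.py is importable as a module (file name and snippet invented; the error callback takes the five positional arguments used by the calls in this file):

```python
import cpplint

report = []
def collect(filename, linenum, category, confidence, message):
    report.append('%s:%d: %s [%s] [%d]' % (filename, linenum, message,
                                           category, confidence))

source = ['// Copyright 2009 Example',
          'int main() {',
          '  sprintf(buf, "hi");',
          '  return 0;',
          '}',
          '']  # trailing empty element = file ends with a newline
cpplint.ProcessFileData('demo.cc', 'cc', source, collect)
print('\n'.join(report))  # expect at least a runtime/printf complaint about sprintf
```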
6105 | def ProcessConfigOverrides(filename): |
||
6106 | """ Loads the configuration files and processes the config overrides. |
||
6107 | |||
6108 | Args: |
||
6109 | filename: The name of the file being processed by the linter. |
||
6110 | |||
6111 | Returns: |
||
6112 | False if the current |filename| should not be processed further. |
||
6113 | """ |
||
6114 | |||
6115 | abs_filename = os.path.abspath(filename) |
||
6116 | cfg_filters = [] |
||
6117 | keep_looking = True |
||
6118 | while keep_looking: |
||
6119 | abs_path, base_name = os.path.split(abs_filename) |
||
6120 | if not base_name: |
||
6121 | break # Reached the root directory. |
||
6122 | |||
6123 | cfg_file = os.path.join(abs_path, "CPPLINT.cfg") |
||
6124 | abs_filename = abs_path |
||
6125 | if not os.path.isfile(cfg_file): |
||
6126 | continue |
||
6127 | |||
6128 | try: |
||
6129 | with open(cfg_file) as file_handle: |
||
6130 | for line in file_handle: |
||
6131 | line, _, _ = line.partition('#') # Remove comments. |
||
6132 | if not line.strip(): |
||
6133 | continue |
||
6134 | |||
6135 | name, _, val = line.partition('=') |
||
6136 | name = name.strip() |
||
6137 | val = val.strip() |
||
6138 | if name == 'set noparent': |
||
6139 | keep_looking = False |
||
6140 | elif name == 'filter': |
||
6141 | cfg_filters.append(val) |
||
6142 | elif name == 'exclude_files': |
||
6143 | # When matching exclude_files pattern, use the base_name of |
||
6144 | # the current file name or the directory name we are processing. |
||
6145 | # For example, if we are checking for lint errors in /foo/bar/baz.cc |
||
6146 | # and we found the .cfg file at /foo/CPPLINT.cfg, then the config |
||
6147 | # file's "exclude_files" filter is meant to be checked against "bar" |
||
6148 | # and not "baz" nor "bar/baz.cc". |
||
6149 | if base_name: |
||
6150 | pattern = re.compile(val) |
||
6151 | if pattern.match(base_name): |
||
6152 | _cpplint_state.PrintInfo('Ignoring "%s": file excluded by ' |
||
6153 | '"%s". File path component "%s" matches pattern "%s"\n' % |
||
6154 | (filename, cfg_file, base_name, val)) |
||
6155 | return False |
||
6156 | elif name == 'linelength': |
||
6157 | global _line_length |
||
6158 | try: |
||
6159 | _line_length = int(val) |
||
6160 | except ValueError: |
||
6161 | _cpplint_state.PrintError('Line length must be numeric.') |
||
6162 | elif name == 'extensions': |
||
6163 | global _valid_extensions |
||
6164 | try: |
||
6165 | extensions = [ext.strip() for ext in val.split(',')] |
||
6166 | _valid_extensions = set(extensions) |
||
6167 | except ValueError: |
||
6168 | sys.stderr.write('Extensions should be a comma-separated list of values;' |
||
6169 | 'for example: extensions=hpp,cpp\n' |
||
6170 | 'This could not be parsed: "%s"' % (val,)) |
||
6171 | elif name == 'headers': |
||
6172 | global _header_extensions |
||
6173 | try: |
||
6174 | extensions = [ext.strip() for ext in val.split(',')] |
||
6175 | _header_extensions = set(extensions) |
||
6176 | except ValueError: |
||
6177 | sys.stderr.write('Extensions should be a comma-separated list of values;' |
||
6178 | 'for example: extensions=hpp,cpp\n' |
||
6179 | 'This could not be parsed: "%s"' % (val,)) |
||
6180 | elif name == 'root': |
||
6181 | global _root |
||
6182 | _root = val |
||
6183 | else: |
||
6184 | _cpplint_state.PrintError( |
||
6185 | 'Invalid configuration option (%s) in file %s\n' % |
||
6186 | (name, cfg_file)) |
||
6187 | |||
6188 | except IOError: |
||
6189 | _cpplint_state.PrintError( |
||
6190 | "Skipping config file '%s': Can't open for reading\n" % cfg_file) |
||
6191 | keep_looking = False |
||
6192 | |||
6193 | # Apply all the accumulated filters in reverse order (top-level directory |
||
6194 | # config options having the least priority). |
||
6195 | for cfg_filter in reversed(cfg_filters): |
||
6196 | _AddFilters(cfg_filter) |
||
6197 | |||
6198 | return True |
||
6199 | |||
6200 | |||
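As an illustration of what this parser accepts, a CPPLINT.cfg placed in a project directory might contain lines like the following (the specific values are hypothetical; text after '#' is stripped as a comment, and "set noparent" stops the search in parent directories):

    # Hypothetical CPPLINT.cfg
    set noparent
    filter=-build/include_order,+whitespace/braces
    exclude_files=third_party      # regex matched against one path component
    linelength=100
    extensions=cc,h,hpp
    root=include
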
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.

    vlevel: The level of errors to report.  Every error of confidence
            >= verbose_level will be reported.  0 is a good default.

    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """

  _SetVerboseLevel(vlevel)
  _BackupFilters()

  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return

  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')

    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)

  except IOError:
    _cpplint_state.PrintError(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in GetAllExtensions():
    _cpplint_state.PrintError('Ignoring %s; not a valid file name '
                              '(%s)\n' % (filename, ', '.join(GetAllExtensions())))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)

    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority; we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')

  _cpplint_state.PrintInfo('Done processing %s\n' % filename)
  _RestoreFilters()

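A minimal way to drive this entry point from Python is sketched below. It assumes the script is importable as a module named cpplint and that the file path exists; both are illustrative, not part of the library's documented API.

# Illustrative sketch only -- not part of cpplint.py.
import cpplint

cpplint._cpplint_state.ResetErrorCounts()
cpplint.ProcessFile('src/widget.cc', vlevel=1)   # path is hypothetical
print('total errors:', cpplint._cpplint_state.error_count)
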
def PrintUsage(message):
  """Prints a brief usage string and exits, optionally with an error message.

  Args:
    message: The optional error message.
  """
  sys.stderr.write(_USAGE)

  if message:
    sys.exit('\nFATAL ERROR: ' + message)
  else:
    sys.exit(0)


def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
  sys.exit(0)

def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments.

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'repository=',
                                                 'linelength=',
                                                 'extensions=',
                                                 'exclude=',
                                                 'headers=',
                                                 'quiet',
                                                 'recursive'])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''
  recursive = False

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse', 'junit'):
        PrintUsage('The only allowed output formats are emacs, vs7, eclipse '
                   'and junit.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--repository':
      global _repository
      _repository = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be numeric.')
    elif opt == '--exclude':
      global _excludes
      if not _excludes:
        _excludes = set()
      _excludes.update(glob.glob(val))
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        PrintUsage('Extensions must be a comma-separated list.')
    elif opt == '--headers':
      global _header_extensions
      try:
        _header_extensions = set(val.split(','))
      except ValueError:
        PrintUsage('Extensions must be a comma-separated list.')
    elif opt == '--recursive':
      recursive = True
    elif opt == '--quiet':
      global _quiet
      _quiet = True

  if not filenames:
    PrintUsage('No files were specified.')

  if recursive:
    filenames = _ExpandDirectories(filenames)

  if _excludes:
    filenames = _FilterExcludedFiles(filenames)

  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames

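For orientation, a sketch of driving this parser directly is shown below. The flag values and the "src" directory are hypothetical; note that ParseArguments exits through PrintUsage if no filenames remain after parsing.

# Illustrative sketch only -- not part of cpplint.py.
import cpplint

# Returns the files to lint and, as side effects, sets the module-level
# output format, verbosity, filters, and counting style.
files = cpplint.ParseArguments(['--linelength=100',
                                '--filter=-build/include_order',
                                '--recursive', 'src'])
print(files)
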
def _ExpandDirectories(filenames):
  """Searches a list of filenames and replaces directories in the list with
  all files descending from those directories. Files with extensions not in
  the valid extensions list are excluded.

  Args:
    filenames: A list of files or directories

  Returns:
    A list of all files that are members of filenames or descended from a
    directory in filenames
  """
  expanded = set()
  for filename in filenames:
    if not os.path.isdir(filename):
      expanded.add(filename)
      continue

    for root, _, files in os.walk(filename):
      for loopfile in files:
        fullname = os.path.join(root, loopfile)
        if fullname.startswith('.' + os.path.sep):
          fullname = fullname[len('.' + os.path.sep):]
        expanded.add(fullname)

  filtered = []
  for filename in expanded:
    if os.path.splitext(filename)[1][1:] in GetAllExtensions():
      filtered.append(filename)

  return filtered

def _FilterExcludedFiles(filenames):
  """Filters out files listed in the --exclude command line switch. File paths
  in the switch are evaluated relative to the current working directory.
  """
  exclude_paths = [os.path.abspath(f) for f in _excludes]
  return [f for f in filenames if os.path.abspath(f) not in exclude_paths]

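These two helpers are what --recursive and --exclude feed through in ParseArguments. A small hypothetical walk-through, with invented directory and file names:

# Illustrative sketch only -- not part of cpplint.py.
# Suppose ./src contains widget.cc, widget.h, and README.md, and that
# --exclude=src/legacy.cc was given (so _excludes is already populated).
candidates = _ExpandDirectories(['src'])    # e.g. ['src/widget.cc', 'src/widget.h'];
                                            # README.md is dropped (not a valid extension)
to_lint = _FilterExcludedFiles(candidates)  # drops paths whose absolute form
                                            # matches an --exclude entry
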
def main():
  filenames = ParseArguments(sys.argv[1:])
  backup_err = sys.stderr
  try:
    # Change stderr to write with replacement characters so we don't die
    # if we try to print something containing non-ASCII characters.
    sys.stderr = codecs.StreamReader(sys.stderr, 'replace')

    _cpplint_state.ResetErrorCounts()
    for filename in filenames:
      ProcessFile(filename, _cpplint_state.verbose_level)
    _cpplint_state.PrintErrorCounts()

    if _cpplint_state.output_format == 'junit':
      sys.stderr.write(_cpplint_state.FormatJUnitXML())

  finally:
    sys.stderr = backup_err

  sys.exit(_cpplint_state.error_count > 0)


if __name__ == '__main__':
  main()
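Because main() always terminates through sys.exit (non-zero when any lint errors were counted), a caller embedding it programmatically has to catch SystemExit. A hedged sketch, again assuming the script is importable as cpplint and using made-up arguments:

# Illustrative sketch only -- not part of cpplint.py.
import sys
import cpplint

sys.argv = ['cpplint.py', '--quiet', 'src/widget.cc']   # arguments are hypothetical
try:
  cpplint.main()
except SystemExit as e:
  print('lint failed' if e.code else 'lint passed')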