Rietveld Code Review Tool

Delta Between Two Patch Sets: flake8-abp/flake8_abp.py

Issue 29342824: Issue 4044 - Added handling for __future__ unicode_literals import to check_quotes() (Closed)
Left Patch Set: Changed yield spacing, moved the check above the Python 3+ check, and made A112 yield alongside the other codes. Created May 26, 2016, 3:25 p.m.
Right Patch Set: Removed a redundant comment. Created June 2, 2016, 5:45 p.m.
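The A112 message this issue adds discourages explicit u'' prefixes in favour of the __future__ import. A minimal illustration (my own example, not part of the patch) of the two styles check_quotes() distinguishes:

    from __future__ import unicode_literals

    ok = 'plain literal, already unicode on Python 2 thanks to the import'
    bad = u'explicit prefix, reported as A112 by the new check'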
 # This file is part of Adblock Plus <https://adblockplus.org/>,
 # Copyright (C) 2006-2016 Eyeo GmbH
 #
 # Adblock Plus is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 3 as
 # published by the Free Software Foundation.
 #
 # Adblock Plus is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
(...skipping 26 matching lines...)
     're.match': 're.search',
     'codecs.open': 'io.open',
 }

 ESSENTIAL_BUILTINS = set(dir(builtins)) - {'apply', 'buffer', 'coerce',
                                            'intern', 'file'}

 LEAVE_BLOCK = (ast.Return, ast.Raise, ast.Continue, ast.Break)
 VOLATILE = object()

-is_unicode_literals = False
-

 def evaluate(node):
     try:
         return eval(compile(ast.Expression(node), '', 'eval'), {})
     except Exception:
         return VOLATILE


 def is_const(node):
     return evaluate(node) is not VOLATILE
(...skipping 220 matching lines...)
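As an aside (not part of the review), evaluate() above constant-folds an AST node in an empty namespace and falls back to the VOLATILE sentinel on any failure, which is exactly what is_const() tests:

    import ast

    const_node = ast.parse('1 + 2', mode='eval').body     # evaluate() -> 3
    volatile_node = ast.parse('foo()', mode='eval').body  # NameError -> VOLATILE

    # is_const(const_node)     -> True
    # is_const(volatile_node)  -> False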
         substitute = DISCOURAGED_APIS.get(name)
         if substitute:
             self.errors.append((node, 'A301 use {}() instead of '
                                       '{}()'.format(substitute, name)))

     def visit_Call(self, node):
         func = get_identifier(node.func)
         arg = next(iter(node.args), None)
         redundant_literal = False

-        if isinstance(arg, ast.Lambda) and func in {'map', 'filter',
-                                                    'imap', 'ifilter',
-                                                    'itertools.imap',
-                                                    'itertools.ifilter'}:
-            self.errors.append((node, 'A104 use a comprehension '
-                                      'instead of calling {}() with '
-                                      'lambda function'.format(func)))
+        if isinstance(arg, ast.Lambda):
+            if len(node.args) == 2 and func in {'map', 'filter',
+                                                'imap', 'ifilter',
+                                                'itertools.imap',
+                                                'itertools.ifilter'}:
+                self.errors.append((node, 'A104 use a comprehension '
+                                          'instead of calling {}() with '
+                                          'lambda function'.format(func)))
         elif isinstance(arg, (ast.List, ast.Tuple)):
             if func == 'dict':
                 redundant_literal = all(isinstance(elt, (ast.Tuple, ast.List))
                                         for elt in arg.elts)
             else:
                 redundant_literal = func in {'list', 'set', 'tuple'}
         elif isinstance(arg, (ast.ListComp, ast.GeneratorExp)):
             if func == 'dict':
                 redundant_literal = isinstance(arg.elt, (ast.Tuple, ast.List))
             else:
(...skipping 64 matching lines...)
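A rough sketch (mine, not from the patch) of what the added len(node.args) == 2 guard means for A104: only single-iterable map()/filter() calls with a lambda are flagged, presumably because a call over several iterables has no equally direct comprehension:

    # Still reported (two call arguments); a comprehension is the suggested form.
    squares = map(lambda x: x * x, range(10))             # A104
    squares = [x * x for x in range(10)]                  # preferred

    # No longer reported (three call arguments); the new guard skips it.
    sums = map(lambda x, y: x + y, range(10), range(10))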


 def check_non_default_encoding(physical_line, line_number):
     if line_number <= 2 and re.search(r'^\s*#.*coding[:=]', physical_line):
         return (0, 'A303 non-default file encoding')

 check_non_default_encoding.name = 'abp-non-default-encoding'
 check_non_default_encoding.version = __version__


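For reference, an example (not from the patch) of the kind of declaration check_non_default_encoding() reports when it appears on one of the first two lines of a file:

    # -*- coding: latin-1 -*-      -> 'A303 non-default file encoding'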
-def check_quotes(logical_line, tokens, previous_logical):
+def check_quotes(logical_line, tokens, previous_logical, checker_state):
     first_token = True
-    global is_unicode_literals

-    # check if this is beginning of file
-    if tokens[0][3][0] == 1:
-        is_unicode_literals = False
-
-    # check if in unicode_literals mode
     token_strings = [t[1] for t in tokens]
-    if token_strings[:3] == ['from', '__future__', 'import']:
-        if 'unicode_literals' in token_strings:
-            is_unicode_literals = True
+    future_import = token_strings[:3] == ['from', '__future__', 'import']
+
+    if future_import and 'unicode_literals' in token_strings:
+        checker_state['has_unicode_literals'] = True

     for kind, token, start, end, _ in tokens:
         if kind == tokenize.INDENT or kind == tokenize.DEDENT:
             continue

         if kind == tokenize.STRING:
-            match = re.search(r'^(u)?(b)?(r)?((""")?.*)$',
+            match = re.search(r'^([rub]*)([\'"]{1,3})(.*)\2$',
                               token, re.IGNORECASE | re.DOTALL)
-            (is_unicode, is_bytes, is_raw,
-             literal, has_doc_quotes) = match.groups()
+            prefixes, quote, text = match.groups()
+            prefixes = prefixes.lower()
+
+            if 'u' in prefixes:
+                yield (start, 'A112 use "from __future__ import '
+                              'unicode_literals" instead of '
+                              'prefixing literals with "u"')

             if first_token and re.search(r'^(?:(?:def|class)\s|$)',
                                          previous_logical):
-                if not has_doc_quotes:
+                if quote != '"""':
                     yield (start, 'A109 use triple double '
                                   'quotes for docstrings')
-                elif is_unicode or is_bytes or is_raw:
-                    yield (start, "A109 don't use u'', b'' "
-                                  "or r'' for doc strings")
-            elif start[0] == end[0]:
-                if is_raw:
-                    literal = re.sub(r'\\(?!{})'.format(literal[0]),
-                                     '\\\\\\\\', literal)
-                if is_unicode and not is_unicode_literals:
-                    yield (start, 'A112 use "from __future__ import'
-                                  'unicode_literals" instead of '
-                                  'prefixing literals with "u"')
-                if sys.version_info[0] >= 3:
-                    if is_bytes:
-                        literal = 'b' + literal
-                elif not is_bytes:
-                    literal = 'u' + literal
-
+            elif start[0] != end[0]:
+                pass
+            elif 'r' in prefixes:
+                if quote != "'" and not (quote == '"' and "'" in text):
+                    yield (start, 'A110 use single quotes for raw string')
+            else:
+                prefix = ''
+                if sys.version_info[0] >= 3:
+                    if 'b' in prefixes:
+                        prefix = 'b'
+                else:
+                    u_literals = checker_state.get('has_unicode_literals')
+                    if 'u' in prefixes or u_literals and 'b' not in prefixes:
+                        prefix = 'u'
+
+                literal = '{0}{1}{2}{1}'.format(prefix, quote, text)
                 if ascii(eval(literal)) != literal:
                     yield (start, "A110 string literal doesn't match "
                                   '{}()'.format(ascii.__name__))

         first_token = False

 check_quotes.name = 'abp-quotes'
 check_quotes.version = __version__


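The switch from the module-level is_unicode_literals flag to flake8's per-file checker_state dict is the core of this delta. A hedged sketch of driving the check by hand under Python 3 (the flake8_abp import path is assumed from the file name; in practice flake8 supplies checker_state itself):

    import tokenize
    from io import StringIO

    from flake8_abp import check_quotes  # assumed module name

    source = "from __future__ import unicode_literals\nname = u'x'\n"
    state = {}        # one dict per checked file, shared across logical lines
    previous = ''

    for logical in source.splitlines(True):
        tokens = list(tokenize.generate_tokens(StringIO(logical).readline))
        for position, message in check_quotes(logical, tokens, previous, state):
            print(position, message)  # reports A112 for the redundant u prefix
        previous = logical.strip()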
(...skipping 42 matching lines...)
                 if tokens[i + 1][:2] != (tokenize.OP, ':'):
                     break

                 return [(pos, 'A111 redundant parenthesis for {} '
                               'statement'.format(statement))]

     return []

 check_redundant_parenthesis.name = 'abp-redundant-parenthesis'
 check_redundant_parenthesis.version = __version__
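Only the tail of check_redundant_parenthesis() is visible in this delta. As a reminder (my own example, not part of the change), A111 targets parentheses wrapping a whole condition:

    x = 1

    if (x == 1):     # reported: A111 redundant parenthesis for if statement
        pass

    if x == 1:       # fine
        pass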