Rietveld Code Review Tool

Unified Diff: flake8-abp/flake8_abp.py

Issue 29342824: Issue 4044 - Added handling for __future__ unicode_literals import to check_quotes() (Closed)
Patch Set: Changed yield spacing, moved the check above the Python 3+ check, and made A112 yield with the other codes. Created May 26, 2016, 3:25 p.m.
# This file is part of Adblock Plus <https://adblockplus.org/>,
# Copyright (C) 2006-2016 Eyeo GmbH
#
# Adblock Plus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# Adblock Plus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
(... 26 unchanged lines skipped ...)
    're.match': 're.search',
    'codecs.open': 'io.open',
}

ESSENTIAL_BUILTINS = set(dir(builtins)) - {'apply', 'buffer', 'coerce',
                                           'intern', 'file'}

LEAVE_BLOCK = (ast.Return, ast.Raise, ast.Continue, ast.Break)
VOLATILE = object()

-IS_UNICODE_LITERALS = False
+is_unicode_literals = False


def evaluate(node):
    try:
        return eval(compile(ast.Expression(node), '', 'eval'), {})
    except Exception:
        return VOLATILE


def is_const(node):
(... 314 unchanged lines skipped ...)
def check_non_default_encoding(physical_line, line_number):
    if line_number <= 2 and re.search(r'^\s*#.*coding[:=]', physical_line):
        return (0, 'A303 non-default file encoding')

check_non_default_encoding.name = 'abp-non-default-encoding'
check_non_default_encoding.version = __version__

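For reference (this check is unchanged by the patch), check_non_default_encoding reports A303 when one of the first two physical lines carries an explicit coding declaration. A hypothetical first line that would be flagged:

    # -*- coding: utf-8 -*-

The comment matches r'^\s*#.*coding[:=]', so the plugin returns 'A303 non-default file encoding' at column 0.
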
def check_quotes(logical_line, tokens, previous_logical):
    first_token = True
-    global IS_UNICODE_LITERALS
+    global is_unicode_literals

-    # --- check if this is beginning of file
+    # check if this is beginning of file
    if tokens[0][3][0] == 1:
-        IS_UNICODE_LITERALS = False
+        is_unicode_literals = False

-    # --- check if in unicode_literals mode
+    # check if in unicode_literals mode
    token_strings = [t[1] for t in tokens]
    if token_strings[:3] == ['from', '__future__', 'import']:
-        IS_UNICODE_LITERALS = 'unicode_literals' in token_strings
+        if 'unicode_literals' in token_strings:
+            is_unicode_literals = True

    for kind, token, start, end, _ in tokens:
        if kind == tokenize.INDENT or kind == tokenize.DEDENT:
            continue

        if kind == tokenize.STRING:
            match = re.search(r'^(u)?(b)?(r)?((""")?.*)$',
                              token, re.IGNORECASE | re.DOTALL)
            (is_unicode, is_bytes, is_raw,
             literal, has_doc_quotes) = match.groups()

            if first_token and re.search(r'^(?:(?:def|class)\s|$)',
                                         previous_logical):
                if not has_doc_quotes:
                    yield (start, 'A109 use triple double '
                                  'quotes for docstrings')
                elif is_unicode or is_bytes or is_raw:
                    yield (start, "A109 don't use u'', b'' "
                                  "or r'' for doc strings")
            elif start[0] == end[0]:
                if is_raw:
                    literal = re.sub(r'\\(?!{})'.format(literal[0]),
                                     '\\\\\\\\', literal)
+                if is_unicode and not is_unicode_literals:
+                    yield (start, 'A112 use "from __future__ import '
+                                  'unicode_literals" instead of '
+                                  'prefixing literals with "u"')
                if sys.version_info[0] >= 3:
                    if is_bytes:
                        literal = 'b' + literal
-                elif is_unicode and not IS_UNICODE_LITERALS:
-                    yield(start, 'A112 use "from __future__ import"'
-                          'unicode_literals instead of prefixing'
-                          'literals with "u"')
                elif not is_bytes:
                    literal = 'u' + literal

                if ascii(eval(literal)) != literal:
                    yield (start, "A110 string literal doesn't match "
                                  '{}()'.format(ascii.__name__))

        first_token = False

check_quotes.name = 'abp-quotes'
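
To illustrate the intended effect of the new A112 handling, here is a hypothetical pair of input modules (names and contents are illustrative, not from the patch). A "u" prefix is reported unless the module imports unicode_literals from __future__, and the warning is now yielded regardless of the Python version flake8 runs under:

    # module_a.py -- no __future__ import, so the prefix is reported:
    greeting = u'hello'
    # -> A112 use "from __future__ import unicode_literals" instead of
    #    prefixing literals with "u"

    # module_b.py -- the import makes plain literals unicode on Python 2 too,
    # so no prefix is needed and nothing is reported:
    from __future__ import unicode_literals
    greeting = 'hello'

Because check_quotes resets is_unicode_literals whenever it sees tokens from line 1, the suppression applies per module rather than leaking across files.
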
(... 45 unchanged lines skipped ...)
            if tokens[i + 1][:2] != (tokenize.OP, ':'):
                break

            return [(pos, 'A111 redundant parenthesis for {} '
                          'statement'.format(statement))]

    return []

check_redundant_parenthesis.name = 'abp-redundant-parenthesis'
check_redundant_parenthesis.version = __version__
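
For context, the visible tail of check_redundant_parenthesis suggests A111 is reported when a statement's parenthesised condition is immediately followed by the closing colon. A hypothetical trigger (identifiers made up for illustration):

    if (ok):        # -> A111 redundant parenthesis for if statement
        do_thing()

    if ok:          # no warning
        do_thing()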