Archive: /home/fresvfqn/waterdamagerestorationandrepairsmithtown.com/Compressed/lib2to3.tar

==> refactor.py <==

# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Refactoring framework.

Used as a main program, this can refactor any number of files and/or
recursively descend down directories.  Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""

__author__ = "Guido van Rossum <guido@python.org>"


# Python imports
import os
import sys
import logging
import operator
import collections
import io
from itertools import chain

# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_matcher as bm


def get_all_fix_names(fixer_pkg, remove_prefix=True):
    """Return a sorted list of all available fix names in the given package."""
    pkg = __import__(fixer_pkg, [], [], ["*"])
    fixer_dir = os.path.dirname(pkg.__file__)
    fix_names = []
    for name in sorted(os.listdir(fixer_dir)):
        if name.startswith("fix_") and name.endswith(".py"):
            if remove_prefix:
                name = name[4:]
            fix_names.append(name[:-3])
    return fix_names


class _EveryNode(Exception):
    pass


def _get_head_types(pat):
    """ Accepts a pytree Pattern Node and returns a set
        of the pattern types which will match first. """

    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        #   or a type and content -- so they don't get any farther
        # Always return leafs
        if pat.type is None:
            raise _EveryNode
        return {pat.type}

    if isinstance(pat, pytree.NegatedPattern):
        if pat.content:
            return _get_head_types(pat.content)
        raise _EveryNode # Negated Patterns don't have a type

    if isinstance(pat, pytree.WildcardPattern):
        # Recurse on each node in content
        r = set()
        for p in pat.content:
            for x in p:
                r.update(_get_head_types(x))
        return r

    raise Exception("Oh no! I don't understand pattern %s" % (pat,))


def _get_headnode_dict(fixer_list):
    """ Accepts a list of fixers and returns a dictionary
        of head node type --> fixer list.  """
    head_nodes = collections.defaultdict(list)
    every = []
    for fixer in fixer_list:
        if fixer.pattern:
            try:
                heads = _get_head_types(fixer.pattern)
            except _EveryNode:
                every.append(fixer)
            else:
                for node_type in heads:
                    head_nodes[node_type].append(fixer)
        else:
            if fixer._accept_type is not None:
                head_nodes[fixer._accept_type].append(fixer)
            else:
                every.append(fixer)
    for node_type in chain(pygram.python_grammar.symbol2number.values(),
                           pygram.python_grammar.tokens):
        head_nodes[node_type].extend(every)
    return dict(head_nodes)


def get_fixers_from_package(pkg_name):
    """
    Return the fully qualified names for fixers in the package pkg_name.
    """
    return [pkg_name + "." + fix_name
            for fix_name in get_all_fix_names(pkg_name, False)]

def _identity(obj):
    return obj

if sys.version_info < (3, 0):
    import codecs
    _open_with_encoding = codecs.open
    # codecs.open doesn't translate newlines sadly.
    def _from_system_newlines(input):
        return input.replace("\r\n", "\n")
    def _to_system_newlines(input):
        if os.linesep != "\n":
            return input.replace("\n", os.linesep)
        else:
            return input
else:
    _open_with_encoding = open
    _from_system_newlines = _identity
    _to_system_newlines = _identity


def _detect_future_features(source):
    have_docstring = False
    gen = tokenize.generate_tokens(io.StringIO(source).readline)
    def advance():
        tok = next(gen)
        return tok[0], tok[1]
    ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT})
    features = set()
    try:
        while True:
            tp, value = advance()
            if tp in ignore:
                continue
            elif tp == token.STRING:
                if have_docstring:
                    break
                have_docstring = True
            elif tp == token.NAME and value == "from":
                tp, value = advance()
                if tp != token.NAME or value != "__future__":
                    break
                tp, value = advance()
                if tp != token.NAME or value != "import":
                    break
                tp, value = advance()
                if tp == token.OP and value == "(":
                    tp, value = advance()
                while tp == token.NAME:
                    features.add(value)
                    tp, value = advance()
                    if tp != token.OP or value != ",":
                        break
                    tp, value = advance()
            else:
                break
    except StopIteration:
        pass
    return frozenset(features)


class FixerError(Exception):
    """A fixer could not be loaded."""


class RefactoringTool(object):

    _default_options = {"print_function": False,
                        "write_unchanged_files": False}

    CLASS_PREFIX = "Fix" # The prefix for fixer classes
    FILE_PREFIX = "fix_" # The prefix for modules with a fixer within

    def __init__(self, fixer_names, options=None, explicit=None):
        """Initializer.

        Args:
            fixer_names: a list of fixers to import
            options: a dict with configuration.
            explicit: a list of fixers to run even if they are marked explicit.
        """
        self.fixers = fixer_names
        self.explicit = explicit or []
        self.options = self._default_options.copy()
        if options is not None:
            self.options.update(options)
        if self.options["print_function"]:
            self.grammar = pygram.python_grammar_no_print_statement
        else:
            self.grammar = pygram.python_grammar
        # When this is True, the refactor*() methods will call write_file() for
        # files processed even if they were not changed during refactoring. If
        # and only if the refactor method's write parameter was True.
        self.write_unchanged_files = self.options.get("write_unchanged_files")
        self.errors = []
        self.logger = logging.getLogger("RefactoringTool")
        self.fixer_log = []
        self.wrote = False
        self.driver = driver.Driver(self.grammar,
                                    convert=pytree.convert,
                                    logger=self.logger)
        self.pre_order, self.post_order = self.get_fixers()

        self.files = []  # List of files that were or should be modified

        self.BM = bm.BottomMatcher()
        self.bmi_pre_order = []  # Bottom Matcher incompatible fixers
        self.bmi_post_order = []

        for fixer in chain(self.post_order, self.pre_order):
            if fixer.BM_compatible:
                self.BM.add_fixer(fixer)
                # remove fixers that will be handled by the bottom-up
                # matcher
            elif fixer in self.pre_order:
                self.bmi_pre_order.append(fixer)
            elif fixer in self.post_order:
                self.bmi_post_order.append(fixer)

        self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
        self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)

    def get_fixers(self):
        """Inspects the options to load the requested patterns and handlers.

        Returns:
          (pre_order, post_order), where pre_order is the list of fixers that
          want a pre-order AST traversal, and post_order is the list that want
          post-order traversal.
""" pre_order_fixers = [] post_order_fixers = [] for fix_mod_path in self.fixers: mod = __import__(fix_mod_path, {}, {}, ["*"]) fix_name = fix_mod_path.rsplit(".", 1)[-1] if fix_name.startswith(self.FILE_PREFIX): fix_name = fix_name[len(self.FILE_PREFIX):] parts = fix_name.split("_") class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts]) try: fix_class = getattr(mod, class_name) except AttributeError: raise FixerError("Can't find %s.%s" % (fix_name, class_name)) fixer = fix_class(self.options, self.fixer_log) if fixer.explicit and self.explicit is not True and \ fix_mod_path not in self.explicit: self.log_message("Skipping optional fixer: %s", fix_name) continue self.log_debug("Adding transformation: %s", fix_name) if fixer.order == "pre": pre_order_fixers.append(fixer) elif fixer.order == "post": post_order_fixers.append(fixer) else: raise FixerError("Illegal fixer order: %r" % fixer.order) key_func = operator.attrgetter("run_order") pre_order_fixers.sort(key=key_func) post_order_fixers.sort(key=key_func) return (pre_order_fixers, post_order_fixers) def log_error(self, msg, *args, **kwds): """Called when an error occurs.""" raise def log_message(self, msg, *args): """Hook to log a message.""" if args: msg = msg % args self.logger.info(msg) def log_debug(self, msg, *args): if args: msg = msg % args self.logger.debug(msg) def print_output(self, old_text, new_text, filename, equal): """Called with the old version, new version, and filename of a refactored file.""" pass def refactor(self, items, write=False, doctests_only=False): """Refactor a list of files and directories.""" for dir_or_file in items: if os.path.isdir(dir_or_file): self.refactor_dir(dir_or_file, write, doctests_only) else: self.refactor_file(dir_or_file, write, doctests_only) def refactor_dir(self, dir_name, write=False, doctests_only=False): """Descends down a directory and refactor every Python file found. Python files are assumed to have a .py extension. Files and subdirectories starting with '.' are skipped. """ py_ext = os.extsep + "py" for dirpath, dirnames, filenames in os.walk(dir_name): self.log_debug("Descending into %s", dirpath) dirnames.sort() filenames.sort() for name in filenames: if (not name.startswith(".") and os.path.splitext(name)[1] == py_ext): fullname = os.path.join(dirpath, name) self.refactor_file(fullname, write, doctests_only) # Modify dirnames in-place to remove subdirs with leading dots dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")] def _read_python_source(self, filename): """ Do our best to decode a Python source file correctly. """ try: f = open(filename, "rb") except OSError as err: self.log_error("Can't open %s: %s", filename, err) return None, None try: encoding = tokenize.detect_encoding(f.readline)[0] finally: f.close() with _open_with_encoding(filename, "r", encoding=encoding) as f: return _from_system_newlines(f.read()), encoding def refactor_file(self, filename, write=False, doctests_only=False): """Refactors a file.""" input, encoding = self._read_python_source(filename) if input is None: # Reading the file failed. 
            return
        input += "\n" # Silence certain parse errors
        if doctests_only:
            self.log_debug("Refactoring doctests in %s", filename)
            output = self.refactor_docstring(input, filename)
            if self.write_unchanged_files or output != input:
                self.processed_file(output, filename, input, write, encoding)
            else:
                self.log_debug("No doctest changes in %s", filename)
        else:
            tree = self.refactor_string(input, filename)
            if self.write_unchanged_files or (tree and tree.was_changed):
                # The [:-1] is to take off the \n we added earlier
                self.processed_file(str(tree)[:-1], filename,
                                    write=write, encoding=encoding)
            else:
                self.log_debug("No changes in %s", filename)

    def refactor_string(self, data, name):
        """Refactor a given input string.

        Args:
            data: a string holding the code to be refactored.
            name: a human-readable name for use in error/log messages.

        Returns:
            An AST corresponding to the refactored input stream; None if
            there were errors during the parse.
        """
        features = _detect_future_features(data)
        if "print_function" in features:
            self.driver.grammar = pygram.python_grammar_no_print_statement
        try:
            tree = self.driver.parse_string(data)
        except Exception as err:
            self.log_error("Can't parse %s: %s: %s",
                           name, err.__class__.__name__, err)
            return
        finally:
            self.driver.grammar = self.grammar
        tree.future_features = features
        self.log_debug("Refactoring %s", name)
        self.refactor_tree(tree, name)
        return tree

    def refactor_stdin(self, doctests_only=False):
        input = sys.stdin.read()
        if doctests_only:
            self.log_debug("Refactoring doctests in stdin")
            output = self.refactor_docstring(input, "<stdin>")
            if self.write_unchanged_files or output != input:
                self.processed_file(output, "<stdin>", input)
            else:
                self.log_debug("No doctest changes in stdin")
        else:
            tree = self.refactor_string(input, "<stdin>")
            if self.write_unchanged_files or (tree and tree.was_changed):
                self.processed_file(str(tree), "<stdin>", input)
            else:
                self.log_debug("No changes in stdin")

    def refactor_tree(self, tree, name):
        """Refactors a parse tree (modifying the tree in place).

        For compatible patterns the bottom matcher module is
        used. Otherwise the tree is traversed node-to-node for
        matches.

        Args:
            tree: a pytree.Node instance representing the root of the tree
                  to be refactored.
            name: a human-readable name for this tree.

        Returns:
            True if the tree was modified, False otherwise.
""" for fixer in chain(self.pre_order, self.post_order): fixer.start_tree(tree, name) #use traditional matching for the incompatible fixers self.traverse_by(self.bmi_pre_order_heads, tree.pre_order()) self.traverse_by(self.bmi_post_order_heads, tree.post_order()) # obtain a set of candidate nodes match_set = self.BM.run(tree.leaves()) while any(match_set.values()): for fixer in self.BM.fixers: if fixer in match_set and match_set[fixer]: #sort by depth; apply fixers from bottom(of the AST) to top match_set[fixer].sort(key=pytree.Base.depth, reverse=True) if fixer.keep_line_order: #some fixers(eg fix_imports) must be applied #with the original file's line order match_set[fixer].sort(key=pytree.Base.get_lineno) for node in list(match_set[fixer]): if node in match_set[fixer]: match_set[fixer].remove(node) try: find_root(node) except ValueError: # this node has been cut off from a # previous transformation ; skip continue if node.fixers_applied and fixer in node.fixers_applied: # do not apply the same fixer again continue results = fixer.match(node) if results: new = fixer.transform(node, results) if new is not None: node.replace(new) #new.fixers_applied.append(fixer) for node in new.post_order(): # do not apply the fixer again to # this or any subnode if not node.fixers_applied: node.fixers_applied = [] node.fixers_applied.append(fixer) # update the original match set for # the added code new_matches = self.BM.run(new.leaves()) for fxr in new_matches: if not fxr in match_set: match_set[fxr]=[] match_set[fxr].extend(new_matches[fxr]) for fixer in chain(self.pre_order, self.post_order): fixer.finish_tree(tree, name) return tree.was_changed def traverse_by(self, fixers, traversal): """Traverse an AST, applying a set of fixers to each node. This is a helper method for refactor_tree(). Args: fixers: a list of fixer instances. traversal: a generator that yields AST nodes. Returns: None """ if not fixers: return for node in traversal: for fixer in fixers[node.type]: results = fixer.match(node) if results: new = fixer.transform(node, results) if new is not None: node.replace(new) node = new def processed_file(self, new_text, filename, old_text=None, write=False, encoding=None): """ Called when a file has been refactored and there may be changes. """ self.files.append(filename) if old_text is None: old_text = self._read_python_source(filename)[0] if old_text is None: return equal = old_text == new_text self.print_output(old_text, new_text, filename, equal) if equal: self.log_debug("No changes to %s", filename) if not self.write_unchanged_files: return if write: self.write_file(new_text, filename, old_text, encoding) else: self.log_debug("Not writing changes to %s", filename) def write_file(self, new_text, filename, old_text, encoding=None): """Writes a string to a file. It first shows a unified diff between the old text and the new text, and then rewrites the file; the latter is only done if the write option is set. """ try: f = _open_with_encoding(filename, "w", encoding=encoding) except OSError as err: self.log_error("Can't create %s: %s", filename, err) return try: f.write(_to_system_newlines(new_text)) except OSError as err: self.log_error("Can't write %s: %s", filename, err) finally: f.close() self.log_debug("Wrote changes to %s", filename) self.wrote = True PS1 = ">>> " PS2 = "... " def refactor_docstring(self, input, filename): """Refactors a docstring, looking for doctests. This returns a modified version of the input string. 
        It looks for doctests, which start with a ">>>" prompt, and
        may be continued with "..." prompts, as long as the "..." is
        indented the same as the ">>>".

        (Unfortunately we can't use the doctest module's parser,
        since, like most parsers, it is not geared towards preserving
        the original source.)
        """
        result = []
        block = None
        block_lineno = None
        indent = None
        lineno = 0
        for line in input.splitlines(keepends=True):
            lineno += 1
            if line.lstrip().startswith(self.PS1):
                if block is not None:
                    result.extend(self.refactor_doctest(block, block_lineno,
                                                        indent, filename))
                block_lineno = lineno
                block = [line]
                i = line.find(self.PS1)
                indent = line[:i]
            elif (indent is not None and
                  (line.startswith(indent + self.PS2) or
                   line == indent + self.PS2.rstrip() + "\n")):
                block.append(line)
            else:
                if block is not None:
                    result.extend(self.refactor_doctest(block, block_lineno,
                                                        indent, filename))
                block = None
                indent = None
                result.append(line)
        if block is not None:
            result.extend(self.refactor_doctest(block, block_lineno,
                                                indent, filename))
        return "".join(result)

    def refactor_doctest(self, block, lineno, indent, filename):
        """Refactors one doctest.

        A doctest is given as a block of lines, the first of which starts
        with ">>>" (possibly indented), while the remaining lines start
        with "..." (identically indented).

        """
        try:
            tree = self.parse_block(block, lineno, indent)
        except Exception as err:
            if self.logger.isEnabledFor(logging.DEBUG):
                for line in block:
                    self.log_debug("Source: %s", line.rstrip("\n"))
            self.log_error("Can't parse docstring in %s line %s: %s: %s",
                           filename, lineno, err.__class__.__name__, err)
            return block
        if self.refactor_tree(tree, filename):
            new = str(tree).splitlines(keepends=True)
            # Undo the adjustment of the line numbers in wrap_toks() below.
            clipped, new = new[:lineno-1], new[lineno-1:]
            assert clipped == ["\n"] * (lineno-1), clipped
            if not new[-1].endswith("\n"):
                new[-1] += "\n"
            block = [indent + self.PS1 + new.pop(0)]
            if new:
                block += [indent + self.PS2 + line for line in new]
        return block

    def summarize(self):
        if self.wrote:
            were = "were"
        else:
            were = "need to be"
        if not self.files:
            self.log_message("No files %s modified.", were)
        else:
            self.log_message("Files that %s modified:", were)
            for file in self.files:
                self.log_message(file)
        if self.fixer_log:
            self.log_message("Warnings/messages while refactoring:")
            for message in self.fixer_log:
                self.log_message(message)
        if self.errors:
            if len(self.errors) == 1:
                self.log_message("There was 1 error:")
            else:
                self.log_message("There were %d errors:", len(self.errors))
            for msg, args, kwds in self.errors:
                self.log_message(msg, *args, **kwds)

    def parse_block(self, block, lineno, indent):
        """Parses a block into a tree.

        This is necessary to get correct line number / offset information
        in the parser diagnostics and embedded into the parse tree.
        """
        tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
        tree.future_features = frozenset()
        return tree

    def wrap_toks(self, block, lineno, indent):
        """Wraps a tokenize stream to systematically modify start/end."""
        tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
        for type, value, (line0, col0), (line1, col1), line_text in tokens:
            line0 += lineno - 1
            line1 += lineno - 1
            # Don't bother updating the columns; this is too complicated
            # since line_text would also have to be updated and it would
            # still break for tokens spanning lines.  Let the user guess
            # that the column numbers for doctests are relative to the
            # end of the prompt string (PS1 or PS2).
            yield type, value, (line0, col0), (line1, col1), line_text

    def gen_lines(self, block, indent):
        """Generates lines as expected by tokenize from a list of lines.

        This strips the first len(indent + self.PS1) characters off each line.
        """
        prefix1 = indent + self.PS1
        prefix2 = indent + self.PS2
        prefix = prefix1
        for line in block:
            if line.startswith(prefix):
                yield line[len(prefix):]
            elif line == prefix.rstrip() + "\n":
                yield "\n"
            else:
                raise AssertionError("line=%r, prefix=%r" % (line, prefix))
            prefix = prefix2
        while True:
            yield ""


class MultiprocessingUnsupported(Exception):
    pass


class MultiprocessRefactoringTool(RefactoringTool):

    def __init__(self, *args, **kwargs):
        super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
        self.queue = None
        self.output_lock = None

    def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in range(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in range(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None

    def _child(self):
        task = self.queue.get()
        while task is not None:
            args, kwargs = task
            try:
                super(MultiprocessRefactoringTool, self).refactor_file(
                    *args, **kwargs)
            finally:
                self.queue.task_done()
            task = self.queue.get()

    def refactor_file(self, *args, **kwargs):
        if self.queue is not None:
            self.queue.put((args, kwargs))
        else:
            return super(MultiprocessRefactoringTool, self).refactor_file(
                *args, **kwargs)


==> fixer_base.py <==

# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Base class for fixers (optional, but recommended)."""

# Python imports
import itertools

# Local imports
from .patcomp import PatternCompiler
from . import pygram
from .fixer_util import does_tree_import

class BaseFix(object):

    """Optional base class for fixers.

    The subclass name must be FixFooBar where FooBar is the result of
    removing underscores and capitalizing the words of the fix name.
    For example, the class name for a fixer named 'has_key' should be
    FixHasKey.
    """

    PATTERN = None  # Most subclasses should override with a string literal
    pattern = None  # Compiled pattern, set by compile_pattern()
    pattern_tree = None # Tree representation of the pattern
    options = None  # Options object passed to initializer
    filename = None # The filename (set by set_filename)
    numbers = itertools.count(1) # For new_name()
    used_names = set() # A set of all used NAMEs
    order = "post"  # Does the fixer prefer pre- or post-order traversal
    explicit = False # Is this ignored by refactor.py -f all?
    run_order = 5   # Fixers will be sorted by run order before execution
                    # Lower numbers will be run first.
    _accept_type = None # [Advanced and not public] This tells RefactoringTool
                        # which node type to accept when there's not a pattern.
    keep_line_order = False # For the bottom matcher: match with the
                            # original line order
    BM_compatible = False # Compatibility with the bottom matching
                          # module; every fixer should set this
                          # manually

    # Shortcut for access to Python grammar symbols
    syms = pygram.python_symbols

    def __init__(self, options, log):
        """Initializer.  Subclass may override.

        Args:
            options: a dict containing the options passed to RefactoringTool
                that could be used to customize the fixer through the command
                line.
            log: a list to append warnings and other messages to.
        """
        self.options = options
        self.log = log
        self.compile_pattern()

    def compile_pattern(self):
        """Compiles self.PATTERN into self.pattern.

        Subclass may override if it doesn't want to use
        self.{pattern,PATTERN} in .match().
        """
        if self.PATTERN is not None:
            PC = PatternCompiler()
            self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN,
                                                                 with_tree=True)

    def set_filename(self, filename):
        """Set the filename.

        The main refactoring tool should call this.
        """
        self.filename = filename

    def match(self, node):
        """Returns match for a given parse tree node.

        Should return a true or false object (not necessarily a bool).
        It may return a non-empty dict of matching sub-nodes as
        returned by a matching pattern.

        Subclass may override.
        """
        results = {"node": node}
        return self.pattern.match(node, results) and results

    def transform(self, node, results):
        """Returns the transformation for a given parse tree node.

        Args:
          node: the root of the parse tree that matched the fixer.
          results: a dict mapping symbolic names to part of the match.

        Returns:
          None, or a node that is a modified copy of the
          argument node.  The node argument may also be modified in-place to
          effect the same change.

        Subclass *must* override.
        """
        raise NotImplementedError()

    def new_name(self, template="xxx_todo_changeme"):
        """Return a string suitable for use as an identifier

        The new name is guaranteed not to conflict with other identifiers.
        """
        name = template
        while name in self.used_names:
            name = template + str(next(self.numbers))
        self.used_names.add(name)
        return name

    def log_message(self, message):
        if self.first_log:
            self.first_log = False
            self.log.append("### In file %s ###" % self.filename)
        self.log.append(message)

    def cannot_convert(self, node, reason=None):
        """Warn the user that a given chunk of code is not valid Python 3,
        but that it cannot be converted automatically.

        First argument is the top-level node for the code in question.
        Optional second argument is why it can't be converted.
        """
        lineno = node.get_lineno()
        for_output = node.clone()
        for_output.prefix = ""
        msg = "Line %d: could not convert: %s"
        self.log_message(msg % (lineno, for_output))
        if reason:
            self.log_message(reason)

    def warning(self, node, reason):
        """Used for warning the user about possible uncertainty in the
        translation.

        First argument is the top-level node for the code in question.
        Optional second argument is why it can't be converted.
        """
        lineno = node.get_lineno()
        self.log_message("Line %d: %s" % (lineno, reason))

    def start_tree(self, tree, filename):
        """Some fixers need to maintain tree-wide state.
        This method is called once, at the start of tree fix-up.

        tree - the root node of the tree to be processed.
        filename - the name of the file the tree came from.
        """
        self.used_names = tree.used_names
        self.set_filename(filename)
        self.numbers = itertools.count(1)
        self.first_log = True

    def finish_tree(self, tree, filename):
        """Some fixers need to maintain tree-wide state.
        This method is called once, at the conclusion of tree fix-up.
        tree - the root node of the tree to be processed.
        filename - the name of the file the tree came from.
        """
        pass


class ConditionalFix(BaseFix):
    """ Base class for fixers which do not execute if an import is found. """

    # This is the name of the import which, if found, will cause the test
    # to be skipped.
    skip_on = None

    def start_tree(self, *args):
        super(ConditionalFix, self).start_tree(*args)
        self._should_skip = None

    def should_skip(self, node):
        if self._should_skip is not None:
            return self._should_skip
        pkg = self.skip_on.split(".")
        name = pkg[-1]
        pkg = ".".join(pkg[:-1])
        self._should_skip = does_tree_import(pkg, name, node)
        return self._should_skip


==> pygram.py <==

# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Export the Python grammar and symbols."""

# Python imports
import os

# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree

# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
                                     "PatternGrammar.txt")


class Symbols(object):

    def __init__(self, grammar):
        """Initializer.

        Creates an attribute for each grammar symbol (nonterminal),
        whose value is the symbol's type (an int >= 256).
        """
        for name, symbol in grammar.symbol2number.items():
            setattr(self, name, symbol)


python_grammar = driver.load_packaged_grammar("lib2to3", _GRAMMAR_FILE)

python_symbols = Symbols(python_grammar)

python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]

pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)


==> PatternGrammar3.6.8.final.0.pickle <==

[binary pickle of the precompiled pattern grammar (DFAs, keywords, labels, and symbol tables); contents omitted]
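
pygram.py above is the module the rest of the package leans on: it loads the pickled grammars and exposes every nonterminal as an attribute of a Symbols instance. A minimal sketch of working with those symbols, assuming only that the stdlib lib2to3 package is importable (the sample source string is ours):

# Sketch only: inspect grammar symbols and parse a Python 2 snippet.
# Assumes the stdlib lib2to3 package; "src" is an arbitrary example.
from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver

d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
src = "print 'hello'\n"   # the default grammar still accepts print statements
tree = d.parse_string(src)

# Nonterminals are ints >= 256, one attribute per grammar symbol.
print(pygram.python_symbols.file_input)               # e.g. 256
print(tree.type == pygram.python_symbols.file_input)  # True: type of the root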
==> PatternGrammar.txt <==

# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

# A grammar to describe tree matching patterns.
# Not shown here:
# - 'TOKEN' stands for any token (leaf node)
# - 'any' stands for any node (leaf or interior)
# With 'any' we can still specify the sub-structure.

# The start symbol is 'Matcher'.

Matcher: Alternatives ENDMARKER

Alternatives: Alternative ('|' Alternative)*

Alternative: (Unit | NegatedUnit)+

Unit: [NAME '='] ( STRING [Repeater]
                 | NAME [Details] [Repeater]
                 | '(' Alternatives ')' [Repeater]
                 | '[' Alternatives ']'
                 )

NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')

Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'

Details: '<' Alternatives '>'


==> btm_utils.py <==

"Utility functions used by the btm_matcher module"

from . import pytree
from .pgen2 import grammar, token
from .pygram import pattern_symbols, python_symbols

syms = pattern_symbols
pysyms = python_symbols
tokens = grammar.opmap
token_labels = token

TYPE_ANY = -1
TYPE_ALTERNATIVES = -2
TYPE_GROUP = -3

class MinNode(object):
    """This class serves as an intermediate representation of the
    pattern tree during the conversion to sets of leaf-to-root
    subpatterns"""

    def __init__(self, type=None, name=None):
        self.type = type
        self.name = name
        self.children = []
        self.leaf = False
        self.parent = None
        self.alternatives = []
        self.group = []

    def __repr__(self):
        return str(self.type) + ' ' + str(self.name)

    def leaf_to_root(self):
        """Internal method. Returns a characteristic path of the
        pattern tree. This method must be run for all leaves until the
        linear subpatterns are merged into a single one."""

        node = self
        subp = []
        while node:
            if node.type == TYPE_ALTERNATIVES:
                node.alternatives.append(subp)
                if len(node.alternatives) == len(node.children):
                    # last alternative
                    subp = [tuple(node.alternatives)]
                    node.alternatives = []
                    node = node.parent
                    continue
                else:
                    node = node.parent
                    subp = None
                    break

            if node.type == TYPE_GROUP:
                node.group.append(subp)
                # probably should check the number of leaves
                if len(node.group) == len(node.children):
                    subp = get_characteristic_subpattern(node.group)
                    node.group = []
                    node = node.parent
                    continue
                else:
                    node = node.parent
                    subp = None
                    break

            if node.type == token_labels.NAME and node.name:
                # in case of type=name, use the name instead
                subp.append(node.name)
            else:
                subp.append(node.type)

            node = node.parent
        return subp

    def get_linear_subpattern(self):
        """Drives the leaf_to_root method. The reason that
        leaf_to_root must be run multiple times is because we need to
        reject 'group' matches; for example the alternative form
        (a | b c) creates a group [b c] that needs to be matched. Since
        matching multiple linear patterns overcomes the automaton's
        capabilities, leaf_to_root merges each group into a single
        choice based on 'characteristic'ity,

        i.e. (a|b c) -> (a|b) if b more characteristic than c

        Returns: The most 'characteristic' (as defined by
          get_characteristic_subpattern) path for the compiled pattern
          tree.
        """

        for l in self.leaves():
            subp = l.leaf_to_root()
            if subp:
                return subp

    def leaves(self):
        "Generator that returns the leaves of the tree"
        for child in self.children:
            yield from child.leaves()
        if not self.children:
            yield self

def reduce_tree(node, parent=None):
    """
    Internal function. Reduces a compiled pattern tree to an
    intermediate representation suitable for feeding the
    automaton. This also trims off any optional pattern elements
    (like [a], a*).
""" new_node = None #switch on the node type if node.type == syms.Matcher: #skip node = node.children[0] if node.type == syms.Alternatives : #2 cases if len(node.children) <= 2: #just a single 'Alternative', skip this node new_node = reduce_tree(node.children[0], parent) else: #real alternatives new_node = MinNode(type=TYPE_ALTERNATIVES) #skip odd children('|' tokens) for child in node.children: if node.children.index(child)%2: continue reduced = reduce_tree(child, new_node) if reduced is not None: new_node.children.append(reduced) elif node.type == syms.Alternative: if len(node.children) > 1: new_node = MinNode(type=TYPE_GROUP) for child in node.children: reduced = reduce_tree(child, new_node) if reduced: new_node.children.append(reduced) if not new_node.children: # delete the group if all of the children were reduced to None new_node = None else: new_node = reduce_tree(node.children[0], parent) elif node.type == syms.Unit: if (isinstance(node.children[0], pytree.Leaf) and node.children[0].value == '('): #skip parentheses return reduce_tree(node.children[1], parent) if ((isinstance(node.children[0], pytree.Leaf) and node.children[0].value == '[') or (len(node.children)>1 and hasattr(node.children[1], "value") and node.children[1].value == '[')): #skip whole unit if its optional return None leaf = True details_node = None alternatives_node = None has_repeater = False repeater_node = None has_variable_name = False for child in node.children: if child.type == syms.Details: leaf = False details_node = child elif child.type == syms.Repeater: has_repeater = True repeater_node = child elif child.type == syms.Alternatives: alternatives_node = child if hasattr(child, 'value') and child.value == '=': # variable name has_variable_name = True #skip variable name if has_variable_name: #skip variable name, '=' name_leaf = node.children[2] if hasattr(name_leaf, 'value') and name_leaf.value == '(': # skip parenthesis name_leaf = node.children[3] else: name_leaf = node.children[0] #set node type if name_leaf.type == token_labels.NAME: #(python) non-name or wildcard if name_leaf.value == 'any': new_node = MinNode(type=TYPE_ANY) else: if hasattr(token_labels, name_leaf.value): new_node = MinNode(type=getattr(token_labels, name_leaf.value)) else: new_node = MinNode(type=getattr(pysyms, name_leaf.value)) elif name_leaf.type == token_labels.STRING: #(python) name or character; remove the apostrophes from #the string value name = name_leaf.value.strip("'") if name in tokens: new_node = MinNode(type=tokens[name]) else: new_node = MinNode(type=token_labels.NAME, name=name) elif name_leaf.type == syms.Alternatives: new_node = reduce_tree(alternatives_node, parent) #handle repeaters if has_repeater: if repeater_node.children[0].value == '*': #reduce to None new_node = None elif repeater_node.children[0].value == '+': #reduce to a single occurrence i.e. 
                pass
            else:
                # TODO: handle {min, max} repeaters
                raise NotImplementedError

        # add children
        if details_node and new_node is not None:
            for child in details_node.children[1:-1]:
                # skip '<', '>' markers
                reduced = reduce_tree(child, new_node)
                if reduced is not None:
                    new_node.children.append(reduced)

    if new_node:
        new_node.parent = parent
    return new_node


def get_characteristic_subpattern(subpatterns):
    """Picks the most characteristic from a list of linear patterns.

    Current order used is: names > common_names > common_chars
    """
    if not isinstance(subpatterns, list):
        return subpatterns
    if len(subpatterns) == 1:
        return subpatterns[0]

    # first pick out the ones containing variable names
    subpatterns_with_names = []
    subpatterns_with_common_names = []
    common_names = ['in', 'for', 'if', 'not', 'None']
    subpatterns_with_common_chars = []
    common_chars = "[]().,:"
    for subpattern in subpatterns:
        if any(rec_test(subpattern, lambda x: type(x) is str)):
            if any(rec_test(subpattern,
                            lambda x: isinstance(x, str) and x in common_chars)):
                subpatterns_with_common_chars.append(subpattern)
            elif any(rec_test(subpattern,
                              lambda x: isinstance(x, str) and x in common_names)):
                subpatterns_with_common_names.append(subpattern)
            else:
                subpatterns_with_names.append(subpattern)

    if subpatterns_with_names:
        subpatterns = subpatterns_with_names
    elif subpatterns_with_common_names:
        subpatterns = subpatterns_with_common_names
    elif subpatterns_with_common_chars:
        subpatterns = subpatterns_with_common_chars
    # of the remaining subpatterns pick out the longest one
    return max(subpatterns, key=len)

def rec_test(sequence, test_func):
    """Tests test_func on all items of sequence and items of included
    sub-iterables"""
    for x in sequence:
        if isinstance(x, (list, tuple)):
            yield from rec_test(x, test_func)
        else:
            yield test_func(x)


==> fixer_util.py <==

"""Utility functions, node construction macros, etc."""

# Author: Collin Winter

# Local imports
from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp


###########################################################
### Common node-construction "macros"
###########################################################

def KeywordArg(keyword, value):
    return Node(syms.argument,
                [keyword, Leaf(token.EQUAL, "="), value])

def LParen():
    return Leaf(token.LPAR, "(")

def RParen():
    return Leaf(token.RPAR, ")")

def Assign(target, source):
    """Build an assignment statement"""
    if not isinstance(target, list):
        target = [target]
    if not isinstance(source, list):
        source.prefix = " "
        source = [source]
    return Node(syms.atom,
                target + [Leaf(token.EQUAL, "=", prefix=" ")] + source)

def Name(name, prefix=None):
    """Return a NAME leaf"""
    return Leaf(token.NAME, name, prefix=prefix)

def Attr(obj, attr):
    """A node tuple for obj.attr"""
    return [obj, Node(syms.trailer, [Dot(), attr])]

def Comma():
    """A comma leaf"""
    return Leaf(token.COMMA, ",")

def Dot():
leaf""" return Leaf(token.DOT, ".") def ArgList(args, lparen=LParen(), rparen=RParen()): """A parenthesised argument list, used by Call()""" node = Node(syms.trailer, [lparen.clone(), rparen.clone()]) if args: node.insert_child(1, Node(syms.arglist, args)) return node def Call(func_name, args=None, prefix=None): """A function call""" node = Node(syms.power, [func_name, ArgList(args)]) if prefix is not None: node.prefix = prefix return node def Newline(): """A newline literal""" return Leaf(token.NEWLINE, "\n") def BlankLine(): """A blank line""" return Leaf(token.NEWLINE, "") def Number(n, prefix=None): return Leaf(token.NUMBER, n, prefix=prefix) def Subscript(index_node): """A numeric or string subscript""" return Node(syms.trailer, [Leaf(token.LBRACE, "["), index_node, Leaf(token.RBRACE, "]")]) def String(string, prefix=None): """A string leaf""" return Leaf(token.STRING, string, prefix=prefix) def ListComp(xp, fp, it, test=None): """A list comprehension of the form [xp for fp in it if test]. If test is None, the "if test" part is omitted. """ xp.prefix = "" fp.prefix = " " it.prefix = " " for_leaf = Leaf(token.NAME, "for") for_leaf.prefix = " " in_leaf = Leaf(token.NAME, "in") in_leaf.prefix = " " inner_args = [for_leaf, fp, in_leaf, it] if test: test.prefix = " " if_leaf = Leaf(token.NAME, "if") if_leaf.prefix = " " inner_args.append(Node(syms.comp_if, [if_leaf, test])) inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)]) return Node(syms.atom, [Leaf(token.LBRACE, "["), inner, Leaf(token.RBRACE, "]")]) def FromImport(package_name, name_leafs): """ Return an import statement in the form: from package import name_leafs""" # XXX: May not handle dotted imports properly (eg, package_name='foo.bar') #assert package_name == '.' or '.' not in package_name, "FromImport has "\ # "not been tested with dotted package names -- use at your own "\ # "peril!" 
    for leaf in name_leafs:
        # Pull the leaves out of their old tree
        leaf.remove()

    children = [Leaf(token.NAME, "from"),
                Leaf(token.NAME, package_name, prefix=" "),
                Leaf(token.NAME, "import", prefix=" "),
                Node(syms.import_as_names, name_leafs)]
    imp = Node(syms.import_from, children)
    return imp

def ImportAndCall(node, results, names):
    """Returns an import statement and calls a method
    of the module:

    import module
    module.name()"""
    obj = results["obj"].clone()
    if obj.type == syms.arglist:
        newarglist = obj.clone()
    else:
        newarglist = Node(syms.arglist, [obj.clone()])
    after = results["after"]
    if after:
        after = [n.clone() for n in after]
    new = Node(syms.power,
               Attr(Name(names[0]), Name(names[1])) +
               [Node(syms.trailer,
                     [results["lpar"].clone(),
                      newarglist,
                      results["rpar"].clone()])] + after)
    new.prefix = node.prefix
    return new


###########################################################
### Determine whether a node represents a given literal
###########################################################

def is_tuple(node):
    """Does the node represent a tuple literal?"""
    if isinstance(node, Node) and node.children == [LParen(), RParen()]:
        return True
    return (isinstance(node, Node)
            and len(node.children) == 3
            and isinstance(node.children[0], Leaf)
            and isinstance(node.children[1], Node)
            and isinstance(node.children[2], Leaf)
            and node.children[0].value == "("
            and node.children[2].value == ")")

def is_list(node):
    """Does the node represent a list literal?"""
    return (isinstance(node, Node)
            and len(node.children) > 1
            and isinstance(node.children[0], Leaf)
            and isinstance(node.children[-1], Leaf)
            and node.children[0].value == "["
            and node.children[-1].value == "]")


###########################################################
### Misc
###########################################################

def parenthesize(node):
    return Node(syms.atom, [LParen(), node, RParen()])


consuming_calls = {"sorted", "list", "set", "any", "all", "tuple", "sum",
                   "min", "max", "enumerate"}

def attr_chain(obj, attr):
    """Follow an attribute chain.

    If you have a chain of objects where a.foo -> b, b.foo -> c, etc,
    use this to iterate over all objects in the chain. Iteration is
    terminated by getattr(x, attr) is None.

    Args:
        obj: the starting object
        attr: the name of the chaining attribute

    Yields:
        Each successive object in the chain.
    """
    next = getattr(obj, attr)
    while next:
        yield next
        next = getattr(next, attr)

p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
        | comp_for< 'for' any 'in' node=any any* >
     """
p1 = """
power<
    ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
      'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) )
    trailer< '(' node=any ')' >
    any*
>
"""
p2 = """
power<
    ( 'sorted' | 'enumerate' )
    trailer< '(' arglist<node=any any*> ')' >
    any*
>
"""
pats_built = False
def in_special_context(node):
    """ Returns true if node is in an environment where all that is required
        of it is being iterable (ie, it doesn't matter if it returns a list
        or an iterator).

        See test_map_nochange in test_fixers.py for some examples and tests.
    """
    global p0, p1, p2, pats_built
    if not pats_built:
        p0 = patcomp.compile_pattern(p0)
        p1 = patcomp.compile_pattern(p1)
        p2 = patcomp.compile_pattern(p2)
        pats_built = True
    patterns = [p0, p1, p2]
    for pattern, parent in zip(patterns, attr_chain(node, "parent")):
        results = {}
        if pattern.match(parent, results) and results["node"] is node:
            return True
    return False

def is_probably_builtin(node):
    """
    Check that something isn't an attribute or function name etc.
""" prev = node.prev_sibling if prev is not None and prev.type == token.DOT: # Attribute lookup. return False parent = node.parent if parent.type in (syms.funcdef, syms.classdef): return False if parent.type == syms.expr_stmt and parent.children[0] is node: # Assignment. return False if parent.type == syms.parameters or \ (parent.type == syms.typedargslist and ( (prev is not None and prev.type == token.COMMA) or parent.children[0] is node )): # The name of an argument. return False return True def find_indentation(node): """Find the indentation of *node*.""" while node is not None: if node.type == syms.suite and len(node.children) > 2: indent = node.children[1] if indent.type == token.INDENT: return indent.value node = node.parent return "" ########################################################### ### The following functions are to find bindings in a suite ########################################################### def make_suite(node): if node.type == syms.suite: return node node = node.clone() parent, node.parent = node.parent, None suite = Node(syms.suite, [node]) suite.parent = parent return suite def find_root(node): """Find the top level namespace.""" # Scamper up to the top level namespace while node.type != syms.file_input: node = node.parent if not node: raise ValueError("root found before file_input node was found.") return node def does_tree_import(package, name, node): """ Returns true if name is imported from package at the top level of the tree which node belongs to. To cover the case of an import like 'import foo', use None for the package and 'foo' for the name. """ binding = find_binding(name, find_root(node), package) return bool(binding) def is_import(node): """Returns true if the node is an import statement.""" return node.type in (syms.import_name, syms.import_from) def touch_import(package, name, node): """ Works like `does_tree_import` but adds an import statement if it was not imported. """ def is_import_stmt(node): return (node.type == syms.simple_stmt and node.children and is_import(node.children[0])) root = find_root(node) if does_tree_import(package, name, root): return # figure out where to insert the new import. First try to find # the first import and then skip to the last one. insert_pos = offset = 0 for idx, node in enumerate(root.children): if not is_import_stmt(node): continue for offset, node2 in enumerate(root.children[idx:]): if not is_import_stmt(node2): break insert_pos = idx + offset break # if there are no imports where we can insert, find the docstring. # if that also fails, we stick to the beginning of the file if insert_pos == 0: for idx, node in enumerate(root.children): if (node.type == syms.simple_stmt and node.children and node.children[0].type == token.STRING): insert_pos = idx + 1 break if package is None: import_ = Node(syms.import_name, [ Leaf(token.NAME, "import"), Leaf(token.NAME, name, prefix=" ") ]) else: import_ = FromImport(package, [Leaf(token.NAME, name, prefix=" ")]) children = [import_, Newline()] root.insert_child(insert_pos, Node(syms.simple_stmt, children)) _def_syms = {syms.classdef, syms.funcdef} def find_binding(name, node, package=None): """ Returns the node which binds variable name, otherwise None. If optional argument package is supplied, only imports will be returned. 
        See test cases for examples."""
    for child in node.children:
        ret = None
        if child.type == syms.for_stmt:
            if _find(name, child.children[1]):
                return child
            n = find_binding(name, make_suite(child.children[-1]), package)
            if n: ret = n
        elif child.type in (syms.if_stmt, syms.while_stmt):
            n = find_binding(name, make_suite(child.children[-1]), package)
            if n: ret = n
        elif child.type == syms.try_stmt:
            n = find_binding(name, make_suite(child.children[2]), package)
            if n:
                ret = n
            else:
                for i, kid in enumerate(child.children[3:]):
                    if kid.type == token.COLON and kid.value == ":":
                        # i+3 is the colon, i+4 is the suite
                        n = find_binding(name, make_suite(child.children[i+4]),
                                         package)
                        if n: ret = n
        elif child.type in _def_syms and child.children[1].value == name:
            ret = child
        elif _is_import_binding(child, name, package):
            ret = child
        elif child.type == syms.simple_stmt:
            ret = find_binding(name, child, package)
        elif child.type == syms.expr_stmt:
            if _find(name, child.children[0]):
                ret = child

        if ret:
            if not package:
                return ret
            if is_import(ret):
                return ret
    return None

_block_syms = {syms.funcdef, syms.classdef, syms.trailer}
def _find(name, node):
    nodes = [node]
    while nodes:
        node = nodes.pop()
        if node.type > 256 and node.type not in _block_syms:
            nodes.extend(node.children)
        elif node.type == token.NAME and node.value == name:
            return node
    return None

def _is_import_binding(node, name, package=None):
    """ Will return node if node will import name, or node
        will import * from package.  None is returned otherwise.
        See test cases for examples. """

    if node.type == syms.import_name and not package:
        imp = node.children[1]
        if imp.type == syms.dotted_as_names:
            for child in imp.children:
                if child.type == syms.dotted_as_name:
                    if child.children[2].value == name:
                        return node
                elif child.type == token.NAME and child.value == name:
                    return node
        elif imp.type == syms.dotted_as_name:
            last = imp.children[-1]
            if last.type == token.NAME and last.value == name:
                return node
        elif imp.type == token.NAME and imp.value == name:
            return node
    elif node.type == syms.import_from:
        # str(...) is used to make life easier here, because
        # from a.b import parses to ['import', ['a', '.', 'b'], ...]
        if package and str(node.children[1]).strip() != package:
            return None
        n = node.children[3]
        if package and _find("as", n):
            # See test_from_import_as for explanation
            return None
        elif n.type == syms.import_as_names and _find(name, n):
            return node
        elif n.type == syms.import_as_name:
            child = n.children[2]
            if child.type == token.NAME and child.value == name:
                return node
        elif n.type == token.NAME and n.value == name:
            return node
        elif package and n.type == token.STAR:
            return node
    return None


==> fixes/fix_isinstance.py <==

# Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.

"""Fixer that cleans up a tuple argument to isinstance after the tokens
in it were fixed.  This is mainly used to remove double occurrences of
tokens as a leftover of the long -> int / unicode -> str conversion.

eg.  isinstance(x, (int, long)) -> isinstance(x, (int, int))
       -> isinstance(x, int)
"""

from .. import fixer_base
from ..fixer_util import token


class FixIsinstance(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    power<
        'isinstance'
        trailer< '(' arglist< any ',' atom< '('
            args=testlist_gexp< any+ >
        ')' > > ')' >
    >
    """

    run_order = 6

    def transform(self, node, results):
        names_inserted = set()
        testlist = results["args"]
        args = testlist.children
        new_args = []
        iterator = enumerate(args)
        for idx, arg in iterator:
            if arg.type == token.NAME and arg.value in names_inserted:
                if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
                    next(iterator)
                continue
            else:
                new_args.append(arg)
                if arg.type == token.NAME:
                    names_inserted.add(arg.value)
        if new_args and new_args[-1].type == token.COMMA:
            del new_args[-1]
        if len(new_args) == 1:
            atom = testlist.parent
            new_args[0].prefix = atom.prefix
            atom.replace(new_args[0])
        else:
            args[:] = new_args
            node.changed()


==> fixes/fix_nonzero.py <==

"""Fixer for __nonzero__ -> __bool__ methods."""
# Author: Collin Winter

# Local imports
from .. import fixer_base
from ..fixer_util import Name

class FixNonzero(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def' name='__nonzero__'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    """

    def transform(self, node, results):
        name = results["name"]
        new = Name("__bool__", prefix=name.prefix)
        name.replace(new)


==> fixes/fix_idioms.py <==

"""Adjust some old Python 2 idioms to their modern counterparts.

* Change some type comparisons to isinstance() calls:
    type(x) == T -> isinstance(x, T)
    type(x) is T -> isinstance(x, T)
    type(x) != T -> not isinstance(x, T)
    type(x) is not T -> not isinstance(x, T)

* Change "while 1:" into "while True:".

* Change both

    v = list(EXPR)
    v.sort()
    foo(v)

and the more general

    v = EXPR
    v.sort()
    foo(v)

into

    v = sorted(EXPR)
    foo(v)
"""
# Author: Jacques Frechet, Collin Winter

# Local imports
from .. import fixer_base
from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms

CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
TYPE = "power< 'type' trailer< '(' x=any ')' > >"

class FixIdioms(fixer_base.BaseFix):
    explicit = True # The user must ask for this fixer

    PATTERN = r"""
        isinstance=comparison< %s %s T=any >
        |
        isinstance=comparison< T=any %s %s >
        |
        while_stmt< 'while' while='1' ':' any+ >
        |
        sorted=any<
            any*
            simple_stmt<
              expr_stmt< id1=any '='
                         power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
              >
              '\n'
            >
            sort=
            simple_stmt<
              power< id2=any
                     trailer< '.' 'sort' > trailer< '(' ')' >
              >
              '\n'
            >
            next=any*
        >
        |
        sorted=any<
            any*
            simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
            sort=
            simple_stmt<
              power< id2=any
                     trailer< '.' 'sort' > trailer< '(' ')' >
              >
              '\n'
            >
            next=any*
        >
    """ % (TYPE, CMP, CMP, TYPE)

    def match(self, node):
        r = super(FixIdioms, self).match(node)
        # If we've matched one of the sort/sorted subpatterns above, we
        # want to reject matches where the initial assignment and the
        # subsequent .sort() call involve different identifiers.
if r and "sorted" in r: if r["id1"] == r["id2"]: return r return None return r def transform(self, node, results): if "isinstance" in results: return self.transform_isinstance(node, results) elif "while" in results: return self.transform_while(node, results) elif "sorted" in results: return self.transform_sort(node, results) else: raise RuntimeError("Invalid match") def transform_isinstance(self, node, results): x = results["x"].clone() # The thing inside of type() T = results["T"].clone() # The type being compared against x.prefix = "" T.prefix = " " test = Call(Name("isinstance"), [x, Comma(), T]) if "n" in results: test.prefix = " " test = Node(syms.not_test, [Name("not"), test]) test.prefix = node.prefix return test def transform_while(self, node, results): one = results["while"] one.replace(Name("True", prefix=one.prefix)) def transform_sort(self, node, results): sort_stmt = results["sort"] next_stmt = results["next"] list_call = results.get("list") simple_expr = results.get("expr") if list_call: list_call.replace(Name("sorted", prefix=list_call.prefix)) elif simple_expr: new = simple_expr.clone() new.prefix = "" simple_expr.replace(Call(Name("sorted"), [new], prefix=simple_expr.prefix)) else: raise RuntimeError("should not have reached here") sort_stmt.remove() btwn = sort_stmt.prefix # Keep any prefix lines between the sort_stmt and the list_call and # shove them right after the sorted() call. if "\n" in btwn: if next_stmt: # The new prefix should be everything from the sort_stmt's # prefix up to the last newline, then the old prefix after a new # line. prefix_lines = (btwn.rpartition("\n")[0], next_stmt[0].prefix) next_stmt[0].prefix = "\n".join(prefix_lines) else: assert list_call.parent assert list_call.next_sibling is None # Put a blank line after list_call and set its prefix. end_line = BlankLine() list_call.parent.append_child(end_line) assert list_call.next_sibling is end_line # The new prefix should be everything up to the first new line # of sort_stmt's prefix. end_line.prefix = btwn.rpartition("\n")[0] fixes/fix_set_literal.py 0000644 00000003241 15053244155 0011415 0 ustar 00 """ Optional fixer to transform set() calls to set literals. """ # Author: Benjamin Peterson from lib2to3 import fixer_base, pytree from lib2to3.fixer_util import token, syms class FixSetLiteral(fixer_base.BaseFix): BM_compatible = True explicit = True PATTERN = """power< 'set' trailer< '(' (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) > | single=any) ']' > | atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' > ) ')' > > """ def transform(self, node, results): single = results.get("single") if single: # Make a fake listmaker fake = pytree.Node(syms.listmaker, [single.clone()]) single.replace(fake) items = fake else: items = results["items"] # Build the contents of the literal literal = [pytree.Leaf(token.LBRACE, "{")] literal.extend(n.clone() for n in items.children) literal.append(pytree.Leaf(token.RBRACE, "}")) # Set the prefix of the right brace to that of the ')' or ']' literal[-1].prefix = items.next_sibling.prefix maker = pytree.Node(syms.dictsetmaker, literal) maker.prefix = node.prefix # If the original was a one tuple, we need to remove the extra comma. if len(maker.children) == 4: n = maker.children[2] n.remove() maker.children[-1].prefix = n.prefix # Finally, replace the set call with our shiny new literal. return maker fixes/fix_buffer.py 0000644 00000001116 15053244155 0010356 0 ustar 00 # Copyright 2007 Google, Inc. All Rights Reserved. 
# Licensed to PSF under a Contributor Agreement. """Fixer that changes buffer(...) into memoryview(...).""" # Local imports from .. import fixer_base from ..fixer_util import Name class FixBuffer(fixer_base.BaseFix): BM_compatible = True explicit = True # The user must ask for this fixer PATTERN = """ power< name='buffer' trailer< '(' [any] ')' > any* > """ def transform(self, node, results): name = results["name"] name.replace(Name("memoryview", prefix=name.prefix)) fixes/fix_sys_exc.py 0000644 00000002012 15053244155 0010556 0 ustar 00 """Fixer for sys.exc_{type, value, traceback} sys.exc_type -> sys.exc_info()[0] sys.exc_value -> sys.exc_info()[1] sys.exc_traceback -> sys.exc_info()[2] """ # By Jeff Balogh and Benjamin Peterson # Local imports from .. import fixer_base from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms class FixSysExc(fixer_base.BaseFix): # This order matches the ordering of sys.exc_info(). exc_info = ["exc_type", "exc_value", "exc_traceback"] BM_compatible = True PATTERN = """ power< 'sys' trailer< dot='.' attribute=(%s) > > """ % '|'.join("'%s'" % e for e in exc_info) def transform(self, node, results): sys_attr = results["attribute"][0] index = Number(self.exc_info.index(sys_attr.value)) call = Call(Name("exc_info"), prefix=sys_attr.prefix) attr = Attr(Name("sys"), call) attr[1].children[0].prefix = results["dot"].prefix attr.append(Subscript(index)) return Node(syms.power, attr, prefix=node.prefix) fixes/fix_unicode.py 0000644 00000002350 15053244155 0010534 0 ustar 00 r"""Fixer for unicode. * Changes unicode to str and unichr to chr. * If "...\u..." is not unicode literal change it into "...\\u...". * Change u"..." into "...". """ from ..pgen2 import token from .. import fixer_base _mapping = {"unichr" : "chr", "unicode" : "str"} class FixUnicode(fixer_base.BaseFix): BM_compatible = True PATTERN = "STRING | 'unicode' | 'unichr'" def start_tree(self, tree, filename): super(FixUnicode, self).start_tree(tree, filename) self.unicode_literals = 'unicode_literals' in tree.future_features def transform(self, node, results): if node.type == token.NAME: new = node.clone() new.value = _mapping[node.value] return new elif node.type == token.STRING: val = node.value if not self.unicode_literals and val[0] in '\'"' and '\\' in val: val = r'\\'.join([ v.replace('\\u', r'\\u').replace('\\U', r'\\U') for v in val.split(r'\\') ]) if val[0] in 'uU': val = val[1:] if val == node.value: return node new = node.clone() new.value = val return new fixes/fix_imports.py 0000644 00000013064 15053244155 0010607 0 ustar 00 """Fix incompatible imports and module references.""" # Authors: Collin Winter, Nick Edds # Local imports from .. 
import fixer_base from ..fixer_util import Name, attr_chain MAPPING = {'StringIO': 'io', 'cStringIO': 'io', 'cPickle': 'pickle', '__builtin__' : 'builtins', 'copy_reg': 'copyreg', 'Queue': 'queue', 'SocketServer': 'socketserver', 'ConfigParser': 'configparser', 'repr': 'reprlib', 'FileDialog': 'tkinter.filedialog', 'tkFileDialog': 'tkinter.filedialog', 'SimpleDialog': 'tkinter.simpledialog', 'tkSimpleDialog': 'tkinter.simpledialog', 'tkColorChooser': 'tkinter.colorchooser', 'tkCommonDialog': 'tkinter.commondialog', 'Dialog': 'tkinter.dialog', 'Tkdnd': 'tkinter.dnd', 'tkFont': 'tkinter.font', 'tkMessageBox': 'tkinter.messagebox', 'ScrolledText': 'tkinter.scrolledtext', 'Tkconstants': 'tkinter.constants', 'Tix': 'tkinter.tix', 'ttk': 'tkinter.ttk', 'Tkinter': 'tkinter', 'markupbase': '_markupbase', '_winreg': 'winreg', 'thread': '_thread', 'dummy_thread': '_dummy_thread', # anydbm and whichdb are handled by fix_imports2 'dbhash': 'dbm.bsd', 'dumbdbm': 'dbm.dumb', 'dbm': 'dbm.ndbm', 'gdbm': 'dbm.gnu', 'xmlrpclib': 'xmlrpc.client', 'DocXMLRPCServer': 'xmlrpc.server', 'SimpleXMLRPCServer': 'xmlrpc.server', 'httplib': 'http.client', 'htmlentitydefs' : 'html.entities', 'HTMLParser' : 'html.parser', 'Cookie': 'http.cookies', 'cookielib': 'http.cookiejar', 'BaseHTTPServer': 'http.server', 'SimpleHTTPServer': 'http.server', 'CGIHTTPServer': 'http.server', #'test.test_support': 'test.support', 'commands': 'subprocess', 'UserString' : 'collections', 'UserList' : 'collections', 'urlparse' : 'urllib.parse', 'robotparser' : 'urllib.robotparser', } def alternates(members): return "(" + "|".join(map(repr, members)) + ")" def build_pattern(mapping=MAPPING): mod_list = ' | '.join(["module_name='%s'" % key for key in mapping]) bare_names = alternates(mapping.keys()) yield """name_import=import_name< 'import' ((%s) | multiple_imports=dotted_as_names< any* (%s) any* >) > """ % (mod_list, mod_list) yield """import_from< 'from' (%s) 'import' ['('] ( any | import_as_name< any 'as' any > | import_as_names< any* >) [')'] > """ % mod_list yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > | multiple_imports=dotted_as_names< any* dotted_as_name< (%s) 'as' any > any* >) > """ % (mod_list, mod_list) # Find usages of module members in code e.g. thread.foo(bar) yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names class FixImports(fixer_base.BaseFix): BM_compatible = True keep_line_order = True # This is overridden in fix_imports2. mapping = MAPPING # We want to run this fixer late, so fix_import doesn't try to make stdlib # renames into relative imports. run_order = 6 def build_pattern(self): return "|".join(build_pattern(self.mapping)) def compile_pattern(self): # We override this, so MAPPING can be pragmatically altered and the # changes will be reflected in PATTERN. self.PATTERN = self.build_pattern() super(FixImports, self).compile_pattern() # Don't match the node if it's within another match. def match(self, node): match = super(FixImports, self).match results = match(node) if results: # Module usage could be in the trailer of an attribute lookup, so we # might have nested matches when "bare_with_attr" is present. 
if "bare_with_attr" not in results and \ any(match(obj) for obj in attr_chain(node, "parent")): return False return results return False def start_tree(self, tree, filename): super(FixImports, self).start_tree(tree, filename) self.replace = {} def transform(self, node, results): import_mod = results.get("module_name") if import_mod: mod_name = import_mod.value new_name = self.mapping[mod_name] import_mod.replace(Name(new_name, prefix=import_mod.prefix)) if "name_import" in results: # If it's not a "from x import x, y" or "import x as y" import, # marked its usage to be replaced. self.replace[mod_name] = new_name if "multiple_imports" in results: # This is a nasty hack to fix multiple imports on a line (e.g., # "import StringIO, urlparse"). The problem is that I can't # figure out an easy way to make a pattern recognize the keys of # MAPPING randomly sprinkled in an import statement. results = self.match(node) if results: self.transform(node, results) else: # Replace usage of the module. bare_name = results["bare_with_attr"][0] new_name = self.replace.get(bare_name.value) if new_name: bare_name.replace(Name(new_name, prefix=bare_name.prefix)) fixes/fix_asserts.py 0000644 00000001730 15053244155 0010573 0 ustar 00 """Fixer that replaces deprecated unittest method names.""" # Author: Ezio Melotti from ..fixer_base import BaseFix from ..fixer_util import Name NAMES = dict( assert_="assertTrue", assertEquals="assertEqual", assertNotEquals="assertNotEqual", assertAlmostEquals="assertAlmostEqual", assertNotAlmostEquals="assertNotAlmostEqual", assertRegexpMatches="assertRegex", assertRaisesRegexp="assertRaisesRegex", failUnlessEqual="assertEqual", failIfEqual="assertNotEqual", failUnlessAlmostEqual="assertAlmostEqual", failIfAlmostEqual="assertNotAlmostEqual", failUnless="assertTrue", failUnlessRaises="assertRaises", failIf="assertFalse", ) class FixAsserts(BaseFix): PATTERN = """ power< any+ trailer< '.' meth=(%s)> any* > """ % '|'.join(map(repr, NAMES)) def transform(self, node, results): name = results["meth"][0] name.replace(Name(NAMES[str(name)], prefix=name.prefix)) fixes/fix_itertools.py 0000644 00000003014 15053244155 0011130 0 ustar 00 """ Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363) imports from itertools are fixed in fix_itertools_import.py If itertools is imported as something else (ie: import itertools as it; it.izip(spam, eggs)) method calls will not get fixed. """ # Local imports from .. import fixer_base from ..fixer_util import Name class FixItertools(fixer_base.BaseFix): BM_compatible = True it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')" PATTERN = """ power< it='itertools' trailer< dot='.' 
func=%(it_funcs)s > trailer< '(' [any] ')' > > | power< func=%(it_funcs)s trailer< '(' [any] ')' > > """ %(locals()) # Needs to be run after fix_(map|zip|filter) run_order = 6 def transform(self, node, results): prefix = None func = results['func'][0] if ('it' in results and func.value not in ('ifilterfalse', 'izip_longest')): dot, it = (results['dot'], results['it']) # Remove the 'itertools' prefix = it.prefix it.remove() # Replace the node which contains ('.', 'function') with the # function (to be consistent with the second part of the pattern) dot.remove() func.parent.replace(func) prefix = prefix or func.prefix func.replace(Name(func.value[1:], prefix=prefix)) fixes/fix_funcattrs.py 0000644 00000001204 15053244155 0011114 0 ustar 00 """Fix function attribute names (f.func_x -> f.__x__).""" # Author: Collin Winter # Local imports from .. import fixer_base from ..fixer_util import Name class FixFuncattrs(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals' | 'func_name' | 'func_defaults' | 'func_code' | 'func_dict') > any* > """ def transform(self, node, results): attr = results["attr"][0] attr.replace(Name(("__%s__" % attr.value[5:]), prefix=attr.prefix)) fixes/fix_future.py 0000644 00000001043 15053244155 0010416 0 ustar 00 """Remove __future__ imports from __future__ import foo is replaced with an empty line. """ # Author: Christian Heimes # Local imports from .. import fixer_base from ..fixer_util import BlankLine class FixFuture(fixer_base.BaseFix): BM_compatible = True PATTERN = """import_from< 'from' module_name="__future__" 'import' any >""" # This should be run last -- some things check for the import run_order = 10 def transform(self, node, results): new = BlankLine() new.prefix = node.prefix return new fixes/fix_methodattrs.py 0000644 00000001136 15053244155 0011445 0 ustar 00 """Fix bound method attributes (method.im_? -> method.__?__). """ # Author: Christian Heimes # Local imports from .. import fixer_base from ..fixer_util import Name MAP = { "im_func" : "__func__", "im_self" : "__self__", "im_class" : "__self__.__class__" } class FixMethodattrs(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* > """ def transform(self, node, results): attr = results["attr"][0] new = MAP[attr.value] attr.replace(Name(new, prefix=attr.prefix)) fixes/fix_has_key.py 0000644 00000006174 15053244155 0010541 0 ustar 00 # Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for has_key(). Calls to .has_key() methods are expressed in terms of the 'in' operator: d.has_key(k) -> k in d CAVEATS: 1) While the primary target of this fixer is dict.has_key(), the fixer will change any has_key() method call, regardless of its class. 2) Cases like this will not be converted: m = d.has_key if m(k): ... Only *calls* to has_key() are converted. While it is possible to convert the above to something like m = d.__contains__ if m(k): ... this is currently not done. """ # Local imports from .. import pytree from .. import fixer_base from ..fixer_util import Name, parenthesize class FixHasKey(fixer_base.BaseFix): BM_compatible = True PATTERN = """ anchor=power< before=any+ trailer< '.' 'has_key' > trailer< '(' ( not(arglist | argument<any '=' any>) arg=any | arglist<(not argument<any '=' any>) arg=any ','> ) ')' > after=any* > | negation=not_test< 'not' anchor=power< before=any+ trailer< '.' 
'has_key' > trailer< '(' ( not(arglist | argument<any '=' any>) arg=any | arglist<(not argument<any '=' any>) arg=any ','> ) ')' > > > """ def transform(self, node, results): assert results syms = self.syms if (node.parent.type == syms.not_test and self.pattern.match(node.parent)): # Don't transform a node matching the first alternative of the # pattern when its parent matches the second alternative return None negation = results.get("negation") anchor = results["anchor"] prefix = node.prefix before = [n.clone() for n in results["before"]] arg = results["arg"].clone() after = results.get("after") if after: after = [n.clone() for n in after] if arg.type in (syms.comparison, syms.not_test, syms.and_test, syms.or_test, syms.test, syms.lambdef, syms.argument): arg = parenthesize(arg) if len(before) == 1: before = before[0] else: before = pytree.Node(syms.power, before) before.prefix = " " n_op = Name("in", prefix=" ") if negation: n_not = Name("not", prefix=" ") n_op = pytree.Node(syms.comp_op, (n_not, n_op)) new = pytree.Node(syms.comparison, (arg, n_op, before)) if after: new = parenthesize(new) new = pytree.Node(syms.power, (new,) + tuple(after)) if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr, syms.and_expr, syms.shift_expr, syms.arith_expr, syms.term, syms.factor, syms.power): new = parenthesize(new) new.prefix = prefix return new fixes/fix_standarderror.py 0000644 00000000701 15053244155 0011756 0 ustar 00 # Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for StandardError -> Exception.""" # Local imports from .. import fixer_base from ..fixer_util import Name class FixStandarderror(fixer_base.BaseFix): BM_compatible = True PATTERN = """ 'StandardError' """ def transform(self, node, results): return Name("Exception", prefix=node.prefix) fixes/fix_urllib.py 0000644 00000020241 15053244155 0010376 0 ustar 00 """Fix changes imports of urllib which are now incompatible. This is rather similar to fix_imports, but because of the more complex nature of the fixing for urllib, it has its own fixer. """ # Author: Nick Edds # Local imports from lib2to3.fixes.fix_imports import alternates, FixImports from lib2to3.fixer_util import (Name, Comma, FromImport, Newline, find_indentation, Node, syms) MAPPING = {"urllib": [ ("urllib.request", ["URLopener", "FancyURLopener", "urlretrieve", "_urlopener", "urlopen", "urlcleanup", "pathname2url", "url2pathname"]), ("urllib.parse", ["quote", "quote_plus", "unquote", "unquote_plus", "urlencode", "splitattr", "splithost", "splitnport", "splitpasswd", "splitport", "splitquery", "splittag", "splittype", "splituser", "splitvalue", ]), ("urllib.error", ["ContentTooShortError"])], "urllib2" : [ ("urllib.request", ["urlopen", "install_opener", "build_opener", "Request", "OpenerDirector", "BaseHandler", "HTTPDefaultErrorHandler", "HTTPRedirectHandler", "HTTPCookieProcessor", "ProxyHandler", "HTTPPasswordMgr", "HTTPPasswordMgrWithDefaultRealm", "AbstractBasicAuthHandler", "HTTPBasicAuthHandler", "ProxyBasicAuthHandler", "AbstractDigestAuthHandler", "HTTPDigestAuthHandler", "ProxyDigestAuthHandler", "HTTPHandler", "HTTPSHandler", "FileHandler", "FTPHandler", "CacheFTPHandler", "UnknownHandler"]), ("urllib.error", ["URLError", "HTTPError"]), ] } # Duplicate the url parsing functions for urllib2. 
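# Worked example (illustrative, not from the original source): with the
# parse functions shared between urllib and urllib2, this fixer rewrites
#
#     from urllib2 import urlopen, HTTPError
#
# into
#
#     from urllib.request import urlopen
#     from urllib.error import HTTPError
#
# splitting one Python 2 import across the new submodules.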
MAPPING["urllib2"].append(MAPPING["urllib"][1]) def build_pattern(): bare = set() for old_module, changes in MAPPING.items(): for change in changes: new_module, members = change members = alternates(members) yield """import_name< 'import' (module=%r | dotted_as_names< any* module=%r any* >) > """ % (old_module, old_module) yield """import_from< 'from' mod_member=%r 'import' ( member=%s | import_as_name< member=%s 'as' any > | import_as_names< members=any* >) > """ % (old_module, members, members) yield """import_from< 'from' module_star=%r 'import' star='*' > """ % old_module yield """import_name< 'import' dotted_as_name< module_as=%r 'as' any > > """ % old_module # bare_with_attr has a special significance for FixImports.match(). yield """power< bare_with_attr=%r trailer< '.' member=%s > any* > """ % (old_module, members) class FixUrllib(FixImports): def build_pattern(self): return "|".join(build_pattern()) def transform_import(self, node, results): """Transform for the basic import case. Replaces the old import name with a comma separated list of its replacements. """ import_mod = results.get("module") pref = import_mod.prefix names = [] # create a Node list of the replacement modules for name in MAPPING[import_mod.value][:-1]: names.extend([Name(name[0], prefix=pref), Comma()]) names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref)) import_mod.replace(names) def transform_member(self, node, results): """Transform for imports of specific module elements. Replaces the module to be imported from with the appropriate new module. """ mod_member = results.get("mod_member") pref = mod_member.prefix member = results.get("member") # Simple case with only a single member being imported if member: # this may be a list of length one, or just a node if isinstance(member, list): member = member[0] new_name = None for change in MAPPING[mod_member.value]: if member.value in change[1]: new_name = change[0] break if new_name: mod_member.replace(Name(new_name, prefix=pref)) else: self.cannot_convert(node, "This is an invalid module element") # Multiple members being imported else: # a dictionary for replacements, order matters modules = [] mod_dict = {} members = results["members"] for member in members: # we only care about the actual members if member.type == syms.import_as_name: as_name = member.children[2].value member_name = member.children[0].value else: member_name = member.value as_name = None if member_name != ",": for change in MAPPING[mod_member.value]: if member_name in change[1]: if change[0] not in mod_dict: modules.append(change[0]) mod_dict.setdefault(change[0], []).append(member) new_nodes = [] indentation = find_indentation(node) first = True def handle_name(name, prefix): if name.type == syms.import_as_name: kids = [Name(name.children[0].value, prefix=prefix), name.children[1].clone(), name.children[2].clone()] return [Node(syms.import_as_name, kids)] return [Name(name.value, prefix=prefix)] for module in modules: elts = mod_dict[module] names = [] for elt in elts[:-1]: names.extend(handle_name(elt, pref)) names.append(Comma()) names.extend(handle_name(elts[-1], pref)) new = FromImport(module, names) if not first or node.parent.prefix.endswith(indentation): new.prefix = indentation new_nodes.append(new) first = False if new_nodes: nodes = [] for new_node in new_nodes[:-1]: nodes.extend([new_node, Newline()]) nodes.append(new_nodes[-1]) node.replace(nodes) else: self.cannot_convert(node, "All module elements are invalid") def transform_dot(self, node, results): """Transform for 
calls to module members in code.""" module_dot = results.get("bare_with_attr") member = results.get("member") new_name = None if isinstance(member, list): member = member[0] for change in MAPPING[module_dot.value]: if member.value in change[1]: new_name = change[0] break if new_name: module_dot.replace(Name(new_name, prefix=module_dot.prefix)) else: self.cannot_convert(node, "This is an invalid module element") def transform(self, node, results): if results.get("module"): self.transform_import(node, results) elif results.get("mod_member"): self.transform_member(node, results) elif results.get("bare_with_attr"): self.transform_dot(node, results) # Renaming and star imports are not supported for these modules. elif results.get("module_star"): self.cannot_convert(node, "Cannot handle star imports.") elif results.get("module_as"): self.cannot_convert(node, "This module is now multiple modules") fixes/fix_paren.py 0000644 00000002313 15053244155 0010212 0 ustar 00 """Fixer that addes parentheses where they are required This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``.""" # By Taek Joo Kim and Benjamin Peterson # Local imports from .. import fixer_base from ..fixer_util import LParen, RParen # XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2] class FixParen(fixer_base.BaseFix): BM_compatible = True PATTERN = """ atom< ('[' | '(') (listmaker< any comp_for< 'for' NAME 'in' target=testlist_safe< any (',' any)+ [','] > [any] > > | testlist_gexp< any comp_for< 'for' NAME 'in' target=testlist_safe< any (',' any)+ [','] > [any] > >) (']' | ')') > """ def transform(self, node, results): target = results["target"] lparen = LParen() lparen.prefix = target.prefix target.prefix = "" # Make it hug the parentheses target.insert_child(0, lparen) target.append_child(RParen()) fixes/fix_raw_input.py 0000644 00000000706 15053244155 0011121 0 ustar 00 """Fixer that changes raw_input(...) into input(...).""" # Author: Andre Roberge # Local imports from .. import fixer_base from ..fixer_util import Name class FixRawInput(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< name='raw_input' trailer< '(' [any] ')' > any* > """ def transform(self, node, results): name = results["name"] name.replace(Name("input", prefix=name.prefix)) fixes/fix_imports2.py 0000644 00000000441 15053244155 0010664 0 ustar 00 """Fix incompatible imports and module references that must be fixed after fix_imports.""" from . import fix_imports MAPPING = { 'whichdb': 'dbm', 'anydbm': 'dbm', } class FixImports2(fix_imports.FixImports): run_order = 7 mapping = MAPPING fixes/fix_ne.py 0000644 00000001073 15053244155 0007511 0 ustar 00 # Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that turns <> into !=.""" # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base class FixNe(fixer_base.BaseFix): # This is so simple that we don't need the pattern compiler. _accept_type = token.NOTEQUAL def match(self, node): # Override return node.value == "<>" def transform(self, node, results): new = pytree.Leaf(token.NOTEQUAL, "!=", prefix=node.prefix) return new fixes/fix_reduce.py 0000644 00000001505 15053244155 0010356 0 ustar 00 # Copyright 2008 Armin Ronacher. # Licensed to PSF under a Contributor Agreement. """Fixer for reduce(). Makes sure reduce() is imported from the functools module if reduce is used in that module. 
""" from lib2to3 import fixer_base from lib2to3.fixer_util import touch_import class FixReduce(fixer_base.BaseFix): BM_compatible = True order = "pre" PATTERN = """ power< 'reduce' trailer< '(' arglist< ( (not(argument<any '=' any>) any ',' not(argument<any '=' any>) any) | (not(argument<any '=' any>) any ',' not(argument<any '=' any>) any ',' not(argument<any '=' any>) any) ) > ')' > > """ def transform(self, node, results): touch_import('functools', 'reduce', node) fixes/fix_tuple_params.py 0000644 00000012675 15053244155 0011615 0 ustar 00 """Fixer for function definitions with tuple parameters. def func(((a, b), c), d): ... -> def func(x, d): ((a, b), c) = x ... It will also support lambdas: lambda (x, y): x + y -> lambda t: t[0] + t[1] # The parens are a syntax error in Python 3 lambda (x): x + y -> lambda x: x + y """ # Author: Collin Winter # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms def is_docstring(stmt): return isinstance(stmt, pytree.Node) and \ stmt.children[0].type == token.STRING class FixTupleParams(fixer_base.BaseFix): run_order = 4 #use a lower order since lambda is part of other #patterns BM_compatible = True PATTERN = """ funcdef< 'def' any parameters< '(' args=any ')' > ['->' any] ':' suite=any+ > | lambda= lambdef< 'lambda' args=vfpdef< '(' inner=any ')' > ':' body=any > """ def transform(self, node, results): if "lambda" in results: return self.transform_lambda(node, results) new_lines = [] suite = results["suite"] args = results["args"] # This crap is so "def foo(...): x = 5; y = 7" is handled correctly. # TODO(cwinter): suite-cleanup if suite[0].children[1].type == token.INDENT: start = 2 indent = suite[0].children[1].value end = Newline() else: start = 0 indent = "; " end = pytree.Leaf(token.INDENT, "") # We need access to self for new_name(), and making this a method # doesn't feel right. Closing over self and new_lines makes the # code below cleaner. def handle_tuple(tuple_arg, add_prefix=False): n = Name(self.new_name()) arg = tuple_arg.clone() arg.prefix = "" stmt = Assign(arg, n.clone()) if add_prefix: n.prefix = " " tuple_arg.replace(n) new_lines.append(pytree.Node(syms.simple_stmt, [stmt, end.clone()])) if args.type == syms.tfpdef: handle_tuple(args) elif args.type == syms.typedargslist: for i, arg in enumerate(args.children): if arg.type == syms.tfpdef: # Without add_prefix, the emitted code is correct, # just ugly. handle_tuple(arg, add_prefix=(i > 0)) if not new_lines: return # This isn't strictly necessary, but it plays nicely with other fixers. 
# TODO(cwinter) get rid of this when children becomes a smart list for line in new_lines: line.parent = suite[0] # TODO(cwinter) suite-cleanup after = start if start == 0: new_lines[0].prefix = " " elif is_docstring(suite[0].children[start]): new_lines[0].prefix = indent after = start + 1 for line in new_lines: line.parent = suite[0] suite[0].children[after:after] = new_lines for i in range(after+1, after+len(new_lines)+1): suite[0].children[i].prefix = indent suite[0].changed() def transform_lambda(self, node, results): args = results["args"] body = results["body"] inner = simplify_args(results["inner"]) # Replace lambda ((((x)))): x with lambda x: x if inner.type == token.NAME: inner = inner.clone() inner.prefix = " " args.replace(inner) return params = find_params(args) to_index = map_to_index(params) tup_name = self.new_name(tuple_name(params)) new_param = Name(tup_name, prefix=" ") args.replace(new_param.clone()) for n in body.post_order(): if n.type == token.NAME and n.value in to_index: subscripts = [c.clone() for c in to_index[n.value]] new = pytree.Node(syms.power, [new_param.clone()] + subscripts) new.prefix = n.prefix n.replace(new) ### Helper functions for transform_lambda() def simplify_args(node): if node.type in (syms.vfplist, token.NAME): return node elif node.type == syms.vfpdef: # These look like vfpdef< '(' x ')' > where x is NAME # or another vfpdef instance (leading to recursion). while node.type == syms.vfpdef: node = node.children[1] return node raise RuntimeError("Received unexpected node %s" % node) def find_params(node): if node.type == syms.vfpdef: return find_params(node.children[1]) elif node.type == token.NAME: return node.value return [find_params(c) for c in node.children if c.type != token.COMMA] def map_to_index(param_list, prefix=[], d=None): if d is None: d = {} for i, obj in enumerate(param_list): trailer = [Subscript(Number(str(i)))] if isinstance(obj, list): map_to_index(obj, trailer, d=d) else: d[obj] = prefix + trailer return d def tuple_name(param_list): l = [] for obj in param_list: if isinstance(obj, list): l.append(tuple_name(obj)) else: l.append(obj) return "_".join(l) fixes/fix_import.py 0000644 00000006270 15053244155 0010425 0 ustar 00 """Fixer for import statements. If spam is being imported from the local directory, this import: from spam import eggs Becomes: from .spam import eggs And this import: import spam Becomes: from . import spam """ # Local imports from .. import fixer_base from os.path import dirname, join, exists, sep from ..fixer_util import FromImport, syms, token def traverse_imports(names): """ Walks over all the names imported in a dotted_as_names node. 
""" pending = [names] while pending: node = pending.pop() if node.type == token.NAME: yield node.value elif node.type == syms.dotted_name: yield "".join([ch.value for ch in node.children]) elif node.type == syms.dotted_as_name: pending.append(node.children[0]) elif node.type == syms.dotted_as_names: pending.extend(node.children[::-2]) else: raise AssertionError("unknown node type") class FixImport(fixer_base.BaseFix): BM_compatible = True PATTERN = """ import_from< 'from' imp=any 'import' ['('] any [')'] > | import_name< 'import' imp=any > """ def start_tree(self, tree, name): super(FixImport, self).start_tree(tree, name) self.skip = "absolute_import" in tree.future_features def transform(self, node, results): if self.skip: return imp = results['imp'] if node.type == syms.import_from: # Some imps are top-level (eg: 'import ham') # some are first level (eg: 'import ham.eggs') # some are third level (eg: 'import ham.eggs as spam') # Hence, the loop while not hasattr(imp, 'value'): imp = imp.children[0] if self.probably_a_local_import(imp.value): imp.value = "." + imp.value imp.changed() else: have_local = False have_absolute = False for mod_name in traverse_imports(imp): if self.probably_a_local_import(mod_name): have_local = True else: have_absolute = True if have_absolute: if have_local: # We won't handle both sibling and absolute imports in the # same statement at the moment. self.warning(node, "absolute and local imports together") return new = FromImport(".", [imp]) new.prefix = node.prefix return new def probably_a_local_import(self, imp_name): if imp_name.startswith("."): # Relative imports are certainly not local imports. return False imp_name = imp_name.split(".", 1)[0] base_path = dirname(self.filename) base_path = join(base_path, imp_name) # If there is no __init__.py next to the file its not in a package # so can't be a relative import. if not exists(join(dirname(base_path), "__init__.py")): return False for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd"]: if exists(base_path + ext): return True return False fixes/fix_reload.py 0000644 00000002202 15053244155 0010350 0 ustar 00 """Fixer for reload(). reload(s) -> imp.reload(s)""" # Local imports from .. import fixer_base from ..fixer_util import ImportAndCall, touch_import class FixReload(fixer_base.BaseFix): BM_compatible = True order = "pre" PATTERN = """ power< 'reload' trailer< lpar='(' ( not(arglist | argument<any '=' any>) obj=any | obj=arglist<(not argument<any '=' any>) any ','> ) rpar=')' > after=any* > """ def transform(self, node, results): if results: # I feel like we should be able to express this logic in the # PATTERN above but I don't know how to do it so... obj = results['obj'] if obj: if obj.type == self.syms.star_expr: return # Make no change. if (obj.type == self.syms.argument and obj.children[0].value == '**'): return # Make no change. names = ('imp', 'reload') new = ImportAndCall(node, results, names) touch_import(None, 'imp', node) return new fixes/fix_print.py 0000644 00000005434 15053244155 0010250 0 ustar 00 # Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for print. Change: 'print' into 'print()' 'print ...' into 'print(...)' 'print ... ,' into 'print(..., end=" ")' 'print >>x, ...' into 'print(..., file=x)' No changes are applied if print_function is imported from __future__ """ # Local imports from .. import patcomp from .. import pytree from ..pgen2 import token from .. 
import fixer_base from ..fixer_util import Name, Call, Comma, String parend_expr = patcomp.compile_pattern( """atom< '(' [atom|STRING|NAME] ')' >""" ) class FixPrint(fixer_base.BaseFix): BM_compatible = True PATTERN = """ simple_stmt< any* bare='print' any* > | print_stmt """ def transform(self, node, results): assert results bare_print = results.get("bare") if bare_print: # Special-case print all by itself bare_print.replace(Call(Name("print"), [], prefix=bare_print.prefix)) return assert node.children[0] == Name("print") args = node.children[1:] if len(args) == 1 and parend_expr.match(args[0]): # We don't want to keep sticking parens around an # already-parenthesised expression. return sep = end = file = None if args and args[-1] == Comma(): args = args[:-1] end = " " if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"): assert len(args) >= 2 file = args[1].clone() args = args[3:] # Strip a possible comma after the file expression # Now synthesize a print(args, sep=..., end=..., file=...) node. l_args = [arg.clone() for arg in args] if l_args: l_args[0].prefix = "" if sep is not None or end is not None or file is not None: if sep is not None: self.add_kwarg(l_args, "sep", String(repr(sep))) if end is not None: self.add_kwarg(l_args, "end", String(repr(end))) if file is not None: self.add_kwarg(l_args, "file", file) n_stmt = Call(Name("print"), l_args) n_stmt.prefix = node.prefix return n_stmt def add_kwarg(self, l_nodes, s_kwd, n_expr): # XXX All this prefix-setting may lose comments (though rarely) n_expr.prefix = "" n_argument = pytree.Node(self.syms.argument, (Name(s_kwd), pytree.Leaf(token.EQUAL, "="), n_expr)) if l_nodes: l_nodes.append(Comma()) n_argument.prefix = " " l_nodes.append(n_argument) fixes/fix_operator.py 0000644 00000006617 15053244155 0010753 0 ustar 00 """Fixer for operator functions. operator.isCallable(obj) -> hasattr(obj, '__call__') operator.sequenceIncludes(obj) -> operator.contains(obj) operator.isSequenceType(obj) -> isinstance(obj, collections.Sequence) operator.isMappingType(obj) -> isinstance(obj, collections.Mapping) operator.isNumberType(obj) -> isinstance(obj, numbers.Number) operator.repeat(obj, n) -> operator.mul(obj, n) operator.irepeat(obj, n) -> operator.imul(obj, n) """ import collections # Local imports from lib2to3 import fixer_base from lib2to3.fixer_util import Call, Name, String, touch_import def invocation(s): def dec(f): f.invocation = s return f return dec class FixOperator(fixer_base.BaseFix): BM_compatible = True order = "pre" methods = """ method=('isCallable'|'sequenceIncludes' |'isSequenceType'|'isMappingType'|'isNumberType' |'repeat'|'irepeat') """ obj = "'(' obj=any ')'" PATTERN = """ power< module='operator' trailer< '.' 
%(methods)s > trailer< %(obj)s > >
              |
              power< %(methods)s trailer< %(obj)s > >
              """ % dict(methods=methods, obj=obj)

    def transform(self, node, results):
        method = self._check_method(node, results)
        if method is not None:
            return method(node, results)

    @invocation("operator.contains(%s)")
    def _sequenceIncludes(self, node, results):
        return self._handle_rename(node, results, "contains")

    @invocation("hasattr(%s, '__call__')")
    def _isCallable(self, node, results):
        obj = results["obj"]
        args = [obj.clone(), String(", "), String("'__call__'")]
        return Call(Name("hasattr"), args, prefix=node.prefix)

    @invocation("operator.mul(%s)")
    def _repeat(self, node, results):
        return self._handle_rename(node, results, "mul")

    @invocation("operator.imul(%s)")
    def _irepeat(self, node, results):
        return self._handle_rename(node, results, "imul")

    @invocation("isinstance(%s, collections.Sequence)")
    def _isSequenceType(self, node, results):
        return self._handle_type2abc(node, results, "collections", "Sequence")

    @invocation("isinstance(%s, collections.Mapping)")
    def _isMappingType(self, node, results):
        return self._handle_type2abc(node, results, "collections", "Mapping")

    @invocation("isinstance(%s, numbers.Number)")
    def _isNumberType(self, node, results):
        return self._handle_type2abc(node, results, "numbers", "Number")

    def _handle_rename(self, node, results, name):
        method = results["method"][0]
        method.value = name
        method.changed()

    def _handle_type2abc(self, node, results, module, abc):
        touch_import(None, module, node)
        obj = results["obj"]
        args = [obj.clone(), String(", " + ".".join([module, abc]))]
        return Call(Name("isinstance"), args, prefix=node.prefix)

    def _check_method(self, node, results):
        method = getattr(self, "_" + results["method"][0].value)
        if isinstance(method, collections.Callable):
            if "module" in results:
                return method
            else:
                sub = (str(results["obj"]),)
                invocation_str = method.invocation % sub
                self.warning(node, "You should use '%s' here." % invocation_str)
        return None

fixes/fix_zip.py
"""
Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...))
unless there exists a 'from future_builtins import zip' statement in the
top-level namespace.

We avoid the transformation if the zip() call is directly contained in
iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
"""

# Local imports
from .. import fixer_base
from ..pytree import Node
from ..pygram import python_symbols as syms
from ..fixer_util import Name, ArgList, in_special_context


class FixZip(fixer_base.ConditionalFix):
    BM_compatible = True
    PATTERN = """
    power< 'zip' args=trailer< '(' [any] ')' > [trailers=trailer*] >
    """

    skip_on = "future_builtins.zip"

    def transform(self, node, results):
        if self.should_skip(node):
            return
        if in_special_context(node):
            return None

        args = results['args'].clone()
        args.prefix = ""

        trailers = []
        if 'trailers' in results:
            trailers = [n.clone() for n in results['trailers']]
            for n in trailers:
                n.prefix = ""

        new = Node(syms.power, [Name("zip"), args], prefix="")
        new = Node(syms.power, [Name("list"), ArgList([new])] + trailers)
        new.prefix = node.prefix
        return new

fixes/fix_metaclass.py
"""Fixer for __metaclass__ = X -> (metaclass=X) methods.

The various forms of classdef (inherits nothing, inherits once, inherits
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so we normalize those into having a suite. Moving the __metaclass__ into the classdef can also cause the class body to be empty so there is some special casing for that as well. This fixer also tries very hard to keep original indenting and spacing in all those corner cases. """ # Author: Jack Diederich # Local imports from .. import fixer_base from ..pygram import token from ..fixer_util import syms, Node, Leaf def has_metaclass(parent): """ we have to check the cls_node without changing it. There are two possibilities: 1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta') 2) clsdef => simple_stmt => expr_stmt => Leaf('__meta') """ for node in parent.children: if node.type == syms.suite: return has_metaclass(node) elif node.type == syms.simple_stmt and node.children: expr_node = node.children[0] if expr_node.type == syms.expr_stmt and expr_node.children: left_side = expr_node.children[0] if isinstance(left_side, Leaf) and \ left_side.value == '__metaclass__': return True return False def fixup_parse_tree(cls_node): """ one-line classes don't get a suite in the parse tree so we add one to normalize the tree """ for node in cls_node.children: if node.type == syms.suite: # already in the preferred format, do nothing return # !%@#! oneliners have no suite node, we have to fake one up for i, node in enumerate(cls_node.children): if node.type == token.COLON: break else: raise ValueError("No class suite and no ':'!") # move everything into a suite node suite = Node(syms.suite, []) while cls_node.children[i+1:]: move_node = cls_node.children[i+1] suite.append_child(move_node.clone()) move_node.remove() cls_node.append_child(suite) node = suite def fixup_simple_stmt(parent, i, stmt_node): """ if there is a semi-colon all the parts count as part of the same simple_stmt. We just want the __metaclass__ part so we move everything after the semi-colon into its own simple_stmt node """ for semi_ind, node in enumerate(stmt_node.children): if node.type == token.SEMI: # *sigh* break else: return node.remove() # kill the semicolon new_expr = Node(syms.expr_stmt, []) new_stmt = Node(syms.simple_stmt, [new_expr]) while stmt_node.children[semi_ind:]: move_node = stmt_node.children[semi_ind] new_expr.append_child(move_node.clone()) move_node.remove() parent.insert_child(i, new_stmt) new_leaf1 = new_stmt.children[0].children[0] old_leaf1 = stmt_node.children[0].children[0] new_leaf1.prefix = old_leaf1.prefix def remove_trailing_newline(node): if node.children and node.children[-1].type == token.NEWLINE: node.children[-1].remove() def find_metas(cls_node): # find the suite node (Mmm, sweet nodes) for node in cls_node.children: if node.type == syms.suite: break else: raise ValueError("No class suite!") # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ] for i, simple_node in list(enumerate(node.children)): if simple_node.type == syms.simple_stmt and simple_node.children: expr_node = simple_node.children[0] if expr_node.type == syms.expr_stmt and expr_node.children: # Check if the expr_node is a simple assignment. left_node = expr_node.children[0] if isinstance(left_node, Leaf) and \ left_node.value == '__metaclass__': # We found an assignment to __metaclass__. 
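                    # Worked example (illustrative) of the whole fixer:
                    #
                    #     class C(Base):                  # before
                    #         __metaclass__ = Meta
                    #         attr = 1
                    #
                    #     class C(Base, metaclass=Meta):  # after
                    #         attr = 1
                    #
                    # find_metas() yields each such assignment so transform()
                    # can detach it and re-attach it inside the class arglist.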
fixup_simple_stmt(node, i, simple_node) remove_trailing_newline(simple_node) yield (node, i, simple_node) def fixup_indent(suite): """ If an INDENT is followed by a thing with a prefix then nuke the prefix Otherwise we get in trouble when removing __metaclass__ at suite start """ kids = suite.children[::-1] # find the first indent while kids: node = kids.pop() if node.type == token.INDENT: break # find the first Leaf while kids: node = kids.pop() if isinstance(node, Leaf) and node.type != token.DEDENT: if node.prefix: node.prefix = '' return else: kids.extend(node.children[::-1]) class FixMetaclass(fixer_base.BaseFix): BM_compatible = True PATTERN = """ classdef<any*> """ def transform(self, node, results): if not has_metaclass(node): return fixup_parse_tree(node) # find metaclasses, keep the last one last_metaclass = None for suite, i, stmt in find_metas(node): last_metaclass = stmt stmt.remove() text_type = node.children[0].type # always Leaf(nnn, 'class') # figure out what kind of classdef we have if len(node.children) == 7: # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite]) # 0 1 2 3 4 5 6 if node.children[3].type == syms.arglist: arglist = node.children[3] # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite]) else: parent = node.children[3].clone() arglist = Node(syms.arglist, [parent]) node.set_child(3, arglist) elif len(node.children) == 6: # Node(classdef, ['class', 'name', '(', ')', ':', suite]) # 0 1 2 3 4 5 arglist = Node(syms.arglist, []) node.insert_child(3, arglist) elif len(node.children) == 4: # Node(classdef, ['class', 'name', ':', suite]) # 0 1 2 3 arglist = Node(syms.arglist, []) node.insert_child(2, Leaf(token.RPAR, ')')) node.insert_child(2, arglist) node.insert_child(2, Leaf(token.LPAR, '(')) else: raise ValueError("Unexpected class definition") # now stick the metaclass in the arglist meta_txt = last_metaclass.children[0].children[0] meta_txt.value = 'metaclass' orig_meta_prefix = meta_txt.prefix if arglist.children: arglist.append_child(Leaf(token.COMMA, ',')) meta_txt.prefix = ' ' else: meta_txt.prefix = '' # compact the expression "metaclass = Meta" -> "metaclass=Meta" expr_stmt = last_metaclass.children[0] assert expr_stmt.type == syms.expr_stmt expr_stmt.children[1].prefix = '' expr_stmt.children[2].prefix = '' arglist.append_child(last_metaclass) fixup_indent(suite) # check for empty suite if not suite.children: # one-liner that was just __metaclass_ suite.remove() pass_leaf = Leaf(text_type, 'pass') pass_leaf.prefix = orig_meta_prefix node.append_child(pass_leaf) node.append_child(Leaf(token.NEWLINE, '\n')) elif len(suite.children) > 1 and \ (suite.children[-2].type == token.INDENT and suite.children[-1].type == token.DEDENT): # there was only one line in the class body and it was __metaclass__ pass_leaf = Leaf(text_type, 'pass') suite.insert_child(-1, pass_leaf) suite.insert_child(-1, Leaf(token.NEWLINE, '\n')) fixes/fix_types.py 0000644 00000003356 15053244155 0010261 0 ustar 00 # Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for removing uses of the types module. These work for only the known names in the types module. The forms above can include types. or not. ie, It is assumed the module is imported either as: import types from types import ... # either * or specific types The import statements are not modified. There should be another fixer that handles at least the following constants: type([]) -> list type(()) -> tuple type('') -> str """ # Local imports from .. 
import fixer_base from ..fixer_util import Name _TYPE_MAPPING = { 'BooleanType' : 'bool', 'BufferType' : 'memoryview', 'ClassType' : 'type', 'ComplexType' : 'complex', 'DictType': 'dict', 'DictionaryType' : 'dict', 'EllipsisType' : 'type(Ellipsis)', #'FileType' : 'io.IOBase', 'FloatType': 'float', 'IntType': 'int', 'ListType': 'list', 'LongType': 'int', 'ObjectType' : 'object', 'NoneType': 'type(None)', 'NotImplementedType' : 'type(NotImplemented)', 'SliceType' : 'slice', 'StringType': 'bytes', # XXX ? 'StringTypes' : '(str,)', # XXX ? 'TupleType': 'tuple', 'TypeType' : 'type', 'UnicodeType': 'str', 'XRangeType' : 'range', } _pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING] class FixTypes(fixer_base.BaseFix): BM_compatible = True PATTERN = '|'.join(_pats) def transform(self, node, results): new_value = _TYPE_MAPPING.get(results["name"].value) if new_value: return Name(new_value, prefix=node.prefix) return None fixes/fix_itertools_imports.py 0000644 00000004046 15053244155 0012713 0 ustar 00 """ Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """ # Local imports from lib2to3 import fixer_base from lib2to3.fixer_util import BlankLine, syms, token class FixItertoolsImports(fixer_base.BaseFix): BM_compatible = True PATTERN = """ import_from< 'from' 'itertools' 'import' imports=any > """ %(locals()) def transform(self, node, results): imports = results['imports'] if imports.type == syms.import_as_name or not imports.children: children = [imports] else: children = imports.children for child in children[::2]: if child.type == token.NAME: member = child.value name_node = child elif child.type == token.STAR: # Just leave the import as is. return else: assert child.type == syms.import_as_name name_node = child.children[0] member_name = name_node.value if member_name in ('imap', 'izip', 'ifilter'): child.value = None child.remove() elif member_name in ('ifilterfalse', 'izip_longest'): node.changed() name_node.value = ('filterfalse' if member_name[1] == 'f' else 'zip_longest') # Make sure the import statement is still sane children = imports.children[:] or [imports] remove_comma = True for child in children: if remove_comma and child.type == token.COMMA: child.remove() else: remove_comma ^= True while children and children[-1].type == token.COMMA: children.pop().remove() # If there are no imports left, just get rid of the entire statement if (not (imports.children or getattr(imports, 'value', None)) or imports.parent is None): p = node.prefix node = BlankLine() node.prefix = p return node fixes/fix_long.py 0000644 00000000734 15053244155 0010051 0 ustar 00 # Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that turns 'long' into 'int' everywhere. """ # Local imports from lib2to3 import fixer_base from lib2to3.fixer_util import is_probably_builtin class FixLong(fixer_base.BaseFix): BM_compatible = True PATTERN = "'long'" def transform(self, node, results): if is_probably_builtin(node): node.value = "int" node.changed() fixes/fix_apply.py 0000644 00000004576 15053244155 0010247 0 ustar 00 # Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for apply(). This converts apply(func, v, k) into (func)(*v, **k).""" # Local imports from .. import pytree from ..pgen2 import token from .. 
import fixer_base from ..fixer_util import Call, Comma, parenthesize class FixApply(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< 'apply' trailer< '(' arglist< (not argument<NAME '=' any>) func=any ',' (not argument<NAME '=' any>) args=any [',' (not argument<NAME '=' any>) kwds=any] [','] > ')' > > """ def transform(self, node, results): syms = self.syms assert results func = results["func"] args = results["args"] kwds = results.get("kwds") # I feel like we should be able to express this logic in the # PATTERN above but I don't know how to do it so... if args: if args.type == self.syms.star_expr: return # Make no change. if (args.type == self.syms.argument and args.children[0].value == '**'): return # Make no change. if kwds and (kwds.type == self.syms.argument and kwds.children[0].value == '**'): return # Make no change. prefix = node.prefix func = func.clone() if (func.type not in (token.NAME, syms.atom) and (func.type != syms.power or func.children[-2].type == token.DOUBLESTAR)): # Need to parenthesize func = parenthesize(func) func.prefix = "" args = args.clone() args.prefix = "" if kwds is not None: kwds = kwds.clone() kwds.prefix = "" l_newargs = [pytree.Leaf(token.STAR, "*"), args] if kwds is not None: l_newargs.extend([Comma(), pytree.Leaf(token.DOUBLESTAR, "**"), kwds]) l_newargs[-2].prefix = " " # that's the ** token # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t) # can be translated into f(x, y, *t) instead of f(*(x, y) + t) #new = pytree.Node(syms.power, (func, ArgList(l_newargs))) return Call(func, l_newargs, prefix=prefix) fixes/fix_exitfunc.py 0000644 00000004677 15053244155 0010751 0 ustar 00 """ Convert use of sys.exitfunc to use the atexit module. """ # Author: Benjamin Peterson from lib2to3 import pytree, fixer_base from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms class FixExitfunc(fixer_base.BaseFix): keep_line_order = True BM_compatible = True PATTERN = """ ( sys_import=import_name<'import' ('sys' | dotted_as_names< (any ',')* 'sys' (',' any)* > ) > | expr_stmt< power< 'sys' trailer< '.' 'exitfunc' > > '=' func=any > ) """ def __init__(self, *args): super(FixExitfunc, self).__init__(*args) def start_tree(self, tree, filename): super(FixExitfunc, self).start_tree(tree, filename) self.sys_import = None def transform(self, node, results): # First, find the sys import. We'll just hope it's global scope. if "sys_import" in results: if self.sys_import is None: self.sys_import = results["sys_import"] return func = results["func"].clone() func.prefix = "" register = pytree.Node(syms.power, Attr(Name("atexit"), Name("register")) ) call = Call(register, [func], node.prefix) node.replace(call) if self.sys_import is None: # That's interesting. self.warning(node, "Can't find sys import; Please add an atexit " "import at the top of your file.") return # Now add an atexit import after the sys import. 
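        # Worked example (illustrative):
        #
        #     import sys                        import sys
        #     sys.exitfunc = cleanup    -->     import atexit
        #                                       atexit.register(cleanup)
        #
        # When sys was imported as part of a comma-separated import list,
        # atexit is appended to that same list instead of getting its own
        # statement.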
        names = self.sys_import.children[1]
        if names.type == syms.dotted_as_names:
            names.append_child(Comma())
            names.append_child(Name("atexit", " "))
        else:
            containing_stmt = self.sys_import.parent
            position = containing_stmt.children.index(self.sys_import)
            stmt_container = containing_stmt.parent
            new_import = pytree.Node(syms.import_name,
                                     [Name("import"), Name("atexit", " ")])
            new = pytree.Node(syms.simple_stmt, [new_import])
            containing_stmt.insert_child(position + 1, Newline())
            containing_stmt.insert_child(position + 2, new)

fixes/fix_except.py
"""Fixer for except statements with named exceptions.

The following cases will be converted:

- "except E, T:" where T is a name:

    except E as T:

- "except E, T:" where T is not a name, tuple or list:

    except E as t:
        T = t

  This is done because the target of an "except" clause must be a name.

- "except E, T:" where T is a tuple or list literal:

    except E as t:
        T = t.args
"""
# Author: Collin Winter

# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms


def find_excepts(nodes):
    for i, n in enumerate(nodes):
        if n.type == syms.except_clause:
            if n.children[0].value == 'except':
                yield (n, nodes[i+2])


class FixExcept(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    try_stmt< 'try' ':' (simple_stmt | suite)
                  cleanup=(except_clause ':' (simple_stmt | suite))+
                  tail=(['except' ':' (simple_stmt | suite)]
                        ['else' ':' (simple_stmt | suite)]
                        ['finally' ':' (simple_stmt | suite)]) >
    """

    def transform(self, node, results):
        syms = self.syms

        tail = [n.clone() for n in results["tail"]]

        try_cleanup = [ch.clone() for ch in results["cleanup"]]
        for except_clause, e_suite in find_excepts(try_cleanup):
            if len(except_clause.children) == 4:
                (E, comma, N) = except_clause.children[1:4]
                comma.replace(Name("as", prefix=" "))

                if N.type != token.NAME:
                    # Generate a new N for the except clause
                    new_N = Name(self.new_name(), prefix=" ")
                    target = N.clone()
                    target.prefix = ""
                    N.replace(new_N)
                    new_N = new_N.clone()

                    # Insert "old_N = new_N" as the first statement in
                    # the except body. This loop skips leading whitespace
                    # and indents
                    #TODO(cwinter) suite-cleanup
                    suite_stmts = e_suite.children
                    for i, stmt in enumerate(suite_stmts):
                        if isinstance(stmt, pytree.Node):
                            break

                    # The assignment is different if old_N is a tuple or list
                    # In that case, the assignment is old_N = new_N.args
                    if is_tuple(N) or is_list(N):
                        assign = Assign(target, Attr(new_N, Name('args')))
                    else:
                        assign = Assign(target, new_N)

                    #TODO(cwinter) stopgap until children becomes a smart list
                    for child in reversed(suite_stmts[:i]):
                        e_suite.insert_child(0, child)
                    e_suite.insert_child(i, assign)
                elif N.prefix == "":
                    # No space after a comma is legal; no space after "as",
                    # not so much.
                    N.prefix = " "

        #TODO(cwinter) fix this when children becomes a smart list
        children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
        return pytree.Node(node.type, children)
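A minimal usage sketch (not part of the archive) showing how one fixer from
this package can be applied programmatically. The fixer name is real; the
input string and the "<example>" label are illustrative:

    from lib2to3.refactor import RefactoringTool

    # Build a tool that runs only the except-clause fixer defined above.
    tool = RefactoringTool(["lib2to3.fixes.fix_except"])

    # refactor_string() parses (Python 2 compatible) source and returns the
    # transformed parse tree; str() of the tree is the fixed source.
    src = "try:\n    pass\nexcept ValueError, e:\n    raise\n"
    tree = tool.refactor_string(src, "<example>")
    print(tree)   # prints the code with "except ValueError as e:"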
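The same fixers are normally driven from the 2to3 command line. Fixers marked
explicit = True above (fix_buffer, fix_idioms, fix_set_literal) are skipped by
default and must be requested by name, for example:

    2to3 -f except -f has_key example.py    # run only these two fixers
    2to3 -f all -f set_literal example.py   # default set plus an explicit fixer
    2to3 -w example.py                      # apply the default set, rewriting in place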