From 85b5ed4d675e321f606d14051d9e53e15fe3c169 Mon Sep 17 00:00:00 2001
From: Alex Hall
Date: Thu, 1 Apr 2021 12:55:55 +0200
Subject: [PATCH 1/4] Package into single file with stickytape

---
 single_file/package.sh                   | 15 +++++
 single_file/single_file_icecream.py      | 86 ++++++++++++++++++++++++
 single_file/site_packages_path.py        |  9 +++
 single_file/stickytape_entry.py          |  6 ++
 single_file/test_single_file_icecream.py |  9 +++
 5 files changed, 125 insertions(+)
 create mode 100755 single_file/package.sh
 create mode 100644 single_file/single_file_icecream.py
 create mode 100644 single_file/site_packages_path.py
 create mode 100644 single_file/stickytape_entry.py
 create mode 100644 single_file/test_single_file_icecream.py

diff --git a/single_file/package.sh b/single_file/package.sh
new file mode 100755
index 0000000..ef4fc22
--- /dev/null
+++ b/single_file/package.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -eux
+
+stickytape stickytape_entry.py \
+    --add-python-path .. \
+    --add-python-module pygments.formatters.terminal256 \
+    --add-python-module pygments.lexers.python \
+    --add-python-module icecream.__version__ \
+    --add-python-path "$(python site_packages_path.py)" \
+    --output-file single_file_icecream.py
+
+ls -lh single_file_icecream.py
+
+python test_single_file_icecream.py
diff --git a/single_file/single_file_icecream.py b/single_file/single_file_icecream.py
new file mode 100644
index 0000000..99726cc
--- /dev/null
+++ b/single_file/single_file_icecream.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+import contextlib as __stickytape_contextlib
+
+@__stickytape_contextlib.contextmanager
+def __stickytape_temporary_dir():
+    import tempfile
+    import shutil
+    dir_path = tempfile.mkdtemp()
+    try:
+        yield dir_path
+    finally:
+        shutil.rmtree(dir_path)
+
+with __stickytape_temporary_dir() as __stickytape_working_dir:
+    def __stickytape_write_module(path, contents):
+        import os, os.path
+
+        def make_package(path):
+            parts = path.split("/")
+            partial_path = __stickytape_working_dir
+            for part in parts:
+                partial_path = os.path.join(partial_path, part)
+                if not os.path.exists(partial_path):
+                    os.mkdir(partial_path)
+                    with open(os.path.join(partial_path, "__init__.py"), "wb") as f:
+                        f.write(b"\n")
+
+        make_package(os.path.dirname(path))
+
+        full_path = os.path.join(__stickytape_working_dir, path)
+        with open(full_path, "wb") as module_file:
+            module_file.write(contents)
+
+    import sys as __stickytape_sys
+    __stickytape_sys.path.insert(0, __stickytape_working_dir)
+
+    __stickytape_write_module('asttokens/__init__.py', b'# Copyright 2016 Grist Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"""\nThis module enhances the Python AST tree with token and source code information, sufficient to\ndetect the source text of each AST node. 
This is helpful for tools that make source code\ntransformations.\n"""\n\nfrom .line_numbers import LineNumbers\nfrom .asttokens import ASTTokens\n') + __stickytape_write_module('asttokens/line_numbers.py', b'# Copyright 2016 Grist Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport bisect\nimport re\n\n_line_start_re = re.compile(r\'^\', re.M)\n\nclass LineNumbers(object):\n """\n Class to convert between character offsets in a text string, and pairs (line, column) of 1-based\n line and 0-based column numbers, as used by tokens and AST nodes.\n\n This class expects unicode for input and stores positions in unicode. But it supports\n translating to and from utf8 offsets, which are used by ast parsing.\n """\n def __init__(self, text):\n # A list of character offsets of each line\'s first character.\n self._line_offsets = [m.start(0) for m in _line_start_re.finditer(text)]\n self._text = text\n self._text_len = len(text)\n self._utf8_offset_cache = {} # maps line num to list of char offset for each byte in line\n\n def from_utf8_col(self, line, utf8_column):\n """\n Given a 1-based line number and 0-based utf8 column, returns a 0-based unicode column.\n """\n offsets = self._utf8_offset_cache.get(line)\n if offsets is None:\n end_offset = self._line_offsets[line] if line < len(self._line_offsets) else self._text_len\n line_text = self._text[self._line_offsets[line - 1] : end_offset]\n\n offsets = [i for i,c in enumerate(line_text) for byte in c.encode(\'utf8\')]\n offsets.append(len(line_text))\n self._utf8_offset_cache[line] = offsets\n\n return offsets[max(0, min(len(offsets)-1, utf8_column))]\n\n def line_to_offset(self, line, column):\n """\n Converts 1-based line number and 0-based column to 0-based character offset into text.\n """\n line -= 1\n if line >= len(self._line_offsets):\n return self._text_len\n elif line < 0:\n return 0\n else:\n return min(self._line_offsets[line] + max(0, column), self._text_len)\n\n def offset_to_line(self, offset):\n """\n Converts 0-based character offset to pair (line, col) of 1-based line and 0-based column\n numbers.\n """\n offset = max(0, min(self._text_len, offset))\n line_index = bisect.bisect_right(self._line_offsets, offset) - 1\n return (line_index + 1, offset - self._line_offsets[line_index])\n\n\n') + __stickytape_write_module('asttokens/asttokens.py', b'# Copyright 2016 Grist Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ast\nimport bisect\nimport token\nimport tokenize\nimport io\nimport six\nfrom 
six.moves import xrange # pylint: disable=redefined-builtin\nfrom .line_numbers import LineNumbers\nfrom .util import Token, match_token, is_non_coding_token\nfrom .mark_tokens import MarkTokens\n\nclass ASTTokens(object):\n """\n ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and\n as tokens, and is used to mark and access token and position information.\n\n ``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember\n that all offsets you\'ll get are to the unicode text, which is available as the ``.text``\n property.\n\n If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting\n tree marked with token info and made available as the ``.tree`` property.\n\n If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In\n addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced\n using ``astroid`` library .\n\n If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST\n tree created separately.\n """\n def __init__(self, source_text, parse=False, tree=None, filename=\'\'):\n self._filename = filename\n self._tree = ast.parse(source_text, filename) if parse else tree\n\n # Decode source after parsing to let Python 2 handle coding declarations.\n # (If the encoding was not utf-8 compatible, then even if it parses correctly,\n # we\'ll fail with a unicode error here.)\n if isinstance(source_text, six.binary_type):\n source_text = source_text.decode(\'utf8\')\n\n self._text = source_text\n self._line_numbers = LineNumbers(source_text)\n\n # Tokenize the code.\n self._tokens = list(self._generate_tokens(source_text))\n\n # Extract the start positions of all tokens, so that we can quickly map positions to tokens.\n self._token_offsets = [tok.startpos for tok in self._tokens]\n\n if self._tree:\n self.mark_tokens(self._tree)\n\n\n def mark_tokens(self, root_node):\n """\n Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking\n them with token and position information by adding ``.first_token`` and\n ``.last_token``attributes. This is done automatically in the constructor when ``parse`` or\n ``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.\n """\n # The hard work of this class is done by MarkTokens\n MarkTokens(self).visit_tree(root_node)\n\n\n def _generate_tokens(self, text):\n """\n Generates tokens for the given code.\n """\n # This is technically an undocumented API for Python3, but allows us to use the same API as for\n # Python2. 
See http://stackoverflow.com/a/4952291/328565.\n for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):\n tok_type, tok_str, start, end, line = tok\n yield Token(tok_type, tok_str, start, end, line, index,\n self._line_numbers.line_to_offset(start[0], start[1]),\n self._line_numbers.line_to_offset(end[0], end[1]))\n\n @property\n def text(self):\n """The source code passed into the constructor."""\n return self._text\n\n @property\n def tokens(self):\n """The list of tokens corresponding to the source code from the constructor."""\n return self._tokens\n\n @property\n def tree(self):\n """The root of the AST tree passed into the constructor or parsed from the source code."""\n return self._tree\n\n @property\n def filename(self):\n """The filename that was parsed"""\n return self._filename\n\n def get_token_from_offset(self, offset):\n """\n Returns the token containing the given character offset (0-based position in source text),\n or the preceeding token if the position is between tokens.\n """\n return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]\n\n def get_token(self, lineno, col_offset):\n """\n Returns the token containing the given (lineno, col_offset) position, or the preceeding token\n if the position is between tokens.\n """\n # TODO: add test for multibyte unicode. We need to translate offsets from ast module (which\n # are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets\n # but isn\'t explicit.\n return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))\n\n def get_token_from_utf8(self, lineno, col_offset):\n """\n Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.\n """\n return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))\n\n def next_token(self, tok, include_extra=False):\n """\n Returns the next token after the given one. If include_extra is True, includes non-coding\n tokens from the tokenize module, such as NL and COMMENT.\n """\n i = tok.index + 1\n if not include_extra:\n while is_non_coding_token(self._tokens[i].type):\n i += 1\n return self._tokens[i]\n\n def prev_token(self, tok, include_extra=False):\n """\n Returns the previous token before the given one. If include_extra is True, includes non-coding\n tokens from the tokenize module, such as NL and COMMENT.\n """\n i = tok.index - 1\n if not include_extra:\n while is_non_coding_token(self._tokens[i].type):\n i -= 1\n return self._tokens[i]\n\n def find_token(self, start_token, tok_type, tok_str=None, reverse=False):\n """\n Looks for the first token, starting at start_token, that matches tok_type and, if given, the\n token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you\n can check it with `token.ISEOF(t.type)`.\n """\n t = start_token\n advance = self.prev_token if reverse else self.next_token\n while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):\n t = advance(t, include_extra=True)\n return t\n\n def token_range(self, first_token, last_token, include_extra=False):\n """\n Yields all tokens in order from first_token through and including last_token. 
If\n include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.\n """\n for i in xrange(first_token.index, last_token.index + 1):\n if include_extra or not is_non_coding_token(self._tokens[i].type):\n yield self._tokens[i]\n\n def get_tokens(self, node, include_extra=False):\n """\n Yields all tokens making up the given node. If include_extra is True, includes non-coding\n tokens such as tokenize.NL and .COMMENT.\n """\n return self.token_range(node.first_token, node.last_token, include_extra=include_extra)\n\n def get_text_range(self, node):\n """\n After mark_tokens() has been called, returns the (startpos, endpos) positions in source text\n corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don\'t correspond\n to any particular text.\n """\n if not hasattr(node, \'first_token\'):\n return (0, 0)\n\n start = node.first_token.startpos\n if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):\n # Multi-line nodes would be invalid unless we keep the indentation of the first node.\n start = self._text.rfind(\'\\n\', 0, start) + 1\n\n return (start, node.last_token.endpos)\n\n def get_text(self, node):\n """\n After mark_tokens() has been called, returns the text corresponding to the given node. Returns\n \'\' for nodes (like `Load`) that don\'t correspond to any particular text.\n """\n start, end = self.get_text_range(node)\n return self._text[start : end]\n') + __stickytape_write_module('six.py', b'# Copyright (c) 2010-2020 Benjamin Peterson\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the "Software"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n"""Utilities for writing code that runs on Python 2 and 3"""\n\nfrom __future__ import absolute_import\n\nimport functools\nimport itertools\nimport operator\nimport sys\nimport types\n\n__author__ = "Benjamin Peterson "\n__version__ = "1.15.0"\n\n\n# Useful for very coarse version differentiation.\nPY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\nPY34 = sys.version_info[0:2] >= (3, 4)\n\nif PY3:\n string_types = str,\n integer_types = int,\n class_types = type,\n text_type = str\n binary_type = bytes\n\n MAXSIZE = sys.maxsize\nelse:\n string_types = basestring,\n integer_types = (int, long)\n class_types = (type, types.ClassType)\n text_type = unicode\n binary_type = str\n\n if sys.platform.startswith("java"):\n # Jython always uses 32 bits.\n MAXSIZE = int((1 << 31) - 1)\n else:\n # It\'s possible to have sizeof(long) != sizeof(Py_ssize_t).\n class X(object):\n\n def __len__(self):\n return 1 << 31\n try:\n len(X())\n except OverflowError:\n # 32-bit\n MAXSIZE = int((1 << 31) - 1)\n else:\n # 64-bit\n MAXSIZE = int((1 << 63) - 1)\n del X\n\n\ndef _add_doc(func, doc):\n """Add documentation to a function."""\n func.__doc__ = doc\n\n\ndef _import_module(name):\n """Import module, returning the module after the last dot."""\n __import__(name)\n return sys.modules[name]\n\n\nclass _LazyDescr(object):\n\n def __init__(self, name):\n self.name = name\n\n def __get__(self, obj, tp):\n result = self._resolve()\n setattr(obj, self.name, result) # Invokes __set__.\n try:\n # This is a bit ugly, but it avoids running this again by\n # removing this descriptor.\n delattr(obj.__class__, self.name)\n except AttributeError:\n pass\n return result\n\n\nclass MovedModule(_LazyDescr):\n\n def __init__(self, name, old, new=None):\n super(MovedModule, self).__init__(name)\n if PY3:\n if new is None:\n new = name\n self.mod = new\n else:\n self.mod = old\n\n def _resolve(self):\n return _import_module(self.mod)\n\n def __getattr__(self, attr):\n _module = self._resolve()\n value = getattr(_module, attr)\n setattr(self, attr, value)\n return value\n\n\nclass _LazyModule(types.ModuleType):\n\n def __init__(self, name):\n super(_LazyModule, self).__init__(name)\n self.__doc__ = self.__class__.__doc__\n\n def __dir__(self):\n attrs = ["__doc__", "__name__"]\n attrs += [attr.name for attr in self._moved_attributes]\n return attrs\n\n # Subclasses should override this\n _moved_attributes = []\n\n\nclass MovedAttribute(_LazyDescr):\n\n def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):\n super(MovedAttribute, self).__init__(name)\n if PY3:\n if new_mod is None:\n new_mod = name\n self.mod = new_mod\n if new_attr is None:\n if old_attr is None:\n new_attr = name\n else:\n new_attr = old_attr\n self.attr = new_attr\n else:\n self.mod = old_mod\n if old_attr is None:\n old_attr = name\n self.attr = old_attr\n\n def _resolve(self):\n module = _import_module(self.mod)\n return getattr(module, self.attr)\n\n\nclass _SixMetaPathImporter(object):\n\n """\n A meta path importer to import six.moves and its submodules.\n\n This class implements a PEP302 finder and loader. 
It should be compatible\n with Python 2.5 and all existing versions of Python3\n """\n\n def __init__(self, six_module_name):\n self.name = six_module_name\n self.known_modules = {}\n\n def _add_module(self, mod, *fullnames):\n for fullname in fullnames:\n self.known_modules[self.name + "." + fullname] = mod\n\n def _get_module(self, fullname):\n return self.known_modules[self.name + "." + fullname]\n\n def find_module(self, fullname, path=None):\n if fullname in self.known_modules:\n return self\n return None\n\n def __get_module(self, fullname):\n try:\n return self.known_modules[fullname]\n except KeyError:\n raise ImportError("This loader does not know module " + fullname)\n\n def load_module(self, fullname):\n try:\n # in case of a reload\n return sys.modules[fullname]\n except KeyError:\n pass\n mod = self.__get_module(fullname)\n if isinstance(mod, MovedModule):\n mod = mod._resolve()\n else:\n mod.__loader__ = self\n sys.modules[fullname] = mod\n return mod\n\n def is_package(self, fullname):\n """\n Return true, if the named module is a package.\n\n We need this method to get correct spec objects with\n Python 3.4 (see PEP451)\n """\n return hasattr(self.__get_module(fullname), "__path__")\n\n def get_code(self, fullname):\n """Return None\n\n Required, if is_package is implemented"""\n self.__get_module(fullname) # eventually raises ImportError\n return None\n get_source = get_code # same as get_code\n\n_importer = _SixMetaPathImporter(__name__)\n\n\nclass _MovedItems(_LazyModule):\n\n """Lazy loading of moved objects"""\n __path__ = [] # mark as package\n\n\n_moved_attributes = [\n MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),\n MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),\n MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),\n MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),\n MovedAttribute("intern", "__builtin__", "sys"),\n MovedAttribute("map", "itertools", "builtins", "imap", "map"),\n MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),\n MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),\n MovedAttribute("getoutput", "commands", "subprocess"),\n MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),\n MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),\n MovedAttribute("reduce", "__builtin__", "functools"),\n MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),\n MovedAttribute("StringIO", "StringIO", "io"),\n MovedAttribute("UserDict", "UserDict", "collections"),\n MovedAttribute("UserList", "UserList", "collections"),\n MovedAttribute("UserString", "UserString", "collections"),\n MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),\n MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),\n MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),\n MovedModule("builtins", "__builtin__"),\n MovedModule("configparser", "ConfigParser"),\n MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),\n MovedModule("copyreg", "copy_reg"),\n MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),\n MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),\n MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),\n MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),\n MovedModule("http_cookies", "Cookie", "http.cookies"),\n MovedModule("html_entities", 
"htmlentitydefs", "html.entities"),\n MovedModule("html_parser", "HTMLParser", "html.parser"),\n MovedModule("http_client", "httplib", "http.client"),\n MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),\n MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),\n MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),\n MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),\n MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),\n MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),\n MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),\n MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),\n MovedModule("cPickle", "cPickle", "pickle"),\n MovedModule("queue", "Queue"),\n MovedModule("reprlib", "repr"),\n MovedModule("socketserver", "SocketServer"),\n MovedModule("_thread", "thread", "_thread"),\n MovedModule("tkinter", "Tkinter"),\n MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),\n MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),\n MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),\n MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),\n MovedModule("tkinter_tix", "Tix", "tkinter.tix"),\n MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),\n MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),\n MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),\n MovedModule("tkinter_colorchooser", "tkColorChooser",\n "tkinter.colorchooser"),\n MovedModule("tkinter_commondialog", "tkCommonDialog",\n "tkinter.commondialog"),\n MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),\n MovedModule("tkinter_font", "tkFont", "tkinter.font"),\n MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),\n MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",\n "tkinter.simpledialog"),\n MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),\n MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),\n MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),\n MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),\n MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),\n MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),\n]\n# Add windows specific modules.\nif sys.platform == "win32":\n _moved_attributes += [\n MovedModule("winreg", "_winreg"),\n ]\n\nfor attr in _moved_attributes:\n setattr(_MovedItems, attr.name, attr)\n if isinstance(attr, MovedModule):\n _importer._add_module(attr, "moves." 
+ attr.name)\ndel attr\n\n_MovedItems._moved_attributes = _moved_attributes\n\nmoves = _MovedItems(__name__ + ".moves")\n_importer._add_module(moves, "moves")\n\n\nclass Module_six_moves_urllib_parse(_LazyModule):\n\n """Lazy loading of moved objects in six.moves.urllib_parse"""\n\n\n_urllib_parse_moved_attributes = [\n MovedAttribute("ParseResult", "urlparse", "urllib.parse"),\n MovedAttribute("SplitResult", "urlparse", "urllib.parse"),\n MovedAttribute("parse_qs", "urlparse", "urllib.parse"),\n MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),\n MovedAttribute("urldefrag", "urlparse", "urllib.parse"),\n MovedAttribute("urljoin", "urlparse", "urllib.parse"),\n MovedAttribute("urlparse", "urlparse", "urllib.parse"),\n MovedAttribute("urlsplit", "urlparse", "urllib.parse"),\n MovedAttribute("urlunparse", "urlparse", "urllib.parse"),\n MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),\n MovedAttribute("quote", "urllib", "urllib.parse"),\n MovedAttribute("quote_plus", "urllib", "urllib.parse"),\n MovedAttribute("unquote", "urllib", "urllib.parse"),\n MovedAttribute("unquote_plus", "urllib", "urllib.parse"),\n MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),\n MovedAttribute("urlencode", "urllib", "urllib.parse"),\n MovedAttribute("splitquery", "urllib", "urllib.parse"),\n MovedAttribute("splittag", "urllib", "urllib.parse"),\n MovedAttribute("splituser", "urllib", "urllib.parse"),\n MovedAttribute("splitvalue", "urllib", "urllib.parse"),\n MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),\n MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),\n MovedAttribute("uses_params", "urlparse", "urllib.parse"),\n MovedAttribute("uses_query", "urlparse", "urllib.parse"),\n MovedAttribute("uses_relative", "urlparse", "urllib.parse"),\n]\nfor attr in _urllib_parse_moved_attributes:\n setattr(Module_six_moves_urllib_parse, attr.name, attr)\ndel attr\n\nModule_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes\n\n_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),\n "moves.urllib_parse", "moves.urllib.parse")\n\n\nclass Module_six_moves_urllib_error(_LazyModule):\n\n """Lazy loading of moved objects in six.moves.urllib_error"""\n\n\n_urllib_error_moved_attributes = [\n MovedAttribute("URLError", "urllib2", "urllib.error"),\n MovedAttribute("HTTPError", "urllib2", "urllib.error"),\n MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),\n]\nfor attr in _urllib_error_moved_attributes:\n setattr(Module_six_moves_urllib_error, attr.name, attr)\ndel attr\n\nModule_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes\n\n_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),\n "moves.urllib_error", "moves.urllib.error")\n\n\nclass Module_six_moves_urllib_request(_LazyModule):\n\n """Lazy loading of moved objects in six.moves.urllib_request"""\n\n\n_urllib_request_moved_attributes = [\n MovedAttribute("urlopen", "urllib2", "urllib.request"),\n MovedAttribute("install_opener", "urllib2", "urllib.request"),\n MovedAttribute("build_opener", "urllib2", "urllib.request"),\n MovedAttribute("pathname2url", "urllib", "urllib.request"),\n MovedAttribute("url2pathname", "urllib", "urllib.request"),\n MovedAttribute("getproxies", "urllib", "urllib.request"),\n MovedAttribute("Request", "urllib2", "urllib.request"),\n MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),\n MovedAttribute("HTTPDefaultErrorHandler", 
"urllib2", "urllib.request"),\n MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),\n MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),\n MovedAttribute("BaseHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),\n MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),\n MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),\n MovedAttribute("FileHandler", "urllib2", "urllib.request"),\n MovedAttribute("FTPHandler", "urllib2", "urllib.request"),\n MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),\n MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),\n MovedAttribute("urlretrieve", "urllib", "urllib.request"),\n MovedAttribute("urlcleanup", "urllib", "urllib.request"),\n MovedAttribute("URLopener", "urllib", "urllib.request"),\n MovedAttribute("FancyURLopener", "urllib", "urllib.request"),\n MovedAttribute("proxy_bypass", "urllib", "urllib.request"),\n MovedAttribute("parse_http_list", "urllib2", "urllib.request"),\n MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),\n]\nfor attr in _urllib_request_moved_attributes:\n setattr(Module_six_moves_urllib_request, attr.name, attr)\ndel attr\n\nModule_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes\n\n_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),\n "moves.urllib_request", "moves.urllib.request")\n\n\nclass Module_six_moves_urllib_response(_LazyModule):\n\n """Lazy loading of moved objects in six.moves.urllib_response"""\n\n\n_urllib_response_moved_attributes = [\n MovedAttribute("addbase", "urllib", "urllib.response"),\n MovedAttribute("addclosehook", "urllib", "urllib.response"),\n MovedAttribute("addinfo", "urllib", "urllib.response"),\n MovedAttribute("addinfourl", "urllib", "urllib.response"),\n]\nfor attr in _urllib_response_moved_attributes:\n setattr(Module_six_moves_urllib_response, attr.name, attr)\ndel attr\n\nModule_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes\n\n_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),\n "moves.urllib_response", "moves.urllib.response")\n\n\nclass Module_six_moves_urllib_robotparser(_LazyModule):\n\n """Lazy loading of moved objects in six.moves.urllib_robotparser"""\n\n\n_urllib_robotparser_moved_attributes = [\n MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),\n]\nfor attr in _urllib_robotparser_moved_attributes:\n setattr(Module_six_moves_urllib_robotparser, attr.name, attr)\ndel attr\n\nModule_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes\n\n_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),\n "moves.urllib_robotparser", "moves.urllib.robotparser")\n\n\nclass 
Module_six_moves_urllib(types.ModuleType):\n\n """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""\n __path__ = [] # mark as package\n parse = _importer._get_module("moves.urllib_parse")\n error = _importer._get_module("moves.urllib_error")\n request = _importer._get_module("moves.urllib_request")\n response = _importer._get_module("moves.urllib_response")\n robotparser = _importer._get_module("moves.urllib_robotparser")\n\n def __dir__(self):\n return [\'parse\', \'error\', \'request\', \'response\', \'robotparser\']\n\n_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),\n "moves.urllib")\n\n\ndef add_move(move):\n """Add an item to six.moves."""\n setattr(_MovedItems, move.name, move)\n\n\ndef remove_move(name):\n """Remove item from six.moves."""\n try:\n delattr(_MovedItems, name)\n except AttributeError:\n try:\n del moves.__dict__[name]\n except KeyError:\n raise AttributeError("no such move, %r" % (name,))\n\n\nif PY3:\n _meth_func = "__func__"\n _meth_self = "__self__"\n\n _func_closure = "__closure__"\n _func_code = "__code__"\n _func_defaults = "__defaults__"\n _func_globals = "__globals__"\nelse:\n _meth_func = "im_func"\n _meth_self = "im_self"\n\n _func_closure = "func_closure"\n _func_code = "func_code"\n _func_defaults = "func_defaults"\n _func_globals = "func_globals"\n\n\ntry:\n advance_iterator = next\nexcept NameError:\n def advance_iterator(it):\n return it.next()\nnext = advance_iterator\n\n\ntry:\n callable = callable\nexcept NameError:\n def callable(obj):\n return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)\n\n\nif PY3:\n def get_unbound_function(unbound):\n return unbound\n\n create_bound_method = types.MethodType\n\n def create_unbound_method(func, cls):\n return func\n\n Iterator = object\nelse:\n def get_unbound_function(unbound):\n return unbound.im_func\n\n def create_bound_method(func, obj):\n return types.MethodType(func, obj, obj.__class__)\n\n def create_unbound_method(func, cls):\n return types.MethodType(func, None, cls)\n\n class Iterator(object):\n\n def next(self):\n return type(self).__next__(self)\n\n callable = callable\n_add_doc(get_unbound_function,\n """Get the function out of a possibly unbound function""")\n\n\nget_method_function = operator.attrgetter(_meth_func)\nget_method_self = operator.attrgetter(_meth_self)\nget_function_closure = operator.attrgetter(_func_closure)\nget_function_code = operator.attrgetter(_func_code)\nget_function_defaults = operator.attrgetter(_func_defaults)\nget_function_globals = operator.attrgetter(_func_globals)\n\n\nif PY3:\n def iterkeys(d, **kw):\n return iter(d.keys(**kw))\n\n def itervalues(d, **kw):\n return iter(d.values(**kw))\n\n def iteritems(d, **kw):\n return iter(d.items(**kw))\n\n def iterlists(d, **kw):\n return iter(d.lists(**kw))\n\n viewkeys = operator.methodcaller("keys")\n\n viewvalues = operator.methodcaller("values")\n\n viewitems = operator.methodcaller("items")\nelse:\n def iterkeys(d, **kw):\n return d.iterkeys(**kw)\n\n def itervalues(d, **kw):\n return d.itervalues(**kw)\n\n def iteritems(d, **kw):\n return d.iteritems(**kw)\n\n def iterlists(d, **kw):\n return d.iterlists(**kw)\n\n viewkeys = operator.methodcaller("viewkeys")\n\n viewvalues = operator.methodcaller("viewvalues")\n\n viewitems = operator.methodcaller("viewitems")\n\n_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")\n_add_doc(itervalues, "Return an iterator over the values of a dictionary.")\n_add_doc(iteritems,\n "Return an 
iterator over the (key, value) pairs of a dictionary.")\n_add_doc(iterlists,\n "Return an iterator over the (key, [values]) pairs of a dictionary.")\n\n\nif PY3:\n def b(s):\n return s.encode("latin-1")\n\n def u(s):\n return s\n unichr = chr\n import struct\n int2byte = struct.Struct(">B").pack\n del struct\n byte2int = operator.itemgetter(0)\n indexbytes = operator.getitem\n iterbytes = iter\n import io\n StringIO = io.StringIO\n BytesIO = io.BytesIO\n del io\n _assertCountEqual = "assertCountEqual"\n if sys.version_info[1] <= 1:\n _assertRaisesRegex = "assertRaisesRegexp"\n _assertRegex = "assertRegexpMatches"\n _assertNotRegex = "assertNotRegexpMatches"\n else:\n _assertRaisesRegex = "assertRaisesRegex"\n _assertRegex = "assertRegex"\n _assertNotRegex = "assertNotRegex"\nelse:\n def b(s):\n return s\n # Workaround for standalone backslash\n\n def u(s):\n return unicode(s.replace(r\'\\\\\', r\'\\\\\\\\\'), "unicode_escape")\n unichr = unichr\n int2byte = chr\n\n def byte2int(bs):\n return ord(bs[0])\n\n def indexbytes(buf, i):\n return ord(buf[i])\n iterbytes = functools.partial(itertools.imap, ord)\n import StringIO\n StringIO = BytesIO = StringIO.StringIO\n _assertCountEqual = "assertItemsEqual"\n _assertRaisesRegex = "assertRaisesRegexp"\n _assertRegex = "assertRegexpMatches"\n _assertNotRegex = "assertNotRegexpMatches"\n_add_doc(b, """Byte literal""")\n_add_doc(u, """Text literal""")\n\n\ndef assertCountEqual(self, *args, **kwargs):\n return getattr(self, _assertCountEqual)(*args, **kwargs)\n\n\ndef assertRaisesRegex(self, *args, **kwargs):\n return getattr(self, _assertRaisesRegex)(*args, **kwargs)\n\n\ndef assertRegex(self, *args, **kwargs):\n return getattr(self, _assertRegex)(*args, **kwargs)\n\n\ndef assertNotRegex(self, *args, **kwargs):\n return getattr(self, _assertNotRegex)(*args, **kwargs)\n\n\nif PY3:\n exec_ = getattr(moves.builtins, "exec")\n\n def reraise(tp, value, tb=None):\n try:\n if value is None:\n value = tp()\n if value.__traceback__ is not tb:\n raise value.with_traceback(tb)\n raise value\n finally:\n value = None\n tb = None\n\nelse:\n def exec_(_code_, _globs_=None, _locs_=None):\n """Execute code in a namespace."""\n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if _locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec("""exec _code_ in _globs_, _locs_""")\n\n exec_("""def reraise(tp, value, tb=None):\n try:\n raise tp, value, tb\n finally:\n tb = None\n""")\n\n\nif sys.version_info[:2] > (3,):\n exec_("""def raise_from(value, from_value):\n try:\n raise value from from_value\n finally:\n value = None\n""")\nelse:\n def raise_from(value, from_value):\n raise value\n\n\nprint_ = getattr(moves.builtins, "print", None)\nif print_ is None:\n def print_(*args, **kwargs):\n """The new-style print function for Python 2.4 and 2.5."""\n fp = kwargs.pop("file", sys.stdout)\n if fp is None:\n return\n\n def write(data):\n if not isinstance(data, basestring):\n data = str(data)\n # If the file has an encoding, encode unicode with it.\n if (isinstance(fp, file) and\n isinstance(data, unicode) and\n fp.encoding is not None):\n errors = getattr(fp, "errors", None)\n if errors is None:\n errors = "strict"\n data = data.encode(fp.encoding, errors)\n fp.write(data)\n want_unicode = False\n sep = kwargs.pop("sep", None)\n if sep is not None:\n if isinstance(sep, unicode):\n want_unicode = True\n elif not isinstance(sep, str):\n raise TypeError("sep must be None or a string")\n end = 
kwargs.pop("end", None)\n if end is not None:\n if isinstance(end, unicode):\n want_unicode = True\n elif not isinstance(end, str):\n raise TypeError("end must be None or a string")\n if kwargs:\n raise TypeError("invalid keyword arguments to print()")\n if not want_unicode:\n for arg in args:\n if isinstance(arg, unicode):\n want_unicode = True\n break\n if want_unicode:\n newline = unicode("\\n")\n space = unicode(" ")\n else:\n newline = "\\n"\n space = " "\n if sep is None:\n sep = space\n if end is None:\n end = newline\n for i, arg in enumerate(args):\n if i:\n write(sep)\n write(arg)\n write(end)\nif sys.version_info[:2] < (3, 3):\n _print = print_\n\n def print_(*args, **kwargs):\n fp = kwargs.get("file", sys.stdout)\n flush = kwargs.pop("flush", False)\n _print(*args, **kwargs)\n if flush and fp is not None:\n fp.flush()\n\n_add_doc(reraise, """Reraise an exception.""")\n\nif sys.version_info[0:2] < (3, 4):\n # This does exactly the same what the :func:`py3:functools.update_wrapper`\n # function does on Python versions after 3.2. It sets the ``__wrapped__``\n # attribute on ``wrapper`` object and it doesn\'t raise an error if any of\n # the attributes mentioned in ``assigned`` and ``updated`` are missing on\n # ``wrapped`` object.\n def _update_wrapper(wrapper, wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES):\n for attr in assigned:\n try:\n value = getattr(wrapped, attr)\n except AttributeError:\n continue\n else:\n setattr(wrapper, attr, value)\n for attr in updated:\n getattr(wrapper, attr).update(getattr(wrapped, attr, {}))\n wrapper.__wrapped__ = wrapped\n return wrapper\n _update_wrapper.__doc__ = functools.update_wrapper.__doc__\n\n def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES):\n return functools.partial(_update_wrapper, wrapped=wrapped,\n assigned=assigned, updated=updated)\n wraps.__doc__ = functools.wraps.__doc__\n\nelse:\n wraps = functools.wraps\n\n\ndef with_metaclass(meta, *bases):\n """Create a base class with a metaclass."""\n # This requires a bit of explanation: the basic idea is to make a dummy\n # metaclass for one level of class instantiation that replaces itself with\n # the actual metaclass.\n class metaclass(type):\n\n def __new__(cls, name, this_bases, d):\n if sys.version_info[:2] >= (3, 7):\n # This version introduced PEP 560 that requires a bit\n # of extra care (we mimic what is done by __build_class__).\n resolved_bases = types.resolve_bases(bases)\n if resolved_bases is not bases:\n d[\'__orig_bases__\'] = bases\n else:\n resolved_bases = bases\n return meta(name, resolved_bases, d)\n\n @classmethod\n def __prepare__(cls, name, this_bases):\n return meta.__prepare__(name, bases)\n return type.__new__(metaclass, \'temporary_class\', (), {})\n\n\ndef add_metaclass(metaclass):\n """Class decorator for creating a class with a metaclass."""\n def wrapper(cls):\n orig_vars = cls.__dict__.copy()\n slots = orig_vars.get(\'__slots__\')\n if slots is not None:\n if isinstance(slots, str):\n slots = [slots]\n for slots_var in slots:\n orig_vars.pop(slots_var)\n orig_vars.pop(\'__dict__\', None)\n orig_vars.pop(\'__weakref__\', None)\n if hasattr(cls, \'__qualname__\'):\n orig_vars[\'__qualname__\'] = cls.__qualname__\n return metaclass(cls.__name__, cls.__bases__, orig_vars)\n return wrapper\n\n\ndef ensure_binary(s, encoding=\'utf-8\', errors=\'strict\'):\n """Coerce **s** to six.binary_type.\n\n For Python 2:\n - `unicode` -> encoded to `str`\n - `str` -> `str`\n\n For 
Python 3:\n - `str` -> encoded to `bytes`\n - `bytes` -> `bytes`\n """\n if isinstance(s, binary_type):\n return s\n if isinstance(s, text_type):\n return s.encode(encoding, errors)\n raise TypeError("not expecting type \'%s\'" % type(s))\n\n\ndef ensure_str(s, encoding=\'utf-8\', errors=\'strict\'):\n """Coerce *s* to `str`.\n\n For Python 2:\n - `unicode` -> encoded to `str`\n - `str` -> `str`\n\n For Python 3:\n - `str` -> `str`\n - `bytes` -> decoded to `str`\n """\n # Optimization: Fast return for the common case.\n if type(s) is str:\n return s\n if PY2 and isinstance(s, text_type):\n return s.encode(encoding, errors)\n elif PY3 and isinstance(s, binary_type):\n return s.decode(encoding, errors)\n elif not isinstance(s, (text_type, binary_type)):\n raise TypeError("not expecting type \'%s\'" % type(s))\n return s\n\n\ndef ensure_text(s, encoding=\'utf-8\', errors=\'strict\'):\n """Coerce *s* to six.text_type.\n\n For Python 2:\n - `unicode` -> `unicode`\n - `str` -> `unicode`\n\n For Python 3:\n - `str` -> `str`\n - `bytes` -> decoded to `str`\n """\n if isinstance(s, binary_type):\n return s.decode(encoding, errors)\n elif isinstance(s, text_type):\n return s\n else:\n raise TypeError("not expecting type \'%s\'" % type(s))\n\n\ndef python_2_unicode_compatible(klass):\n """\n A class decorator that defines __unicode__ and __str__ methods under Python 2.\n Under Python 3 it does nothing.\n\n To support Python 2 and 3 with a single code base, define a __str__ method\n returning text and apply this decorator to the class.\n """\n if PY2:\n if \'__str__\' not in klass.__dict__:\n raise ValueError("@python_2_unicode_compatible cannot be applied "\n "to %s because it doesn\'t define __str__()." %\n klass.__name__)\n klass.__unicode__ = klass.__str__\n klass.__str__ = lambda self: self.__unicode__().encode(\'utf-8\')\n return klass\n\n\n# Complete the moves implementation.\n# This code is at the end of this module to speed up module loading.\n# Turn this module into a package.\n__path__ = [] # required for PEP 302 and PEP 451\n__package__ = __name__ # see PEP 366 @ReservedAssignment\nif globals().get("__spec__") is not None:\n __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable\n# Remove other six meta path importers, since they cause problems. This can\n# happen if six is removed from sys.modules and then reloaded. (Setuptools does\n# this for some reason.)\nif sys.meta_path:\n for i, importer in enumerate(sys.meta_path):\n # Here\'s some real nastiness: Another "instance" of the six module might\n # be floating around. 
Therefore, we can\'t use isinstance() to check for\n # the six meta path importer, since the other six instance will have\n # inserted an importer with different class.\n if (type(importer).__name__ == "_SixMetaPathImporter" and\n importer.name == __name__):\n del sys.meta_path[i]\n break\n del i, importer\n# Finally, add the importer to the meta path import hook.\nsys.meta_path.append(_importer)\n') + __stickytape_write_module('asttokens/util.py', b'# Copyright 2016 Grist Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ast\nimport collections\nimport token\nfrom six import iteritems\n\n\ndef token_repr(tok_type, string):\n """Returns a human-friendly representation of a token with the given type and string."""\n # repr() prefixes unicode with \'u\' on Python2 but not Python3; strip it out for consistency.\n return \'%s:%s\' % (token.tok_name[tok_type], repr(string).lstrip(\'u\'))\n\n\nclass Token(collections.namedtuple(\'Token\', \'type string start end line index startpos endpos\')):\n """\n TokenInfo is an 8-tuple containing the same 5 fields as the tokens produced by the tokenize\n module, and 3 additional ones useful for this module:\n\n - [0] .type Token type (see token.py)\n - [1] .string Token (a string)\n - [2] .start Starting (row, column) indices of the token (a 2-tuple of ints)\n - [3] .end Ending (row, column) indices of the token (a 2-tuple of ints)\n - [4] .line Original line (string)\n - [5] .index Index of the token in the list of tokens that it belongs to.\n - [6] .startpos Starting character offset into the input text.\n - [7] .endpos Ending character offset into the input text.\n """\n def __str__(self):\n return token_repr(self.type, self.string)\n\n\ndef match_token(token, tok_type, tok_str=None):\n """Returns true if token is of the given type and, if a string is given, has that string."""\n return token.type == tok_type and (tok_str is None or token.string == tok_str)\n\n\ndef expect_token(token, tok_type, tok_str=None):\n """\n Verifies that the given token is of the expected type. If tok_str is given, the token string\n is verified too. If the token doesn\'t match, raises an informative ValueError.\n """\n if not match_token(token, tok_type, tok_str):\n raise ValueError("Expected token %s, got %s on line %s col %s" % (\n token_repr(tok_type, tok_str), str(token),\n token.start[0], token.start[1] + 1))\n\n# These were previously defined in tokenize.py and distinguishable by being greater than\n# token.N_TOKEN. 
As of python3.7, they are in token.py, and we check for them explicitly.\nif hasattr(token, \'ENCODING\'):\n def is_non_coding_token(token_type):\n """\n These are considered non-coding tokens, as they don\'t affect the syntax tree.\n """\n return token_type in (token.NL, token.COMMENT, token.ENCODING)\nelse:\n def is_non_coding_token(token_type):\n """\n These are considered non-coding tokens, as they don\'t affect the syntax tree.\n """\n return token_type >= token.N_TOKENS\n\n\ndef iter_children_func(node):\n """\n Returns a function which yields all direct children of a AST node,\n skipping children that are singleton nodes.\n The function depends on whether ``node`` is from ``ast`` or from the ``astroid`` module.\n """\n return iter_children_astroid if hasattr(node, \'get_children\') else iter_children_ast\n\n\ndef iter_children_astroid(node):\n # Don\'t attempt to process children of JoinedStr nodes, which we can\'t fully handle yet.\n if is_joined_str(node):\n return []\n\n return node.get_children()\n\n\nSINGLETONS = {c for n, c in iteritems(ast.__dict__) if isinstance(c, type) and\n issubclass(c, (ast.expr_context, ast.boolop, ast.operator, ast.unaryop, ast.cmpop))}\n\ndef iter_children_ast(node):\n # Don\'t attempt to process children of JoinedStr nodes, which we can\'t fully handle yet.\n if is_joined_str(node):\n return\n\n if isinstance(node, ast.Dict):\n # override the iteration order: instead of , ,\n # yield keys and values in source order (key1, value1, key2, value2, ...)\n for (key, value) in zip(node.keys, node.values):\n if key is not None:\n yield key\n yield value\n return\n\n for child in ast.iter_child_nodes(node):\n # Skip singleton children; they don\'t reflect particular positions in the code and break the\n # assumptions about the tree consisting of distinct nodes. Note that collecting classes\n # beforehand and checking them in a set is faster than using isinstance each time.\n if child.__class__ not in SINGLETONS:\n yield child\n\n\nstmt_class_names = {n for n, c in iteritems(ast.__dict__)\n if isinstance(c, type) and issubclass(c, ast.stmt)}\nexpr_class_names = ({n for n, c in iteritems(ast.__dict__)\n if isinstance(c, type) and issubclass(c, ast.expr)} |\n {\'AssignName\', \'DelName\', \'Const\', \'AssignAttr\', \'DelAttr\'})\n\n# These feel hacky compared to isinstance() but allow us to work with both ast and astroid nodes\n# in the same way, and without even importing astroid.\ndef is_expr(node):\n """Returns whether node is an expression node."""\n return node.__class__.__name__ in expr_class_names\n\ndef is_stmt(node):\n """Returns whether node is a statement node."""\n return node.__class__.__name__ in stmt_class_names\n\ndef is_module(node):\n """Returns whether node is a module node."""\n return node.__class__.__name__ == \'Module\'\n\ndef is_joined_str(node):\n """Returns whether node is a JoinedStr node, used to represent f-strings."""\n # At the moment, nodes below JoinedStr have wrong line/col info, and trying to process them only\n # leads to errors.\n return node.__class__.__name__ == \'JoinedStr\'\n\n\ndef is_slice(node):\n """Returns whether node represents a slice, e.g. 
`1:2` in `x[1:2]`"""\n # Before 3.9, a tuple containing a slice is an ExtSlice,\n # but this was removed in https://bugs.python.org/issue34822\n return (\n node.__class__.__name__ in (\'Slice\', \'ExtSlice\')\n or (\n node.__class__.__name__ == \'Tuple\'\n and any(map(is_slice, node.elts))\n )\n )\n\n\n# Sentinel value used by visit_tree().\n_PREVISIT = object()\n\ndef visit_tree(node, previsit, postvisit):\n """\n Scans the tree under the node depth-first using an explicit stack. It avoids implicit recursion\n via the function call stack to avoid hitting \'maximum recursion depth exceeded\' error.\n\n It calls ``previsit()`` and ``postvisit()`` as follows:\n\n * ``previsit(node, par_value)`` - should return ``(par_value, value)``\n ``par_value`` is as returned from ``previsit()`` of the parent.\n\n * ``postvisit(node, par_value, value)`` - should return ``value``\n ``par_value`` is as returned from ``previsit()`` of the parent, and ``value`` is as\n returned from ``previsit()`` of this node itself. The return ``value`` is ignored except\n the one for the root node, which is returned from the overall ``visit_tree()`` call.\n\n For the initial node, ``par_value`` is None. ``postvisit`` may be None.\n """\n if not postvisit:\n postvisit = lambda node, pvalue, value: None\n\n iter_children = iter_children_func(node)\n done = set()\n ret = None\n stack = [(node, None, _PREVISIT)]\n while stack:\n current, par_value, value = stack.pop()\n if value is _PREVISIT:\n assert current not in done # protect againt infinite loop in case of a bad tree.\n done.add(current)\n\n pvalue, post_value = previsit(current, par_value)\n stack.append((current, par_value, post_value))\n\n # Insert all children in reverse order (so that first child ends up on top of the stack).\n ins = len(stack)\n for n in iter_children(current):\n stack.insert(ins, (n, pvalue, _PREVISIT))\n else:\n ret = postvisit(current, par_value, value)\n return ret\n\n\n\ndef walk(node):\n """\n Recursively yield all descendant nodes in the tree starting at ``node`` (including ``node``\n itself), using depth-first pre-order traversal (yieling parents before their children).\n\n This is similar to ``ast.walk()``, but with a different order, and it works for both ``ast`` and\n ``astroid`` trees. Also, as ``iter_children()``, it skips singleton nodes generated by ``ast``.\n """\n iter_children = iter_children_func(node)\n done = set()\n stack = [node]\n while stack:\n current = stack.pop()\n assert current not in done # protect againt infinite loop in case of a bad tree.\n done.add(current)\n\n yield current\n\n # Insert all children in reverse order (so that first child ends up on top of the stack).\n # This is faster than building a list and reversing it.\n ins = len(stack)\n for c in iter_children(current):\n stack.insert(ins, c)\n\n\ndef replace(text, replacements):\n """\n Replaces multiple slices of text with new values. This is a convenience method for making code\n modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. 
Replacements is\n an iterable of ``(start, end, new_text)`` tuples.\n\n For example, ``replace("this is a test", [(0, 4, "X"), (8, 9, "THE")])`` produces\n ``"X is THE test"``.\n """\n p = 0\n parts = []\n for (start, end, new_text) in sorted(replacements):\n parts.append(text[p:start])\n parts.append(new_text)\n p = end\n parts.append(text[p:])\n return \'\'.join(parts)\n\n\nclass NodeMethods(object):\n """\n Helper to get `visit_{node_type}` methods given a node\'s class and cache the results.\n """\n def __init__(self):\n self._cache = {}\n\n def get(self, obj, cls):\n """\n Using the lowercase name of the class as node_type, returns `obj.visit_{node_type}`,\n or `obj.visit_default` if the type-specific method is not found.\n """\n method = self._cache.get(cls)\n if not method:\n name = "visit_" + cls.__name__.lower()\n method = getattr(obj, name, obj.visit_default)\n self._cache[cls] = method\n return method\n') + __stickytape_write_module('asttokens/mark_tokens.py', b'# Copyright 2016 Grist Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numbers\nimport sys\nimport token\n\nimport six\n\nfrom . import util\n\n# Mapping of matching braces. To find a token here, look up token[:2].\n_matching_pairs_left = {\n (token.OP, \'(\'): (token.OP, \')\'),\n (token.OP, \'[\'): (token.OP, \']\'),\n (token.OP, \'{\'): (token.OP, \'}\'),\n}\n\n_matching_pairs_right = {\n (token.OP, \')\'): (token.OP, \'(\'),\n (token.OP, \']\'): (token.OP, \'[\'),\n (token.OP, \'}\'): (token.OP, \'{\'),\n}\n\n\nclass MarkTokens(object):\n """\n Helper that visits all nodes in the AST tree and assigns .first_token and .last_token attributes\n to each of them. This is the heart of the token-marking logic.\n """\n def __init__(self, code):\n self._code = code\n self._methods = util.NodeMethods()\n self._iter_children = None\n\n def visit_tree(self, node):\n self._iter_children = util.iter_children_func(node)\n util.visit_tree(node, self._visit_before_children, self._visit_after_children)\n\n def _visit_before_children(self, node, parent_token):\n col = getattr(node, \'col_offset\', None)\n token = self._code.get_token_from_utf8(node.lineno, col) if col is not None else None\n\n if not token and util.is_module(node):\n # We\'ll assume that a Module node starts at the start of the source code.\n token = self._code.get_token(1, 0)\n\n # Use our own token, or our parent\'s if we don\'t have one, to pass to child calls as\n # parent_token argument. The second value becomes the token argument of _visit_after_children.\n return (token or parent_token, token)\n\n def _visit_after_children(self, node, parent_token, token):\n # This processes the node generically first, after all children have been processed.\n\n # Get the first and last tokens that belong to children. Note how this doesn\'t assume that we\n # iterate through children in order that corresponds to occurrence in source code. This\n # assumption can fail (e.g. 
with return annotations).\n first = token\n last = None\n for child in self._iter_children(node):\n if not first or child.first_token.index < first.index:\n first = child.first_token\n if not last or child.last_token.index > last.index:\n last = child.last_token\n\n # If we don\'t have a first token from _visit_before_children, and there were no children, then\n # use the parent\'s token as the first token.\n first = first or parent_token\n\n # If no children, set last token to the first one.\n last = last or first\n\n # Statements continue to before NEWLINE. This helps cover a few different cases at once.\n if util.is_stmt(node):\n last = self._find_last_in_stmt(last)\n\n # Capture any unmatched brackets.\n first, last = self._expand_to_matching_pairs(first, last, node)\n\n # Give a chance to node-specific methods to adjust.\n nfirst, nlast = self._methods.get(self, node.__class__)(node, first, last)\n\n if (nfirst, nlast) != (first, last):\n # If anything changed, expand again to capture any unmatched brackets.\n nfirst, nlast = self._expand_to_matching_pairs(nfirst, nlast, node)\n\n node.first_token = nfirst\n node.last_token = nlast\n\n def _find_last_in_stmt(self, start_token):\n t = start_token\n while (not util.match_token(t, token.NEWLINE) and\n not util.match_token(t, token.OP, \';\') and\n not token.ISEOF(t.type)):\n t = self._code.next_token(t, include_extra=True)\n return self._code.prev_token(t)\n\n def _expand_to_matching_pairs(self, first_token, last_token, node):\n """\n Scan tokens in [first_token, last_token] range that are between node\'s children, and for any\n unmatched brackets, adjust first/last tokens to include the closing pair.\n """\n # We look for opening parens/braces among non-child tokens (i.e. tokens between our actual\n # child nodes). If we find any closing ones, we match them to the opens.\n to_match_right = []\n to_match_left = []\n for tok in self._code.token_range(first_token, last_token):\n tok_info = tok[:2]\n if to_match_right and tok_info == to_match_right[-1]:\n to_match_right.pop()\n elif tok_info in _matching_pairs_left:\n to_match_right.append(_matching_pairs_left[tok_info])\n elif tok_info in _matching_pairs_right:\n to_match_left.append(_matching_pairs_right[tok_info])\n\n # Once done, extend `last_token` to match any unclosed parens/braces.\n for match in reversed(to_match_right):\n last = self._code.next_token(last_token)\n # Allow for trailing commas or colons (allowed in subscripts) before the closing delimiter\n while any(util.match_token(last, token.OP, x) for x in (\',\', \':\')):\n last = self._code.next_token(last)\n # Now check for the actual closing delimiter.\n if util.match_token(last, *match):\n last_token = last\n\n # And extend `first_token` to match any unclosed opening parens/braces.\n for match in to_match_left:\n first = self._code.prev_token(first_token)\n if util.match_token(first, *match):\n first_token = first\n\n return (first_token, last_token)\n\n #----------------------------------------------------------------------\n # Node visitors. 
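# --- Editor's note: a minimal, hedged sketch of what the token-marking pass
# above ultimately enables through the public asttokens API (assumes asttokens
# is installed; the sample source string is illustrative).
import ast
from asttokens import ASTTokens

atok = ASTTokens("result = func(a, b)[1:2]", parse=True)
call = next(n for n in ast.walk(atok.tree) if isinstance(n, ast.Call))
print(atok.get_text(call))          # -> func(a, b)
print(atok.get_text_range(call))    # -> (9, 19), the (start, end) offsets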
Each takes a preliminary first and last tokens, and returns the adjusted pair\n # that will actually be assigned.\n\n def visit_default(self, node, first_token, last_token):\n # pylint: disable=no-self-use\n # By default, we don\'t need to adjust the token we computed earlier.\n return (first_token, last_token)\n\n def handle_comp(self, open_brace, node, first_token, last_token):\n # For list/set/dict comprehensions, we only get the token of the first child, so adjust it to\n # include the opening brace (the closing brace will be matched automatically).\n before = self._code.prev_token(first_token)\n util.expect_token(before, token.OP, open_brace)\n return (before, last_token)\n\n # Python 3.8 fixed the starting position of list comprehensions:\n # https://bugs.python.org/issue31241\n if sys.version_info < (3, 8):\n def visit_listcomp(self, node, first_token, last_token):\n return self.handle_comp(\'[\', node, first_token, last_token)\n\n if six.PY2:\n # We shouldn\'t do this on PY3 because its SetComp/DictComp already have a correct start.\n def visit_setcomp(self, node, first_token, last_token):\n return self.handle_comp(\'{\', node, first_token, last_token)\n\n def visit_dictcomp(self, node, first_token, last_token):\n return self.handle_comp(\'{\', node, first_token, last_token)\n\n def visit_comprehension(self, node, first_token, last_token):\n # The \'comprehension\' node starts with \'for\' but we only get first child; we search backwards\n # to find the \'for\' keyword.\n first = self._code.find_token(first_token, token.NAME, \'for\', reverse=True)\n return (first, last_token)\n\n def visit_if(self, node, first_token, last_token):\n while first_token.string not in (\'if\', \'elif\'):\n first_token = self._code.prev_token(first_token)\n return first_token, last_token\n\n def handle_attr(self, node, first_token, last_token):\n # Attribute node has ".attr" (2 tokens) after the last child.\n dot = self._code.find_token(last_token, token.OP, \'.\')\n name = self._code.next_token(dot)\n util.expect_token(name, token.NAME)\n return (first_token, name)\n\n visit_attribute = handle_attr\n visit_assignattr = handle_attr\n visit_delattr = handle_attr\n\n def handle_def(self, node, first_token, last_token):\n # With astroid, nodes that start with a doc-string can have an empty body, in which case we\n # need to adjust the last token to include the doc string.\n if not node.body and getattr(node, \'doc\', None):\n last_token = self._code.find_token(last_token, token.STRING)\n\n # Include @ from decorator\n if first_token.index > 0:\n prev = self._code.prev_token(first_token)\n if util.match_token(prev, token.OP, \'@\'):\n first_token = prev\n return (first_token, last_token)\n\n visit_classdef = handle_def\n visit_functiondef = handle_def\n\n def handle_following_brackets(self, node, last_token, opening_bracket):\n # This is for calls and subscripts, which have a pair of brackets\n # at the end which may contain no nodes, e.g. 
foo() or bar[:].\n # We look for the opening bracket and then let the matching pair be found automatically\n # Remember that last_token is at the end of all children,\n # so we are not worried about encountering a bracket that belongs to a child.\n first_child = next(self._iter_children(node))\n call_start = self._code.find_token(first_child.last_token, token.OP, opening_bracket)\n if call_start.index > last_token.index:\n last_token = call_start\n return last_token\n\n def visit_call(self, node, first_token, last_token):\n last_token = self.handle_following_brackets(node, last_token, \'(\')\n\n # Handling a python bug with decorators with empty parens, e.g.\n # @deco()\n # def ...\n if util.match_token(first_token, token.OP, \'@\'):\n first_token = self._code.next_token(first_token)\n return (first_token, last_token)\n\n def visit_subscript(self, node, first_token, last_token):\n last_token = self.handle_following_brackets(node, last_token, \'[\')\n return (first_token, last_token)\n\n def handle_bare_tuple(self, node, first_token, last_token):\n # A bare tuple doesn\'t include parens; if there is a trailing comma, make it part of the tuple.\n maybe_comma = self._code.next_token(last_token)\n if util.match_token(maybe_comma, token.OP, \',\'):\n last_token = maybe_comma\n return (first_token, last_token)\n\n if sys.version_info >= (3, 8):\n # In Python3.8 parsed tuples include parentheses when present.\n def handle_tuple_nonempty(self, node, first_token, last_token):\n # It\'s a bare tuple if the first token belongs to the first child. The first child may\n # include extraneous parentheses (which don\'t create new nodes), so account for those too.\n child = node.elts[0]\n child_first, child_last = self._gobble_parens(child.first_token, child.last_token, True)\n if first_token == child_first:\n return self.handle_bare_tuple(node, first_token, last_token)\n return (first_token, last_token)\n else:\n # Before python 3.8, parsed tuples do not include parens.\n def handle_tuple_nonempty(self, node, first_token, last_token):\n (first_token, last_token) = self.handle_bare_tuple(node, first_token, last_token)\n return self._gobble_parens(first_token, last_token, False)\n\n def visit_tuple(self, node, first_token, last_token):\n if not node.elts:\n # An empty tuple is just "()", and we need no further info.\n return (first_token, last_token)\n return self.handle_tuple_nonempty(node, first_token, last_token)\n\n def _gobble_parens(self, first_token, last_token, include_all=False):\n # Expands a range of tokens to include one or all pairs of surrounding parentheses, and\n # returns (first, last) tokens that include these parens.\n while first_token.index > 0:\n prev = self._code.prev_token(first_token)\n next = self._code.next_token(last_token)\n if util.match_token(prev, token.OP, \'(\') and util.match_token(next, token.OP, \')\'):\n first_token, last_token = prev, next\n if include_all:\n continue\n break\n return (first_token, last_token)\n\n def visit_str(self, node, first_token, last_token):\n return self.handle_str(first_token, last_token)\n\n def visit_joinedstr(self, node, first_token, last_token):\n return self.handle_str(first_token, last_token)\n\n def visit_bytes(self, node, first_token, last_token):\n return self.handle_str(first_token, last_token)\n\n def handle_str(self, first_token, last_token):\n # Multiple adjacent STRING tokens form a single string.\n last = self._code.next_token(last_token)\n while util.match_token(last, token.STRING):\n last_token = last\n last = 
self._code.next_token(last_token)\n return (first_token, last_token)\n\n def handle_num(self, node, value, first_token, last_token):\n # A constant like \'-1\' gets turned into two tokens; this will skip the \'-\'.\n while util.match_token(last_token, token.OP):\n last_token = self._code.next_token(last_token)\n\n if isinstance(value, complex):\n # A complex number like -2j cannot be compared directly to 0\n # A complex number like 1-2j is expressed as a binary operation\n # so we don\'t need to worry about it\n value = value.imag\n\n # This makes sure that the - is included\n if value < 0 and first_token.type == token.NUMBER:\n first_token = self._code.prev_token(first_token)\n return (first_token, last_token)\n\n def visit_num(self, node, first_token, last_token):\n return self.handle_num(node, node.n, first_token, last_token)\n\n # In Astroid, the Num and Str nodes are replaced by Const.\n def visit_const(self, node, first_token, last_token):\n if isinstance(node.value, numbers.Number):\n return self.handle_num(node, node.value, first_token, last_token)\n elif isinstance(node.value, (six.text_type, six.binary_type)):\n return self.visit_str(node, first_token, last_token)\n return (first_token, last_token)\n\n # In Python >= 3.6, there is a similar class \'Constant\' for literals\n # In 3.8 it became the type produced by ast.parse\n # https://bugs.python.org/issue32892\n visit_constant = visit_const\n\n def visit_keyword(self, node, first_token, last_token):\n # Until python 3.9 (https://bugs.python.org/issue40141),\n # ast.keyword nodes didn\'t have line info. Astroid has lineno None.\n if node.arg is not None and getattr(node, \'lineno\', None) is None:\n equals = self._code.find_token(first_token, token.OP, \'=\', reverse=True)\n name = self._code.prev_token(equals)\n util.expect_token(name, token.NAME, node.arg)\n first_token = name\n return (first_token, last_token)\n\n def visit_starred(self, node, first_token, last_token):\n # Astroid has \'Starred\' nodes (for "foo(*bar)" type args), but they need to be adjusted.\n if not util.match_token(first_token, token.OP, \'*\'):\n star = self._code.prev_token(first_token)\n if util.match_token(star, token.OP, \'*\'):\n first_token = star\n return (first_token, last_token)\n\n def visit_assignname(self, node, first_token, last_token):\n # Astroid may turn \'except\' clause into AssignName, but we need to adjust it.\n if util.match_token(first_token, token.NAME, \'except\'):\n colon = self._code.find_token(last_token, token.OP, \':\')\n first_token = last_token = self._code.prev_token(colon)\n return (first_token, last_token)\n\n if six.PY2:\n # No need for this on Python3, which already handles \'with\' nodes correctly.\n def visit_with(self, node, first_token, last_token):\n first = self._code.find_token(first_token, token.NAME, \'with\', reverse=True)\n return (first, last_token)\n\n # Async nodes should typically start with the word \'async\'\n # but Python < 3.7 doesn\'t put the col_offset there\n # AsyncFunctionDef is slightly different because it might have\n # decorators before that, which visit_functiondef handles\n def handle_async(self, node, first_token, last_token):\n if not first_token.string == \'async\':\n first_token = self._code.prev_token(first_token)\n return (first_token, last_token)\n\n visit_asyncfor = handle_async\n visit_asyncwith = handle_async\n\n def visit_asyncfunctiondef(self, node, first_token, last_token):\n if util.match_token(first_token, token.NAME, \'def\'):\n # Include the \'async\' token\n first_token = 
self._code.prev_token(first_token)\n return self.visit_functiondef(node, first_token, last_token)\n') + __stickytape_write_module('icecream/__init__.py', b"# -*- coding: utf-8 -*-\n\n#\n# IceCream - Never use print() to debug again\n#\n# Ansgar Grunseid\n# grunseid.com\n# grunseid@gmail.com\n#\n# License: MIT\n#\n\nfrom os.path import dirname, join as pjoin\n\nfrom .icecream import * # noqa\nfrom .builtins import install, uninstall\n\n# Import all variables in __version__.py without explicit imports.\nmeta = {}\nwith open(pjoin(dirname(__file__), '__version__.py')) as f:\n exec(f.read(), meta)\nglobals().update(dict((k, v) for k, v in meta.items() if k not in globals()))\n") + __stickytape_write_module('icecream/icecream.py', b'#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# IceCream - Never use print() to debug again\n#\n# Ansgar Grunseid\n# grunseid.com\n# grunseid@gmail.com\n#\n# License: MIT\n#\n\nfrom __future__ import print_function\n\nimport ast\nimport inspect\nimport pprint\nimport sys\nfrom datetime import datetime\nfrom contextlib import contextmanager\nfrom os.path import basename\nfrom textwrap import dedent\n\nimport colorama\nimport executing\nfrom pygments import highlight\n# See https://gist.github.com/XVilka/8346728 for color support in various\n# terminals and thus whether to use Terminal256Formatter or\n# TerminalTrueColorFormatter.\nfrom pygments.formatters import Terminal256Formatter\nfrom pygments.lexers import PythonLexer as PyLexer, Python3Lexer as Py3Lexer\n\nfrom .coloring import SolarizedDark\n\n\nPYTHON2 = (sys.version_info[0] == 2)\n\n\n_absent = object()\n\n\ndef bindStaticVariable(name, value):\n def decorator(fn):\n setattr(fn, name, value)\n return fn\n return decorator\n\n\n@bindStaticVariable(\'formatter\', Terminal256Formatter(style=SolarizedDark))\n@bindStaticVariable(\n \'lexer\', PyLexer(ensurenl=False) if PYTHON2 else Py3Lexer(ensurenl=False))\ndef colorize(s):\n self = colorize\n return highlight(s, self.lexer, self.formatter)\n\n\n@contextmanager\ndef supportTerminalColorsInWindows():\n # Filter and replace ANSI escape sequences on Windows with equivalent Win32\n # API calls. This code does nothing on non-Windows systems.\n colorama.init()\n yield\n colorama.deinit()\n\n\ndef stderrPrint(*args):\n print(*args, file=sys.stderr)\n\n\ndef isLiteral(s):\n try:\n ast.literal_eval(s)\n except Exception:\n return False\n return True\n\n\ndef colorizedStderrPrint(s):\n colored = colorize(s)\n with supportTerminalColorsInWindows():\n stderrPrint(colored)\n\n\nDEFAULT_PREFIX = \'ic| \'\nDEFAULT_LINE_WRAP_WIDTH = 70 # Characters.\nDEFAULT_CONTEXT_DELIMITER = \'- \'\nDEFAULT_OUTPUT_FUNCTION = colorizedStderrPrint\nDEFAULT_ARG_TO_STRING_FUNCTION = pprint.pformat\n\n\nclass NoSourceAvailableError(OSError):\n """\n Raised when icecream fails to find or access source code that\'s\n required to parse and analyze. This can happen, for example, when\n\n - ic() is invoked inside an interactive shell, e.g. python -i.\n\n - The source code is mangled and/or packaged, e.g. with a project\n freezer like PyInstaller.\n\n - The underlying source code changed during execution. See\n https://stackoverflow.com/a/33175832.\n """\n infoMessage = (\n \'Failed to access the underlying source code for analysis. Was ic() \'\n \'invoked in an interpreter (e.g. python -i), a frozen application \'\n \'(e.g. 
packaged with PyInstaller), or did the underlying source code \'\n \'change during execution?\')\n\n\ndef callOrValue(obj):\n return obj() if callable(obj) else obj\n\n\nclass Source(executing.Source):\n def get_text_with_indentation(self, node):\n result = self.asttokens().get_text(node)\n if \'\\n\' in result:\n result = \' \' * node.first_token.start[1] + result\n result = dedent(result)\n result = result.strip()\n return result\n\n\ndef prefixLinesAfterFirst(prefix, s):\n lines = s.splitlines(True)\n\n for i in range(1, len(lines)):\n lines[i] = prefix + lines[i]\n\n return \'\'.join(lines)\n\n\ndef indented_lines(prefix, string):\n lines = string.splitlines()\n return [prefix + lines[0]] + [\n \' \' * len(prefix) + line\n for line in lines[1:]\n ]\n\n\ndef format_pair(prefix, arg, value):\n arg_lines = indented_lines(prefix, arg)\n value_prefix = arg_lines[-1] + \': \'\n\n looksLikeAString = value[0] + value[-1] in ["\'\'", \'""\']\n if looksLikeAString: # Align the start of multiline strings.\n value = prefixLinesAfterFirst(\' \', value)\n\n value_lines = indented_lines(value_prefix, value)\n lines = arg_lines[:-1] + value_lines\n return \'\\n\'.join(lines)\n\n\ndef argumentToString(obj):\n s = DEFAULT_ARG_TO_STRING_FUNCTION(obj)\n s = s.replace(\'\\\\n\', \'\\n\') # Preserve string newlines in output.\n return s\n\n\nclass IceCreamDebugger:\n _pairDelimiter = \', \' # Used by the tests in tests/.\n lineWrapWidth = DEFAULT_LINE_WRAP_WIDTH\n contextDelimiter = DEFAULT_CONTEXT_DELIMITER\n\n def __init__(self, prefix=DEFAULT_PREFIX,\n outputFunction=DEFAULT_OUTPUT_FUNCTION,\n argToStringFunction=argumentToString, includeContext=False):\n self.enabled = True\n self.prefix = prefix\n self.includeContext = includeContext\n self.outputFunction = outputFunction\n self.argToStringFunction = argToStringFunction\n\n def __call__(self, *args):\n if self.enabled:\n callFrame = inspect.currentframe().f_back\n try:\n out = self._format(callFrame, *args)\n except NoSourceAvailableError as err:\n prefix = callOrValue(self.prefix)\n out = prefix + \'Error: \' + err.infoMessage\n self.outputFunction(out)\n\n if not args: # E.g. ic().\n passthrough = None\n elif len(args) == 1: # E.g. ic(1).\n passthrough = args[0]\n else: # E.g. 
ic(1, 2, 3).\n passthrough = args\n\n return passthrough\n\n def format(self, *args):\n callFrame = inspect.currentframe().f_back\n out = self._format(callFrame, *args)\n return out\n\n def _format(self, callFrame, *args):\n prefix = callOrValue(self.prefix)\n\n callNode = Source.executing(callFrame).node\n if callNode is None:\n raise NoSourceAvailableError()\n\n context = self._formatContext(callFrame, callNode)\n if not args:\n time = self._formatTime()\n out = prefix + context + time\n else:\n if not self.includeContext:\n context = \'\'\n out = self._formatArgs(\n callFrame, callNode, prefix, context, args)\n\n return out\n\n def _formatArgs(self, callFrame, callNode, prefix, context, args):\n source = Source.for_frame(callFrame)\n sanitizedArgStrs = [\n source.get_text_with_indentation(arg)\n for arg in callNode.args]\n\n pairs = list(zip(sanitizedArgStrs, args))\n\n out = self._constructArgumentOutput(prefix, context, pairs)\n return out\n\n def _constructArgumentOutput(self, prefix, context, pairs):\n def argPrefix(arg):\n return \'%s: \' % arg\n\n pairs = [(arg, self.argToStringFunction(val)) for arg, val in pairs]\n # For cleaner output, if is a literal, eg 3, "string", b\'bytes\',\n # etc, only output the value, not the argument and the value, as the\n # argument and the value will be identical or nigh identical. Ex: with\n # ic("hello"), just output\n #\n # ic| \'hello\',\n #\n # instead of\n #\n # ic| "hello": \'hello\'.\n #\n pairStrs = [\n val if isLiteral(arg) else (argPrefix(arg) + val)\n for arg, val in pairs]\n\n allArgsOnOneLine = self._pairDelimiter.join(pairStrs)\n multilineArgs = len(allArgsOnOneLine.splitlines()) > 1\n\n contextDelimiter = self.contextDelimiter if context else \'\'\n allPairs = prefix + context + contextDelimiter + allArgsOnOneLine\n firstLineTooLong = len(allPairs.splitlines()[0]) > self.lineWrapWidth\n\n if multilineArgs or firstLineTooLong:\n # ic| foo.py:11 in foo()\n # multilineStr: \'line1\n # line2\'\n #\n # ic| foo.py:11 in foo()\n # a: 11111111111111111111\n # b: 22222222222222222222\n if context:\n lines = [prefix + context] + [\n format_pair(len(prefix) * \' \', arg, value)\n for arg, value in pairs\n ]\n # ic| multilineStr: \'line1\n # line2\'\n #\n # ic| a: 11111111111111111111\n # b: 22222222222222222222\n else:\n arg_lines = [\n format_pair(\'\', arg, value)\n for arg, value in pairs\n ]\n lines = indented_lines(prefix, \'\\n\'.join(arg_lines))\n # ic| foo.py:11 in foo()- a: 1, b: 2\n # ic| a: 1, b: 2, c: 3\n else:\n lines = [prefix + context + contextDelimiter + allArgsOnOneLine]\n\n return \'\\n\'.join(lines)\n\n def _formatContext(self, callFrame, callNode):\n filename, lineNumber, parentFunction = self._getContext(\n callFrame, callNode)\n\n if parentFunction != \'\':\n parentFunction = \'%s()\' % parentFunction\n\n context = \'%s:%s in %s\' % (filename, lineNumber, parentFunction)\n return context\n\n def _formatTime(self):\n now = datetime.now()\n formatted = now.strftime(\'%H:%M:%S.%f\')[:-3]\n return \' at %s\' % formatted\n\n def _getContext(self, callFrame, callNode):\n lineNumber = callNode.lineno\n frameInfo = inspect.getframeinfo(callFrame)\n parentFunction = frameInfo.function\n filename = basename(frameInfo.filename)\n\n return filename, lineNumber, parentFunction\n\n def enable(self):\n self.enabled = True\n\n def disable(self):\n self.enabled = False\n\n def configureOutput(self, prefix=_absent, outputFunction=_absent,\n argToStringFunction=_absent, includeContext=_absent):\n if prefix is not _absent:\n self.prefix = 
prefix\n\n if outputFunction is not _absent:\n self.outputFunction = outputFunction\n\n if argToStringFunction is not _absent:\n self.argToStringFunction = argToStringFunction\n\n if includeContext is not _absent:\n self.includeContext = includeContext\n\n\nic = IceCreamDebugger()\n') + __stickytape_write_module('colorama/__init__.py', b"# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.\nfrom .initialise import init, deinit, reinit, colorama_text\nfrom .ansi import Fore, Back, Style, Cursor\nfrom .ansitowin32 import AnsiToWin32\n\n__version__ = '0.4.3'\n") + __stickytape_write_module('colorama/initialise.py', b"# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.\nimport atexit\nimport contextlib\nimport sys\n\nfrom .ansitowin32 import AnsiToWin32\n\n\norig_stdout = None\norig_stderr = None\n\nwrapped_stdout = None\nwrapped_stderr = None\n\natexit_done = False\n\n\ndef reset_all():\n if AnsiToWin32 is not None: # Issue #74: objects might become None at exit\n AnsiToWin32(orig_stdout).reset_all()\n\n\ndef init(autoreset=False, convert=None, strip=None, wrap=True):\n\n if not wrap and any([autoreset, convert, strip]):\n raise ValueError('wrap=False conflicts with any other arg=True')\n\n global wrapped_stdout, wrapped_stderr\n global orig_stdout, orig_stderr\n\n orig_stdout = sys.stdout\n orig_stderr = sys.stderr\n\n if sys.stdout is None:\n wrapped_stdout = None\n else:\n sys.stdout = wrapped_stdout = \\\n wrap_stream(orig_stdout, convert, strip, autoreset, wrap)\n if sys.stderr is None:\n wrapped_stderr = None\n else:\n sys.stderr = wrapped_stderr = \\\n wrap_stream(orig_stderr, convert, strip, autoreset, wrap)\n\n global atexit_done\n if not atexit_done:\n atexit.register(reset_all)\n atexit_done = True\n\n\ndef deinit():\n if orig_stdout is not None:\n sys.stdout = orig_stdout\n if orig_stderr is not None:\n sys.stderr = orig_stderr\n\n\n@contextlib.contextmanager\ndef colorama_text(*args, **kwargs):\n init(*args, **kwargs)\n try:\n yield\n finally:\n deinit()\n\n\ndef reinit():\n if wrapped_stdout is not None:\n sys.stdout = wrapped_stdout\n if wrapped_stderr is not None:\n sys.stderr = wrapped_stderr\n\n\ndef wrap_stream(stream, convert, strip, autoreset, wrap):\n if wrap:\n wrapper = AnsiToWin32(stream,\n convert=convert, strip=strip, autoreset=autoreset)\n if wrapper.should_wrap():\n stream = wrapper.stream\n return stream\n") + __stickytape_write_module('colorama/ansitowin32.py', b'# Copyright Jonathan Hartley 2013. 
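# --- Editor's note: a hedged sketch of the public surface the bundled module
# above exposes, matching the icecream README; the filename and line number in
# the sample output are illustrative, and exact formatting depends on config.
from icecream import ic

def add(a, b):
    return ic(a + b)        # prints "ic| a + b: 3" to stderr and returns 3

add(1, 2)
ic.configureOutput(prefix='debug| ', includeContext=True)
add(3, 4)                   # e.g. "debug| demo.py:12 in add()- a + b: 7"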
BSD 3-Clause license, see LICENSE file.\nimport re\nimport sys\nimport os\n\nfrom .ansi import AnsiFore, AnsiBack, AnsiStyle, Style\nfrom .winterm import WinTerm, WinColor, WinStyle\nfrom .win32 import windll, winapi_test\n\n\nwinterm = None\nif windll is not None:\n winterm = WinTerm()\n\n\nclass StreamWrapper(object):\n \'\'\'\n Wraps a stream (such as stdout), acting as a transparent proxy for all\n attribute access apart from method \'write()\', which is delegated to our\n Converter instance.\n \'\'\'\n def __init__(self, wrapped, converter):\n # double-underscore everything to prevent clashes with names of\n # attributes on the wrapped stream object.\n self.__wrapped = wrapped\n self.__convertor = converter\n\n def __getattr__(self, name):\n return getattr(self.__wrapped, name)\n\n def __enter__(self, *args, **kwargs):\n # special method lookup bypasses __getattr__/__getattribute__, see\n # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit\n # thus, contextlib magic methods are not proxied via __getattr__\n return self.__wrapped.__enter__(*args, **kwargs)\n\n def __exit__(self, *args, **kwargs):\n return self.__wrapped.__exit__(*args, **kwargs)\n\n def write(self, text):\n self.__convertor.write(text)\n\n def isatty(self):\n stream = self.__wrapped\n if \'PYCHARM_HOSTED\' in os.environ:\n if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):\n return True\n try:\n stream_isatty = stream.isatty\n except AttributeError:\n return False\n else:\n return stream_isatty()\n\n @property\n def closed(self):\n stream = self.__wrapped\n try:\n return stream.closed\n except AttributeError:\n return True\n\n\nclass AnsiToWin32(object):\n \'\'\'\n Implements a \'write()\' method which, on Windows, will strip ANSI character\n sequences from the text, and if outputting to a tty, will convert them into\n win32 function calls.\n \'\'\'\n ANSI_CSI_RE = re.compile(\'\\001?\\033\\\\[((?:\\\\d|;)*)([a-zA-Z])\\002?\') # Control Sequence Introducer\n ANSI_OSC_RE = re.compile(\'\\001?\\033\\\\]((?:.|;)*?)(\\x07)\\002?\') # Operating System Command\n\n def __init__(self, wrapped, convert=None, strip=None, autoreset=False):\n # The wrapped stream (normally sys.stdout or sys.stderr)\n self.wrapped = wrapped\n\n # should we reset colors to defaults after every .write()\n self.autoreset = autoreset\n\n # create the proxy wrapping our output stream\n self.stream = StreamWrapper(wrapped, self)\n\n on_windows = os.name == \'nt\'\n # We test if the WinAPI works, because even if we are on Windows\n # we may be using a terminal that doesn\'t support the WinAPI\n # (e.g. Cygwin Terminal). In this case it\'s up to the terminal\n # to support the ANSI codes.\n conversion_supported = on_windows and winapi_test()\n\n # should we strip ANSI sequences from our output?\n if strip is None:\n strip = conversion_supported or (not self.stream.closed and not self.stream.isatty())\n self.strip = strip\n\n # should we should convert ANSI sequences into win32 calls?\n if convert is None:\n convert = conversion_supported and not self.stream.closed and self.stream.isatty()\n self.convert = convert\n\n # dict of ansi codes to win32 functions and parameters\n self.win32_calls = self.get_win32_calls()\n\n # are we wrapping stderr?\n self.on_stderr = self.wrapped is sys.stderr\n\n def should_wrap(self):\n \'\'\'\n True if this class is actually needed. If false, then the output\n stream will not be affected, nor will win32 calls be issued, so\n wrapping stdout is not actually required. 
This will generally be\n False on non-Windows platforms, unless optional functionality like\n autoreset has been requested using kwargs to init()\n \'\'\'\n return self.convert or self.strip or self.autoreset\n\n def get_win32_calls(self):\n if self.convert and winterm:\n return {\n AnsiStyle.RESET_ALL: (winterm.reset_all, ),\n AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),\n AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),\n AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),\n AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),\n AnsiFore.RED: (winterm.fore, WinColor.RED),\n AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),\n AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),\n AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),\n AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),\n AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),\n AnsiFore.WHITE: (winterm.fore, WinColor.GREY),\n AnsiFore.RESET: (winterm.fore, ),\n AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),\n AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),\n AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),\n AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),\n AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),\n AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),\n AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),\n AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),\n AnsiBack.BLACK: (winterm.back, WinColor.BLACK),\n AnsiBack.RED: (winterm.back, WinColor.RED),\n AnsiBack.GREEN: (winterm.back, WinColor.GREEN),\n AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),\n AnsiBack.BLUE: (winterm.back, WinColor.BLUE),\n AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),\n AnsiBack.CYAN: (winterm.back, WinColor.CYAN),\n AnsiBack.WHITE: (winterm.back, WinColor.GREY),\n AnsiBack.RESET: (winterm.back, ),\n AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),\n AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),\n AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),\n AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),\n AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),\n AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),\n AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),\n AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),\n }\n return dict()\n\n def write(self, text):\n if self.strip or self.convert:\n self.write_and_convert(text)\n else:\n self.wrapped.write(text)\n self.wrapped.flush()\n if self.autoreset:\n self.reset_all()\n\n\n def reset_all(self):\n if self.convert:\n self.call_win32(\'m\', (0,))\n elif not self.strip and not self.stream.closed:\n self.wrapped.write(Style.RESET_ALL)\n\n\n def write_and_convert(self, text):\n \'\'\'\n Write the given text to our wrapped stream, stripping any ANSI\n sequences from the text, and optionally converting them into win32\n calls.\n \'\'\'\n cursor = 0\n text = self.convert_osc(text)\n for match in self.ANSI_CSI_RE.finditer(text):\n start, end = match.span()\n self.write_plain_text(text, cursor, start)\n self.convert_ansi(*match.groups())\n cursor = end\n self.write_plain_text(text, cursor, len(text))\n\n\n def write_plain_text(self, text, start, end):\n if start < end:\n self.wrapped.write(text[start:end])\n self.wrapped.flush()\n\n\n def convert_ansi(self, paramstring, command):\n if self.convert:\n params = self.extract_params(command, paramstring)\n self.call_win32(command, params)\n\n\n def extract_params(self, command, 
paramstring):\n if command in \'Hf\':\n params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(\';\'))\n while len(params) < 2:\n # defaults:\n params = params + (1,)\n else:\n params = tuple(int(p) for p in paramstring.split(\';\') if len(p) != 0)\n if len(params) == 0:\n # defaults:\n if command in \'JKm\':\n params = (0,)\n elif command in \'ABCD\':\n params = (1,)\n\n return params\n\n\n def call_win32(self, command, params):\n if command == \'m\':\n for param in params:\n if param in self.win32_calls:\n func_args = self.win32_calls[param]\n func = func_args[0]\n args = func_args[1:]\n kwargs = dict(on_stderr=self.on_stderr)\n func(*args, **kwargs)\n elif command in \'J\':\n winterm.erase_screen(params[0], on_stderr=self.on_stderr)\n elif command in \'K\':\n winterm.erase_line(params[0], on_stderr=self.on_stderr)\n elif command in \'Hf\': # cursor position - absolute\n winterm.set_cursor_position(params, on_stderr=self.on_stderr)\n elif command in \'ABCD\': # cursor position - relative\n n = params[0]\n # A - up, B - down, C - forward, D - back\n x, y = {\'A\': (0, -n), \'B\': (0, n), \'C\': (n, 0), \'D\': (-n, 0)}[command]\n winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)\n\n\n def convert_osc(self, text):\n for match in self.ANSI_OSC_RE.finditer(text):\n start, end = match.span()\n text = text[:start] + text[end:]\n paramstring, command = match.groups()\n if command in \'\\x07\': # \\x07 = BEL\n params = paramstring.split(";")\n # 0 - change title and icon (we will only change title)\n # 1 - change icon (we don\'t support this)\n # 2 - change title\n if params[0] in \'02\':\n winterm.set_title(params[1])\n return text\n') + __stickytape_write_module('colorama/ansi.py', b"# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.\n'''\nThis module generates ANSI character codes to printing colors to terminals.\nSee: http://en.wikipedia.org/wiki/ANSI_escape_code\n'''\n\nCSI = '\\033['\nOSC = '\\033]'\nBEL = '\\007'\n\n\ndef code_to_chars(code):\n return CSI + str(code) + 'm'\n\ndef set_title(title):\n return OSC + '2;' + title + BEL\n\ndef clear_screen(mode=2):\n return CSI + str(mode) + 'J'\n\ndef clear_line(mode=2):\n return CSI + str(mode) + 'K'\n\n\nclass AnsiCodes(object):\n def __init__(self):\n # the subclasses declare class attributes which are numbers.\n # Upon instantiation we define instance attributes, which are the same\n # as the class attributes but wrapped with the ANSI escape sequence\n for name in dir(self):\n if not name.startswith('_'):\n value = getattr(self, name)\n setattr(self, name, code_to_chars(value))\n\n\nclass AnsiCursor(object):\n def UP(self, n=1):\n return CSI + str(n) + 'A'\n def DOWN(self, n=1):\n return CSI + str(n) + 'B'\n def FORWARD(self, n=1):\n return CSI + str(n) + 'C'\n def BACK(self, n=1):\n return CSI + str(n) + 'D'\n def POS(self, x=1, y=1):\n return CSI + str(y) + ';' + str(x) + 'H'\n\n\nclass AnsiFore(AnsiCodes):\n BLACK = 30\n RED = 31\n GREEN = 32\n YELLOW = 33\n BLUE = 34\n MAGENTA = 35\n CYAN = 36\n WHITE = 37\n RESET = 39\n\n # These are fairly well supported, but not part of the standard.\n LIGHTBLACK_EX = 90\n LIGHTRED_EX = 91\n LIGHTGREEN_EX = 92\n LIGHTYELLOW_EX = 93\n LIGHTBLUE_EX = 94\n LIGHTMAGENTA_EX = 95\n LIGHTCYAN_EX = 96\n LIGHTWHITE_EX = 97\n\n\nclass AnsiBack(AnsiCodes):\n BLACK = 40\n RED = 41\n GREEN = 42\n YELLOW = 43\n BLUE = 44\n MAGENTA = 45\n CYAN = 46\n WHITE = 47\n RESET = 49\n\n # These are fairly well supported, but not part of the standard.\n LIGHTBLACK_EX = 100\n 
LIGHTRED_EX = 101\n LIGHTGREEN_EX = 102\n LIGHTYELLOW_EX = 103\n LIGHTBLUE_EX = 104\n LIGHTMAGENTA_EX = 105\n LIGHTCYAN_EX = 106\n LIGHTWHITE_EX = 107\n\n\nclass AnsiStyle(AnsiCodes):\n BRIGHT = 1\n DIM = 2\n NORMAL = 22\n RESET_ALL = 0\n\nFore = AnsiFore()\nBack = AnsiBack()\nStyle = AnsiStyle()\nCursor = AnsiCursor()\n") + __stickytape_write_module('colorama/winterm.py', b"# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.\nfrom . import win32\n\n\n# from wincon.h\nclass WinColor(object):\n BLACK = 0\n BLUE = 1\n GREEN = 2\n CYAN = 3\n RED = 4\n MAGENTA = 5\n YELLOW = 6\n GREY = 7\n\n# from wincon.h\nclass WinStyle(object):\n NORMAL = 0x00 # dim text, dim background\n BRIGHT = 0x08 # bright text, dim background\n BRIGHT_BACKGROUND = 0x80 # dim text, bright background\n\nclass WinTerm(object):\n\n def __init__(self):\n self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes\n self.set_attrs(self._default)\n self._default_fore = self._fore\n self._default_back = self._back\n self._default_style = self._style\n # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.\n # So that LIGHT_EX colors and BRIGHT style do not clobber each other,\n # we track them separately, since LIGHT_EX is overwritten by Fore/Back\n # and BRIGHT is overwritten by Style codes.\n self._light = 0\n\n def get_attrs(self):\n return self._fore + self._back * 16 + (self._style | self._light)\n\n def set_attrs(self, value):\n self._fore = value & 7\n self._back = (value >> 4) & 7\n self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)\n\n def reset_all(self, on_stderr=None):\n self.set_attrs(self._default)\n self.set_console(attrs=self._default)\n self._light = 0\n\n def fore(self, fore=None, light=False, on_stderr=False):\n if fore is None:\n fore = self._default_fore\n self._fore = fore\n # Emulate LIGHT_EX with BRIGHT Style\n if light:\n self._light |= WinStyle.BRIGHT\n else:\n self._light &= ~WinStyle.BRIGHT\n self.set_console(on_stderr=on_stderr)\n\n def back(self, back=None, light=False, on_stderr=False):\n if back is None:\n back = self._default_back\n self._back = back\n # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style\n if light:\n self._light |= WinStyle.BRIGHT_BACKGROUND\n else:\n self._light &= ~WinStyle.BRIGHT_BACKGROUND\n self.set_console(on_stderr=on_stderr)\n\n def style(self, style=None, on_stderr=False):\n if style is None:\n style = self._default_style\n self._style = style\n self.set_console(on_stderr=on_stderr)\n\n def set_console(self, attrs=None, on_stderr=False):\n if attrs is None:\n attrs = self.get_attrs()\n handle = win32.STDOUT\n if on_stderr:\n handle = win32.STDERR\n win32.SetConsoleTextAttribute(handle, attrs)\n\n def get_position(self, handle):\n position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition\n # Because Windows coordinates are 0-based,\n # and win32.SetConsoleCursorPosition expects 1-based.\n position.X += 1\n position.Y += 1\n return position\n\n def set_cursor_position(self, position=None, on_stderr=False):\n if position is None:\n # I'm not currently tracking the position, so there is no default.\n # position = self.get_position()\n return\n handle = win32.STDOUT\n if on_stderr:\n handle = win32.STDERR\n win32.SetConsoleCursorPosition(handle, position)\n\n def cursor_adjust(self, x, y, on_stderr=False):\n handle = win32.STDOUT\n if on_stderr:\n handle = win32.STDERR\n position = self.get_position(handle)\n adjusted_position = (position.Y + y, position.X + x)\n 
win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)\n\n def erase_screen(self, mode=0, on_stderr=False):\n # 0 should clear from the cursor to the end of the screen.\n # 1 should clear from the cursor to the beginning of the screen.\n # 2 should clear the entire screen, and move cursor to (1,1)\n handle = win32.STDOUT\n if on_stderr:\n handle = win32.STDERR\n csbi = win32.GetConsoleScreenBufferInfo(handle)\n # get the number of character cells in the current buffer\n cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y\n # get number of character cells before current cursor position\n cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X\n if mode == 0:\n from_coord = csbi.dwCursorPosition\n cells_to_erase = cells_in_screen - cells_before_cursor\n elif mode == 1:\n from_coord = win32.COORD(0, 0)\n cells_to_erase = cells_before_cursor\n elif mode == 2:\n from_coord = win32.COORD(0, 0)\n cells_to_erase = cells_in_screen\n else:\n # invalid mode\n return\n # fill the entire screen with blanks\n win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)\n # now set the buffer's attributes accordingly\n win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)\n if mode == 2:\n # put the cursor where needed\n win32.SetConsoleCursorPosition(handle, (1, 1))\n\n def erase_line(self, mode=0, on_stderr=False):\n # 0 should clear from the cursor to the end of the line.\n # 1 should clear from the cursor to the beginning of the line.\n # 2 should clear the entire line.\n handle = win32.STDOUT\n if on_stderr:\n handle = win32.STDERR\n csbi = win32.GetConsoleScreenBufferInfo(handle)\n if mode == 0:\n from_coord = csbi.dwCursorPosition\n cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X\n elif mode == 1:\n from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)\n cells_to_erase = csbi.dwCursorPosition.X\n elif mode == 2:\n from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)\n cells_to_erase = csbi.dwSize.X\n else:\n # invalid mode\n return\n # fill the entire screen with blanks\n win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)\n # now set the buffer's attributes accordingly\n win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)\n\n def set_title(self, title):\n win32.SetConsoleTitle(title)\n") + __stickytape_write_module('colorama/win32.py', b'# Copyright Jonathan Hartley 2013. 
BSD 3-Clause license, see LICENSE file.\n\n# from winbase.h\nSTDOUT = -11\nSTDERR = -12\n\ntry:\n import ctypes\n from ctypes import LibraryLoader\n windll = LibraryLoader(ctypes.WinDLL)\n from ctypes import wintypes\nexcept (AttributeError, ImportError):\n windll = None\n SetConsoleTextAttribute = lambda *_: None\n winapi_test = lambda *_: None\nelse:\n from ctypes import byref, Structure, c_char, POINTER\n\n COORD = wintypes._COORD\n\n class CONSOLE_SCREEN_BUFFER_INFO(Structure):\n """struct in wincon.h."""\n _fields_ = [\n ("dwSize", COORD),\n ("dwCursorPosition", COORD),\n ("wAttributes", wintypes.WORD),\n ("srWindow", wintypes.SMALL_RECT),\n ("dwMaximumWindowSize", COORD),\n ]\n def __str__(self):\n return \'(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)\' % (\n self.dwSize.Y, self.dwSize.X\n , self.dwCursorPosition.Y, self.dwCursorPosition.X\n , self.wAttributes\n , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right\n , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X\n )\n\n _GetStdHandle = windll.kernel32.GetStdHandle\n _GetStdHandle.argtypes = [\n wintypes.DWORD,\n ]\n _GetStdHandle.restype = wintypes.HANDLE\n\n _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo\n _GetConsoleScreenBufferInfo.argtypes = [\n wintypes.HANDLE,\n POINTER(CONSOLE_SCREEN_BUFFER_INFO),\n ]\n _GetConsoleScreenBufferInfo.restype = wintypes.BOOL\n\n _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute\n _SetConsoleTextAttribute.argtypes = [\n wintypes.HANDLE,\n wintypes.WORD,\n ]\n _SetConsoleTextAttribute.restype = wintypes.BOOL\n\n _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition\n _SetConsoleCursorPosition.argtypes = [\n wintypes.HANDLE,\n COORD,\n ]\n _SetConsoleCursorPosition.restype = wintypes.BOOL\n\n _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA\n _FillConsoleOutputCharacterA.argtypes = [\n wintypes.HANDLE,\n c_char,\n wintypes.DWORD,\n COORD,\n POINTER(wintypes.DWORD),\n ]\n _FillConsoleOutputCharacterA.restype = wintypes.BOOL\n\n _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute\n _FillConsoleOutputAttribute.argtypes = [\n wintypes.HANDLE,\n wintypes.WORD,\n wintypes.DWORD,\n COORD,\n POINTER(wintypes.DWORD),\n ]\n _FillConsoleOutputAttribute.restype = wintypes.BOOL\n\n _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW\n _SetConsoleTitleW.argtypes = [\n wintypes.LPCWSTR\n ]\n _SetConsoleTitleW.restype = wintypes.BOOL\n\n def _winapi_test(handle):\n csbi = CONSOLE_SCREEN_BUFFER_INFO()\n success = _GetConsoleScreenBufferInfo(\n handle, byref(csbi))\n return bool(success)\n\n def winapi_test():\n return any(_winapi_test(h) for h in\n (_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))\n\n def GetConsoleScreenBufferInfo(stream_id=STDOUT):\n handle = _GetStdHandle(stream_id)\n csbi = CONSOLE_SCREEN_BUFFER_INFO()\n success = _GetConsoleScreenBufferInfo(\n handle, byref(csbi))\n return csbi\n\n def SetConsoleTextAttribute(stream_id, attrs):\n handle = _GetStdHandle(stream_id)\n return _SetConsoleTextAttribute(handle, attrs)\n\n def SetConsoleCursorPosition(stream_id, position, adjust=True):\n position = COORD(*position)\n # If the position is out of range, do nothing.\n if position.Y <= 0 or position.X <= 0:\n return\n # Adjust for Windows\' SetConsoleCursorPosition:\n # 1. being 0-based, while ANSI is 1-based.\n # 2. 
expecting (x,y), while ANSI uses (y,x).\n adjusted_position = COORD(position.Y - 1, position.X - 1)\n if adjust:\n # Adjust for viewport\'s scroll position\n sr = GetConsoleScreenBufferInfo(STDOUT).srWindow\n adjusted_position.Y += sr.Top\n adjusted_position.X += sr.Left\n # Resume normal processing\n handle = _GetStdHandle(stream_id)\n return _SetConsoleCursorPosition(handle, adjusted_position)\n\n def FillConsoleOutputCharacter(stream_id, char, length, start):\n handle = _GetStdHandle(stream_id)\n char = c_char(char.encode())\n length = wintypes.DWORD(length)\n num_written = wintypes.DWORD(0)\n # Note that this is hard-coded for ANSI (vs wide) bytes.\n success = _FillConsoleOutputCharacterA(\n handle, char, length, start, byref(num_written))\n return num_written.value\n\n def FillConsoleOutputAttribute(stream_id, attr, length, start):\n \'\'\' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )\'\'\'\n handle = _GetStdHandle(stream_id)\n attribute = wintypes.WORD(attr)\n length = wintypes.DWORD(length)\n num_written = wintypes.DWORD(0)\n # Note that this is hard-coded for ANSI (vs wide) bytes.\n return _FillConsoleOutputAttribute(\n handle, attribute, length, start, byref(num_written))\n\n def SetConsoleTitle(title):\n return _SetConsoleTitleW(title)\n') + __stickytape_write_module('executing/__init__.py', b'"""\nGet information about what a frame is currently doing. Typical usage:\n\n import executing\n\n node = executing.Source.executing(frame).node\n # node will be an AST node or None\n"""\n\nfrom collections import namedtuple\n_VersionInfo = namedtuple(\'VersionInfo\', (\'major\', \'minor\', \'micro\'))\nfrom .executing import Source, Executing, only, NotOneValueFound, cache, future_flags\ntry:\n from .version import __version__\n if "dev" in __version__:\n raise ValueError\nexcept Exception:\n # version.py is auto-generated with the git tag when building\n __version__ = "???"\n __version_info__ = _VersionInfo(-1, -1, -1)\nelse:\n __version_info__ = _VersionInfo(*map(int, __version__.split(\'.\')))\n\n\n__all__ = ["Source"]\n') + __stickytape_write_module('executing/executing.py', b'import __future__\nimport ast\nimport dis\nimport functools\nimport inspect\nimport io\nimport linecache\nimport sys\nimport types\nfrom collections import defaultdict, namedtuple\nfrom itertools import islice\nfrom operator import attrgetter\nfrom threading import RLock\n\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n # noinspection PyUnresolvedReferences\n from functools import lru_cache\n # noinspection PyUnresolvedReferences\n from tokenize import detect_encoding\n from itertools import zip_longest\n # noinspection PyUnresolvedReferences,PyCompatibility\n from pathlib import Path\n\n cache = lru_cache(maxsize=None)\n text_type = str\nelse:\n from lib2to3.pgen2.tokenize import detect_encoding, cookie_re as encoding_pattern\n from itertools import izip_longest as zip_longest\n\n\n class Path(object):\n pass\n\n\n def cache(func):\n d = {}\n\n @functools.wraps(func)\n def wrapper(*args):\n if args in d:\n return d[args]\n result = d[args] = func(*args)\n return result\n\n return wrapper\n\n\n # noinspection PyUnresolvedReferences\n text_type = unicode\ntry:\n # noinspection PyUnresolvedReferences\n _get_instructions = dis.get_instructions\nexcept AttributeError:\n class Instruction(namedtuple(\'Instruction\', \'offset argval opname starts_line\')):\n lineno = None\n\n\n from dis import HAVE_ARGUMENT, EXTENDED_ARG, hasconst, opname, findlinestarts\n\n # Based on 
dis.disassemble from 2.7\n # Left as similar as possible for easy diff\n\n def _get_instructions(co):\n code = co.co_code\n linestarts = dict(findlinestarts(co))\n n = len(code)\n i = 0\n extended_arg = 0\n while i < n:\n offset = i\n c = code[i]\n op = ord(c)\n lineno = linestarts.get(i)\n argval = None\n i = i + 1\n if op >= HAVE_ARGUMENT:\n oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg\n extended_arg = 0\n i = i + 2\n if op == EXTENDED_ARG:\n extended_arg = oparg * 65536\n\n if op in hasconst:\n argval = co.co_consts[oparg]\n yield Instruction(offset, argval, opname[op], lineno)\n\n\ndef assert_(condition, message=""):\n """\n Like an assert statement, but unaffected by -O\n :param condition: value that is expected to be truthy\n :type message: Any\n """\n if not condition:\n raise AssertionError(str(message))\n\n\ndef get_instructions(co):\n lineno = None\n for inst in _get_instructions(co):\n lineno = inst.starts_line or lineno\n assert_(lineno)\n inst.lineno = lineno\n yield inst\n\n\nTESTING = 0\n\n\nclass NotOneValueFound(Exception):\n pass\n\n\ndef only(it):\n if hasattr(it, \'__len__\'):\n if len(it) != 1:\n raise NotOneValueFound(\'Expected one value, found %s\' % len(it))\n # noinspection PyTypeChecker\n return list(it)[0]\n\n lst = tuple(islice(it, 2))\n if len(lst) == 0:\n raise NotOneValueFound(\'Expected one value, found 0\')\n if len(lst) > 1:\n raise NotOneValueFound(\'Expected one value, found several\')\n return lst[0]\n\n\nclass Source(object):\n """\n The source code of a single file and associated metadata.\n\n The main method of interest is the classmethod `executing(frame)`.\n\n If you want an instance of this class, don\'t construct it.\n Ideally use the classmethod `for_frame(frame)`.\n If you don\'t have a frame, use `for_filename(filename [, module_globals])`.\n These methods cache instances by filename, so at most one instance exists per filename.\n\n Attributes:\n - filename\n - text\n - lines\n - tree: AST parsed from text, or None if text is not valid Python\n All nodes in the tree have an extra `parent` attribute\n\n Other methods of interest:\n - statements_at_line\n - asttokens\n - code_qualname\n """\n\n def __init__(self, filename, lines):\n """\n Don\'t call this constructor, see the class docstring.\n """\n\n self.filename = filename\n text = \'\'.join(lines)\n\n if not isinstance(text, text_type):\n encoding = self.detect_encoding(text)\n # noinspection PyUnresolvedReferences\n text = text.decode(encoding)\n lines = [line.decode(encoding) for line in lines]\n\n self.text = text\n self.lines = [line.rstrip(\'\\r\\n\') for line in lines]\n\n if PY3:\n ast_text = text\n else:\n # In python 2 it\'s a syntax error to parse unicode\n # with an encoding declaration, so we remove it but\n # leave empty lines in its place to keep line numbers the same\n ast_text = \'\'.join([\n \'\\n\' if i < 2 and encoding_pattern.match(line)\n else line\n for i, line in enumerate(lines)\n ])\n\n self._nodes_by_line = defaultdict(list)\n self.tree = None\n self._qualnames = {}\n\n try:\n self.tree = ast.parse(ast_text, filename=filename)\n except SyntaxError:\n pass\n else:\n for node in ast.walk(self.tree):\n for child in ast.iter_child_nodes(node):\n child.parent = node\n if hasattr(node, \'lineno\'):\n self._nodes_by_line[node.lineno].append(node)\n\n visitor = QualnameVisitor()\n visitor.visit(self.tree)\n self._qualnames = visitor.qualnames\n\n @classmethod\n def for_frame(cls, frame, use_cache=True):\n """\n Returns the `Source` object corresponding to 
the file the frame is executing in.\n """\n return cls.for_filename(frame.f_code.co_filename, frame.f_globals or {}, use_cache)\n\n @classmethod\n def for_filename(cls, filename, module_globals=None, use_cache=True):\n if isinstance(filename, Path):\n filename = str(filename)\n\n source_cache = cls._class_local(\'__source_cache\', {})\n if use_cache:\n try:\n return source_cache[filename]\n except KeyError:\n pass\n\n if not use_cache:\n linecache.checkcache(filename)\n\n lines = tuple(linecache.getlines(filename, module_globals))\n result = source_cache[filename] = cls._for_filename_and_lines(filename, lines)\n return result\n\n @classmethod\n def _for_filename_and_lines(cls, filename, lines):\n source_cache = cls._class_local(\'__source_cache_with_lines\', {})\n try:\n return source_cache[(filename, lines)]\n except KeyError:\n pass\n\n result = source_cache[(filename, lines)] = cls(filename, lines)\n return result\n\n @classmethod\n def lazycache(cls, frame):\n if hasattr(linecache, \'lazycache\'):\n linecache.lazycache(frame.f_code.co_filename, frame.f_globals)\n\n @classmethod\n def executing(cls, frame_or_tb):\n """\n Returns an `Executing` object representing the operation\n currently executing in the given frame or traceback object.\n """\n if isinstance(frame_or_tb, types.TracebackType):\n # https://docs.python.org/3/reference/datamodel.html#traceback-objects\n # "tb_lineno gives the line number where the exception occurred;\n # tb_lasti indicates the precise instruction.\n # The line number and last instruction in the traceback may differ\n # from the line number of its frame object\n # if the exception occurred in a try statement with no matching except clause\n # or with a finally clause."\n tb = frame_or_tb\n frame = tb.tb_frame\n lineno = tb.tb_lineno\n lasti = tb.tb_lasti\n else:\n frame = frame_or_tb\n lineno = frame.f_lineno\n lasti = frame.f_lasti\n\n code = frame.f_code\n key = (code, id(code), lasti)\n executing_cache = cls._class_local(\'__executing_cache\', {})\n\n try:\n args = executing_cache[key]\n except KeyError:\n def find(source, retry_cache):\n node = stmts = None\n tree = source.tree\n if tree:\n try:\n stmts = source.statements_at_line(lineno)\n if stmts:\n if code.co_filename.startswith(\'\':\n tree = _extract_ipython_statement(stmts, tree)\n node = NodeFinder(frame, stmts, tree, lasti).result\n except Exception as e:\n # These exceptions can be caused by the source code having changed\n # so the cached Source doesn\'t match the running code\n # (e.g. 
when using IPython %autoreload)\n # Try again with a fresh Source object\n if retry_cache and isinstance(e, (NotOneValueFound, AssertionError)):\n return find(\n source=cls.for_frame(frame, use_cache=False),\n retry_cache=False,\n )\n if TESTING:\n raise\n\n if node:\n new_stmts = {statement_containing_node(node)}\n assert_(new_stmts <= stmts)\n stmts = new_stmts\n\n return source, node, stmts\n\n args = find(source=cls.for_frame(frame), retry_cache=True)\n executing_cache[key] = args\n\n return Executing(frame, *args)\n\n @classmethod\n def _class_local(cls, name, default):\n """\n Returns an attribute directly associated with this class\n (as opposed to subclasses), setting default if necessary\n """\n # classes have a mappingproxy preventing us from using setdefault\n result = cls.__dict__.get(name, default)\n setattr(cls, name, result)\n return result\n\n @cache\n def statements_at_line(self, lineno):\n """\n Returns the statement nodes overlapping the given line.\n\n Returns at most one statement unless semicolons are present.\n\n If the `text` attribute is not valid python, meaning\n `tree` is None, returns an empty set.\n\n Otherwise, `Source.for_frame(frame).statements_at_line(frame.f_lineno)`\n should return at least one statement.\n """\n\n return {\n statement_containing_node(node)\n for node in\n self._nodes_by_line[lineno]\n }\n\n @cache\n def asttokens(self):\n """\n Returns an ASTTokens object for getting the source of specific AST nodes.\n\n See http://asttokens.readthedocs.io/en/latest/api-index.html\n """\n from asttokens import ASTTokens # must be installed separately\n return ASTTokens(\n self.text,\n tree=self.tree,\n filename=self.filename,\n )\n\n @staticmethod\n def decode_source(source):\n if isinstance(source, bytes):\n encoding = Source.detect_encoding(source)\n source = source.decode(encoding)\n return source\n\n @staticmethod\n def detect_encoding(source):\n return detect_encoding(io.BytesIO(source).readline)[0]\n\n def code_qualname(self, code):\n """\n Imitates the __qualname__ attribute of functions for code objects.\n Given:\n\n - A function `func`\n - A frame `frame` for an execution of `func`, meaning:\n `frame.f_code is func.__code__`\n\n `Source.for_frame(frame).code_qualname(frame.f_code)`\n will be equal to `func.__qualname__`*. 
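# --- Editor's note: a hedged sketch of code_qualname() in use; assumes the
# executing package and that the snippet is run from a saved .py file (the
# source must be on disk for Source.for_frame to find it).
import inspect
from executing import Source

class Outer:
    def method(self):
        frame = inspect.currentframe()
        return Source.for_frame(frame).code_qualname(frame.f_code)

print(Outer().method())   # -> Outer.method  (== Outer.method.__qualname__)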
Works for Python 2 as well,\n where of course no `__qualname__` attribute exists.\n\n Falls back to `code.co_name` if there is no appropriate qualname.\n\n Based on https://github.com/wbolster/qualname\n\n (* unless `func` is a lambda\n nested inside another lambda on the same line, in which case\n the outer lambda\'s qualname will be returned for the codes\n of both lambdas)\n """\n assert_(code.co_filename == self.filename)\n return self._qualnames.get((code.co_name, code.co_firstlineno), code.co_name)\n\n\nclass Executing(object):\n """\n Information about the operation a frame is currently executing.\n\n Generally you will just want `node`, which is the AST node being executed,\n or None if it\'s unknown.\n """\n\n def __init__(self, frame, source, node, stmts):\n self.frame = frame\n self.source = source\n self.node = node\n self.statements = stmts\n\n def code_qualname(self):\n return self.source.code_qualname(self.frame.f_code)\n\n def text(self):\n return self.source.asttokens().get_text(self.node)\n\n def text_range(self):\n return self.source.asttokens().get_text_range(self.node)\n\n\nclass QualnameVisitor(ast.NodeVisitor):\n def __init__(self):\n super(QualnameVisitor, self).__init__()\n self.stack = []\n self.qualnames = {}\n\n def add_qualname(self, node, name=None):\n name = name or node.name\n self.stack.append(name)\n if getattr(node, \'decorator_list\', ()):\n lineno = node.decorator_list[0].lineno\n else:\n lineno = node.lineno\n self.qualnames.setdefault((name, lineno), ".".join(self.stack))\n\n def visit_FunctionDef(self, node, name=None):\n self.add_qualname(node, name)\n self.stack.append(\'\')\n if isinstance(node, ast.Lambda):\n children = [node.body]\n else:\n children = node.body\n for child in children:\n self.visit(child)\n self.stack.pop()\n self.stack.pop()\n\n # Find lambdas in the function definition outside the body,\n # e.g. 
decorators or default arguments\n # Based on iter_child_nodes\n for field, child in ast.iter_fields(node):\n if field == \'body\':\n continue\n if isinstance(child, ast.AST):\n self.visit(child)\n elif isinstance(child, list):\n for grandchild in child:\n if isinstance(grandchild, ast.AST):\n self.visit(grandchild)\n\n visit_AsyncFunctionDef = visit_FunctionDef\n\n def visit_Lambda(self, node):\n # noinspection PyTypeChecker\n self.visit_FunctionDef(node, \'\')\n\n def visit_ClassDef(self, node):\n self.add_qualname(node)\n self.generic_visit(node)\n self.stack.pop()\n\n\nfuture_flags = sum(\n getattr(__future__, fname).compiler_flag\n for fname in __future__.all_feature_names\n)\n\n\ndef compile_similar_to(source, matching_code):\n return compile(\n source,\n matching_code.co_filename,\n \'exec\',\n flags=future_flags & matching_code.co_flags,\n dont_inherit=True,\n )\n\n\nsentinel = \'io8urthglkjdghvljusketgIYRFYUVGHFRTBGVHKGF78678957647698\'\n\n\nclass NodeFinder(object):\n def __init__(self, frame, stmts, tree, lasti):\n self.frame = frame\n self.tree = tree\n self.code = code = frame.f_code\n self.is_pytest = any(\n \'pytest\' in name.lower()\n for group in [code.co_names, code.co_varnames]\n for name in group\n )\n\n if self.is_pytest:\n self.ignore_linenos = frozenset(assert_linenos(tree))\n else:\n self.ignore_linenos = frozenset()\n\n instruction = self.get_actual_current_instruction(lasti)\n op_name = instruction.opname\n self.lasti = instruction.offset\n\n if op_name.startswith(\'CALL_\'):\n typ = ast.Call\n elif op_name.startswith((\'BINARY_SUBSCR\', \'SLICE+\')):\n typ = ast.Subscript\n elif op_name.startswith(\'BINARY_\'):\n typ = ast.BinOp\n elif op_name.startswith(\'UNARY_\'):\n typ = ast.UnaryOp\n elif op_name in (\'LOAD_ATTR\', \'LOAD_METHOD\', \'LOOKUP_METHOD\'):\n typ = ast.Attribute\n elif op_name in (\'COMPARE_OP\', \'IS_OP\', \'CONTAINS_OP\'):\n typ = ast.Compare\n else:\n raise RuntimeError(op_name)\n\n with lock:\n exprs = {\n node\n for stmt in stmts\n for node in ast.walk(stmt)\n if isinstance(node, typ)\n if not (hasattr(node, "ctx") and not isinstance(node.ctx, ast.Load))\n }\n\n self.result = only(list(self.matching_nodes(exprs)))\n\n def clean_instructions(self, code):\n return [\n inst\n for inst in get_instructions(code)\n if inst.opname != \'EXTENDED_ARG\'\n if inst.lineno not in self.ignore_linenos\n ]\n\n def get_original_clean_instructions(self):\n result = self.clean_instructions(self.code)\n\n # pypy sometimes (when is not clear)\n # inserts JUMP_IF_NOT_DEBUG instructions in bytecode\n # If they\'re not present in our compiled instructions,\n # ignore them in the original bytecode\n if not any(\n inst.opname == "JUMP_IF_NOT_DEBUG"\n for inst in self.compile_instructions()\n ):\n result = [\n inst for inst in result\n if inst.opname != "JUMP_IF_NOT_DEBUG"\n ]\n\n return result\n\n def matching_nodes(self, exprs):\n original_instructions = self.get_original_clean_instructions()\n original_index = only(\n i\n for i, inst in enumerate(original_instructions)\n if inst.offset == self.lasti\n )\n for i, expr in enumerate(exprs):\n setter = get_setter(expr)\n # noinspection PyArgumentList\n replacement = ast.BinOp(\n left=expr,\n op=ast.Pow(),\n right=ast.Str(s=sentinel),\n )\n ast.fix_missing_locations(replacement)\n setter(replacement)\n try:\n instructions = self.compile_instructions()\n finally:\n setter(expr)\n indices = [\n i\n for i, instruction in enumerate(instructions)\n if instruction.argval == sentinel\n ]\n\n # There can be several indices when 
the bytecode is duplicated,\n # as happens in a finally block in 3.9+\n # First we remove the opcodes caused by our modifications\n for index_num, sentinel_index in enumerate(indices):\n # Adjustment for removing sentinel instructions below\n # in past iterations\n sentinel_index -= index_num * 2\n\n assert_(instructions.pop(sentinel_index).opname == \'LOAD_CONST\')\n assert_(instructions.pop(sentinel_index).opname == \'BINARY_POWER\')\n\n # Then we see if any of the instruction indices match\n for index_num, sentinel_index in enumerate(indices):\n sentinel_index -= index_num * 2\n new_index = sentinel_index - 1\n\n if new_index != original_index:\n continue\n\n original_inst = original_instructions[original_index]\n new_inst = instructions[new_index]\n\n # In Python 3.9+, changing \'not x in y\' to \'not sentinel_transformation(x in y)\'\n # changes a CONTAINS_OP(invert=1) to CONTAINS_OP(invert=0),,UNARY_NOT\n if (\n original_inst.opname == new_inst.opname in (\'CONTAINS_OP\', \'IS_OP\')\n and original_inst.arg != new_inst.arg\n and (\n original_instructions[original_index + 1].opname\n != instructions[new_index + 1].opname == \'UNARY_NOT\'\n )):\n # Remove the difference for the upcoming assert\n instructions.pop(new_index + 1)\n\n # Check that the modified instructions don\'t have anything unexpected\n for inst1, inst2 in zip_longest(original_instructions, instructions):\n assert_(\n inst1.opname == inst2.opname or\n all(\n \'JUMP_IF_\' in inst.opname\n for inst in [inst1, inst2]\n ) or\n all(\n inst.opname in (\'JUMP_FORWARD\', \'JUMP_ABSOLUTE\')\n for inst in [inst1, inst2]\n )\n or (\n inst1.opname == \'PRINT_EXPR\' and\n inst2.opname == \'POP_TOP\'\n )\n or (\n inst1.opname in (\'LOAD_METHOD\', \'LOOKUP_METHOD\') and\n inst2.opname == \'LOAD_ATTR\'\n )\n or (\n inst1.opname == \'CALL_METHOD\' and\n inst2.opname == \'CALL_FUNCTION\'\n ),\n (inst1, inst2, ast.dump(expr), expr.lineno, self.code.co_filename)\n )\n\n yield expr\n\n def compile_instructions(self):\n module_code = compile_similar_to(self.tree, self.code)\n code = only(self.find_codes(module_code))\n return self.clean_instructions(code)\n\n def find_codes(self, root_code):\n checks = [\n attrgetter(\'co_firstlineno\'),\n attrgetter(\'co_name\'),\n attrgetter(\'co_freevars\'),\n attrgetter(\'co_cellvars\'),\n ]\n if not self.is_pytest:\n checks += [\n attrgetter(\'co_names\'),\n attrgetter(\'co_varnames\'),\n ]\n\n def matches(c):\n return all(\n f(c) == f(self.code)\n for f in checks\n )\n\n code_options = []\n if matches(root_code):\n code_options.append(root_code)\n\n def finder(code):\n for const in code.co_consts:\n if not inspect.iscode(const):\n continue\n\n if matches(const):\n code_options.append(const)\n finder(const)\n\n finder(root_code)\n return code_options\n\n def get_actual_current_instruction(self, lasti):\n """\n Get the instruction corresponding to the current\n frame offset, skipping EXTENDED_ARG instructions\n """\n # Don\'t use get_original_clean_instructions\n # because we need the actual instructions including\n # EXTENDED_ARG\n instructions = list(get_instructions(self.code))\n index = only(\n i\n for i, inst in enumerate(instructions)\n if inst.offset == lasti\n )\n\n while True:\n instruction = instructions[index]\n if instruction.opname != "EXTENDED_ARG":\n return instruction\n index += 1\n\n\ndef get_setter(node):\n parent = node.parent\n for name, field in ast.iter_fields(parent):\n if field is node:\n return lambda new_node: setattr(parent, name, new_node)\n elif isinstance(field, list):\n for 
i, item in enumerate(field):\n if item is node:\n def setter(new_node):\n field[i] = new_node\n\n return setter\n\n\nlock = RLock()\n\n\n@cache\ndef statement_containing_node(node):\n while not isinstance(node, ast.stmt):\n node = node.parent\n return node\n\n\ndef assert_linenos(tree):\n for node in ast.walk(tree):\n if (\n hasattr(node, \'parent\') and\n hasattr(node, \'lineno\') and\n isinstance(statement_containing_node(node), ast.Assert)\n ):\n yield node.lineno\n\n\ndef _extract_ipython_statement(stmts, tree):\n # IPython separates each statement in a cell to be executed separately\n # So NodeFinder should only compile one statement at a time or it\n # will find a code mismatch.\n stmt = list(stmts)[0]\n while not isinstance(stmt.parent, ast.Module):\n stmt = stmt.parent\n # use `ast.parse` instead of `ast.Module` for better portability\n # python3.8 changes the signature of `ast.Module`\n # Inspired by https://github.com/pallets/werkzeug/pull/1552/files\n tree = ast.parse("")\n tree.body = [stmt]\n ast.copy_location(tree, stmt)\n return tree\n') + __stickytape_write_module('executing/version.py', b"__version__ = '0.5.4'") + __stickytape_write_module('pygments/__init__.py', b'# -*- coding: utf-8 -*-\n"""\n Pygments\n ~~~~~~~~\n\n Pygments is a syntax highlighting package written in Python.\n\n It is a generic syntax highlighter for general use in all kinds of software\n such as forum systems, wikis or other applications that need to prettify\n source code. Highlights are:\n\n * a wide range of common languages and markup formats is supported\n * special attention is paid to details, increasing quality by a fair amount\n * support for new languages and formats are added easily\n * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image\n formats that PIL supports, and ANSI sequences\n * it is usable as a command-line tool and as a library\n * ... and it highlights even Brainfuck!\n\n The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.\n\n .. 
_Pygments master branch:\n https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\nimport sys\nfrom io import StringIO, BytesIO\n\n__version__ = \'2.7.1\'\n__docformat__ = \'restructuredtext\'\n\n__all__ = [\'lex\', \'format\', \'highlight\']\n\n\ndef lex(code, lexer):\n """\n Lex ``code`` with ``lexer`` and return an iterable of tokens.\n """\n try:\n return lexer.get_tokens(code)\n except TypeError as err:\n if (isinstance(err.args[0], str) and\n (\'unbound method get_tokens\' in err.args[0] or\n \'missing 1 required positional argument\' in err.args[0])):\n raise TypeError(\'lex() argument must be a lexer instance, \'\n \'not a class\')\n raise\n\n\ndef format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin\n """\n Format a tokenlist ``tokens`` with the formatter ``formatter``.\n\n If ``outfile`` is given and a valid file object (an object\n with a ``write`` method), the result will be written to it, otherwise\n it is returned as a string.\n """\n try:\n if not outfile:\n realoutfile = getattr(formatter, \'encoding\', None) and BytesIO() or StringIO()\n formatter.format(tokens, realoutfile)\n return realoutfile.getvalue()\n else:\n formatter.format(tokens, outfile)\n except TypeError as err:\n if (isinstance(err.args[0], str) and\n (\'unbound method format\' in err.args[0] or\n \'missing 1 required positional argument\' in err.args[0])):\n raise TypeError(\'format() argument must be a formatter instance, \'\n \'not a class\')\n raise\n\n\ndef highlight(code, lexer, formatter, outfile=None):\n """\n Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.\n\n If ``outfile`` is given and a valid file object (an object\n with a ``write`` method), the result will be written to it, otherwise\n it is returned as a string.\n """\n return format(lex(code, lexer), formatter, outfile)\n\n') + __stickytape_write_module('pygments/formatters/__init__.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.formatters\n ~~~~~~~~~~~~~~~~~~~\n\n Pygments formatters.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nimport re\nimport sys\nimport types\nimport fnmatch\nfrom os.path import basename\n\nfrom pygments.formatters._mapping import FORMATTERS\nfrom pygments.plugin import find_plugin_formatters\nfrom pygments.util import ClassNotFound\n\n__all__ = [\'get_formatter_by_name\', \'get_formatter_for_filename\',\n \'get_all_formatters\', \'load_formatter_from_file\'] + list(FORMATTERS)\n\n_formatter_cache = {} # classes by name\n_pattern_cache = {}\n\n\ndef _fn_matches(fn, glob):\n """Return whether the supplied file name fn matches pattern filename."""\n if glob not in _pattern_cache:\n pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))\n return pattern.match(fn)\n return _pattern_cache[glob].match(fn)\n\n\ndef _load_formatters(module_name):\n """Load a formatter (and all others in the module too)."""\n mod = __import__(module_name, None, None, [\'__all__\'])\n for formatter_name in mod.__all__:\n cls = getattr(mod, formatter_name)\n _formatter_cache[cls.name] = cls\n\n\ndef get_all_formatters():\n """Return a generator for all formatter classes."""\n # NB: this returns formatter classes, not info like get_all_lexers().\n for info in FORMATTERS.values():\n if info[1] not in _formatter_cache:\n _load_formatters(info[0])\n yield 
_formatter_cache[info[1]]\n for _, formatter in find_plugin_formatters():\n yield formatter\n\n\ndef find_formatter_class(alias):\n """Lookup a formatter by alias.\n\n Returns None if not found.\n """\n for module_name, name, aliases, _, _ in FORMATTERS.values():\n if alias in aliases:\n if name not in _formatter_cache:\n _load_formatters(module_name)\n return _formatter_cache[name]\n for _, cls in find_plugin_formatters():\n if alias in cls.aliases:\n return cls\n\n\ndef get_formatter_by_name(_alias, **options):\n """Lookup and instantiate a formatter by alias.\n\n Raises ClassNotFound if not found.\n """\n cls = find_formatter_class(_alias)\n if cls is None:\n raise ClassNotFound("no formatter found for name %r" % _alias)\n return cls(**options)\n\n\ndef load_formatter_from_file(filename, formattername="CustomFormatter",\n **options):\n """Load a formatter from a file.\n\n This method expects a file located relative to the current working\n directory, which contains a class named CustomFormatter. By default,\n it expects the Formatter to be named CustomFormatter; you can specify\n your own class name as the second argument to this function.\n\n Users should be very careful with the input, because this method\n is equivalent to running eval on the input file.\n\n Raises ClassNotFound if there are any problems importing the Formatter.\n\n .. versionadded:: 2.2\n """\n try:\n # This empty dict will contain the namespace for the exec\'d file\n custom_namespace = {}\n with open(filename, \'rb\') as f:\n exec(f.read(), custom_namespace)\n # Retrieve the class `formattername` from that namespace\n if formattername not in custom_namespace:\n raise ClassNotFound(\'no valid %s class found in %s\' %\n (formattername, filename))\n formatter_class = custom_namespace[formattername]\n # And finally instantiate it with the options\n return formatter_class(**options)\n except IOError as err:\n raise ClassNotFound(\'cannot read %s: %s\' % (filename, err))\n except ClassNotFound:\n raise\n except Exception as err:\n raise ClassNotFound(\'error when loading custom formatter: %s\' % err)\n\n\ndef get_formatter_for_filename(fn, **options):\n """Lookup and instantiate a formatter by filename pattern.\n\n Raises ClassNotFound if not found.\n """\n fn = basename(fn)\n for modname, name, _, filenames, _ in FORMATTERS.values():\n for filename in filenames:\n if _fn_matches(fn, filename):\n if name not in _formatter_cache:\n _load_formatters(modname)\n return _formatter_cache[name](**options)\n for cls in find_plugin_formatters():\n for filename in cls.filenames:\n if _fn_matches(fn, filename):\n return cls(**options)\n raise ClassNotFound("no formatter found for file name %r" % fn)\n\n\nclass _automodule(types.ModuleType):\n """Automatically import formatters."""\n\n def __getattr__(self, name):\n info = FORMATTERS.get(name)\n if info:\n _load_formatters(info[0])\n cls = _formatter_cache[info[1]]\n setattr(self, name, cls)\n return cls\n raise AttributeError(name)\n\n\noldmod = sys.modules[__name__]\nnewmod = _automodule(__name__)\nnewmod.__dict__.update(oldmod.__dict__)\nsys.modules[__name__] = newmod\ndel newmod.newmod, newmod.oldmod, newmod.sys, newmod.types\n') + __stickytape_write_module('pygments/formatters/_mapping.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.formatters._mapping\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Formatter mapping definitions. This file is generated by itself. 
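# Editor's sketch (hedged, not part of the bundled source): the usual way the
# formatter lookup machinery above gets exercised. package.sh only bundles the
# terminal256 formatter and the Python lexer, so the sketch sticks to those;
# everything here is standard pygments API.
from pygments import highlight
from pygments.lexers.python import PythonLexer
from pygments.formatters import get_formatter_by_name

formatter = get_formatter_by_name('terminal256')  # resolved via FORMATTERS + _load_formatters
print(highlight("x = {'a': 1}", PythonLexer(), formatter))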
Every time\n you change something on a builtin formatter definition, run this script from\n the formatters folder to update it.\n\n Do not alter the FORMATTERS dictionary by hand.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nFORMATTERS = {\n \'BBCodeFormatter\': (\'pygments.formatters.bbcode\', \'BBCode\', (\'bbcode\', \'bb\'), (), \'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.\'),\n \'BmpImageFormatter\': (\'pygments.formatters.img\', \'img_bmp\', (\'bmp\', \'bitmap\'), (\'*.bmp\',), \'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.\'),\n \'GifImageFormatter\': (\'pygments.formatters.img\', \'img_gif\', (\'gif\',), (\'*.gif\',), \'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.\'),\n \'HtmlFormatter\': (\'pygments.formatters.html\', \'HTML\', (\'html\',), (\'*.html\', \'*.htm\'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``\'s CSS class can be set by the `cssclass` option."),\n \'IRCFormatter\': (\'pygments.formatters.irc\', \'IRC\', (\'irc\', \'IRC\'), (), \'Format tokens with IRC color sequences\'),\n \'ImageFormatter\': (\'pygments.formatters.img\', \'img\', (\'img\', \'IMG\', \'png\'), (\'*.png\',), \'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.\'),\n \'JpgImageFormatter\': (\'pygments.formatters.img\', \'img_jpg\', (\'jpg\', \'jpeg\'), (\'*.jpg\',), \'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.\'),\n \'LatexFormatter\': (\'pygments.formatters.latex\', \'LaTeX\', (\'latex\', \'tex\'), (\'*.tex\',), \'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.\'),\n \'NullFormatter\': (\'pygments.formatters.other\', \'Text only\', (\'text\', \'null\'), (\'*.txt\',), \'Output the text unchanged without any formatting.\'),\n \'RawTokenFormatter\': (\'pygments.formatters.other\', \'Raw tokens\', (\'raw\', \'tokens\'), (\'*.raw\',), \'Format tokens as a raw representation for storing token streams.\'),\n \'RtfFormatter\': (\'pygments.formatters.rtf\', \'RTF\', (\'rtf\',), (\'*.rtf\',), \'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.\'),\n \'SvgFormatter\': (\'pygments.formatters.svg\', \'SVG\', (\'svg\',), (\'*.svg\',), \'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.\'),\n \'Terminal256Formatter\': (\'pygments.formatters.terminal256\', \'Terminal256\', (\'terminal256\', \'console256\', \'256\'), (), \'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.\'),\n \'TerminalFormatter\': (\'pygments.formatters.terminal\', \'Terminal\', (\'terminal\', \'console\'), (), \'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.\'),\n \'TerminalTrueColorFormatter\': (\'pygments.formatters.terminal256\', \'TerminalTrueColor\', (\'terminal16m\', \'console16m\', \'16m\'), (), \'Format tokens with ANSI color sequences, for output in a true-color terminal or console.
Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.\'),\n \'TestcaseFormatter\': (\'pygments.formatters.other\', \'Testcase\', (\'testcase\',), (), \'Format tokens as appropriate for a new testcase.\')\n}\n\nif __name__ == \'__main__\': # pragma: no cover\n import sys\n import os\n\n # lookup formatters\n found_formatters = []\n imports = []\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), \'..\', \'..\'))\n from pygments.util import docstring_headline\n\n for root, dirs, files in os.walk(\'.\'):\n for filename in files:\n if filename.endswith(\'.py\') and not filename.startswith(\'_\'):\n module_name = \'pygments.formatters%s.%s\' % (\n root[1:].replace(\'/\', \'.\'), filename[:-3])\n print(module_name)\n module = __import__(module_name, None, None, [\'\'])\n for formatter_name in module.__all__:\n formatter = getattr(module, formatter_name)\n found_formatters.append(\n \'%r: %r\' % (formatter_name,\n (module_name,\n formatter.name,\n tuple(formatter.aliases),\n tuple(formatter.filenames),\n docstring_headline(formatter))))\n # sort them to make the diff minimal\n found_formatters.sort()\n\n # extract useful sourcecode from this file\n with open(__file__) as fp:\n content = fp.read()\n # replace crnl to nl for Windows.\n #\n # Note that, originally, contributors should keep nl of master\n # repository, for example by using some kind of automatic\n # management EOL, like `EolExtension\n # <https://www.mercurial-scm.org/wiki/EolExtension>`.\n content = content.replace("\\r\\n", "\\n")\n header = content[:content.find(\'FORMATTERS = {\')]\n footer = content[content.find("if __name__ == \'__main__\':"):]\n\n # write new file\n with open(__file__, \'w\') as fp:\n fp.write(header)\n fp.write(\'FORMATTERS = {\\n %s\\n}\\n\\n\' % \',\\n \'.join(found_formatters))\n fp.write(footer)\n\n print (\'=== %d formatters processed.\' % len(found_formatters))\n') + __stickytape_write_module('pygments/util.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.util\n ~~~~~~~~~~~~~\n\n Utility functions.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nimport re\nimport sys\nfrom io import TextIOWrapper\n\n\nsplit_path_re = re.compile(r\'[/\\\\ ]\')\ndoctype_lookup_re = re.compile(r\'\'\'\n (<\\?.*?\\?>)?\\s*\n <!DOCTYPE\\s+(\n [a-zA-Z_][a-zA-Z0-9]*\n (?: \\s+ # optional in HTML5\n [a-zA-Z_][a-zA-Z0-9]*\\s+\n "[^"]*")?\n )\n [^>]*>\n\'\'\', re.DOTALL | re.MULTILINE | re.VERBOSE)\ntag_re = re.compile(r\'<(.+?)(\\s.*?)?>.*?</.+?>\',\n re.UNICODE | re.IGNORECASE | re.DOTALL | re.MULTILINE)\nxml_decl_re = re.compile(r\'\\s*<\\?xml[^>]*\\?>\', re.I)\n\n\nclass ClassNotFound(ValueError):\n """Raised if one of the lookup functions didn\'t find a matching class."""\n\n\nclass OptionError(Exception):\n pass\n\n\ndef get_choice_opt(options, optname, allowed, default=None, normcase=False):\n string = options.get(optname, default)\n if normcase:\n string = string.lower()\n if string not in allowed:\n raise OptionError(\'Value for option %s must be one of %s\' %\n (optname, \', \'.join(map(str, allowed))))\n return string\n\n\ndef get_bool_opt(options, optname, default=None):\n string = options.get(optname, default)\n if isinstance(string, bool):\n return string\n elif isinstance(string, int):\n return bool(string)\n elif not isinstance(string, str):\n raise OptionError(\'Invalid type %r for option %s; use \'\n \'1/0, yes/no, true/false, on/off\' % (\n string, optname))\n elif string.lower() in (\'1\', \'yes\', \'true\', \'on\'):\n return True\n elif string.lower() in (\'0\', \'no\', \'false\', \'off\'):\n return False\n else:\n raise
OptionError(\'Invalid value %r for option %s; use \'\n \'1/0, yes/no, true/false, on/off\' % (\n string, optname))\n\n\ndef get_int_opt(options, optname, default=None):\n string = options.get(optname, default)\n try:\n return int(string)\n except TypeError:\n raise OptionError(\'Invalid type %r for option %s; you \'\n \'must give an integer value\' % (\n string, optname))\n except ValueError:\n raise OptionError(\'Invalid value %r for option %s; you \'\n \'must give an integer value\' % (\n string, optname))\n\n\ndef get_list_opt(options, optname, default=None):\n val = options.get(optname, default)\n if isinstance(val, str):\n return val.split()\n elif isinstance(val, (list, tuple)):\n return list(val)\n else:\n raise OptionError(\'Invalid type %r for option %s; you \'\n \'must give a list value\' % (\n val, optname))\n\n\ndef docstring_headline(obj):\n if not obj.__doc__:\n return \'\'\n res = []\n for line in obj.__doc__.strip().splitlines():\n if line.strip():\n res.append(" " + line.strip())\n else:\n break\n return \'\'.join(res).lstrip()\n\n\ndef make_analysator(f):\n """Return a static text analyser function that returns float values."""\n def text_analyse(text):\n try:\n rv = f(text)\n except Exception:\n return 0.0\n if not rv:\n return 0.0\n try:\n return min(1.0, max(0.0, float(rv)))\n except (ValueError, TypeError):\n return 0.0\n text_analyse.__doc__ = f.__doc__\n return staticmethod(text_analyse)\n\n\ndef shebang_matches(text, regex):\n r"""Check if the given regular expression matches the last part of the\n shebang if one exists.\n\n >>> from pygments.util import shebang_matches\n >>> shebang_matches(\'#!/usr/bin/env python\', r\'python(2\\.\\d)?\')\n True\n >>> shebang_matches(\'#!/usr/bin/python2.4\', r\'python(2\\.\\d)?\')\n True\n >>> shebang_matches(\'#!/usr/bin/python-ruby\', r\'python(2\\.\\d)?\')\n False\n >>> shebang_matches(\'#!/usr/bin/python/ruby\', r\'python(2\\.\\d)?\')\n False\n >>> shebang_matches(\'#!/usr/bin/startsomethingwith python\',\n ... 
r\'python(2\\.\\d)?\')\n True\n\n It also checks for common windows executable file extensions::\n\n >>> shebang_matches(\'#!C:\\\\Python2.4\\\\Python.exe\', r\'python(2\\.\\d)?\')\n True\n\n Parameters (``\'-f\'`` or ``\'--foo\'`` are ignored so ``\'perl\'`` does\n the same as ``\'perl -e\'``)\n\n Note that this method automatically searches the whole string (eg:\n the regular expression is wrapped in ``\'^$\'``)\n """\n index = text.find(\'\\n\')\n if index >= 0:\n first_line = text[:index].lower()\n else:\n first_line = text.lower()\n if first_line.startswith(\'#!\'):\n try:\n found = [x for x in split_path_re.split(first_line[2:].strip())\n if x and not x.startswith(\'-\')][-1]\n except IndexError:\n return False\n regex = re.compile(r\'^%s(\\.(exe|cmd|bat|bin))?$\' % regex, re.IGNORECASE)\n if regex.search(found) is not None:\n return True\n return False\n\n\ndef doctype_matches(text, regex):\n """Check if the doctype matches a regular expression (if present).\n\n Note that this method only checks the first part of a DOCTYPE.\n eg: \'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"\'\n """\n m = doctype_lookup_re.search(text)\n if m is None:\n return False\n doctype = m.group(2)\n return re.compile(regex, re.I).match(doctype.strip()) is not None\n\n\ndef html_doctype_matches(text):\n """Check if the file looks like it has a html doctype."""\n return doctype_matches(text, r\'html\')\n\n\n_looks_like_xml_cache = {}\n\n\ndef looks_like_xml(text):\n """Check if a doctype exists or if we have some tags."""\n if xml_decl_re.match(text):\n return True\n key = hash(text)\n try:\n return _looks_like_xml_cache[key]\n except KeyError:\n m = doctype_lookup_re.search(text)\n if m is not None:\n return True\n rv = tag_re.search(text[:1000]) is not None\n _looks_like_xml_cache[key] = rv\n return rv\n\n\ndef surrogatepair(c):\n """Given a unicode character code with length greater than 16 bits,\n return the two 16 bit surrogate pair.\n """\n # From example D28 of:\n # http://www.unicode.org/book/ch03.pdf\n return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))\n\n\ndef format_lines(var_name, seq, raw=False, indent_level=0):\n """Formats a sequence of strings for output."""\n lines = []\n base_indent = \' \' * indent_level * 4\n inner_indent = \' \' * (indent_level + 1) * 4\n lines.append(base_indent + var_name + \' = (\')\n if raw:\n # These should be preformatted reprs of, say, tuples.\n for i in seq:\n lines.append(inner_indent + i + \',\')\n else:\n for i in seq:\n # Force use of single quotes\n r = repr(i + \'"\')\n lines.append(inner_indent + r[:-2] + r[-1] + \',\')\n lines.append(base_indent + \')\')\n return \'\\n\'.join(lines)\n\n\ndef duplicates_removed(it, already_seen=()):\n """\n Returns a list with duplicates removed from the iterable `it`.\n\n Order is preserved.\n """\n lst = []\n seen = set()\n for i in it:\n if i in seen or i in already_seen:\n continue\n lst.append(i)\n seen.add(i)\n return lst\n\n\nclass Future:\n """Generic class to defer some work.\n\n Handled specially in RegexLexerMeta, to support regex string construction at\n first use.\n """\n def get(self):\n raise NotImplementedError\n\n\ndef guess_decode(text):\n """Decode *text* with guessed encoding.\n\n First try UTF-8; this should fail for non-UTF-8 encodings.\n Then try the preferred locale encoding.\n Fall back to latin-1, which always works.\n """\n try:\n text = text.decode(\'utf-8\')\n return text, \'utf-8\'\n except UnicodeDecodeError:\n try:\n import locale\n prefencoding = locale.getpreferredencoding()\n text = 
text.decode(prefencoding)\n return text, prefencoding\n except (UnicodeDecodeError, LookupError):\n text = text.decode(\'latin1\')\n return text, \'latin1\'\n\n\ndef guess_decode_from_terminal(text, term):\n """Decode *text* coming from terminal *term*.\n\n First try the terminal encoding, if given.\n Then try UTF-8. Then try the preferred locale encoding.\n Fall back to latin-1, which always works.\n """\n if getattr(term, \'encoding\', None):\n try:\n text = text.decode(term.encoding)\n except UnicodeDecodeError:\n pass\n else:\n return text, term.encoding\n return guess_decode(text)\n\n\ndef terminal_encoding(term):\n """Return our best guess of encoding for the given *term*."""\n if getattr(term, \'encoding\', None):\n return term.encoding\n import locale\n return locale.getpreferredencoding()\n\n\nclass UnclosingTextIOWrapper(TextIOWrapper):\n # Don\'t close underlying buffer on destruction.\n def close(self):\n self.flush()\n') + __stickytape_write_module('pygments/plugin.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.plugin\n ~~~~~~~~~~~~~~~\n\n Pygments setuptools plugin interface. The methods defined\n here also work if setuptools isn\'t installed but they just\n return nothing.\n\n lexer plugins::\n\n [pygments.lexers]\n yourlexer = yourmodule:YourLexer\n\n formatter plugins::\n\n [pygments.formatters]\n yourformatter = yourformatter:YourFormatter\n /.ext = yourformatter:YourFormatter\n\n As you can see, you can define extensions for the formatter\n with a leading slash.\n\n syntax plugins::\n\n [pygments.styles]\n yourstyle = yourstyle:YourStyle\n\n filter plugin::\n\n [pygments.filter]\n yourfilter = yourfilter:YourFilter\n\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\nLEXER_ENTRY_POINT = \'pygments.lexers\'\nFORMATTER_ENTRY_POINT = \'pygments.formatters\'\nSTYLE_ENTRY_POINT = \'pygments.styles\'\nFILTER_ENTRY_POINT = \'pygments.filters\'\n\n\ndef iter_entry_points(group_name):\n try:\n import pkg_resources\n except (ImportError, IOError):\n return []\n\n return pkg_resources.iter_entry_points(group_name)\n\n\ndef find_plugin_lexers():\n for entrypoint in iter_entry_points(LEXER_ENTRY_POINT):\n yield entrypoint.load()\n\n\ndef find_plugin_formatters():\n for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT):\n yield entrypoint.name, entrypoint.load()\n\n\ndef find_plugin_styles():\n for entrypoint in iter_entry_points(STYLE_ENTRY_POINT):\n yield entrypoint.name, entrypoint.load()\n\n\ndef find_plugin_filters():\n for entrypoint in iter_entry_points(FILTER_ENTRY_POINT):\n yield entrypoint.name, entrypoint.load()\n') + __stickytape_write_module('pkg_resources/__init__.py', b'# coding: utf-8\n"""\nPackage resource API\n--------------------\n\nA resource is a logical file contained within a package, or a logical\nsubdirectory thereof. The package resource API expects resource names\nto have their path parts separated with ``/``, *not* whatever the local\npath separator is. Do not use os.path operations to manipulate resource\nnames being passed into the API.\n\nThe package resource API is designed to work with normal filesystem packages,\n.egg files, and unpacked .egg files.
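# Editor's sketch (hedged, not part of the bundled source): the decode helpers
# above try UTF-8 first, then the locale's preferred encoding, then latin-1,
# which never fails.
from pygments.util import guess_decode

text, enc = guess_decode('caf\xe9'.encode('utf-8'))
assert (text, enc) == ('caf\xe9', 'utf-8')
text, enc = guess_decode(b'\xff\xfe caf\xe9')  # not valid UTF-8
assert enc != 'utf-8'  # fell through to a fallback encoding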
It can also work in a limited way with\n.zip files and with custom PEP 302 loaders that support the ``get_data()``\nmethod.\n"""\n\nfrom __future__ import absolute_import\n\nimport sys\nimport os\nimport io\nimport time\nimport re\nimport types\nimport zipfile\nimport zipimport\nimport warnings\nimport stat\nimport functools\nimport pkgutil\nimport operator\nimport platform\nimport collections\nimport plistlib\nimport email.parser\nimport errno\nimport tempfile\nimport textwrap\nimport itertools\nimport inspect\nimport ntpath\nimport posixpath\nfrom pkgutil import get_importer\n\ntry:\n import _imp\nexcept ImportError:\n # Python 3.2 compatibility\n import imp as _imp\n\ntry:\n FileExistsError\nexcept NameError:\n FileExistsError = OSError\n\nfrom pkg_resources.extern import six\nfrom pkg_resources.extern.six.moves import map, filter\n\n# capture these to bypass sandboxing\nfrom os import utime\ntry:\n from os import mkdir, rename, unlink\n WRITE_SUPPORT = True\nexcept ImportError:\n # no write support, probably under GAE\n WRITE_SUPPORT = False\n\nfrom os import open as os_open\nfrom os.path import isdir, split\n\ntry:\n import importlib.machinery as importlib_machinery\n # access attribute to force import under delayed import mechanisms.\n importlib_machinery.__name__\nexcept ImportError:\n importlib_machinery = None\n\nfrom . import py31compat\nfrom pkg_resources.extern import appdirs\nfrom pkg_resources.extern import packaging\n__import__(\'pkg_resources.extern.packaging.version\')\n__import__(\'pkg_resources.extern.packaging.specifiers\')\n__import__(\'pkg_resources.extern.packaging.requirements\')\n__import__(\'pkg_resources.extern.packaging.markers\')\n__import__(\'pkg_resources.py2_warn\')\n\n\n__metaclass__ = type\n\n\nif (3, 0) < sys.version_info < (3, 5):\n raise RuntimeError("Python 3.5 or later is required")\n\nif six.PY2:\n # Those builtin exceptions are only defined in Python 3\n PermissionError = None\n NotADirectoryError = None\n\n# declare some globals that will be defined later to\n# satisfy the linters.\nrequire = None\nworking_set = None\nadd_activation_listener = None\nresources_stream = None\ncleanup_resources = None\nresource_dir = None\nresource_stream = None\nset_extraction_path = None\nresource_isdir = None\nresource_string = None\niter_entry_points = None\nresource_listdir = None\nresource_filename = None\nresource_exists = None\n_distribution_finders = None\n_namespace_handlers = None\n_namespace_packages = None\n\n\nclass PEP440Warning(RuntimeWarning):\n """\n Used when there is an issue with a version or specifier not complying with\n PEP 440.\n """\n\n\ndef parse_version(v):\n try:\n return packaging.version.Version(v)\n except packaging.version.InvalidVersion:\n return packaging.version.LegacyVersion(v)\n\n\n_state_vars = {}\n\n\ndef _declare_state(vartype, **kw):\n globals().update(kw)\n _state_vars.update(dict.fromkeys(kw, vartype))\n\n\ndef __getstate__():\n state = {}\n g = globals()\n for k, v in _state_vars.items():\n state[k] = g[\'_sget_\' + v](g[k])\n return state\n\n\ndef __setstate__(state):\n g = globals()\n for k, v in state.items():\n g[\'_sset_\' + _state_vars[k]](k, g[k], v)\n return state\n\n\ndef _sget_dict(val):\n return val.copy()\n\n\ndef _sset_dict(key, ob, state):\n ob.clear()\n ob.update(state)\n\n\ndef _sget_object(val):\n return val.__getstate__()\n\n\ndef _sset_object(key, ob, state):\n ob.__setstate__(state)\n\n\n_sget_none = _sset_none = lambda *args: None\n\n\ndef get_supported_platform():\n """Return this platform\'s maximum 
compatible version.\n\n distutils.util.get_platform() normally reports the minimum version\n of macOS that would be required to *use* extensions produced by\n distutils. But what we want when checking compatibility is to know the\n version of macOS that we are *running*. To allow usage of packages that\n explicitly require a newer version of macOS, we must also know the\n current version of the OS.\n\n If this condition occurs for any other platform with a version in its\n platform strings, this function should be extended accordingly.\n """\n plat = get_build_platform()\n m = macosVersionString.match(plat)\n if m is not None and sys.platform == "darwin":\n try:\n plat = \'macosx-%s-%s\' % (\'.\'.join(_macos_vers()[:2]), m.group(3))\n except ValueError:\n # not macOS\n pass\n return plat\n\n\n__all__ = [\n # Basic resource access and distribution/entry point discovery\n \'require\', \'run_script\', \'get_provider\', \'get_distribution\',\n \'load_entry_point\', \'get_entry_map\', \'get_entry_info\',\n \'iter_entry_points\',\n \'resource_string\', \'resource_stream\', \'resource_filename\',\n \'resource_listdir\', \'resource_exists\', \'resource_isdir\',\n\n # Environmental control\n \'declare_namespace\', \'working_set\', \'add_activation_listener\',\n \'find_distributions\', \'set_extraction_path\', \'cleanup_resources\',\n \'get_default_cache\',\n\n # Primary implementation classes\n \'Environment\', \'WorkingSet\', \'ResourceManager\',\n \'Distribution\', \'Requirement\', \'EntryPoint\',\n\n # Exceptions\n \'ResolutionError\', \'VersionConflict\', \'DistributionNotFound\',\n \'UnknownExtra\', \'ExtractionError\',\n\n # Warnings\n \'PEP440Warning\',\n\n # Parsing functions and string utilities\n \'parse_requirements\', \'parse_version\', \'safe_name\', \'safe_version\',\n \'get_platform\', \'compatible_platforms\', \'yield_lines\', \'split_sections\',\n \'safe_extra\', \'to_filename\', \'invalid_marker\', \'evaluate_marker\',\n\n # filesystem utilities\n \'ensure_directory\', \'normalize_path\',\n\n # Distribution "precedence" constants\n \'EGG_DIST\', \'BINARY_DIST\', \'SOURCE_DIST\', \'CHECKOUT_DIST\', \'DEVELOP_DIST\',\n\n # "Provider" interfaces, implementations, and registration/lookup APIs\n \'IMetadataProvider\', \'IResourceProvider\', \'FileMetadata\',\n \'PathMetadata\', \'EggMetadata\', \'EmptyProvider\', \'empty_provider\',\n \'NullProvider\', \'EggProvider\', \'DefaultProvider\', \'ZipProvider\',\n \'register_finder\', \'register_namespace_handler\', \'register_loader_type\',\n \'fixup_namespace_packages\', \'get_importer\',\n\n # Warnings\n \'PkgResourcesDeprecationWarning\',\n\n # Deprecated/backward compatibility only\n \'run_main\', \'AvailableDistributions\',\n]\n\n\nclass ResolutionError(Exception):\n """Abstract base for dependency resolution errors"""\n\n def __repr__(self):\n return self.__class__.__name__ + repr(self.args)\n\n\nclass VersionConflict(ResolutionError):\n """\n An already-installed version conflicts with the requested version.\n\n Should be initialized with the installed Distribution and the requested\n Requirement.\n """\n\n _template = "{self.dist} is installed but {self.req} is required"\n\n @property\n def dist(self):\n return self.args[0]\n\n @property\n def req(self):\n return self.args[1]\n\n def report(self):\n return self._template.format(**locals())\n\n def with_context(self, required_by):\n """\n If required_by is non-empty, return a version of self that is a\n ContextualVersionConflict.\n """\n if not required_by:\n return self\n args = 
self.args + (required_by,)\n return ContextualVersionConflict(*args)\n\n\nclass ContextualVersionConflict(VersionConflict):\n """\n A VersionConflict that accepts a third parameter, the set of the\n requirements that required the installed Distribution.\n """\n\n _template = VersionConflict._template + \' by {self.required_by}\'\n\n @property\n def required_by(self):\n return self.args[2]\n\n\nclass DistributionNotFound(ResolutionError):\n """A requested distribution was not found"""\n\n _template = ("The \'{self.req}\' distribution was not found "\n "and is required by {self.requirers_str}")\n\n @property\n def req(self):\n return self.args[0]\n\n @property\n def requirers(self):\n return self.args[1]\n\n @property\n def requirers_str(self):\n if not self.requirers:\n return \'the application\'\n return \', \'.join(self.requirers)\n\n def report(self):\n return self._template.format(**locals())\n\n def __str__(self):\n return self.report()\n\n\nclass UnknownExtra(ResolutionError):\n """Distribution doesn\'t have an "extra feature" of the given name"""\n\n\n_provider_factories = {}\n\nPY_MAJOR = \'{}.{}\'.format(*sys.version_info)\nEGG_DIST = 3\nBINARY_DIST = 2\nSOURCE_DIST = 1\nCHECKOUT_DIST = 0\nDEVELOP_DIST = -1\n\n\ndef register_loader_type(loader_type, provider_factory):\n """Register `provider_factory` to make providers for `loader_type`\n\n `loader_type` is the type or class of a PEP 302 ``module.__loader__``,\n and `provider_factory` is a function that, passed a *module* object,\n returns an ``IResourceProvider`` for that module.\n """\n _provider_factories[loader_type] = provider_factory\n\n\ndef get_provider(moduleOrReq):\n """Return an IResourceProvider for the named module or requirement"""\n if isinstance(moduleOrReq, Requirement):\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\n try:\n module = sys.modules[moduleOrReq]\n except KeyError:\n __import__(moduleOrReq)\n module = sys.modules[moduleOrReq]\n loader = getattr(module, \'__loader__\', None)\n return _find_adapter(_provider_factories, loader)(module)\n\n\ndef _macos_vers(_cache=[]):\n if not _cache:\n version = platform.mac_ver()[0]\n # fallback for MacPorts\n if version == \'\':\n plist = \'/System/Library/CoreServices/SystemVersion.plist\'\n if os.path.exists(plist):\n if hasattr(plistlib, \'readPlist\'):\n plist_content = plistlib.readPlist(plist)\n if \'ProductVersion\' in plist_content:\n version = plist_content[\'ProductVersion\']\n\n _cache.append(version.split(\'.\'))\n return _cache[0]\n\n\ndef _macos_arch(machine):\n return {\'PowerPC\': \'ppc\', \'Power_Macintosh\': \'ppc\'}.get(machine, machine)\n\n\ndef get_build_platform():\n """Return this platform\'s string for platform-specific distributions\n\n XXX Currently this is the same as ``distutils.util.get_platform()``, but it\n needs some hacks for Linux and macOS.\n """\n from sysconfig import get_platform\n\n plat = get_platform()\n if sys.platform == "darwin" and not plat.startswith(\'macosx-\'):\n try:\n version = _macos_vers()\n machine = os.uname()[4].replace(" ", "_")\n return "macosx-%d.%d-%s" % (\n int(version[0]), int(version[1]),\n _macos_arch(machine),\n )\n except ValueError:\n # if someone is running a non-Mac darwin system, this will fall\n # through to the default implementation\n pass\n return plat\n\n\nmacosVersionString = re.compile(r"macosx-(\\d+)\\.(\\d+)-(.*)")\ndarwinVersionString = re.compile(r"darwin-(\\d+)\\.(\\d+)\\.(\\d+)-(.*)")\n# XXX backward compat\nget_platform = get_build_platform\n\n\ndef 
compatible_platforms(provided, required):\n """Can code for the `provided` platform run on the `required` platform?\n\n Returns true if either platform is ``None``, or the platforms are equal.\n\n XXX Needs compatibility checks for Linux and other unixy OSes.\n """\n if provided is None or required is None or provided == required:\n # easy case\n return True\n\n # macOS special cases\n reqMac = macosVersionString.match(required)\n if reqMac:\n provMac = macosVersionString.match(provided)\n\n # is this a Mac package?\n if not provMac:\n # this is backwards compatibility for packages built before\n # setuptools 0.6. All packages built after this point will\n # use the new macOS designation.\n provDarwin = darwinVersionString.match(provided)\n if provDarwin:\n dversion = int(provDarwin.group(1))\n macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))\n if dversion == 7 and macosversion >= "10.3" or \\\n dversion == 8 and macosversion >= "10.4":\n return True\n # egg isn\'t macOS or legacy darwin\n return False\n\n # are they the same major version and machine type?\n if provMac.group(1) != reqMac.group(1) or \\\n provMac.group(3) != reqMac.group(3):\n return False\n\n # is the required OS major update >= the provided one?\n if int(provMac.group(2)) > int(reqMac.group(2)):\n return False\n\n return True\n\n # XXX Linux and other platforms\' special cases should go here\n return False\n\n\ndef run_script(dist_spec, script_name):\n """Locate distribution `dist_spec` and run its `script_name` script"""\n ns = sys._getframe(1).f_globals\n name = ns[\'__name__\']\n ns.clear()\n ns[\'__name__\'] = name\n require(dist_spec)[0].run_script(script_name, ns)\n\n\n# backward compatibility\nrun_main = run_script\n\n\ndef get_distribution(dist):\n """Return a current distribution object for a Requirement or string"""\n if isinstance(dist, six.string_types):\n dist = Requirement.parse(dist)\n if isinstance(dist, Requirement):\n dist = get_provider(dist)\n if not isinstance(dist, Distribution):\n raise TypeError("Expected string, Requirement, or Distribution", dist)\n return dist\n\n\ndef load_entry_point(dist, group, name):\n """Return `name` entry point of `group` for `dist` or raise ImportError"""\n return get_distribution(dist).load_entry_point(group, name)\n\n\ndef get_entry_map(dist, group=None):\n """Return the entry point map for `group`, or the full entry map"""\n return get_distribution(dist).get_entry_map(group)\n\n\ndef get_entry_info(dist, group, name):\n """Return the EntryPoint object for `group`+`name`, or ``None``"""\n return get_distribution(dist).get_entry_info(group, name)\n\n\nclass IMetadataProvider:\n def has_metadata(name):\n """Does the package\'s distribution contain the named metadata?"""\n\n def get_metadata(name):\n """The named metadata resource as a string"""\n\n def get_metadata_lines(name):\n """Yield named metadata resource as list of non-blank non-comment lines\n\n Leading and trailing whitespace is stripped from each line, and lines\n with ``#`` as the first non-blank character are omitted."""\n\n def metadata_isdir(name):\n """Is the named metadata a directory? 
(like ``os.path.isdir()``)"""\n\n def metadata_listdir(name):\n """List of metadata names in the directory (like ``os.listdir()``)"""\n\n def run_script(script_name, namespace):\n """Execute the named script in the supplied namespace dictionary"""\n\n\nclass IResourceProvider(IMetadataProvider):\n """An object that provides access to package resources"""\n\n def get_resource_filename(manager, resource_name):\n """Return a true filesystem path for `resource_name`\n\n `manager` must be an ``IResourceManager``"""\n\n def get_resource_stream(manager, resource_name):\n """Return a readable file-like object for `resource_name`\n\n `manager` must be an ``IResourceManager``"""\n\n def get_resource_string(manager, resource_name):\n """Return a string containing the contents of `resource_name`\n\n `manager` must be an ``IResourceManager``"""\n\n def has_resource(resource_name):\n """Does the package contain the named resource?"""\n\n def resource_isdir(resource_name):\n """Is the named resource a directory? (like ``os.path.isdir()``)"""\n\n def resource_listdir(resource_name):\n """List of resource names in the directory (like ``os.listdir()``)"""\n\n\nclass WorkingSet:\n """A collection of active distributions on sys.path (or a similar list)"""\n\n def __init__(self, entries=None):\n """Create working set from list of path entries (default=sys.path)"""\n self.entries = []\n self.entry_keys = {}\n self.by_key = {}\n self.callbacks = []\n\n if entries is None:\n entries = sys.path\n\n for entry in entries:\n self.add_entry(entry)\n\n @classmethod\n def _build_master(cls):\n """\n Prepare the master working set.\n """\n ws = cls()\n try:\n from __main__ import __requires__\n except ImportError:\n # The main program does not list any requirements\n return ws\n\n # ensure the requirements are met\n try:\n ws.require(__requires__)\n except VersionConflict:\n return cls._build_from_requirements(__requires__)\n\n return ws\n\n @classmethod\n def _build_from_requirements(cls, req_spec):\n """\n Build a working set from a requirement spec. Rewrites sys.path.\n """\n # try it without defaults already on sys.path\n # by starting with an empty path\n ws = cls([])\n reqs = parse_requirements(req_spec)\n dists = ws.resolve(reqs, Environment())\n for dist in dists:\n ws.add(dist)\n\n # add any missing entries from sys.path\n for entry in sys.path:\n if entry not in ws.entries:\n ws.add_entry(entry)\n\n # then copy back to sys.path\n sys.path[:] = ws.entries\n return ws\n\n def add_entry(self, entry):\n """Add a path item to ``.entries``, finding any distributions on it\n\n ``find_distributions(entry, True)`` is used to find distributions\n corresponding to the path entry, and they are added. `entry` is\n always appended to ``.entries``, even if it is already present.\n (This is because ``sys.path`` can contain the same value more than\n once, and the ``.entries`` of the ``sys.path`` WorkingSet should always\n equal ``sys.path``.)\n """\n self.entry_keys.setdefault(entry, [])\n self.entries.append(entry)\n for dist in find_distributions(entry, True):\n self.add(dist, entry, False)\n\n def __contains__(self, dist):\n """True if `dist` is the active distribution for its project"""\n return self.by_key.get(dist.key) == dist\n\n def find(self, req):\n """Find a distribution matching requirement `req`\n\n If there is an active distribution for the requested project, this\n returns it as long as it meets the version requirement specified by\n `req`. 
But, if there is an active distribution for the project and it\n does *not* meet the `req` requirement, ``VersionConflict`` is raised.\n If there is no active distribution for the requested project, ``None``\n is returned.\n """\n dist = self.by_key.get(req.key)\n if dist is not None and dist not in req:\n # XXX add more info\n raise VersionConflict(dist, req)\n return dist\n\n def iter_entry_points(self, group, name=None):\n """Yield entry point objects from `group` matching `name`\n\n If `name` is None, yields all entry points in `group` from all\n distributions in the working set, otherwise only ones matching\n both `group` and `name` are yielded (in distribution order).\n """\n return (\n entry\n for dist in self\n for entry in dist.get_entry_map(group).values()\n if name is None or name == entry.name\n )\n\n def run_script(self, requires, script_name):\n """Locate distribution for `requires` and run `script_name` script"""\n ns = sys._getframe(1).f_globals\n name = ns[\'__name__\']\n ns.clear()\n ns[\'__name__\'] = name\n self.require(requires)[0].run_script(script_name, ns)\n\n def __iter__(self):\n """Yield distributions for non-duplicate projects in the working set\n\n The yield order is the order in which the items\' path entries were\n added to the working set.\n """\n seen = {}\n for item in self.entries:\n if item not in self.entry_keys:\n # workaround a cache issue\n continue\n\n for key in self.entry_keys[item]:\n if key not in seen:\n seen[key] = 1\n yield self.by_key[key]\n\n def add(self, dist, entry=None, insert=True, replace=False):\n """Add `dist` to working set, associated with `entry`\n\n If `entry` is unspecified, it defaults to the ``.location`` of `dist`.\n On exit from this routine, `entry` is added to the end of the working\n set\'s ``.entries`` (if it wasn\'t already present).\n\n `dist` is only added to the working set if it\'s for a project that\n doesn\'t already have a distribution in the set, unless `replace=True`.\n If it\'s added, any callbacks registered with the ``subscribe()`` method\n will be called.\n """\n if insert:\n dist.insert_on(self.entries, entry, replace=replace)\n\n if entry is None:\n entry = dist.location\n keys = self.entry_keys.setdefault(entry, [])\n keys2 = self.entry_keys.setdefault(dist.location, [])\n if not replace and dist.key in self.by_key:\n # ignore hidden distros\n return\n\n self.by_key[dist.key] = dist\n if dist.key not in keys:\n keys.append(dist.key)\n if dist.key not in keys2:\n keys2.append(dist.key)\n self._added_new(dist)\n\n def resolve(self, requirements, env=None, installer=None,\n replace_conflicting=False, extras=None):\n """List all distributions needed to (recursively) meet `requirements`\n\n `requirements` must be a sequence of ``Requirement`` objects. `env`,\n if supplied, should be an ``Environment`` instance. If\n not supplied, it defaults to all distributions available within any\n entry or distribution in the working set. `installer`, if supplied,\n will be invoked with each requirement that cannot be met by an\n already-installed distribution; it should return a ``Distribution`` or\n ``None``.\n\n Unless `replace_conflicting=True`, raises a VersionConflict exception\n if\n any requirements are found on the path that have the correct name but\n the wrong version. 
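# Editor's sketch (hedged, not part of the bundled source): resolve() is most
# often reached through require(); the requirement string here is only an
# example and assumes setuptools metadata is present on sys.path.
import pkg_resources

reqs = pkg_resources.parse_requirements('setuptools>=0.6')
for dist in pkg_resources.working_set.resolve(reqs):
    print(dist.project_name, dist.version)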
Otherwise, if an `installer` is supplied it will be\n invoked to obtain the correct version of the requirement and activate\n it.\n\n `extras` is a list of the extras to be used with these requirements.\n This is important because extra requirements may look like `my_req;\n extra = "my_extra"`, which would otherwise be interpreted as a purely\n optional requirement. Instead, we want to be able to assert that these\n requirements are truly required.\n """\n\n # set up the stack\n requirements = list(requirements)[::-1]\n # set of processed requirements\n processed = {}\n # key -> dist\n best = {}\n to_activate = []\n\n req_extras = _ReqExtras()\n\n # Mapping of requirement to set of distributions that required it;\n # useful for reporting info about conflicts.\n required_by = collections.defaultdict(set)\n\n while requirements:\n # process dependencies breadth-first\n req = requirements.pop(0)\n if req in processed:\n # Ignore cyclic or redundant dependencies\n continue\n\n if not req_extras.markers_pass(req, extras):\n continue\n\n dist = best.get(req.key)\n if dist is None:\n # Find the best distribution and add it to the map\n dist = self.by_key.get(req.key)\n if dist is None or (dist not in req and replace_conflicting):\n ws = self\n if env is None:\n if dist is None:\n env = Environment(self.entries)\n else:\n # Use an empty environment and workingset to avoid\n # any further conflicts with the conflicting\n # distribution\n env = Environment([])\n ws = WorkingSet([])\n dist = best[req.key] = env.best_match(\n req, ws, installer,\n replace_conflicting=replace_conflicting\n )\n if dist is None:\n requirers = required_by.get(req, None)\n raise DistributionNotFound(req, requirers)\n to_activate.append(dist)\n if dist not in req:\n # Oops, the "best" so far conflicts with a dependency\n dependent_req = required_by[req]\n raise VersionConflict(dist, req).with_context(dependent_req)\n\n # push the new requirements onto the stack\n new_requirements = dist.requires(req.extras)[::-1]\n requirements.extend(new_requirements)\n\n # Register the new requirements needed by req\n for new_requirement in new_requirements:\n required_by[new_requirement].add(req.project_name)\n req_extras[new_requirement] = req.extras\n\n processed[req] = True\n\n # return list of distros to activate\n return to_activate\n\n def find_plugins(\n self, plugin_env, full_env=None, installer=None, fallback=True):\n """Find all activatable distributions in `plugin_env`\n\n Example usage::\n\n distributions, errors = working_set.find_plugins(\n Environment(plugin_dirlist)\n )\n # add plugins+libs to sys.path\n map(working_set.add, distributions)\n # display errors\n print(\'Could not load\', errors)\n\n The `plugin_env` should be an ``Environment`` instance that contains\n only distributions that are in the project\'s "plugin directory" or\n directories. The `full_env`, if supplied, should be an ``Environment``\n contains all currently-available distributions. If `full_env` is not\n supplied, one is created automatically from the ``WorkingSet`` this\n method is called on, which will typically mean that every directory on\n ``sys.path`` will be scanned for distributions.\n\n `installer` is a standard installer callback as used by the\n ``resolve()`` method. 
The `fallback` flag indicates whether we should\n attempt to resolve older versions of a plugin if the newest version\n cannot be resolved.\n\n This method returns a 2-tuple: (`distributions`, `error_info`), where\n `distributions` is a list of the distributions found in `plugin_env`\n that were loadable, along with any other distributions that are needed\n to resolve their dependencies. `error_info` is a dictionary mapping\n unloadable plugin distributions to an exception instance describing the\n error that occurred. Usually this will be a ``DistributionNotFound`` or\n ``VersionConflict`` instance.\n """\n\n plugin_projects = list(plugin_env)\n # scan project names in alphabetic order\n plugin_projects.sort()\n\n error_info = {}\n distributions = {}\n\n if full_env is None:\n env = Environment(self.entries)\n env += plugin_env\n else:\n env = full_env + plugin_env\n\n shadow_set = self.__class__([])\n # put all our entries in shadow_set\n list(map(shadow_set.add, self))\n\n for project_name in plugin_projects:\n\n for dist in plugin_env[project_name]:\n\n req = [dist.as_requirement()]\n\n try:\n resolvees = shadow_set.resolve(req, env, installer)\n\n except ResolutionError as v:\n # save error info\n error_info[dist] = v\n if fallback:\n # try the next older version of project\n continue\n else:\n # give up on this project, keep going\n break\n\n else:\n list(map(shadow_set.add, resolvees))\n distributions.update(dict.fromkeys(resolvees))\n\n # success, no need to try any more versions of this project\n break\n\n distributions = list(distributions)\n distributions.sort()\n\n return distributions, error_info\n\n def require(self, *requirements):\n """Ensure that distributions matching `requirements` are activated\n\n `requirements` must be a string or a (possibly-nested) sequence\n thereof, specifying the distributions and versions required. The\n return value is a sequence of the distributions that needed to be\n activated to fulfill the requirements; all relevant distributions are\n included, even if they were already activated in this working set.\n """\n needed = self.resolve(parse_requirements(requirements))\n\n for dist in needed:\n self.add(dist)\n\n return needed\n\n def subscribe(self, callback, existing=True):\n """Invoke `callback` for all distributions\n\n If `existing=True` (default),\n call on all existing ones, as well.\n """\n if callback in self.callbacks:\n return\n self.callbacks.append(callback)\n if not existing:\n return\n for dist in self:\n callback(dist)\n\n def _added_new(self, dist):\n for callback in self.callbacks:\n callback(dist)\n\n def __getstate__(self):\n return (\n self.entries[:], self.entry_keys.copy(), self.by_key.copy(),\n self.callbacks[:]\n )\n\n def __setstate__(self, e_k_b_c):\n entries, keys, by_key, callbacks = e_k_b_c\n self.entries = entries[:]\n self.entry_keys = keys.copy()\n self.by_key = by_key.copy()\n self.callbacks = callbacks[:]\n\n\nclass _ReqExtras(dict):\n """\n Map each requirement to the extras that demanded it.\n """\n\n def markers_pass(self, req, extras=None):\n """\n Evaluate markers for req against each extra that\n demanded it.\n\n Return False if the req has a marker and fails\n evaluation. 
Otherwise, return True.\n """\n extra_evals = (\n req.marker.evaluate({\'extra\': extra})\n for extra in self.get(req, ()) + (extras or (None,))\n )\n return not req.marker or any(extra_evals)\n\n\nclass Environment:\n """Searchable snapshot of distributions on a search path"""\n\n def __init__(\n self, search_path=None, platform=get_supported_platform(),\n python=PY_MAJOR):\n """Snapshot distributions available on a search path\n\n Any distributions found on `search_path` are added to the environment.\n `search_path` should be a sequence of ``sys.path`` items. If not\n supplied, ``sys.path`` is used.\n\n `platform` is an optional string specifying the name of the platform\n that platform-specific distributions must be compatible with. If\n unspecified, it defaults to the current platform. `python` is an\n optional string naming the desired version of Python (e.g. ``\'3.6\'``);\n it defaults to the current version.\n\n You may explicitly set `platform` (and/or `python`) to ``None`` if you\n wish to map *all* distributions, not just those compatible with the\n running platform or Python version.\n """\n self._distmap = {}\n self.platform = platform\n self.python = python\n self.scan(search_path)\n\n def can_add(self, dist):\n """Is distribution `dist` acceptable for this environment?\n\n The distribution must match the platform and python version\n requirements specified when this environment was created, or False\n is returned.\n """\n py_compat = (\n self.python is None\n or dist.py_version is None\n or dist.py_version == self.python\n )\n return py_compat and compatible_platforms(dist.platform, self.platform)\n\n def remove(self, dist):\n """Remove `dist` from the environment"""\n self._distmap[dist.key].remove(dist)\n\n def scan(self, search_path=None):\n """Scan `search_path` for distributions usable in this environment\n\n Any distributions found are added to the environment.\n `search_path` should be a sequence of ``sys.path`` items. If not\n supplied, ``sys.path`` is used. Only distributions conforming to\n the platform/python version defined at initialization are added.\n """\n if search_path is None:\n search_path = sys.path\n\n for item in search_path:\n for dist in find_distributions(item):\n self.add(dist)\n\n def __getitem__(self, project_name):\n """Return a newest-to-oldest list of distributions for `project_name`\n\n Uses case-insensitive `project_name` comparison, assuming all the\n project\'s distributions use their project\'s name converted to all\n lowercase as their key.\n\n """\n distribution_key = project_name.lower()\n return self._distmap.get(distribution_key, [])\n\n def add(self, dist):\n """Add `dist` if we ``can_add()`` it and it has not already been added\n """\n if self.can_add(dist) and dist.has_version():\n dists = self._distmap.setdefault(dist.key, [])\n if dist not in dists:\n dists.append(dist)\n dists.sort(key=operator.attrgetter(\'hashcmp\'), reverse=True)\n\n def best_match(\n self, req, working_set, installer=None, replace_conflicting=False):\n """Find distribution best matching `req` and usable on `working_set`\n\n This calls the ``find(req)`` method of the `working_set` to see if a\n suitable distribution is already active. (This may raise\n ``VersionConflict`` if an unsuitable version of the project is already\n active in the specified `working_set`.) If a suitable distribution\n isn\'t active, this method returns the newest distribution in the\n environment that meets the ``Requirement`` in `req`. 
If no suitable\n distribution is found, and `installer` is supplied, then the result of\n calling the environment\'s ``obtain(req, installer)`` method will be\n returned.\n """\n try:\n dist = working_set.find(req)\n except VersionConflict:\n if not replace_conflicting:\n raise\n dist = None\n if dist is not None:\n return dist\n for dist in self[req.key]:\n if dist in req:\n return dist\n # try to download/install\n return self.obtain(req, installer)\n\n def obtain(self, requirement, installer=None):\n """Obtain a distribution matching `requirement` (e.g. via download)\n\n Obtain a distro that matches requirement (e.g. via download). In the\n base ``Environment`` class, this routine just returns\n ``installer(requirement)``, unless `installer` is None, in which case\n None is returned instead. This method is a hook that allows subclasses\n to attempt other ways of obtaining a distribution before falling back\n to the `installer` argument."""\n if installer is not None:\n return installer(requirement)\n\n def __iter__(self):\n """Yield the unique project names of the available distributions"""\n for key in self._distmap.keys():\n if self[key]:\n yield key\n\n def __iadd__(self, other):\n """In-place addition of a distribution or environment"""\n if isinstance(other, Distribution):\n self.add(other)\n elif isinstance(other, Environment):\n for project in other:\n for dist in other[project]:\n self.add(dist)\n else:\n raise TypeError("Can\'t add %r to environment" % (other,))\n return self\n\n def __add__(self, other):\n """Add an environment or distribution to an environment"""\n new = self.__class__([], platform=None, python=None)\n for env in self, other:\n new += env\n return new\n\n\n# XXX backward compatibility\nAvailableDistributions = Environment\n\n\nclass ExtractionError(RuntimeError):\n """An error occurred extracting a resource\n\n The following attributes are available from instances of this exception:\n\n manager\n The resource manager that raised this exception\n\n cache_path\n The base directory for resource extraction\n\n original_error\n The exception instance that caused extraction to fail\n """\n\n\nclass ResourceManager:\n """Manage resource extraction and packages"""\n extraction_path = None\n\n def __init__(self):\n self.cached_files = {}\n\n def resource_exists(self, package_or_requirement, resource_name):\n """Does the named resource exist?"""\n return get_provider(package_or_requirement).has_resource(resource_name)\n\n def resource_isdir(self, package_or_requirement, resource_name):\n """Is the named resource an existing directory?"""\n return get_provider(package_or_requirement).resource_isdir(\n resource_name\n )\n\n def resource_filename(self, package_or_requirement, resource_name):\n """Return a true filesystem path for specified resource"""\n return get_provider(package_or_requirement).get_resource_filename(\n self, resource_name\n )\n\n def resource_stream(self, package_or_requirement, resource_name):\n """Return a readable file-like object for specified resource"""\n return get_provider(package_or_requirement).get_resource_stream(\n self, resource_name\n )\n\n def resource_string(self, package_or_requirement, resource_name):\n """Return specified resource as a string"""\n return get_provider(package_or_requirement).get_resource_string(\n self, resource_name\n )\n\n def resource_listdir(self, package_or_requirement, resource_name):\n """List the contents of the named resource directory"""\n return get_provider(package_or_requirement).resource_listdir(\n 
resource_name\n )\n\n def extraction_error(self):\n """Give an error message for problems extracting file(s)"""\n\n old_exc = sys.exc_info()[1]\n cache_path = self.extraction_path or get_default_cache()\n\n tmpl = textwrap.dedent("""\n Can\'t extract file(s) to egg cache\n\n The following error occurred while trying to extract file(s)\n to the Python egg cache:\n\n {old_exc}\n\n The Python egg cache directory is currently set to:\n\n {cache_path}\n\n Perhaps your account does not have write access to this directory?\n You can change the cache directory by setting the PYTHON_EGG_CACHE\n environment variable to point to an accessible directory.\n """).lstrip()\n err = ExtractionError(tmpl.format(**locals()))\n err.manager = self\n err.cache_path = cache_path\n err.original_error = old_exc\n raise err\n\n def get_cache_path(self, archive_name, names=()):\n """Return absolute location in cache for `archive_name` and `names`\n\n The parent directory of the resulting path will be created if it does\n not already exist. `archive_name` should be the base filename of the\n enclosing egg (which may not be the name of the enclosing zipfile!),\n including its ".egg" extension. `names`, if provided, should be a\n sequence of path name parts "under" the egg\'s extraction location.\n\n This method should only be called by resource providers that need to\n obtain an extraction location, and only for names they intend to\n extract, as it tracks the generated names for possible cleanup later.\n """\n extract_path = self.extraction_path or get_default_cache()\n target_path = os.path.join(extract_path, archive_name + \'-tmp\', *names)\n try:\n _bypass_ensure_directory(target_path)\n except Exception:\n self.extraction_error()\n\n self._warn_unsafe_extraction_path(extract_path)\n\n self.cached_files[target_path] = 1\n return target_path\n\n @staticmethod\n def _warn_unsafe_extraction_path(path):\n """\n If the default extraction path is overridden and set to an insecure\n location, such as /tmp, it opens up an opportunity for an attacker to\n replace an extracted file with an unauthorized payload. Warn the user\n if a known insecure location is used.\n\n See Distribute #375 for more details.\n """\n if os.name == \'nt\' and not path.startswith(os.environ[\'windir\']):\n # On Windows, permissions are generally restrictive by default\n # and temp directories are not writable by other users, so\n # bypass the warning.\n return\n mode = os.stat(path).st_mode\n if mode & stat.S_IWOTH or mode & stat.S_IWGRP:\n msg = (\n "Extraction path is writable by group/others "\n "and vulnerable to attack when "\n "used with get_resource_filename ({path}). "\n "Consider a more secure "\n "location (set with .set_extraction_path or the "\n "PYTHON_EGG_CACHE environment variable)."\n ).format(**locals())\n warnings.warn(msg, UserWarning)\n\n def postprocess(self, tempname, filename):\n """Perform any platform-specific postprocessing of `tempname`\n\n This is where Mac header rewrites should be done; other platforms don\'t\n have anything special they should do.\n\n Resource providers should call this method ONLY after successfully\n extracting a compressed resource. 
They must NOT call it on resources\n that are already in the filesystem.\n\n `tempname` is the current (temporary) name of the file, and `filename`\n is the name it will be renamed to by the caller after this routine\n returns.\n """\n\n if os.name == \'posix\':\n # Make the resource executable\n mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777\n os.chmod(tempname, mode)\n\n def set_extraction_path(self, path):\n """Set the base path where resources will be extracted to, if needed.\n\n If you do not call this routine before any extractions take place, the\n path defaults to the return value of ``get_default_cache()``. (Which\n is based on the ``PYTHON_EGG_CACHE`` environment variable, with various\n platform-specific fallbacks. See that routine\'s documentation for more\n details.)\n\n Resources are extracted to subdirectories of this path based upon\n information given by the ``IResourceProvider``. You may set this to a\n temporary directory, but then you must call ``cleanup_resources()`` to\n delete the extracted files when done. There is no guarantee that\n ``cleanup_resources()`` will be able to remove all extracted files.\n\n (Note: you may not change the extraction path for a given resource\n manager once resources have been extracted, unless you first call\n ``cleanup_resources()``.)\n """\n if self.cached_files:\n raise ValueError(\n "Can\'t change extraction path, files already extracted"\n )\n\n self.extraction_path = path\n\n def cleanup_resources(self, force=False):\n """\n Delete all extracted resource files and directories, returning a list\n of the file and directory names that could not be successfully removed.\n This function does not have any concurrency protection, so it should\n generally only be called when the extraction path is a temporary\n directory exclusive to a single process. This method is not\n automatically called; you must call it explicitly or register it as an\n ``atexit`` function if you wish to ensure cleanup of a temporary\n directory used for extractions.\n """\n # XXX\n\n\ndef get_default_cache():\n """\n Return the ``PYTHON_EGG_CACHE`` environment variable\n or a platform-relevant user cache dir for an app\n named "Python-Eggs".\n """\n return (\n os.environ.get(\'PYTHON_EGG_CACHE\')\n or appdirs.user_cache_dir(appname=\'Python-Eggs\')\n )\n\n\ndef safe_name(name):\n """Convert an arbitrary string to a standard distribution name\n\n Any runs of non-alphanumeric/. 
characters are replaced with a single \'-\'.\n """\n return re.sub(\'[^A-Za-z0-9.]+\', \'-\', name)\n\n\ndef safe_version(version):\n """\n Convert an arbitrary string to a standard version string\n """\n try:\n # normalize the version\n return str(packaging.version.Version(version))\n except packaging.version.InvalidVersion:\n version = version.replace(\' \', \'.\')\n return re.sub(\'[^A-Za-z0-9.]+\', \'-\', version)\n\n\ndef safe_extra(extra):\n """Convert an arbitrary string to a standard \'extra\' name\n\n Any runs of non-alphanumeric characters are replaced with a single \'_\',\n and the result is always lowercased.\n """\n return re.sub(\'[^A-Za-z0-9.-]+\', \'_\', extra).lower()\n\n\ndef to_filename(name):\n """Convert a project or version name to its filename-escaped form\n\n Any \'-\' characters are currently replaced with \'_\'.\n """\n return name.replace(\'-\', \'_\')\n\n\ndef invalid_marker(text):\n """\n Validate text as a PEP 508 environment marker; return an exception\n if invalid or False otherwise.\n """\n try:\n evaluate_marker(text)\n except SyntaxError as e:\n e.filename = None\n e.lineno = None\n return e\n return False\n\n\ndef evaluate_marker(text, extra=None):\n """\n Evaluate a PEP 508 environment marker.\n Return a boolean indicating the marker result in this environment.\n Raise SyntaxError if marker is invalid.\n\n This implementation uses the \'pyparsing\' module.\n """\n try:\n marker = packaging.markers.Marker(text)\n return marker.evaluate()\n except packaging.markers.InvalidMarker as e:\n raise SyntaxError(e)\n\n\nclass NullProvider:\n """Try to implement resources and metadata for arbitrary PEP 302 loaders"""\n\n egg_name = None\n egg_info = None\n loader = None\n\n def __init__(self, module):\n self.loader = getattr(module, \'__loader__\', None)\n self.module_path = os.path.dirname(getattr(module, \'__file__\', \'\'))\n\n def get_resource_filename(self, manager, resource_name):\n return self._fn(self.module_path, resource_name)\n\n def get_resource_stream(self, manager, resource_name):\n return io.BytesIO(self.get_resource_string(manager, resource_name))\n\n def get_resource_string(self, manager, resource_name):\n return self._get(self._fn(self.module_path, resource_name))\n\n def has_resource(self, resource_name):\n return self._has(self._fn(self.module_path, resource_name))\n\n def _get_metadata_path(self, name):\n return self._fn(self.egg_info, name)\n\n def has_metadata(self, name):\n if not self.egg_info:\n return self.egg_info\n\n path = self._get_metadata_path(name)\n return self._has(path)\n\n def get_metadata(self, name):\n if not self.egg_info:\n return ""\n path = self._get_metadata_path(name)\n value = self._get(path)\n if six.PY2:\n return value\n try:\n return value.decode(\'utf-8\')\n except UnicodeDecodeError as exc:\n # Include the path in the error message to simplify\n # troubleshooting, and without changing the exception type.\n exc.reason += \' in {} file at path: {}\'.format(name, path)\n raise\n\n def get_metadata_lines(self, name):\n return yield_lines(self.get_metadata(name))\n\n def resource_isdir(self, resource_name):\n return self._isdir(self._fn(self.module_path, resource_name))\n\n def metadata_isdir(self, name):\n return self.egg_info and self._isdir(self._fn(self.egg_info, name))\n\n def resource_listdir(self, resource_name):\n return self._listdir(self._fn(self.module_path, resource_name))\n\n def metadata_listdir(self, name):\n if self.egg_info:\n return self._listdir(self._fn(self.egg_info, name))\n return []\n\n def 
run_script(self, script_name, namespace):\n script = \'scripts/\' + script_name\n if not self.has_metadata(script):\n raise ResolutionError(\n "Script {script!r} not found in metadata at {self.egg_info!r}"\n .format(**locals()),\n )\n script_text = self.get_metadata(script).replace(\'\\r\\n\', \'\\n\')\n script_text = script_text.replace(\'\\r\', \'\\n\')\n script_filename = self._fn(self.egg_info, script)\n namespace[\'__file__\'] = script_filename\n if os.path.exists(script_filename):\n source = open(script_filename).read()\n code = compile(source, script_filename, \'exec\')\n exec(code, namespace, namespace)\n else:\n from linecache import cache\n cache[script_filename] = (\n len(script_text), 0, script_text.split(\'\\n\'), script_filename\n )\n script_code = compile(script_text, script_filename, \'exec\')\n exec(script_code, namespace, namespace)\n\n def _has(self, path):\n raise NotImplementedError(\n "Can\'t perform this operation for unregistered loader type"\n )\n\n def _isdir(self, path):\n raise NotImplementedError(\n "Can\'t perform this operation for unregistered loader type"\n )\n\n def _listdir(self, path):\n raise NotImplementedError(\n "Can\'t perform this operation for unregistered loader type"\n )\n\n def _fn(self, base, resource_name):\n self._validate_resource_path(resource_name)\n if resource_name:\n return os.path.join(base, *resource_name.split(\'/\'))\n return base\n\n @staticmethod\n def _validate_resource_path(path):\n """\n Validate the resource paths according to the docs.\n https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access\n\n >>> warned = getfixture(\'recwarn\')\n >>> warnings.simplefilter(\'always\')\n >>> vrp = NullProvider._validate_resource_path\n >>> vrp(\'foo/bar.txt\')\n >>> bool(warned)\n False\n >>> vrp(\'../foo/bar.txt\')\n >>> bool(warned)\n True\n >>> warned.clear()\n >>> vrp(\'/foo/bar.txt\')\n >>> bool(warned)\n True\n >>> vrp(\'foo/../../bar.txt\')\n >>> bool(warned)\n True\n >>> warned.clear()\n >>> vrp(\'foo/f../bar.txt\')\n >>> bool(warned)\n False\n\n Windows path separators are straight-up disallowed.\n >>> vrp(r\'\\\\foo/bar.txt\')\n Traceback (most recent call last):\n ...\n ValueError: Use of .. or absolute path in a resource path \\\nis not allowed.\n\n >>> vrp(r\'C:\\\\foo/bar.txt\')\n Traceback (most recent call last):\n ...\n ValueError: Use of .. or absolute path in a resource path \\\nis not allowed.\n\n Blank values are allowed\n\n >>> vrp(\'\')\n >>> bool(warned)\n False\n\n Non-string values are not.\n\n >>> vrp(None)\n Traceback (most recent call last):\n ...\n AttributeError: ...\n """\n invalid = (\n os.path.pardir in path.split(posixpath.sep) or\n posixpath.isabs(path) or\n ntpath.isabs(path)\n )\n if not invalid:\n return\n\n msg = "Use of .. 
or absolute path in a resource path is not allowed."\n\n # Aggressively disallow Windows absolute paths\n if ntpath.isabs(path) and not posixpath.isabs(path):\n raise ValueError(msg)\n\n # for compatibility, warn; in future\n # raise ValueError(msg)\n warnings.warn(\n msg[:-1] + " and will raise exceptions in a future release.",\n DeprecationWarning,\n stacklevel=4,\n )\n\n def _get(self, path):\n if hasattr(self.loader, \'get_data\'):\n return self.loader.get_data(path)\n raise NotImplementedError(\n "Can\'t perform this operation for loaders without \'get_data()\'"\n )\n\n\nregister_loader_type(object, NullProvider)\n\n\ndef _parents(path):\n """\n yield all parents of path including path\n """\n last = None\n while path != last:\n yield path\n last = path\n path, _ = os.path.split(path)\n\n\nclass EggProvider(NullProvider):\n """Provider based on a virtual filesystem"""\n\n def __init__(self, module):\n NullProvider.__init__(self, module)\n self._setup_prefix()\n\n def _setup_prefix(self):\n # Assume that metadata may be nested inside a "basket"\n # of multiple eggs and use module_path instead of .archive.\n eggs = filter(_is_egg_path, _parents(self.module_path))\n egg = next(eggs, None)\n egg and self._set_egg(egg)\n\n def _set_egg(self, path):\n self.egg_name = os.path.basename(path)\n self.egg_info = os.path.join(path, \'EGG-INFO\')\n self.egg_root = path\n\n\nclass DefaultProvider(EggProvider):\n """Provides access to package resources in the filesystem"""\n\n def _has(self, path):\n return os.path.exists(path)\n\n def _isdir(self, path):\n return os.path.isdir(path)\n\n def _listdir(self, path):\n return os.listdir(path)\n\n def get_resource_stream(self, manager, resource_name):\n return open(self._fn(self.module_path, resource_name), \'rb\')\n\n def _get(self, path):\n with open(path, \'rb\') as stream:\n return stream.read()\n\n @classmethod\n def _register(cls):\n loader_names = \'SourceFileLoader\', \'SourcelessFileLoader\',\n for name in loader_names:\n loader_cls = getattr(importlib_machinery, name, type(None))\n register_loader_type(loader_cls, cls)\n\n\nDefaultProvider._register()\n\n\nclass EmptyProvider(NullProvider):\n """Provider that returns nothing for all requests"""\n\n module_path = None\n\n _isdir = _has = lambda self, path: False\n\n def _get(self, path):\n return \'\'\n\n def _listdir(self, path):\n return []\n\n def __init__(self):\n pass\n\n\nempty_provider = EmptyProvider()\n\n\nclass ZipManifests(dict):\n """\n zip manifest builder\n """\n\n @classmethod\n def build(cls, path):\n """\n Build a dictionary similar to the zipimport directory\n caches, except instead of tuples, store ZipInfo objects.\n\n Use a platform-specific path separator (os.sep) for the path keys\n for compatibility with pypy on Windows.\n """\n with zipfile.ZipFile(path) as zfile:\n items = (\n (\n name.replace(\'/\', os.sep),\n zfile.getinfo(name),\n )\n for name in zfile.namelist()\n )\n return dict(items)\n\n load = build\n\n\nclass MemoizedZipManifests(ZipManifests):\n """\n Memoized zipfile manifests.\n """\n manifest_mod = collections.namedtuple(\'manifest_mod\', \'manifest mtime\')\n\n def load(self, path):\n """\n Load a manifest at path or return a suitable manifest already loaded.\n """\n path = os.path.normpath(path)\n mtime = os.stat(path).st_mtime\n\n if path not in self or self[path].mtime != mtime:\n manifest = self.build(path)\n self[path] = self.manifest_mod(manifest, mtime)\n\n return self[path].manifest\n\n\nclass ZipProvider(EggProvider):\n """Resource support for zips 
and eggs"""\n\n eagers = None\n _zip_manifests = MemoizedZipManifests()\n\n def __init__(self, module):\n EggProvider.__init__(self, module)\n self.zip_pre = self.loader.archive + os.sep\n\n def _zipinfo_name(self, fspath):\n # Convert a virtual filename (full path to file) into a zipfile subpath\n # usable with the zipimport directory cache for our target archive\n fspath = fspath.rstrip(os.sep)\n if fspath == self.loader.archive:\n return \'\'\n if fspath.startswith(self.zip_pre):\n return fspath[len(self.zip_pre):]\n raise AssertionError(\n "%s is not a subpath of %s" % (fspath, self.zip_pre)\n )\n\n def _parts(self, zip_path):\n # Convert a zipfile subpath into an egg-relative path part list.\n # pseudo-fs path\n fspath = self.zip_pre + zip_path\n if fspath.startswith(self.egg_root + os.sep):\n return fspath[len(self.egg_root) + 1:].split(os.sep)\n raise AssertionError(\n "%s is not a subpath of %s" % (fspath, self.egg_root)\n )\n\n @property\n def zipinfo(self):\n return self._zip_manifests.load(self.loader.archive)\n\n def get_resource_filename(self, manager, resource_name):\n if not self.egg_name:\n raise NotImplementedError(\n "resource_filename() only supported for .egg, not .zip"\n )\n # no need to lock for extraction, since we use temp names\n zip_path = self._resource_to_zip(resource_name)\n eagers = self._get_eager_resources()\n if \'/\'.join(self._parts(zip_path)) in eagers:\n for name in eagers:\n self._extract_resource(manager, self._eager_to_zip(name))\n return self._extract_resource(manager, zip_path)\n\n @staticmethod\n def _get_date_and_size(zip_stat):\n size = zip_stat.file_size\n # ymdhms+wday, yday, dst\n date_time = zip_stat.date_time + (0, 0, -1)\n # 1980 offset already done\n timestamp = time.mktime(date_time)\n return timestamp, size\n\n def _extract_resource(self, manager, zip_path):\n\n if zip_path in self._index():\n for name in self._index()[zip_path]:\n last = self._extract_resource(\n manager, os.path.join(zip_path, name)\n )\n # return the extracted directory name\n return os.path.dirname(last)\n\n timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])\n\n if not WRITE_SUPPORT:\n raise IOError(\'"os.rename" and "os.unlink" are not supported \'\n \'on this platform\')\n try:\n\n real_path = manager.get_cache_path(\n self.egg_name, self._parts(zip_path)\n )\n\n if self._is_current(real_path, zip_path):\n return real_path\n\n outf, tmpnam = _mkstemp(\n ".$extract",\n dir=os.path.dirname(real_path),\n )\n os.write(outf, self.loader.get_data(zip_path))\n os.close(outf)\n utime(tmpnam, (timestamp, timestamp))\n manager.postprocess(tmpnam, real_path)\n\n try:\n rename(tmpnam, real_path)\n\n except os.error:\n if os.path.isfile(real_path):\n if self._is_current(real_path, zip_path):\n # the file became current since it was checked above,\n # so proceed.\n return real_path\n # Windows, del old file and retry\n elif os.name == \'nt\':\n unlink(real_path)\n rename(tmpnam, real_path)\n return real_path\n raise\n\n except os.error:\n # report a user-friendly error\n manager.extraction_error()\n\n return real_path\n\n def _is_current(self, file_path, zip_path):\n """\n Return True if the file_path is current for this zip_path\n """\n timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])\n if not os.path.isfile(file_path):\n return False\n stat = os.stat(file_path)\n if stat.st_size != size or stat.st_mtime != timestamp:\n return False\n # check that the contents match\n zip_contents = self.loader.get_data(zip_path)\n with open(file_path, \'rb\') 
as f:\n file_contents = f.read()\n return zip_contents == file_contents\n\n def _get_eager_resources(self):\n if self.eagers is None:\n eagers = []\n for name in (\'native_libs.txt\', \'eager_resources.txt\'):\n if self.has_metadata(name):\n eagers.extend(self.get_metadata_lines(name))\n self.eagers = eagers\n return self.eagers\n\n def _index(self):\n try:\n return self._dirindex\n except AttributeError:\n ind = {}\n for path in self.zipinfo:\n parts = path.split(os.sep)\n while parts:\n parent = os.sep.join(parts[:-1])\n if parent in ind:\n ind[parent].append(parts[-1])\n break\n else:\n ind[parent] = [parts.pop()]\n self._dirindex = ind\n return ind\n\n def _has(self, fspath):\n zip_path = self._zipinfo_name(fspath)\n return zip_path in self.zipinfo or zip_path in self._index()\n\n def _isdir(self, fspath):\n return self._zipinfo_name(fspath) in self._index()\n\n def _listdir(self, fspath):\n return list(self._index().get(self._zipinfo_name(fspath), ()))\n\n def _eager_to_zip(self, resource_name):\n return self._zipinfo_name(self._fn(self.egg_root, resource_name))\n\n def _resource_to_zip(self, resource_name):\n return self._zipinfo_name(self._fn(self.module_path, resource_name))\n\n\nregister_loader_type(zipimport.zipimporter, ZipProvider)\n\n\nclass FileMetadata(EmptyProvider):\n """Metadata handler for standalone PKG-INFO files\n\n Usage::\n\n metadata = FileMetadata("/path/to/PKG-INFO")\n\n This provider rejects all data and metadata requests except for PKG-INFO,\n which is treated as existing, and will be the contents of the file at\n the provided location.\n """\n\n def __init__(self, path):\n self.path = path\n\n def _get_metadata_path(self, name):\n return self.path\n\n def has_metadata(self, name):\n return name == \'PKG-INFO\' and os.path.isfile(self.path)\n\n def get_metadata(self, name):\n if name != \'PKG-INFO\':\n raise KeyError("No metadata except PKG-INFO is available")\n\n with io.open(self.path, encoding=\'utf-8\', errors="replace") as f:\n metadata = f.read()\n self._warn_on_replacement(metadata)\n return metadata\n\n def _warn_on_replacement(self, metadata):\n # Python 2.7 compat for: replacement_char = \'\xef\xbf\xbd\'\n replacement_char = b\'\\xef\\xbf\\xbd\'.decode(\'utf-8\')\n if replacement_char in metadata:\n tmpl = "{self.path} could not be properly decoded in UTF-8"\n msg = tmpl.format(**locals())\n warnings.warn(msg)\n\n def get_metadata_lines(self, name):\n return yield_lines(self.get_metadata(name))\n\n\nclass PathMetadata(DefaultProvider):\n """Metadata provider for egg directories\n\n Usage::\n\n # Development eggs:\n\n egg_info = "/path/to/PackageName.egg-info"\n base_dir = os.path.dirname(egg_info)\n metadata = PathMetadata(base_dir, egg_info)\n dist_name = os.path.splitext(os.path.basename(egg_info))[0]\n dist = Distribution(basedir, project_name=dist_name, metadata=metadata)\n\n # Unpacked egg directories:\n\n egg_path = "/path/to/PackageName-ver-pyver-etc.egg"\n metadata = PathMetadata(egg_path, os.path.join(egg_path,\'EGG-INFO\'))\n dist = Distribution.from_filename(egg_path, metadata=metadata)\n """\n\n def __init__(self, path, egg_info):\n self.module_path = path\n self.egg_info = egg_info\n\n\nclass EggMetadata(ZipProvider):\n """Metadata provider for .egg files"""\n\n def __init__(self, importer):\n """Create a metadata provider from a zipimporter"""\n\n self.zip_pre = importer.archive + os.sep\n self.loader = importer\n if importer.prefix:\n self.module_path = os.path.join(importer.archive, importer.prefix)\n else:\n self.module_path = 
importer.archive\n self._setup_prefix()\n\n\n_declare_state(\'dict\', _distribution_finders={})\n\n\ndef register_finder(importer_type, distribution_finder):\n """Register `distribution_finder` to find distributions in sys.path items\n\n `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item\n handler), and `distribution_finder` is a callable that, passed a path\n item and the importer instance, yields ``Distribution`` instances found on\n that path item. See ``pkg_resources.find_on_path`` for an example."""\n _distribution_finders[importer_type] = distribution_finder\n\n\ndef find_distributions(path_item, only=False):\n """Yield distributions accessible via `path_item`"""\n importer = get_importer(path_item)\n finder = _find_adapter(_distribution_finders, importer)\n return finder(importer, path_item, only)\n\n\ndef find_eggs_in_zip(importer, path_item, only=False):\n """\n Find eggs in zip files; possibly multiple nested eggs.\n """\n if importer.archive.endswith(\'.whl\'):\n # wheels are not supported with this finder\n # they don\'t have PKG-INFO metadata, and won\'t ever contain eggs\n return\n metadata = EggMetadata(importer)\n if metadata.has_metadata(\'PKG-INFO\'):\n yield Distribution.from_filename(path_item, metadata=metadata)\n if only:\n # don\'t yield nested distros\n return\n for subitem in metadata.resource_listdir(\'\'):\n if _is_egg_path(subitem):\n subpath = os.path.join(path_item, subitem)\n dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)\n for dist in dists:\n yield dist\n elif subitem.lower().endswith(\'.dist-info\'):\n subpath = os.path.join(path_item, subitem)\n submeta = EggMetadata(zipimport.zipimporter(subpath))\n submeta.egg_info = subpath\n yield Distribution.from_location(path_item, subitem, submeta)\n\n\nregister_finder(zipimport.zipimporter, find_eggs_in_zip)\n\n\ndef find_nothing(importer, path_item, only=False):\n return ()\n\n\nregister_finder(object, find_nothing)\n\n\ndef _by_version_descending(names):\n """\n Given a list of filenames, return them in descending order\n by version number.\n\n >>> names = \'bar\', \'foo\', \'Python-2.7.10.egg\', \'Python-2.7.2.egg\'\n >>> _by_version_descending(names)\n [\'Python-2.7.10.egg\', \'Python-2.7.2.egg\', \'foo\', \'bar\']\n >>> names = \'Setuptools-1.2.3b1.egg\', \'Setuptools-1.2.3.egg\'\n >>> _by_version_descending(names)\n [\'Setuptools-1.2.3.egg\', \'Setuptools-1.2.3b1.egg\']\n >>> names = \'Setuptools-1.2.3b1.egg\', \'Setuptools-1.2.3.post1.egg\'\n >>> _by_version_descending(names)\n [\'Setuptools-1.2.3.post1.egg\', \'Setuptools-1.2.3b1.egg\']\n """\n def _by_version(name):\n """\n Parse each component of the filename\n """\n name, ext = os.path.splitext(name)\n parts = itertools.chain(name.split(\'-\'), [ext])\n return [packaging.version.parse(part) for part in parts]\n\n return sorted(names, key=_by_version, reverse=True)\n\n\ndef find_on_path(importer, path_item, only=False):\n """Yield distributions accessible on a sys.path directory"""\n path_item = _normalize_cached(path_item)\n\n if _is_unpacked_egg(path_item):\n yield Distribution.from_filename(\n path_item, metadata=PathMetadata(\n path_item, os.path.join(path_item, \'EGG-INFO\')\n )\n )\n return\n\n entries = safe_listdir(path_item)\n\n # for performance, before sorting by version,\n # screen entries for only those that will yield\n # distributions\n filtered = (\n entry\n for entry in entries\n if dist_factory(path_item, entry, only)\n )\n\n # scan for .egg and .egg-info in directory\n path_item_entries = 
_by_version_descending(filtered)\n for entry in path_item_entries:\n fullpath = os.path.join(path_item, entry)\n factory = dist_factory(path_item, entry, only)\n for dist in factory(fullpath):\n yield dist\n\n\ndef dist_factory(path_item, entry, only):\n """Return a dist_factory for the given entry."""\n lower = entry.lower()\n is_egg_info = lower.endswith(\'.egg-info\')\n is_dist_info = (\n lower.endswith(\'.dist-info\') and\n os.path.isdir(os.path.join(path_item, entry))\n )\n is_meta = is_egg_info or is_dist_info\n return (\n distributions_from_metadata\n if is_meta else\n find_distributions\n if not only and _is_egg_path(entry) else\n resolve_egg_link\n if not only and lower.endswith(\'.egg-link\') else\n NoDists()\n )\n\n\nclass NoDists:\n """\n >>> bool(NoDists())\n False\n\n >>> list(NoDists()(\'anything\'))\n []\n """\n def __bool__(self):\n return False\n if six.PY2:\n __nonzero__ = __bool__\n\n def __call__(self, fullpath):\n return iter(())\n\n\ndef safe_listdir(path):\n """\n Attempt to list contents of path, but suppress some exceptions.\n """\n try:\n return os.listdir(path)\n except (PermissionError, NotADirectoryError):\n pass\n except OSError as e:\n # Ignore the directory if does not exist, not a directory or\n # permission denied\n ignorable = (\n e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)\n # Python 2 on Windows needs to be handled this way :(\n or getattr(e, "winerror", None) == 267\n )\n if not ignorable:\n raise\n return ()\n\n\ndef distributions_from_metadata(path):\n root = os.path.dirname(path)\n if os.path.isdir(path):\n if len(os.listdir(path)) == 0:\n # empty metadata dir; skip\n return\n metadata = PathMetadata(root, path)\n else:\n metadata = FileMetadata(path)\n entry = os.path.basename(path)\n yield Distribution.from_location(\n root, entry, metadata, precedence=DEVELOP_DIST,\n )\n\n\ndef non_empty_lines(path):\n """\n Yield non-empty lines from file at path\n """\n with open(path) as f:\n for line in f:\n line = line.strip()\n if line:\n yield line\n\n\ndef resolve_egg_link(path):\n """\n Given a path to an .egg-link, resolve distributions\n present in the referenced path.\n """\n referenced_paths = non_empty_lines(path)\n resolved_paths = (\n os.path.join(os.path.dirname(path), ref)\n for ref in referenced_paths\n )\n dist_groups = map(find_distributions, resolved_paths)\n return next(dist_groups, ())\n\n\nregister_finder(pkgutil.ImpImporter, find_on_path)\n\nif hasattr(importlib_machinery, \'FileFinder\'):\n register_finder(importlib_machinery.FileFinder, find_on_path)\n\n_declare_state(\'dict\', _namespace_handlers={})\n_declare_state(\'dict\', _namespace_packages={})\n\n\ndef register_namespace_handler(importer_type, namespace_handler):\n """Register `namespace_handler` to declare namespace packages\n\n `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item\n handler), and `namespace_handler` is a callable like this::\n\n def namespace_handler(importer, path_entry, moduleName, module):\n # return a path_entry to use for child packages\n\n Namespace handlers are only called if the importer object has already\n agreed that it can handle the relevant path item, and they should only\n return a subpath if the module __path__ does not already contain an\n equivalent subpath. 
For an example namespace handler, see\n ``pkg_resources.file_ns_handler``.\n """\n _namespace_handlers[importer_type] = namespace_handler\n\n\ndef _handle_ns(packageName, path_item):\n """Ensure that named package includes a subpath of path_item (if needed)"""\n\n importer = get_importer(path_item)\n if importer is None:\n return None\n\n # use find_spec (PEP 451) and fall-back to find_module (PEP 302)\n try:\n loader = importer.find_spec(packageName).loader\n except AttributeError:\n # capture warnings due to #1111\n with warnings.catch_warnings():\n warnings.simplefilter("ignore")\n loader = importer.find_module(packageName)\n\n if loader is None:\n return None\n module = sys.modules.get(packageName)\n if module is None:\n module = sys.modules[packageName] = types.ModuleType(packageName)\n module.__path__ = []\n _set_parent_ns(packageName)\n elif not hasattr(module, \'__path__\'):\n raise TypeError("Not a package:", packageName)\n handler = _find_adapter(_namespace_handlers, importer)\n subpath = handler(importer, path_item, packageName, module)\n if subpath is not None:\n path = module.__path__\n path.append(subpath)\n loader.load_module(packageName)\n _rebuild_mod_path(path, packageName, module)\n return subpath\n\n\ndef _rebuild_mod_path(orig_path, package_name, module):\n """\n Rebuild module.__path__ ensuring that all entries are ordered\n corresponding to their sys.path order\n """\n sys_path = [_normalize_cached(p) for p in sys.path]\n\n def safe_sys_path_index(entry):\n """\n Workaround for #520 and #513.\n """\n try:\n return sys_path.index(entry)\n except ValueError:\n return float(\'inf\')\n\n def position_in_sys_path(path):\n """\n Return the ordinal of the path based on its position in sys.path\n """\n path_parts = path.split(os.sep)\n module_parts = package_name.count(\'.\') + 1\n parts = path_parts[:-module_parts]\n return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))\n\n new_path = sorted(orig_path, key=position_in_sys_path)\n new_path = [_normalize_cached(p) for p in new_path]\n\n if isinstance(module.__path__, list):\n module.__path__[:] = new_path\n else:\n module.__path__ = new_path\n\n\ndef declare_namespace(packageName):\n """Declare that package \'packageName\' is a namespace package"""\n\n _imp.acquire_lock()\n try:\n if packageName in _namespace_packages:\n return\n\n path = sys.path\n parent, _, _ = packageName.rpartition(\'.\')\n\n if parent:\n declare_namespace(parent)\n if parent not in _namespace_packages:\n __import__(parent)\n try:\n path = sys.modules[parent].__path__\n except AttributeError:\n raise TypeError("Not a package:", parent)\n\n # Track what packages are namespaces, so when new path items are added,\n # they can be updated\n _namespace_packages.setdefault(parent or None, []).append(packageName)\n _namespace_packages.setdefault(packageName, [])\n\n for path_item in path:\n # Ensure all the parent\'s path items are reflected in the child,\n # if they apply\n _handle_ns(packageName, path_item)\n\n finally:\n _imp.release_lock()\n\n\ndef fixup_namespace_packages(path_item, parent=None):\n """Ensure that previously-declared namespace packages include path_item"""\n _imp.acquire_lock()\n try:\n for package in _namespace_packages.get(parent, ()):\n subpath = _handle_ns(package, path_item)\n if subpath:\n fixup_namespace_packages(subpath, package)\n finally:\n _imp.release_lock()\n\n\ndef file_ns_handler(importer, path_item, packageName, module):\n """Compute an ns-package subpath for a filesystem or zipfile importer"""\n\n subpath = 
os.path.join(path_item, packageName.split(\'.\')[-1])\n normalized = _normalize_cached(subpath)\n for item in module.__path__:\n if _normalize_cached(item) == normalized:\n break\n else:\n # Only return the path if it\'s not already there\n return subpath\n\n\nregister_namespace_handler(pkgutil.ImpImporter, file_ns_handler)\nregister_namespace_handler(zipimport.zipimporter, file_ns_handler)\n\nif hasattr(importlib_machinery, \'FileFinder\'):\n register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)\n\n\ndef null_ns_handler(importer, path_item, packageName, module):\n return None\n\n\nregister_namespace_handler(object, null_ns_handler)\n\n\ndef normalize_path(filename):\n """Normalize a file/dir name for comparison purposes"""\n return os.path.normcase(os.path.realpath(os.path.normpath(\n _cygwin_patch(filename))))\n\n\ndef _cygwin_patch(filename): # pragma: nocover\n """\n Contrary to POSIX 2008, on Cygwin, getcwd (3) contains\n symlink components. Using\n os.path.abspath() works around this limitation. A fix in os.getcwd()\n would probably better, in Cygwin even more so, except\n that this seems to be by design...\n """\n return os.path.abspath(filename) if sys.platform == \'cygwin\' else filename\n\n\ndef _normalize_cached(filename, _cache={}):\n try:\n return _cache[filename]\n except KeyError:\n _cache[filename] = result = normalize_path(filename)\n return result\n\n\ndef _is_egg_path(path):\n """\n Determine if given path appears to be an egg.\n """\n return path.lower().endswith(\'.egg\')\n\n\ndef _is_unpacked_egg(path):\n """\n Determine if given path appears to be an unpacked egg.\n """\n return (\n _is_egg_path(path) and\n os.path.isfile(os.path.join(path, \'EGG-INFO\', \'PKG-INFO\'))\n )\n\n\ndef _set_parent_ns(packageName):\n parts = packageName.split(\'.\')\n name = parts.pop()\n if parts:\n parent = \'.\'.join(parts)\n setattr(sys.modules[parent], name, sys.modules[packageName])\n\n\ndef yield_lines(strs):\n """Yield non-empty/non-comment lines of a string or sequence"""\n if isinstance(strs, six.string_types):\n for s in strs.splitlines():\n s = s.strip()\n # skip blank lines/comments\n if s and not s.startswith(\'#\'):\n yield s\n else:\n for ss in strs:\n for s in yield_lines(ss):\n yield s\n\n\nMODULE = re.compile(r"\\w+(\\.\\w+)*$").match\nEGG_NAME = re.compile(\n r"""\n (?P[^-]+) (\n -(?P[^-]+) (\n -py(?P[^-]+) (\n -(?P.+)\n )?\n )?\n )?\n """,\n re.VERBOSE | re.IGNORECASE,\n).match\n\n\nclass EntryPoint:\n """Object representing an advertised importable object"""\n\n def __init__(self, name, module_name, attrs=(), extras=(), dist=None):\n if not MODULE(module_name):\n raise ValueError("Invalid module name", module_name)\n self.name = name\n self.module_name = module_name\n self.attrs = tuple(attrs)\n self.extras = tuple(extras)\n self.dist = dist\n\n def __str__(self):\n s = "%s = %s" % (self.name, self.module_name)\n if self.attrs:\n s += \':\' + \'.\'.join(self.attrs)\n if self.extras:\n s += \' [%s]\' % \',\'.join(self.extras)\n return s\n\n def __repr__(self):\n return "EntryPoint.parse(%r)" % str(self)\n\n def load(self, require=True, *args, **kwargs):\n """\n Require packages for this EntryPoint, then resolve it.\n """\n if not require or args or kwargs:\n warnings.warn(\n "Parameters to load are deprecated. 
Call .resolve and "\n ".require separately.",\n PkgResourcesDeprecationWarning,\n stacklevel=2,\n )\n if require:\n self.require(*args, **kwargs)\n return self.resolve()\n\n def resolve(self):\n """\n Resolve the entry point from its module and attrs.\n """\n module = __import__(self.module_name, fromlist=[\'__name__\'], level=0)\n try:\n return functools.reduce(getattr, self.attrs, module)\n except AttributeError as exc:\n raise ImportError(str(exc))\n\n def require(self, env=None, installer=None):\n if self.extras and not self.dist:\n raise UnknownExtra("Can\'t require() without a distribution", self)\n\n # Get the requirements for this entry point with all its extras and\n # then resolve them. We have to pass `extras` along when resolving so\n # that the working set knows what extras we want. Otherwise, for\n # dist-info distributions, the working set will assume that the\n # requirements for that extra are purely optional and skip over them.\n reqs = self.dist.requires(self.extras)\n items = working_set.resolve(reqs, env, installer, extras=self.extras)\n list(map(working_set.add, items))\n\n pattern = re.compile(\n r\'\\s*\'\n r\'(?P.+?)\\s*\'\n r\'=\\s*\'\n r\'(?P[\\w.]+)\\s*\'\n r\'(:\\s*(?P[\\w.]+))?\\s*\'\n r\'(?P\\[.*\\])?\\s*$\'\n )\n\n @classmethod\n def parse(cls, src, dist=None):\n """Parse a single entry point from string `src`\n\n Entry point syntax follows the form::\n\n name = some.module:some.attr [extra1, extra2]\n\n The entry name and module name are required, but the ``:attrs`` and\n ``[extras]`` parts are optional\n """\n m = cls.pattern.match(src)\n if not m:\n msg = "EntryPoint must be in \'name=module:attrs [extras]\' format"\n raise ValueError(msg, src)\n res = m.groupdict()\n extras = cls._parse_extras(res[\'extras\'])\n attrs = res[\'attr\'].split(\'.\') if res[\'attr\'] else ()\n return cls(res[\'name\'], res[\'module\'], attrs, extras, dist)\n\n @classmethod\n def _parse_extras(cls, extras_spec):\n if not extras_spec:\n return ()\n req = Requirement.parse(\'x\' + extras_spec)\n if req.specs:\n raise ValueError()\n return req.extras\n\n @classmethod\n def parse_group(cls, group, lines, dist=None):\n """Parse an entry point group"""\n if not MODULE(group):\n raise ValueError("Invalid group name", group)\n this = {}\n for line in yield_lines(lines):\n ep = cls.parse(line, dist)\n if ep.name in this:\n raise ValueError("Duplicate entry point", group, ep.name)\n this[ep.name] = ep\n return this\n\n @classmethod\n def parse_map(cls, data, dist=None):\n """Parse a map of entry point groups"""\n if isinstance(data, dict):\n data = data.items()\n else:\n data = split_sections(data)\n maps = {}\n for group, lines in data:\n if group is None:\n if not lines:\n continue\n raise ValueError("Entry points must be listed in groups")\n group = group.strip()\n if group in maps:\n raise ValueError("Duplicate group name", group)\n maps[group] = cls.parse_group(group, lines, dist)\n return maps\n\n\ndef _version_from_file(lines):\n """\n Given an iterable of lines from a Metadata file, return\n the value of the Version field, if present, or None otherwise.\n """\n def is_version_line(line):\n return line.lower().startswith(\'version:\')\n version_lines = filter(is_version_line, lines)\n line = next(iter(version_lines), \'\')\n _, _, value = line.partition(\':\')\n return safe_version(value.strip()) or None\n\n\nclass Distribution:\n """Wrap an actual or potential sys.path entry w/metadata"""\n PKG_INFO = \'PKG-INFO\'\n\n def __init__(\n self, location=None, metadata=None, 
project_name=None,\n version=None, py_version=PY_MAJOR, platform=None,\n precedence=EGG_DIST):\n self.project_name = safe_name(project_name or \'Unknown\')\n if version is not None:\n self._version = safe_version(version)\n self.py_version = py_version\n self.platform = platform\n self.location = location\n self.precedence = precedence\n self._provider = metadata or empty_provider\n\n @classmethod\n def from_location(cls, location, basename, metadata=None, **kw):\n project_name, version, py_version, platform = [None] * 4\n basename, ext = os.path.splitext(basename)\n if ext.lower() in _distributionImpl:\n cls = _distributionImpl[ext.lower()]\n\n match = EGG_NAME(basename)\n if match:\n project_name, version, py_version, platform = match.group(\n \'name\', \'ver\', \'pyver\', \'plat\'\n )\n return cls(\n location, metadata, project_name=project_name, version=version,\n py_version=py_version, platform=platform, **kw\n )._reload_version()\n\n def _reload_version(self):\n return self\n\n @property\n def hashcmp(self):\n return (\n self.parsed_version,\n self.precedence,\n self.key,\n self.location,\n self.py_version or \'\',\n self.platform or \'\',\n )\n\n def __hash__(self):\n return hash(self.hashcmp)\n\n def __lt__(self, other):\n return self.hashcmp < other.hashcmp\n\n def __le__(self, other):\n return self.hashcmp <= other.hashcmp\n\n def __gt__(self, other):\n return self.hashcmp > other.hashcmp\n\n def __ge__(self, other):\n return self.hashcmp >= other.hashcmp\n\n def __eq__(self, other):\n if not isinstance(other, self.__class__):\n # It\'s not a Distribution, so they are not equal\n return False\n return self.hashcmp == other.hashcmp\n\n def __ne__(self, other):\n return not self == other\n\n # These properties have to be lazy so that we don\'t have to load any\n # metadata until/unless it\'s actually needed. (i.e., some distributions\n # may not know their name or version without loading PKG-INFO)\n\n @property\n def key(self):\n try:\n return self._key\n except AttributeError:\n self._key = key = self.project_name.lower()\n return key\n\n @property\n def parsed_version(self):\n if not hasattr(self, "_parsed_version"):\n self._parsed_version = parse_version(self.version)\n\n return self._parsed_version\n\n def _warn_legacy_version(self):\n LV = packaging.version.LegacyVersion\n is_legacy = isinstance(self._parsed_version, LV)\n if not is_legacy:\n return\n\n # While an empty version is technically a legacy version and\n # is not a valid PEP 440 version, it\'s also unlikely to\n # actually come from someone and instead it is more likely that\n # it comes from setuptools attempting to parse a filename and\n # including it in the list. So for that we\'ll gate this warning\n # on if the version is anything at all or not.\n if not self.version:\n return\n\n tmpl = textwrap.dedent("""\n \'{project_name} ({version})\' is being parsed as a legacy,\n non PEP 440,\n version. You may find odd behavior and sort order.\n In particular it will be sorted as less than 0.0. 
It\n is recommended to migrate to PEP 440 compatible\n versions.\n """).strip().replace(\'\\n\', \' \')\n\n warnings.warn(tmpl.format(**vars(self)), PEP440Warning)\n\n @property\n def version(self):\n try:\n return self._version\n except AttributeError:\n version = self._get_version()\n if version is None:\n path = self._get_metadata_path_for_display(self.PKG_INFO)\n msg = (\n "Missing \'Version:\' header and/or {} file at path: {}"\n ).format(self.PKG_INFO, path)\n raise ValueError(msg, self)\n\n return version\n\n @property\n def _dep_map(self):\n """\n A map of extra to its list of (direct) requirements\n for this distribution, including the null extra.\n """\n try:\n return self.__dep_map\n except AttributeError:\n self.__dep_map = self._filter_extras(self._build_dep_map())\n return self.__dep_map\n\n @staticmethod\n def _filter_extras(dm):\n """\n Given a mapping of extras to dependencies, strip off\n environment markers and filter out any dependencies\n not matching the markers.\n """\n for extra in list(filter(None, dm)):\n new_extra = extra\n reqs = dm.pop(extra)\n new_extra, _, marker = extra.partition(\':\')\n fails_marker = marker and (\n invalid_marker(marker)\n or not evaluate_marker(marker)\n )\n if fails_marker:\n reqs = []\n new_extra = safe_extra(new_extra) or None\n\n dm.setdefault(new_extra, []).extend(reqs)\n return dm\n\n def _build_dep_map(self):\n dm = {}\n for name in \'requires.txt\', \'depends.txt\':\n for extra, reqs in split_sections(self._get_metadata(name)):\n dm.setdefault(extra, []).extend(parse_requirements(reqs))\n return dm\n\n def requires(self, extras=()):\n """List of Requirements needed for this distro if `extras` are used"""\n dm = self._dep_map\n deps = []\n deps.extend(dm.get(None, ()))\n for ext in extras:\n try:\n deps.extend(dm[safe_extra(ext)])\n except KeyError:\n raise UnknownExtra(\n "%s has no such extra feature %r" % (self, ext)\n )\n return deps\n\n def _get_metadata_path_for_display(self, name):\n """\n Return the path to the given metadata file, if available.\n """\n try:\n # We need to access _get_metadata_path() on the provider object\n # directly rather than through this class\'s __getattr__()\n # since _get_metadata_path() is marked private.\n path = self._provider._get_metadata_path(name)\n\n # Handle exceptions e.g. 
in case the distribution\'s metadata\n # provider doesn\'t support _get_metadata_path().\n except Exception:\n return \'[could not detect]\'\n\n return path\n\n def _get_metadata(self, name):\n if self.has_metadata(name):\n for line in self.get_metadata_lines(name):\n yield line\n\n def _get_version(self):\n lines = self._get_metadata(self.PKG_INFO)\n version = _version_from_file(lines)\n\n return version\n\n def activate(self, path=None, replace=False):\n """Ensure distribution is importable on `path` (default=sys.path)"""\n if path is None:\n path = sys.path\n self.insert_on(path, replace=replace)\n if path is sys.path:\n fixup_namespace_packages(self.location)\n for pkg in self._get_metadata(\'namespace_packages.txt\'):\n if pkg in sys.modules:\n declare_namespace(pkg)\n\n def egg_name(self):\n """Return what this distribution\'s standard .egg filename should be"""\n filename = "%s-%s-py%s" % (\n to_filename(self.project_name), to_filename(self.version),\n self.py_version or PY_MAJOR\n )\n\n if self.platform:\n filename += \'-\' + self.platform\n return filename\n\n def __repr__(self):\n if self.location:\n return "%s (%s)" % (self, self.location)\n else:\n return str(self)\n\n def __str__(self):\n try:\n version = getattr(self, \'version\', None)\n except ValueError:\n version = None\n version = version or "[unknown version]"\n return "%s %s" % (self.project_name, version)\n\n def __getattr__(self, attr):\n """Delegate all unrecognized public attributes to .metadata provider"""\n if attr.startswith(\'_\'):\n raise AttributeError(attr)\n return getattr(self._provider, attr)\n\n def __dir__(self):\n return list(\n set(super(Distribution, self).__dir__())\n | set(\n attr for attr in self._provider.__dir__()\n if not attr.startswith(\'_\')\n )\n )\n\n if not hasattr(object, \'__dir__\'):\n # python 2.7 not supported\n del __dir__\n\n @classmethod\n def from_filename(cls, filename, metadata=None, **kw):\n return cls.from_location(\n _normalize_cached(filename), os.path.basename(filename), metadata,\n **kw\n )\n\n def as_requirement(self):\n """Return a ``Requirement`` that matches this distribution exactly"""\n if isinstance(self.parsed_version, packaging.version.Version):\n spec = "%s==%s" % (self.project_name, self.parsed_version)\n else:\n spec = "%s===%s" % (self.project_name, self.parsed_version)\n\n return Requirement.parse(spec)\n\n def load_entry_point(self, group, name):\n """Return the `name` entry point of `group` or raise ImportError"""\n ep = self.get_entry_info(group, name)\n if ep is None:\n raise ImportError("Entry point %r not found" % ((group, name),))\n return ep.load()\n\n def get_entry_map(self, group=None):\n """Return the entry point map for `group`, or the full entry map"""\n try:\n ep_map = self._ep_map\n except AttributeError:\n ep_map = self._ep_map = EntryPoint.parse_map(\n self._get_metadata(\'entry_points.txt\'), self\n )\n if group is not None:\n return ep_map.get(group, {})\n return ep_map\n\n def get_entry_info(self, group, name):\n """Return the EntryPoint object for `group`+`name`, or ``None``"""\n return self.get_entry_map(group).get(name)\n\n def insert_on(self, path, loc=None, replace=False):\n """Ensure self.location is on path\n\n If replace=False (default):\n - If location is already in path anywhere, do nothing.\n - Else:\n - If it\'s an egg and its parent directory is on path,\n insert just ahead of the parent.\n - Else: add to the end of path.\n If replace=True:\n - If location is already on path anywhere (not eggs)\n or higher priority than its 
parent (eggs)\n do nothing.\n - Else:\n - If it\'s an egg and its parent directory is on path,\n insert just ahead of the parent,\n removing any lower-priority entries.\n - Else: add it to the front of path.\n """\n\n loc = loc or self.location\n if not loc:\n return\n\n nloc = _normalize_cached(loc)\n bdir = os.path.dirname(nloc)\n npath = [(p and _normalize_cached(p) or p) for p in path]\n\n for p, item in enumerate(npath):\n if item == nloc:\n if replace:\n break\n else:\n # don\'t modify path (even removing duplicates) if\n # found and not replace\n return\n elif item == bdir and self.precedence == EGG_DIST:\n # if it\'s an .egg, give it precedence over its directory\n # UNLESS it\'s already been added to sys.path and replace=False\n if (not replace) and nloc in npath[p:]:\n return\n if path is sys.path:\n self.check_version_conflict()\n path.insert(p, loc)\n npath.insert(p, nloc)\n break\n else:\n if path is sys.path:\n self.check_version_conflict()\n if replace:\n path.insert(0, loc)\n else:\n path.append(loc)\n return\n\n # p is the spot where we found or inserted loc; now remove duplicates\n while True:\n try:\n np = npath.index(nloc, p + 1)\n except ValueError:\n break\n else:\n del npath[np], path[np]\n # ha!\n p = np\n\n return\n\n def check_version_conflict(self):\n if self.key == \'setuptools\':\n # ignore the inevitable setuptools self-conflicts :(\n return\n\n nsp = dict.fromkeys(self._get_metadata(\'namespace_packages.txt\'))\n loc = normalize_path(self.location)\n for modname in self._get_metadata(\'top_level.txt\'):\n if (modname not in sys.modules or modname in nsp\n or modname in _namespace_packages):\n continue\n if modname in (\'pkg_resources\', \'setuptools\', \'site\'):\n continue\n fn = getattr(sys.modules[modname], \'__file__\', None)\n if fn and (normalize_path(fn).startswith(loc) or\n fn.startswith(self.location)):\n continue\n issue_warning(\n "Module %s was already imported from %s, but %s is being added"\n " to sys.path" % (modname, fn, self.location),\n )\n\n def has_version(self):\n try:\n self.version\n except ValueError:\n issue_warning("Unbuilt egg for " + repr(self))\n return False\n return True\n\n def clone(self, **kw):\n """Copy this distribution, substituting in any changed keyword args"""\n names = \'project_name version py_version platform location precedence\'\n for attr in names.split():\n kw.setdefault(attr, getattr(self, attr, None))\n kw.setdefault(\'metadata\', self._provider)\n return self.__class__(**kw)\n\n @property\n def extras(self):\n return [dep for dep in self._dep_map if dep]\n\n\nclass EggInfoDistribution(Distribution):\n def _reload_version(self):\n """\n Packages installed by distutils (e.g. numpy or scipy),\n which uses an old safe_version, and so\n their version numbers can get mangled when\n converted to filenames (e.g., 1.11.0.dev0+2329eae to\n 1.11.0.dev0_2329eae). 
These distributions will not be\n parsed properly\n downstream by Distribution and safe_version, so\n take an extra step and try to get the version number from\n the metadata file itself instead of the filename.\n """\n md_version = self._get_version()\n if md_version:\n self._version = md_version\n return self\n\n\nclass DistInfoDistribution(Distribution):\n """\n Wrap an actual or potential sys.path entry\n w/metadata, .dist-info style.\n """\n PKG_INFO = \'METADATA\'\n EQEQ = re.compile(r"([\\(,])\\s*(\\d.*?)\\s*([,\\)])")\n\n @property\n def _parsed_pkg_info(self):\n """Parse and cache metadata"""\n try:\n return self._pkg_info\n except AttributeError:\n metadata = self.get_metadata(self.PKG_INFO)\n self._pkg_info = email.parser.Parser().parsestr(metadata)\n return self._pkg_info\n\n @property\n def _dep_map(self):\n try:\n return self.__dep_map\n except AttributeError:\n self.__dep_map = self._compute_dependencies()\n return self.__dep_map\n\n def _compute_dependencies(self):\n """Recompute this distribution\'s dependencies."""\n dm = self.__dep_map = {None: []}\n\n reqs = []\n # Including any condition expressions\n for req in self._parsed_pkg_info.get_all(\'Requires-Dist\') or []:\n reqs.extend(parse_requirements(req))\n\n def reqs_for_extra(extra):\n for req in reqs:\n if not req.marker or req.marker.evaluate({\'extra\': extra}):\n yield req\n\n common = frozenset(reqs_for_extra(None))\n dm[None].extend(common)\n\n for extra in self._parsed_pkg_info.get_all(\'Provides-Extra\') or []:\n s_extra = safe_extra(extra.strip())\n dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)\n\n return dm\n\n\n_distributionImpl = {\n \'.egg\': Distribution,\n \'.egg-info\': EggInfoDistribution,\n \'.dist-info\': DistInfoDistribution,\n}\n\n\ndef issue_warning(*args, **kw):\n level = 1\n g = globals()\n try:\n # find the first stack frame that is *not* code in\n # the pkg_resources module, to use for the warning\n while sys._getframe(level).f_globals is g:\n level += 1\n except ValueError:\n pass\n warnings.warn(stacklevel=level + 1, *args, **kw)\n\n\nclass RequirementParseError(ValueError):\n def __str__(self):\n return \' \'.join(self.args)\n\n\ndef parse_requirements(strs):\n """Yield ``Requirement`` objects for each specification in `strs`\n\n `strs` must be a string, or a (possibly-nested) iterable thereof.\n """\n # create a steppable iterator, so we can handle \\-continuations\n lines = iter(yield_lines(strs))\n\n for line in lines:\n # Drop comments -- a hash without a space may be in a URL.\n if \' #\' in line:\n line = line[:line.find(\' #\')]\n # If there is a line continuation, drop it, and append the next line.\n if line.endswith(\'\\\\\'):\n line = line[:-2].strip()\n try:\n line += next(lines)\n except StopIteration:\n return\n yield Requirement(line)\n\n\nclass Requirement(packaging.requirements.Requirement):\n def __init__(self, requirement_string):\n """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""\n try:\n super(Requirement, self).__init__(requirement_string)\n except packaging.requirements.InvalidRequirement as e:\n raise RequirementParseError(str(e))\n self.unsafe_name = self.name\n project_name = safe_name(self.name)\n self.project_name, self.key = project_name, project_name.lower()\n self.specs = [\n (spec.operator, spec.version) for spec in self.specifier]\n self.extras = tuple(map(safe_extra, self.extras))\n self.hashCmp = (\n self.key,\n self.url,\n self.specifier,\n frozenset(self.extras),\n str(self.marker) if self.marker else None,\n )\n 
self.__hash = hash(self.hashCmp)\n\n def __eq__(self, other):\n return (\n isinstance(other, Requirement) and\n self.hashCmp == other.hashCmp\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __contains__(self, item):\n if isinstance(item, Distribution):\n if item.key != self.key:\n return False\n\n item = item.version\n\n # Allow prereleases always in order to match the previous behavior of\n # this method. In the future this should be smarter and follow PEP 440\n # more accurately.\n return self.specifier.contains(item, prereleases=True)\n\n def __hash__(self):\n return self.__hash\n\n def __repr__(self):\n return "Requirement.parse(%r)" % str(self)\n\n @staticmethod\n def parse(s):\n req, = parse_requirements(s)\n return req\n\n\ndef _always_object(classes):\n """\n Ensure object appears in the mro even\n for old-style classes.\n """\n if object not in classes:\n return classes + (object,)\n return classes\n\n\ndef _find_adapter(registry, ob):\n """Return an adapter factory for `ob` from `registry`"""\n types = _always_object(inspect.getmro(getattr(ob, \'__class__\', type(ob))))\n for t in types:\n if t in registry:\n return registry[t]\n\n\ndef ensure_directory(path):\n """Ensure that the parent directory of `path` exists"""\n dirname = os.path.dirname(path)\n py31compat.makedirs(dirname, exist_ok=True)\n\n\ndef _bypass_ensure_directory(path):\n """Sandbox-bypassing version of ensure_directory()"""\n if not WRITE_SUPPORT:\n raise IOError(\'"os.mkdir" not supported on this platform.\')\n dirname, filename = split(path)\n if dirname and filename and not isdir(dirname):\n _bypass_ensure_directory(dirname)\n try:\n mkdir(dirname, 0o755)\n except FileExistsError:\n pass\n\n\ndef split_sections(s):\n """Split a string or iterable thereof into (section, content) pairs\n\n Each ``section`` is a stripped version of the section header ("[section]")\n and each ``content`` is a list of stripped lines excluding blank lines and\n comment-only lines. If there are any such lines before the first section\n header, they\'re returned in a first ``section`` of ``None``.\n """\n section = None\n content = []\n for line in yield_lines(s):\n if line.startswith("["):\n if line.endswith("]"):\n if section or content:\n yield section, content\n section = line[1:-1].strip()\n content = []\n else:\n raise ValueError("Invalid section heading", line)\n else:\n content.append(line)\n\n # wrap up last segment\n yield section, content\n\n\ndef _mkstemp(*args, **kw):\n old_open = os.open\n try:\n # temporarily bypass sandboxing\n os.open = os_open\n return tempfile.mkstemp(*args, **kw)\n finally:\n # and then put it back\n os.open = old_open\n\n\n# Silence the PEP440Warning by default, so that end users don\'t get hit by it\n# randomly just because they use pkg_resources. 
We want to append the rule\n# because we want earlier uses of filterwarnings to take precedence over this\n# one.\nwarnings.filterwarnings("ignore", category=PEP440Warning, append=True)\n\n\n# from jaraco.functools 1.3\ndef _call_aside(f, *args, **kwargs):\n f(*args, **kwargs)\n return f\n\n\n@_call_aside\ndef _initialize(g=globals()):\n "Set up global resource manager (deliberately not state-saved)"\n manager = ResourceManager()\n g[\'_manager\'] = manager\n g.update(\n (name, getattr(manager, name))\n for name in dir(manager)\n if not name.startswith(\'_\')\n )\n\n\n@_call_aside\ndef _initialize_master_working_set():\n """\n Prepare the master working set and make the ``require()``\n API available.\n\n This function has explicit effects on the global state\n of pkg_resources. It is intended to be invoked once at\n the initialization of this module.\n\n Invocation by other packages is unsupported and done\n at their own risk.\n """\n working_set = WorkingSet._build_master()\n _declare_state(\'object\', working_set=working_set)\n\n require = working_set.require\n iter_entry_points = working_set.iter_entry_points\n add_activation_listener = working_set.subscribe\n run_script = working_set.run_script\n # backward compatibility\n run_main = run_script\n # Activate all distributions already on sys.path with replace=False and\n # ensure that all distributions added to the working set in the future\n # (e.g. by calling ``require()``) will get activated as well,\n # with higher priority (replace=True).\n tuple(\n dist.activate(replace=False)\n for dist in working_set\n )\n add_activation_listener(\n lambda dist: dist.activate(replace=True),\n existing=False,\n )\n working_set.entries = []\n # match order\n list(map(working_set.add_entry, sys.path))\n globals().update(locals())\n\n\nclass PkgResourcesDeprecationWarning(Warning):\n """\n Base class for warning about deprecations in ``pkg_resources``\n\n This class is not derived from ``DeprecationWarning``, and as such is\n visible by default.\n """\n') + __stickytape_write_module('pkg_resources/extern/__init__.py', b'import sys\n\n\nclass VendorImporter:\n """\n A PEP 302 meta path importer for finding optionally-vendored\n or otherwise naturally-installed packages from root_name.\n """\n\n def __init__(self, root_name, vendored_names=(), vendor_pkg=None):\n self.root_name = root_name\n self.vendored_names = set(vendored_names)\n self.vendor_pkg = vendor_pkg or root_name.replace(\'extern\', \'_vendor\')\n\n @property\n def search_path(self):\n """\n Search first the vendor package then as a natural package.\n """\n yield self.vendor_pkg + \'.\'\n yield \'\'\n\n def find_module(self, fullname, path=None):\n """\n Return self when fullname starts with root_name and the\n target module is one vendored through this importer.\n """\n root, base, target = fullname.partition(self.root_name + \'.\')\n if root:\n return\n if not any(map(target.startswith, self.vendored_names)):\n return\n return self\n\n def load_module(self, fullname):\n """\n Iterate over the search path to locate and load fullname.\n """\n root, base, target = fullname.partition(self.root_name + \'.\')\n for prefix in self.search_path:\n try:\n extant = prefix + target\n __import__(extant)\n mod = sys.modules[extant]\n sys.modules[fullname] = mod\n return mod\n except ImportError:\n pass\n else:\n raise ImportError(\n "The \'{target}\' package is required; "\n "normally this is bundled with this package so if you get "\n "this warning, consult the packager of your "\n 
"distribution.".format(**locals())\n )\n\n def install(self):\n """\n Install this importer into sys.meta_path if not already present.\n """\n if self not in sys.meta_path:\n sys.meta_path.append(self)\n\n\nnames = \'packaging\', \'pyparsing\', \'six\', \'appdirs\'\nVendorImporter(__name__, names).install()\n') + __stickytape_write_module('pkg_resources/py31compat.py', b'import os\nimport errno\nimport sys\n\nfrom .extern import six\n\n\ndef _makedirs_31(path, exist_ok=False):\n try:\n os.makedirs(path)\n except OSError as exc:\n if not exist_ok or exc.errno != errno.EEXIST:\n raise\n\n\n# rely on compatibility behavior until mode considerations\n# and exists_ok considerations are disentangled.\n# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663\nneeds_makedirs = (\n six.PY2 or\n (3, 4) <= sys.version_info < (3, 4, 1)\n)\nmakedirs = _makedirs_31 if needs_makedirs else os.makedirs\n') + __stickytape_write_module('pygments/lexers/__init__.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.lexers\n ~~~~~~~~~~~~~~~\n\n Pygments lexers.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nimport re\nimport sys\nimport types\nimport fnmatch\nfrom os.path import basename\n\nfrom pygments.lexers._mapping import LEXERS\nfrom pygments.modeline import get_filetype_from_buffer\nfrom pygments.plugin import find_plugin_lexers\nfrom pygments.util import ClassNotFound, guess_decode\n\nCOMPAT = {\n \'Python3Lexer\': \'PythonLexer\',\n \'Python3TracebackLexer\': \'PythonTracebackLexer\',\n}\n\n__all__ = [\'get_lexer_by_name\', \'get_lexer_for_filename\', \'find_lexer_class\',\n \'guess_lexer\', \'load_lexer_from_file\'] + list(LEXERS) + list(COMPAT)\n\n_lexer_cache = {}\n_pattern_cache = {}\n\n\ndef _fn_matches(fn, glob):\n """Return whether the supplied file name fn matches pattern filename."""\n if glob not in _pattern_cache:\n pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))\n return pattern.match(fn)\n return _pattern_cache[glob].match(fn)\n\n\ndef _load_lexers(module_name):\n """Load a lexer (and all others in the module too)."""\n mod = __import__(module_name, None, None, [\'__all__\'])\n for lexer_name in mod.__all__:\n cls = getattr(mod, lexer_name)\n _lexer_cache[cls.name] = cls\n\n\ndef get_all_lexers():\n """Return a generator of tuples in the form ``(name, aliases,\n filenames, mimetypes)`` of all know lexers.\n """\n for item in LEXERS.values():\n yield item[1:]\n for lexer in find_plugin_lexers():\n yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes\n\n\ndef find_lexer_class(name):\n """Lookup a lexer class by name.\n\n Return None if not found.\n """\n if name in _lexer_cache:\n return _lexer_cache[name]\n # lookup builtin lexers\n for module_name, lname, aliases, _, _ in LEXERS.values():\n if name == lname:\n _load_lexers(module_name)\n return _lexer_cache[name]\n # continue with lexers from setuptools entrypoints\n for cls in find_plugin_lexers():\n if cls.name == name:\n return cls\n\n\ndef find_lexer_class_by_name(_alias):\n """Lookup a lexer class by alias.\n\n Like `get_lexer_by_name`, but does not instantiate the class.\n\n .. 
versionadded:: 2.2\n """\n if not _alias:\n raise ClassNotFound(\'no lexer for alias %r found\' % _alias)\n # lookup builtin lexers\n for module_name, name, aliases, _, _ in LEXERS.values():\n if _alias.lower() in aliases:\n if name not in _lexer_cache:\n _load_lexers(module_name)\n return _lexer_cache[name]\n # continue with lexers from setuptools entrypoints\n for cls in find_plugin_lexers():\n if _alias.lower() in cls.aliases:\n return cls\n raise ClassNotFound(\'no lexer for alias %r found\' % _alias)\n\n\ndef get_lexer_by_name(_alias, **options):\n """Get a lexer by an alias.\n\n Raises ClassNotFound if not found.\n """\n if not _alias:\n raise ClassNotFound(\'no lexer for alias %r found\' % _alias)\n\n # lookup builtin lexers\n for module_name, name, aliases, _, _ in LEXERS.values():\n if _alias.lower() in aliases:\n if name not in _lexer_cache:\n _load_lexers(module_name)\n return _lexer_cache[name](**options)\n # continue with lexers from setuptools entrypoints\n for cls in find_plugin_lexers():\n if _alias.lower() in cls.aliases:\n return cls(**options)\n raise ClassNotFound(\'no lexer for alias %r found\' % _alias)\n\n\ndef load_lexer_from_file(filename, lexername="CustomLexer", **options):\n """Load a lexer from a file.\n\n This method expects a file located relative to the current working\n directory, which contains a Lexer class. By default, it expects the\n Lexer to be name CustomLexer; you can specify your own class name\n as the second argument to this function.\n\n Users should be very careful with the input, because this method\n is equivalent to running eval on the input file.\n\n Raises ClassNotFound if there are any problems importing the Lexer.\n\n .. versionadded:: 2.2\n """\n try:\n # This empty dict will contain the namespace for the exec\'d file\n custom_namespace = {}\n with open(filename, \'rb\') as f:\n exec(f.read(), custom_namespace)\n # Retrieve the class `lexername` from that namespace\n if lexername not in custom_namespace:\n raise ClassNotFound(\'no valid %s class found in %s\' %\n (lexername, filename))\n lexer_class = custom_namespace[lexername]\n # And finally instantiate it with the options\n return lexer_class(**options)\n except IOError as err:\n raise ClassNotFound(\'cannot read %s: %s\' % (filename, err))\n except ClassNotFound:\n raise\n except Exception as err:\n raise ClassNotFound(\'error when loading custom lexer: %s\' % err)\n\n\ndef find_lexer_class_for_filename(_fn, code=None):\n """Get a lexer for a filename.\n\n If multiple lexers match the filename pattern, use ``analyse_text()`` to\n figure out which one is more appropriate.\n\n Returns None if not found.\n """\n matches = []\n fn = basename(_fn)\n for modname, name, _, filenames, _ in LEXERS.values():\n for filename in filenames:\n if _fn_matches(fn, filename):\n if name not in _lexer_cache:\n _load_lexers(modname)\n matches.append((_lexer_cache[name], filename))\n for cls in find_plugin_lexers():\n for filename in cls.filenames:\n if _fn_matches(fn, filename):\n matches.append((cls, filename))\n\n if isinstance(code, bytes):\n # decode it, since all analyse_text functions expect unicode\n code = guess_decode(code)\n\n def get_rating(info):\n cls, filename = info\n # explicit patterns get a bonus\n bonus = \'*\' not in filename and 0.5 or 0\n # The class _always_ defines analyse_text because it\'s included in\n # the Lexer class. The default implementation returns None which\n # gets turned into 0.0. 
Run scripts/detect_missing_analyse_text.py\n # to find lexers which need it overridden.\n if code:\n return cls.analyse_text(code) + bonus, cls.__name__\n return cls.priority + bonus, cls.__name__\n\n if matches:\n matches.sort(key=get_rating)\n # print "Possible lexers, after sort:", matches\n return matches[-1][0]\n\n\ndef get_lexer_for_filename(_fn, code=None, **options):\n """Get a lexer for a filename.\n\n If multiple lexers match the filename pattern, use ``analyse_text()`` to\n figure out which one is more appropriate.\n\n Raises ClassNotFound if not found.\n """\n res = find_lexer_class_for_filename(_fn, code)\n if not res:\n raise ClassNotFound(\'no lexer for filename %r found\' % _fn)\n return res(**options)\n\n\ndef get_lexer_for_mimetype(_mime, **options):\n """Get a lexer for a mimetype.\n\n Raises ClassNotFound if not found.\n """\n for modname, name, _, _, mimetypes in LEXERS.values():\n if _mime in mimetypes:\n if name not in _lexer_cache:\n _load_lexers(modname)\n return _lexer_cache[name](**options)\n for cls in find_plugin_lexers():\n if _mime in cls.mimetypes:\n return cls(**options)\n raise ClassNotFound(\'no lexer for mimetype %r found\' % _mime)\n\n\ndef _iter_lexerclasses(plugins=True):\n """Return an iterator over all lexer classes."""\n for key in sorted(LEXERS):\n module_name, name = LEXERS[key][:2]\n if name not in _lexer_cache:\n _load_lexers(module_name)\n yield _lexer_cache[name]\n if plugins:\n yield from find_plugin_lexers()\n\n\ndef guess_lexer_for_filename(_fn, _text, **options):\n """\n Lookup all lexers that handle those filenames primary (``filenames``)\n or secondary (``alias_filenames``). Then run a text analysis for those\n lexers and choose the best result.\n\n usage::\n\n >>> from pygments.lexers import guess_lexer_for_filename\n >>> guess_lexer_for_filename(\'hello.html\', \'<%= @foo %>\')\n \n >>> guess_lexer_for_filename(\'hello.html\', \'
{{ title|e }}
\')\n \n >>> guess_lexer_for_filename(\'style.css\', \'a { color: }\')\n \n """\n fn = basename(_fn)\n primary = {}\n matching_lexers = set()\n for lexer in _iter_lexerclasses():\n for filename in lexer.filenames:\n if _fn_matches(fn, filename):\n matching_lexers.add(lexer)\n primary[lexer] = True\n for filename in lexer.alias_filenames:\n if _fn_matches(fn, filename):\n matching_lexers.add(lexer)\n primary[lexer] = False\n if not matching_lexers:\n raise ClassNotFound(\'no lexer for filename %r found\' % fn)\n if len(matching_lexers) == 1:\n return matching_lexers.pop()(**options)\n result = []\n for lexer in matching_lexers:\n rv = lexer.analyse_text(_text)\n if rv == 1.0:\n return lexer(**options)\n result.append((rv, lexer))\n\n def type_sort(t):\n # sort by:\n # - analyse score\n # - is primary filename pattern?\n # - priority\n # - last resort: class name\n return (t[0], primary[t[1]], t[1].priority, t[1].__name__)\n result.sort(key=type_sort)\n\n return result[-1][1](**options)\n\n\ndef guess_lexer(_text, **options):\n """Guess a lexer by strong distinctions in the text (eg, shebang)."""\n\n if not isinstance(_text, str):\n inencoding = options.get(\'inencoding\', options.get(\'encoding\'))\n if inencoding:\n _text = _text.decode(inencoding or \'utf8\')\n else:\n _text, _ = guess_decode(_text)\n\n # try to get a vim modeline first\n ft = get_filetype_from_buffer(_text)\n\n if ft is not None:\n try:\n return get_lexer_by_name(ft, **options)\n except ClassNotFound:\n pass\n\n best_lexer = [0.0, None]\n for lexer in _iter_lexerclasses():\n rv = lexer.analyse_text(_text)\n if rv == 1.0:\n return lexer(**options)\n if rv > best_lexer[0]:\n best_lexer[:] = (rv, lexer)\n if not best_lexer[0] or best_lexer[1] is None:\n raise ClassNotFound(\'no lexer matching the text found\')\n return best_lexer[1](**options)\n\n\nclass _automodule(types.ModuleType):\n """Automatically import lexers."""\n\n def __getattr__(self, name):\n info = LEXERS.get(name)\n if info:\n _load_lexers(info[0])\n cls = _lexer_cache[info[1]]\n setattr(self, name, cls)\n return cls\n if name in COMPAT:\n return getattr(self, COMPAT[name])\n raise AttributeError(name)\n\n\noldmod = sys.modules[__name__]\nnewmod = _automodule(__name__)\nnewmod.__dict__.update(oldmod.__dict__)\nsys.modules[__name__] = newmod\ndel newmod.newmod, newmod.oldmod, newmod.sys, newmod.types\n') + __stickytape_write_module('pygments/lexers/_mapping.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.lexers._mapping\n ~~~~~~~~~~~~~~~~~~~~~~~~\n\n Lexer mapping definitions. This file is generated by itself. 
Everytime\n you change something on a builtin lexer definition, run this script from\n the lexers folder to update it.\n\n Do not alter the LEXERS dictionary by hand.\n\n :copyright: Copyright 2006-2014, 2016 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nLEXERS = {\n \'ABAPLexer\': (\'pygments.lexers.business\', \'ABAP\', (\'abap\',), (\'*.abap\', \'*.ABAP\'), (\'text/x-abap\',)),\n \'APLLexer\': (\'pygments.lexers.apl\', \'APL\', (\'apl\',), (\'*.apl\',), ()),\n \'AbnfLexer\': (\'pygments.lexers.grammar_notation\', \'ABNF\', (\'abnf\',), (\'*.abnf\',), (\'text/x-abnf\',)),\n \'ActionScript3Lexer\': (\'pygments.lexers.actionscript\', \'ActionScript 3\', (\'as3\', \'actionscript3\'), (\'*.as\',), (\'application/x-actionscript3\', \'text/x-actionscript3\', \'text/actionscript3\')),\n \'ActionScriptLexer\': (\'pygments.lexers.actionscript\', \'ActionScript\', (\'as\', \'actionscript\'), (\'*.as\',), (\'application/x-actionscript\', \'text/x-actionscript\', \'text/actionscript\')),\n \'AdaLexer\': (\'pygments.lexers.pascal\', \'Ada\', (\'ada\', \'ada95\', \'ada2005\'), (\'*.adb\', \'*.ads\', \'*.ada\'), (\'text/x-ada\',)),\n \'AdlLexer\': (\'pygments.lexers.archetype\', \'ADL\', (\'adl\',), (\'*.adl\', \'*.adls\', \'*.adlf\', \'*.adlx\'), ()),\n \'AgdaLexer\': (\'pygments.lexers.haskell\', \'Agda\', (\'agda\',), (\'*.agda\',), (\'text/x-agda\',)),\n \'AheuiLexer\': (\'pygments.lexers.esoteric\', \'Aheui\', (\'aheui\',), (\'*.aheui\',), ()),\n \'AlloyLexer\': (\'pygments.lexers.dsls\', \'Alloy\', (\'alloy\',), (\'*.als\',), (\'text/x-alloy\',)),\n \'AmbientTalkLexer\': (\'pygments.lexers.ambient\', \'AmbientTalk\', (\'at\', \'ambienttalk\', \'ambienttalk/2\'), (\'*.at\',), (\'text/x-ambienttalk\',)),\n \'AmplLexer\': (\'pygments.lexers.ampl\', \'Ampl\', (\'ampl\',), (\'*.run\',), ()),\n \'Angular2HtmlLexer\': (\'pygments.lexers.templates\', \'HTML + Angular2\', (\'html+ng2\',), (\'*.ng2\',), ()),\n \'Angular2Lexer\': (\'pygments.lexers.templates\', \'Angular2\', (\'ng2\',), (), ()),\n \'AntlrActionScriptLexer\': (\'pygments.lexers.parsers\', \'ANTLR With ActionScript Target\', (\'antlr-as\', \'antlr-actionscript\'), (\'*.G\', \'*.g\'), ()),\n \'AntlrCSharpLexer\': (\'pygments.lexers.parsers\', \'ANTLR With C# Target\', (\'antlr-csharp\', \'antlr-c#\'), (\'*.G\', \'*.g\'), ()),\n \'AntlrCppLexer\': (\'pygments.lexers.parsers\', \'ANTLR With CPP Target\', (\'antlr-cpp\',), (\'*.G\', \'*.g\'), ()),\n \'AntlrJavaLexer\': (\'pygments.lexers.parsers\', \'ANTLR With Java Target\', (\'antlr-java\',), (\'*.G\', \'*.g\'), ()),\n \'AntlrLexer\': (\'pygments.lexers.parsers\', \'ANTLR\', (\'antlr\',), (), ()),\n \'AntlrObjectiveCLexer\': (\'pygments.lexers.parsers\', \'ANTLR With ObjectiveC Target\', (\'antlr-objc\',), (\'*.G\', \'*.g\'), ()),\n \'AntlrPerlLexer\': (\'pygments.lexers.parsers\', \'ANTLR With Perl Target\', (\'antlr-perl\',), (\'*.G\', \'*.g\'), ()),\n \'AntlrPythonLexer\': (\'pygments.lexers.parsers\', \'ANTLR With Python Target\', (\'antlr-python\',), (\'*.G\', \'*.g\'), ()),\n \'AntlrRubyLexer\': (\'pygments.lexers.parsers\', \'ANTLR With Ruby Target\', (\'antlr-ruby\', \'antlr-rb\'), (\'*.G\', \'*.g\'), ()),\n \'ApacheConfLexer\': (\'pygments.lexers.configs\', \'ApacheConf\', (\'apacheconf\', \'aconf\', \'apache\'), (\'.htaccess\', \'apache.conf\', \'apache2.conf\'), (\'text/x-apacheconf\',)),\n \'AppleScriptLexer\': (\'pygments.lexers.scripting\', \'AppleScript\', (\'applescript\',), (\'*.applescript\',), ()),\n \'ArduinoLexer\': 
(\'pygments.lexers.c_like\', \'Arduino\', (\'arduino\',), (\'*.ino\',), (\'text/x-arduino\',)),\n \'ArrowLexer\': (\'pygments.lexers.arrow\', \'Arrow\', (\'arrow\',), (\'*.arw\',), ()),\n \'AspectJLexer\': (\'pygments.lexers.jvm\', \'AspectJ\', (\'aspectj\',), (\'*.aj\',), (\'text/x-aspectj\',)),\n \'AsymptoteLexer\': (\'pygments.lexers.graphics\', \'Asymptote\', (\'asy\', \'asymptote\'), (\'*.asy\',), (\'text/x-asymptote\',)),\n \'AugeasLexer\': (\'pygments.lexers.configs\', \'Augeas\', (\'augeas\',), (\'*.aug\',), ()),\n \'AutoItLexer\': (\'pygments.lexers.automation\', \'AutoIt\', (\'autoit\',), (\'*.au3\',), (\'text/x-autoit\',)),\n \'AutohotkeyLexer\': (\'pygments.lexers.automation\', \'autohotkey\', (\'ahk\', \'autohotkey\'), (\'*.ahk\', \'*.ahkl\'), (\'text/x-autohotkey\',)),\n \'AwkLexer\': (\'pygments.lexers.textedit\', \'Awk\', (\'awk\', \'gawk\', \'mawk\', \'nawk\'), (\'*.awk\',), (\'application/x-awk\',)),\n \'BBCBasicLexer\': (\'pygments.lexers.basic\', \'BBC Basic\', (\'bbcbasic\',), (\'*.bbc\',), ()),\n \'BBCodeLexer\': (\'pygments.lexers.markup\', \'BBCode\', (\'bbcode\',), (), (\'text/x-bbcode\',)),\n \'BCLexer\': (\'pygments.lexers.algebra\', \'BC\', (\'bc\',), (\'*.bc\',), ()),\n \'BSTLexer\': (\'pygments.lexers.bibtex\', \'BST\', (\'bst\', \'bst-pybtex\'), (\'*.bst\',), ()),\n \'BareLexer\': (\'pygments.lexers.bare\', \'BARE\', (\'bare\',), (\'*.bare\',), ()),\n \'BaseMakefileLexer\': (\'pygments.lexers.make\', \'Base Makefile\', (\'basemake\',), (), ()),\n \'BashLexer\': (\'pygments.lexers.shell\', \'Bash\', (\'bash\', \'sh\', \'ksh\', \'zsh\', \'shell\'), (\'*.sh\', \'*.ksh\', \'*.bash\', \'*.ebuild\', \'*.eclass\', \'*.exheres-0\', \'*.exlib\', \'*.zsh\', \'.bashrc\', \'bashrc\', \'.bash_*\', \'bash_*\', \'zshrc\', \'.zshrc\', \'PKGBUILD\'), (\'application/x-sh\', \'application/x-shellscript\', \'text/x-shellscript\')),\n \'BashSessionLexer\': (\'pygments.lexers.shell\', \'Bash Session\', (\'console\', \'shell-session\'), (\'*.sh-session\', \'*.shell-session\'), (\'application/x-shell-session\', \'application/x-sh-session\')),\n \'BatchLexer\': (\'pygments.lexers.shell\', \'Batchfile\', (\'bat\', \'batch\', \'dosbatch\', \'winbatch\'), (\'*.bat\', \'*.cmd\'), (\'application/x-dos-batch\',)),\n \'BefungeLexer\': (\'pygments.lexers.esoteric\', \'Befunge\', (\'befunge\',), (\'*.befunge\',), (\'application/x-befunge\',)),\n \'BibTeXLexer\': (\'pygments.lexers.bibtex\', \'BibTeX\', (\'bib\', \'bibtex\'), (\'*.bib\',), (\'text/x-bibtex\',)),\n \'BlitzBasicLexer\': (\'pygments.lexers.basic\', \'BlitzBasic\', (\'blitzbasic\', \'b3d\', \'bplus\'), (\'*.bb\', \'*.decls\'), (\'text/x-bb\',)),\n \'BlitzMaxLexer\': (\'pygments.lexers.basic\', \'BlitzMax\', (\'blitzmax\', \'bmax\'), (\'*.bmx\',), (\'text/x-bmx\',)),\n \'BnfLexer\': (\'pygments.lexers.grammar_notation\', \'BNF\', (\'bnf\',), (\'*.bnf\',), (\'text/x-bnf\',)),\n \'BoaLexer\': (\'pygments.lexers.boa\', \'Boa\', (\'boa\',), (\'*.boa\',), ()),\n \'BooLexer\': (\'pygments.lexers.dotnet\', \'Boo\', (\'boo\',), (\'*.boo\',), (\'text/x-boo\',)),\n \'BoogieLexer\': (\'pygments.lexers.verification\', \'Boogie\', (\'boogie\',), (\'*.bpl\',), ()),\n \'BrainfuckLexer\': (\'pygments.lexers.esoteric\', \'Brainfuck\', (\'brainfuck\', \'bf\'), (\'*.bf\', \'*.b\'), (\'application/x-brainfuck\',)),\n \'BugsLexer\': (\'pygments.lexers.modeling\', \'BUGS\', (\'bugs\', \'winbugs\', \'openbugs\'), (\'*.bug\',), ()),\n \'CAmkESLexer\': (\'pygments.lexers.esoteric\', \'CAmkES\', (\'camkes\', \'idl4\'), (\'*.camkes\', \'*.idl4\'), ()),\n 
\'CLexer\': (\'pygments.lexers.c_cpp\', \'C\', (\'c\',), (\'*.c\', \'*.h\', \'*.idc\'), (\'text/x-chdr\', \'text/x-csrc\')),\n \'CMakeLexer\': (\'pygments.lexers.make\', \'CMake\', (\'cmake\',), (\'*.cmake\', \'CMakeLists.txt\'), (\'text/x-cmake\',)),\n \'CObjdumpLexer\': (\'pygments.lexers.asm\', \'c-objdump\', (\'c-objdump\',), (\'*.c-objdump\',), (\'text/x-c-objdump\',)),\n \'CPSALexer\': (\'pygments.lexers.lisp\', \'CPSA\', (\'cpsa\',), (\'*.cpsa\',), ()),\n \'CSharpAspxLexer\': (\'pygments.lexers.dotnet\', \'aspx-cs\', (\'aspx-cs\',), (\'*.aspx\', \'*.asax\', \'*.ascx\', \'*.ashx\', \'*.asmx\', \'*.axd\'), ()),\n \'CSharpLexer\': (\'pygments.lexers.dotnet\', \'C#\', (\'csharp\', \'c#\'), (\'*.cs\',), (\'text/x-csharp\',)),\n \'Ca65Lexer\': (\'pygments.lexers.asm\', \'ca65 assembler\', (\'ca65\',), (\'*.s\',), ()),\n \'CadlLexer\': (\'pygments.lexers.archetype\', \'cADL\', (\'cadl\',), (\'*.cadl\',), ()),\n \'CapDLLexer\': (\'pygments.lexers.esoteric\', \'CapDL\', (\'capdl\',), (\'*.cdl\',), ()),\n \'CapnProtoLexer\': (\'pygments.lexers.capnproto\', "Cap\'n Proto", (\'capnp\',), (\'*.capnp\',), ()),\n \'CbmBasicV2Lexer\': (\'pygments.lexers.basic\', \'CBM BASIC V2\', (\'cbmbas\',), (\'*.bas\',), ()),\n \'CeylonLexer\': (\'pygments.lexers.jvm\', \'Ceylon\', (\'ceylon\',), (\'*.ceylon\',), (\'text/x-ceylon\',)),\n \'Cfengine3Lexer\': (\'pygments.lexers.configs\', \'CFEngine3\', (\'cfengine3\', \'cf3\'), (\'*.cf\',), ()),\n \'ChaiscriptLexer\': (\'pygments.lexers.scripting\', \'ChaiScript\', (\'chai\', \'chaiscript\'), (\'*.chai\',), (\'text/x-chaiscript\', \'application/x-chaiscript\')),\n \'ChapelLexer\': (\'pygments.lexers.chapel\', \'Chapel\', (\'chapel\', \'chpl\'), (\'*.chpl\',), ()),\n \'CharmciLexer\': (\'pygments.lexers.c_like\', \'Charmci\', (\'charmci\',), (\'*.ci\',), ()),\n \'CheetahHtmlLexer\': (\'pygments.lexers.templates\', \'HTML+Cheetah\', (\'html+cheetah\', \'html+spitfire\', \'htmlcheetah\'), (), (\'text/html+cheetah\', \'text/html+spitfire\')),\n \'CheetahJavascriptLexer\': (\'pygments.lexers.templates\', \'JavaScript+Cheetah\', (\'js+cheetah\', \'javascript+cheetah\', \'js+spitfire\', \'javascript+spitfire\'), (), (\'application/x-javascript+cheetah\', \'text/x-javascript+cheetah\', \'text/javascript+cheetah\', \'application/x-javascript+spitfire\', \'text/x-javascript+spitfire\', \'text/javascript+spitfire\')),\n \'CheetahLexer\': (\'pygments.lexers.templates\', \'Cheetah\', (\'cheetah\', \'spitfire\'), (\'*.tmpl\', \'*.spt\'), (\'application/x-cheetah\', \'application/x-spitfire\')),\n \'CheetahXmlLexer\': (\'pygments.lexers.templates\', \'XML+Cheetah\', (\'xml+cheetah\', \'xml+spitfire\'), (), (\'application/xml+cheetah\', \'application/xml+spitfire\')),\n \'CirruLexer\': (\'pygments.lexers.webmisc\', \'Cirru\', (\'cirru\',), (\'*.cirru\',), (\'text/x-cirru\',)),\n \'ClayLexer\': (\'pygments.lexers.c_like\', \'Clay\', (\'clay\',), (\'*.clay\',), (\'text/x-clay\',)),\n \'CleanLexer\': (\'pygments.lexers.clean\', \'Clean\', (\'clean\',), (\'*.icl\', \'*.dcl\'), ()),\n \'ClojureLexer\': (\'pygments.lexers.jvm\', \'Clojure\', (\'clojure\', \'clj\'), (\'*.clj\',), (\'text/x-clojure\', \'application/x-clojure\')),\n \'ClojureScriptLexer\': (\'pygments.lexers.jvm\', \'ClojureScript\', (\'clojurescript\', \'cljs\'), (\'*.cljs\',), (\'text/x-clojurescript\', \'application/x-clojurescript\')),\n \'CobolFreeformatLexer\': (\'pygments.lexers.business\', \'COBOLFree\', (\'cobolfree\',), (\'*.cbl\', \'*.CBL\'), ()),\n \'CobolLexer\': (\'pygments.lexers.business\', \'COBOL\', 
(\'cobol\',), (\'*.cob\', \'*.COB\', \'*.cpy\', \'*.CPY\'), (\'text/x-cobol\',)),\n \'CoffeeScriptLexer\': (\'pygments.lexers.javascript\', \'CoffeeScript\', (\'coffee-script\', \'coffeescript\', \'coffee\'), (\'*.coffee\',), (\'text/coffeescript\',)),\n \'ColdfusionCFCLexer\': (\'pygments.lexers.templates\', \'Coldfusion CFC\', (\'cfc\',), (\'*.cfc\',), ()),\n \'ColdfusionHtmlLexer\': (\'pygments.lexers.templates\', \'Coldfusion HTML\', (\'cfm\',), (\'*.cfm\', \'*.cfml\'), (\'application/x-coldfusion\',)),\n \'ColdfusionLexer\': (\'pygments.lexers.templates\', \'cfstatement\', (\'cfs\',), (), ()),\n \'CommonLispLexer\': (\'pygments.lexers.lisp\', \'Common Lisp\', (\'common-lisp\', \'cl\', \'lisp\'), (\'*.cl\', \'*.lisp\'), (\'text/x-common-lisp\',)),\n \'ComponentPascalLexer\': (\'pygments.lexers.oberon\', \'Component Pascal\', (\'componentpascal\', \'cp\'), (\'*.cp\', \'*.cps\'), (\'text/x-component-pascal\',)),\n \'CoqLexer\': (\'pygments.lexers.theorem\', \'Coq\', (\'coq\',), (\'*.v\',), (\'text/x-coq\',)),\n \'CppLexer\': (\'pygments.lexers.c_cpp\', \'C++\', (\'cpp\', \'c++\'), (\'*.cpp\', \'*.hpp\', \'*.c++\', \'*.h++\', \'*.cc\', \'*.hh\', \'*.cxx\', \'*.hxx\', \'*.C\', \'*.H\', \'*.cp\', \'*.CPP\'), (\'text/x-c++hdr\', \'text/x-c++src\')),\n \'CppObjdumpLexer\': (\'pygments.lexers.asm\', \'cpp-objdump\', (\'cpp-objdump\', \'c++-objdumb\', \'cxx-objdump\'), (\'*.cpp-objdump\', \'*.c++-objdump\', \'*.cxx-objdump\'), (\'text/x-cpp-objdump\',)),\n \'CrmshLexer\': (\'pygments.lexers.dsls\', \'Crmsh\', (\'crmsh\', \'pcmk\'), (\'*.crmsh\', \'*.pcmk\'), ()),\n \'CrocLexer\': (\'pygments.lexers.d\', \'Croc\', (\'croc\',), (\'*.croc\',), (\'text/x-crocsrc\',)),\n \'CryptolLexer\': (\'pygments.lexers.haskell\', \'Cryptol\', (\'cryptol\', \'cry\'), (\'*.cry\',), (\'text/x-cryptol\',)),\n \'CrystalLexer\': (\'pygments.lexers.crystal\', \'Crystal\', (\'cr\', \'crystal\'), (\'*.cr\',), (\'text/x-crystal\',)),\n \'CsoundDocumentLexer\': (\'pygments.lexers.csound\', \'Csound Document\', (\'csound-document\', \'csound-csd\'), (\'*.csd\',), ()),\n \'CsoundOrchestraLexer\': (\'pygments.lexers.csound\', \'Csound Orchestra\', (\'csound\', \'csound-orc\'), (\'*.orc\', \'*.udo\'), ()),\n \'CsoundScoreLexer\': (\'pygments.lexers.csound\', \'Csound Score\', (\'csound-score\', \'csound-sco\'), (\'*.sco\',), ()),\n \'CssDjangoLexer\': (\'pygments.lexers.templates\', \'CSS+Django/Jinja\', (\'css+django\', \'css+jinja\'), (), (\'text/css+django\', \'text/css+jinja\')),\n \'CssErbLexer\': (\'pygments.lexers.templates\', \'CSS+Ruby\', (\'css+erb\', \'css+ruby\'), (), (\'text/css+ruby\',)),\n \'CssGenshiLexer\': (\'pygments.lexers.templates\', \'CSS+Genshi Text\', (\'css+genshitext\', \'css+genshi\'), (), (\'text/css+genshi\',)),\n \'CssLexer\': (\'pygments.lexers.css\', \'CSS\', (\'css\',), (\'*.css\',), (\'text/css\',)),\n \'CssPhpLexer\': (\'pygments.lexers.templates\', \'CSS+PHP\', (\'css+php\',), (), (\'text/css+php\',)),\n \'CssSmartyLexer\': (\'pygments.lexers.templates\', \'CSS+Smarty\', (\'css+smarty\',), (), (\'text/css+smarty\',)),\n \'CudaLexer\': (\'pygments.lexers.c_like\', \'CUDA\', (\'cuda\', \'cu\'), (\'*.cu\', \'*.cuh\'), (\'text/x-cuda\',)),\n \'CypherLexer\': (\'pygments.lexers.graph\', \'Cypher\', (\'cypher\',), (\'*.cyp\', \'*.cypher\'), ()),\n \'CythonLexer\': (\'pygments.lexers.python\', \'Cython\', (\'cython\', \'pyx\', \'pyrex\'), (\'*.pyx\', \'*.pxd\', \'*.pxi\'), (\'text/x-cython\', \'application/x-cython\')),\n \'DLexer\': (\'pygments.lexers.d\', \'D\', (\'d\',), (\'*.d\', \'*.di\'), 
(\'text/x-dsrc\',)),\n \'DObjdumpLexer\': (\'pygments.lexers.asm\', \'d-objdump\', (\'d-objdump\',), (\'*.d-objdump\',), (\'text/x-d-objdump\',)),\n \'DarcsPatchLexer\': (\'pygments.lexers.diff\', \'Darcs Patch\', (\'dpatch\',), (\'*.dpatch\', \'*.darcspatch\'), ()),\n \'DartLexer\': (\'pygments.lexers.javascript\', \'Dart\', (\'dart\',), (\'*.dart\',), (\'text/x-dart\',)),\n \'Dasm16Lexer\': (\'pygments.lexers.asm\', \'DASM16\', (\'dasm16\',), (\'*.dasm16\', \'*.dasm\'), (\'text/x-dasm16\',)),\n \'DebianControlLexer\': (\'pygments.lexers.installers\', \'Debian Control file\', (\'control\', \'debcontrol\'), (\'control\',), ()),\n \'DelphiLexer\': (\'pygments.lexers.pascal\', \'Delphi\', (\'delphi\', \'pas\', \'pascal\', \'objectpascal\'), (\'*.pas\', \'*.dpr\'), (\'text/x-pascal\',)),\n \'DevicetreeLexer\': (\'pygments.lexers.devicetree\', \'Devicetree\', (\'devicetree\', \'dts\'), (\'*.dts\', \'*.dtsi\'), (\'text/x-c\',)),\n \'DgLexer\': (\'pygments.lexers.python\', \'dg\', (\'dg\',), (\'*.dg\',), (\'text/x-dg\',)),\n \'DiffLexer\': (\'pygments.lexers.diff\', \'Diff\', (\'diff\', \'udiff\'), (\'*.diff\', \'*.patch\'), (\'text/x-diff\', \'text/x-patch\')),\n \'DjangoLexer\': (\'pygments.lexers.templates\', \'Django/Jinja\', (\'django\', \'jinja\'), (), (\'application/x-django-templating\', \'application/x-jinja\')),\n \'DockerLexer\': (\'pygments.lexers.configs\', \'Docker\', (\'docker\', \'dockerfile\'), (\'Dockerfile\', \'*.docker\'), (\'text/x-dockerfile-config\',)),\n \'DtdLexer\': (\'pygments.lexers.html\', \'DTD\', (\'dtd\',), (\'*.dtd\',), (\'application/xml-dtd\',)),\n \'DuelLexer\': (\'pygments.lexers.webmisc\', \'Duel\', (\'duel\', \'jbst\', \'jsonml+bst\'), (\'*.duel\', \'*.jbst\'), (\'text/x-duel\', \'text/x-jbst\')),\n \'DylanConsoleLexer\': (\'pygments.lexers.dylan\', \'Dylan session\', (\'dylan-console\', \'dylan-repl\'), (\'*.dylan-console\',), (\'text/x-dylan-console\',)),\n \'DylanLexer\': (\'pygments.lexers.dylan\', \'Dylan\', (\'dylan\',), (\'*.dylan\', \'*.dyl\', \'*.intr\'), (\'text/x-dylan\',)),\n \'DylanLidLexer\': (\'pygments.lexers.dylan\', \'DylanLID\', (\'dylan-lid\', \'lid\'), (\'*.lid\', \'*.hdp\'), (\'text/x-dylan-lid\',)),\n \'ECLLexer\': (\'pygments.lexers.ecl\', \'ECL\', (\'ecl\',), (\'*.ecl\',), (\'application/x-ecl\',)),\n \'ECLexer\': (\'pygments.lexers.c_like\', \'eC\', (\'ec\',), (\'*.ec\', \'*.eh\'), (\'text/x-echdr\', \'text/x-ecsrc\')),\n \'EarlGreyLexer\': (\'pygments.lexers.javascript\', \'Earl Grey\', (\'earl-grey\', \'earlgrey\', \'eg\'), (\'*.eg\',), (\'text/x-earl-grey\',)),\n \'EasytrieveLexer\': (\'pygments.lexers.scripting\', \'Easytrieve\', (\'easytrieve\',), (\'*.ezt\', \'*.mac\'), (\'text/x-easytrieve\',)),\n \'EbnfLexer\': (\'pygments.lexers.parsers\', \'EBNF\', (\'ebnf\',), (\'*.ebnf\',), (\'text/x-ebnf\',)),\n \'EiffelLexer\': (\'pygments.lexers.eiffel\', \'Eiffel\', (\'eiffel\',), (\'*.e\',), (\'text/x-eiffel\',)),\n \'ElixirConsoleLexer\': (\'pygments.lexers.erlang\', \'Elixir iex session\', (\'iex\',), (), (\'text/x-elixir-shellsession\',)),\n \'ElixirLexer\': (\'pygments.lexers.erlang\', \'Elixir\', (\'elixir\', \'ex\', \'exs\'), (\'*.ex\', \'*.eex\', \'*.exs\'), (\'text/x-elixir\',)),\n \'ElmLexer\': (\'pygments.lexers.elm\', \'Elm\', (\'elm\',), (\'*.elm\',), (\'text/x-elm\',)),\n \'EmacsLispLexer\': (\'pygments.lexers.lisp\', \'EmacsLisp\', (\'emacs\', \'elisp\', \'emacs-lisp\'), (\'*.el\',), (\'text/x-elisp\', \'application/x-elisp\')),\n \'EmailLexer\': (\'pygments.lexers.email\', \'E-mail\', (\'email\', \'eml\'), 
(\'*.eml\',), (\'message/rfc822\',)),\n \'ErbLexer\': (\'pygments.lexers.templates\', \'ERB\', (\'erb\',), (), (\'application/x-ruby-templating\',)),\n \'ErlangLexer\': (\'pygments.lexers.erlang\', \'Erlang\', (\'erlang\',), (\'*.erl\', \'*.hrl\', \'*.es\', \'*.escript\'), (\'text/x-erlang\',)),\n \'ErlangShellLexer\': (\'pygments.lexers.erlang\', \'Erlang erl session\', (\'erl\',), (\'*.erl-sh\',), (\'text/x-erl-shellsession\',)),\n \'EvoqueHtmlLexer\': (\'pygments.lexers.templates\', \'HTML+Evoque\', (\'html+evoque\',), (\'*.html\',), (\'text/html+evoque\',)),\n \'EvoqueLexer\': (\'pygments.lexers.templates\', \'Evoque\', (\'evoque\',), (\'*.evoque\',), (\'application/x-evoque\',)),\n \'EvoqueXmlLexer\': (\'pygments.lexers.templates\', \'XML+Evoque\', (\'xml+evoque\',), (\'*.xml\',), (\'application/xml+evoque\',)),\n \'ExeclineLexer\': (\'pygments.lexers.shell\', \'execline\', (\'execline\',), (\'*.exec\',), ()),\n \'EzhilLexer\': (\'pygments.lexers.ezhil\', \'Ezhil\', (\'ezhil\',), (\'*.n\',), (\'text/x-ezhil\',)),\n \'FSharpLexer\': (\'pygments.lexers.dotnet\', \'F#\', (\'fsharp\', \'f#\'), (\'*.fs\', \'*.fsi\'), (\'text/x-fsharp\',)),\n \'FStarLexer\': (\'pygments.lexers.ml\', \'FStar\', (\'fstar\',), (\'*.fst\', \'*.fsti\'), (\'text/x-fstar\',)),\n \'FactorLexer\': (\'pygments.lexers.factor\', \'Factor\', (\'factor\',), (\'*.factor\',), (\'text/x-factor\',)),\n \'FancyLexer\': (\'pygments.lexers.ruby\', \'Fancy\', (\'fancy\', \'fy\'), (\'*.fy\', \'*.fancypack\'), (\'text/x-fancysrc\',)),\n \'FantomLexer\': (\'pygments.lexers.fantom\', \'Fantom\', (\'fan\',), (\'*.fan\',), (\'application/x-fantom\',)),\n \'FelixLexer\': (\'pygments.lexers.felix\', \'Felix\', (\'felix\', \'flx\'), (\'*.flx\', \'*.flxh\'), (\'text/x-felix\',)),\n \'FennelLexer\': (\'pygments.lexers.lisp\', \'Fennel\', (\'fennel\', \'fnl\'), (\'*.fnl\',), ()),\n \'FishShellLexer\': (\'pygments.lexers.shell\', \'Fish\', (\'fish\', \'fishshell\'), (\'*.fish\', \'*.load\'), (\'application/x-fish\',)),\n \'FlatlineLexer\': (\'pygments.lexers.dsls\', \'Flatline\', (\'flatline\',), (), (\'text/x-flatline\',)),\n \'FloScriptLexer\': (\'pygments.lexers.floscript\', \'FloScript\', (\'floscript\', \'flo\'), (\'*.flo\',), ()),\n \'ForthLexer\': (\'pygments.lexers.forth\', \'Forth\', (\'forth\',), (\'*.frt\', \'*.fs\'), (\'application/x-forth\',)),\n \'FortranFixedLexer\': (\'pygments.lexers.fortran\', \'FortranFixed\', (\'fortranfixed\',), (\'*.f\', \'*.F\'), ()),\n \'FortranLexer\': (\'pygments.lexers.fortran\', \'Fortran\', (\'fortran\',), (\'*.f03\', \'*.f90\', \'*.F03\', \'*.F90\'), (\'text/x-fortran\',)),\n \'FoxProLexer\': (\'pygments.lexers.foxpro\', \'FoxPro\', (\'foxpro\', \'vfp\', \'clipper\', \'xbase\'), (\'*.PRG\', \'*.prg\'), ()),\n \'FreeFemLexer\': (\'pygments.lexers.freefem\', \'Freefem\', (\'freefem\',), (\'*.edp\',), (\'text/x-freefem\',)),\n \'GAPLexer\': (\'pygments.lexers.algebra\', \'GAP\', (\'gap\',), (\'*.g\', \'*.gd\', \'*.gi\', \'*.gap\'), ()),\n \'GDScriptLexer\': (\'pygments.lexers.gdscript\', \'GDScript\', (\'gdscript\', \'gd\'), (\'*.gd\',), (\'text/x-gdscript\', \'application/x-gdscript\')),\n \'GLShaderLexer\': (\'pygments.lexers.graphics\', \'GLSL\', (\'glsl\',), (\'*.vert\', \'*.frag\', \'*.geo\'), (\'text/x-glslsrc\',)),\n \'GasLexer\': (\'pygments.lexers.asm\', \'GAS\', (\'gas\', \'asm\'), (\'*.s\', \'*.S\'), (\'text/x-gas\',)),\n \'GenshiLexer\': (\'pygments.lexers.templates\', \'Genshi\', (\'genshi\', \'kid\', \'xml+genshi\', \'xml+kid\'), (\'*.kid\',), (\'application/x-genshi\', 
\'application/x-kid\')),\n \'GenshiTextLexer\': (\'pygments.lexers.templates\', \'Genshi Text\', (\'genshitext\',), (), (\'application/x-genshi-text\', \'text/x-genshi\')),\n \'GettextLexer\': (\'pygments.lexers.textfmts\', \'Gettext Catalog\', (\'pot\', \'po\'), (\'*.pot\', \'*.po\'), (\'application/x-gettext\', \'text/x-gettext\', \'text/gettext\')),\n \'GherkinLexer\': (\'pygments.lexers.testing\', \'Gherkin\', (\'cucumber\', \'gherkin\'), (\'*.feature\',), (\'text/x-gherkin\',)),\n \'GnuplotLexer\': (\'pygments.lexers.graphics\', \'Gnuplot\', (\'gnuplot\',), (\'*.plot\', \'*.plt\'), (\'text/x-gnuplot\',)),\n \'GoLexer\': (\'pygments.lexers.go\', \'Go\', (\'go\',), (\'*.go\',), (\'text/x-gosrc\',)),\n \'GoloLexer\': (\'pygments.lexers.jvm\', \'Golo\', (\'golo\',), (\'*.golo\',), ()),\n \'GoodDataCLLexer\': (\'pygments.lexers.business\', \'GoodData-CL\', (\'gooddata-cl\',), (\'*.gdc\',), (\'text/x-gooddata-cl\',)),\n \'GosuLexer\': (\'pygments.lexers.jvm\', \'Gosu\', (\'gosu\',), (\'*.gs\', \'*.gsx\', \'*.gsp\', \'*.vark\'), (\'text/x-gosu\',)),\n \'GosuTemplateLexer\': (\'pygments.lexers.jvm\', \'Gosu Template\', (\'gst\',), (\'*.gst\',), (\'text/x-gosu-template\',)),\n \'GroffLexer\': (\'pygments.lexers.markup\', \'Groff\', (\'groff\', \'nroff\', \'man\'), (\'*.[1234567]\', \'*.man\'), (\'application/x-troff\', \'text/troff\')),\n \'GroovyLexer\': (\'pygments.lexers.jvm\', \'Groovy\', (\'groovy\',), (\'*.groovy\', \'*.gradle\'), (\'text/x-groovy\',)),\n \'HLSLShaderLexer\': (\'pygments.lexers.graphics\', \'HLSL\', (\'hlsl\',), (\'*.hlsl\', \'*.hlsli\'), (\'text/x-hlsl\',)),\n \'HamlLexer\': (\'pygments.lexers.html\', \'Haml\', (\'haml\',), (\'*.haml\',), (\'text/x-haml\',)),\n \'HandlebarsHtmlLexer\': (\'pygments.lexers.templates\', \'HTML+Handlebars\', (\'html+handlebars\',), (\'*.handlebars\', \'*.hbs\'), (\'text/html+handlebars\', \'text/x-handlebars-template\')),\n \'HandlebarsLexer\': (\'pygments.lexers.templates\', \'Handlebars\', (\'handlebars\',), (), ()),\n \'HaskellLexer\': (\'pygments.lexers.haskell\', \'Haskell\', (\'haskell\', \'hs\'), (\'*.hs\',), (\'text/x-haskell\',)),\n \'HaxeLexer\': (\'pygments.lexers.haxe\', \'Haxe\', (\'hx\', \'haxe\', \'hxsl\'), (\'*.hx\', \'*.hxsl\'), (\'text/haxe\', \'text/x-haxe\', \'text/x-hx\')),\n \'HexdumpLexer\': (\'pygments.lexers.hexdump\', \'Hexdump\', (\'hexdump\',), (), ()),\n \'HsailLexer\': (\'pygments.lexers.asm\', \'HSAIL\', (\'hsail\', \'hsa\'), (\'*.hsail\',), (\'text/x-hsail\',)),\n \'HspecLexer\': (\'pygments.lexers.haskell\', \'Hspec\', (\'hspec\',), (), ()),\n \'HtmlDjangoLexer\': (\'pygments.lexers.templates\', \'HTML+Django/Jinja\', (\'html+django\', \'html+jinja\', \'htmldjango\'), (), (\'text/html+django\', \'text/html+jinja\')),\n \'HtmlGenshiLexer\': (\'pygments.lexers.templates\', \'HTML+Genshi\', (\'html+genshi\', \'html+kid\'), (), (\'text/html+genshi\',)),\n \'HtmlLexer\': (\'pygments.lexers.html\', \'HTML\', (\'html\',), (\'*.html\', \'*.htm\', \'*.xhtml\', \'*.xslt\'), (\'text/html\', \'application/xhtml+xml\')),\n \'HtmlPhpLexer\': (\'pygments.lexers.templates\', \'HTML+PHP\', (\'html+php\',), (\'*.phtml\',), (\'application/x-php\', \'application/x-httpd-php\', \'application/x-httpd-php3\', \'application/x-httpd-php4\', \'application/x-httpd-php5\')),\n \'HtmlSmartyLexer\': (\'pygments.lexers.templates\', \'HTML+Smarty\', (\'html+smarty\',), (), (\'text/html+smarty\',)),\n \'HttpLexer\': (\'pygments.lexers.textfmts\', \'HTTP\', (\'http\',), (), ()),\n \'HxmlLexer\': (\'pygments.lexers.haxe\', \'Hxml\', 
(\'haxeml\', \'hxml\'), (\'*.hxml\',), ()),\n \'HyLexer\': (\'pygments.lexers.lisp\', \'Hy\', (\'hylang\',), (\'*.hy\',), (\'text/x-hy\', \'application/x-hy\')),\n \'HybrisLexer\': (\'pygments.lexers.scripting\', \'Hybris\', (\'hybris\', \'hy\'), (\'*.hy\', \'*.hyb\'), (\'text/x-hybris\', \'application/x-hybris\')),\n \'IDLLexer\': (\'pygments.lexers.idl\', \'IDL\', (\'idl\',), (\'*.pro\',), (\'text/idl\',)),\n \'IconLexer\': (\'pygments.lexers.unicon\', \'Icon\', (\'icon\',), (\'*.icon\', \'*.ICON\'), ()),\n \'IdrisLexer\': (\'pygments.lexers.haskell\', \'Idris\', (\'idris\', \'idr\'), (\'*.idr\',), (\'text/x-idris\',)),\n \'IgorLexer\': (\'pygments.lexers.igor\', \'Igor\', (\'igor\', \'igorpro\'), (\'*.ipf\',), (\'text/ipf\',)),\n \'Inform6Lexer\': (\'pygments.lexers.int_fiction\', \'Inform 6\', (\'inform6\', \'i6\'), (\'*.inf\',), ()),\n \'Inform6TemplateLexer\': (\'pygments.lexers.int_fiction\', \'Inform 6 template\', (\'i6t\',), (\'*.i6t\',), ()),\n \'Inform7Lexer\': (\'pygments.lexers.int_fiction\', \'Inform 7\', (\'inform7\', \'i7\'), (\'*.ni\', \'*.i7x\'), ()),\n \'IniLexer\': (\'pygments.lexers.configs\', \'INI\', (\'ini\', \'cfg\', \'dosini\'), (\'*.ini\', \'*.cfg\', \'*.inf\'), (\'text/x-ini\', \'text/inf\')),\n \'IoLexer\': (\'pygments.lexers.iolang\', \'Io\', (\'io\',), (\'*.io\',), (\'text/x-iosrc\',)),\n \'IokeLexer\': (\'pygments.lexers.jvm\', \'Ioke\', (\'ioke\', \'ik\'), (\'*.ik\',), (\'text/x-iokesrc\',)),\n \'IrcLogsLexer\': (\'pygments.lexers.textfmts\', \'IRC logs\', (\'irc\',), (\'*.weechatlog\',), (\'text/x-irclog\',)),\n \'IsabelleLexer\': (\'pygments.lexers.theorem\', \'Isabelle\', (\'isabelle\',), (\'*.thy\',), (\'text/x-isabelle\',)),\n \'JLexer\': (\'pygments.lexers.j\', \'J\', (\'j\',), (\'*.ijs\',), (\'text/x-j\',)),\n \'JagsLexer\': (\'pygments.lexers.modeling\', \'JAGS\', (\'jags\',), (\'*.jag\', \'*.bug\'), ()),\n \'JasminLexer\': (\'pygments.lexers.jvm\', \'Jasmin\', (\'jasmin\', \'jasminxt\'), (\'*.j\',), ()),\n \'JavaLexer\': (\'pygments.lexers.jvm\', \'Java\', (\'java\',), (\'*.java\',), (\'text/x-java\',)),\n \'JavascriptDjangoLexer\': (\'pygments.lexers.templates\', \'JavaScript+Django/Jinja\', (\'js+django\', \'javascript+django\', \'js+jinja\', \'javascript+jinja\'), (), (\'application/x-javascript+django\', \'application/x-javascript+jinja\', \'text/x-javascript+django\', \'text/x-javascript+jinja\', \'text/javascript+django\', \'text/javascript+jinja\')),\n \'JavascriptErbLexer\': (\'pygments.lexers.templates\', \'JavaScript+Ruby\', (\'js+erb\', \'javascript+erb\', \'js+ruby\', \'javascript+ruby\'), (), (\'application/x-javascript+ruby\', \'text/x-javascript+ruby\', \'text/javascript+ruby\')),\n \'JavascriptGenshiLexer\': (\'pygments.lexers.templates\', \'JavaScript+Genshi Text\', (\'js+genshitext\', \'js+genshi\', \'javascript+genshitext\', \'javascript+genshi\'), (), (\'application/x-javascript+genshi\', \'text/x-javascript+genshi\', \'text/javascript+genshi\')),\n \'JavascriptLexer\': (\'pygments.lexers.javascript\', \'JavaScript\', (\'js\', \'javascript\'), (\'*.js\', \'*.jsm\', \'*.mjs\'), (\'application/javascript\', \'application/x-javascript\', \'text/x-javascript\', \'text/javascript\')),\n \'JavascriptPhpLexer\': (\'pygments.lexers.templates\', \'JavaScript+PHP\', (\'js+php\', \'javascript+php\'), (), (\'application/x-javascript+php\', \'text/x-javascript+php\', \'text/javascript+php\')),\n \'JavascriptSmartyLexer\': (\'pygments.lexers.templates\', \'JavaScript+Smarty\', (\'js+smarty\', \'javascript+smarty\'), (), 
(\'application/x-javascript+smarty\', \'text/x-javascript+smarty\', \'text/javascript+smarty\')),\n \'JclLexer\': (\'pygments.lexers.scripting\', \'JCL\', (\'jcl\',), (\'*.jcl\',), (\'text/x-jcl\',)),\n \'JsgfLexer\': (\'pygments.lexers.grammar_notation\', \'JSGF\', (\'jsgf\',), (\'*.jsgf\',), (\'application/jsgf\', \'application/x-jsgf\', \'text/jsgf\')),\n \'JsonBareObjectLexer\': (\'pygments.lexers.data\', \'JSONBareObject\', (\'json-object\',), (), (\'application/json-object\',)),\n \'JsonLdLexer\': (\'pygments.lexers.data\', \'JSON-LD\', (\'jsonld\', \'json-ld\'), (\'*.jsonld\',), (\'application/ld+json\',)),\n \'JsonLexer\': (\'pygments.lexers.data\', \'JSON\', (\'json\',), (\'*.json\', \'Pipfile.lock\'), (\'application/json\',)),\n \'JspLexer\': (\'pygments.lexers.templates\', \'Java Server Page\', (\'jsp\',), (\'*.jsp\',), (\'application/x-jsp\',)),\n \'JuliaConsoleLexer\': (\'pygments.lexers.julia\', \'Julia console\', (\'jlcon\',), (), ()),\n \'JuliaLexer\': (\'pygments.lexers.julia\', \'Julia\', (\'julia\', \'jl\'), (\'*.jl\',), (\'text/x-julia\', \'application/x-julia\')),\n \'JuttleLexer\': (\'pygments.lexers.javascript\', \'Juttle\', (\'juttle\', \'juttle\'), (\'*.juttle\',), (\'application/juttle\', \'application/x-juttle\', \'text/x-juttle\', \'text/juttle\')),\n \'KalLexer\': (\'pygments.lexers.javascript\', \'Kal\', (\'kal\',), (\'*.kal\',), (\'text/kal\', \'application/kal\')),\n \'KconfigLexer\': (\'pygments.lexers.configs\', \'Kconfig\', (\'kconfig\', \'menuconfig\', \'linux-config\', \'kernel-config\'), (\'Kconfig*\', \'*Config.in*\', \'external.in*\', \'standard-modules.in\'), (\'text/x-kconfig\',)),\n \'KernelLogLexer\': (\'pygments.lexers.textfmts\', \'Kernel log\', (\'kmsg\', \'dmesg\'), (\'*.kmsg\', \'*.dmesg\'), ()),\n \'KokaLexer\': (\'pygments.lexers.haskell\', \'Koka\', (\'koka\',), (\'*.kk\', \'*.kki\'), (\'text/x-koka\',)),\n \'KotlinLexer\': (\'pygments.lexers.jvm\', \'Kotlin\', (\'kotlin\',), (\'*.kt\',), (\'text/x-kotlin\',)),\n \'LSLLexer\': (\'pygments.lexers.scripting\', \'LSL\', (\'lsl\',), (\'*.lsl\',), (\'text/x-lsl\',)),\n \'LassoCssLexer\': (\'pygments.lexers.templates\', \'CSS+Lasso\', (\'css+lasso\',), (), (\'text/css+lasso\',)),\n \'LassoHtmlLexer\': (\'pygments.lexers.templates\', \'HTML+Lasso\', (\'html+lasso\',), (), (\'text/html+lasso\', \'application/x-httpd-lasso\', \'application/x-httpd-lasso[89]\')),\n \'LassoJavascriptLexer\': (\'pygments.lexers.templates\', \'JavaScript+Lasso\', (\'js+lasso\', \'javascript+lasso\'), (), (\'application/x-javascript+lasso\', \'text/x-javascript+lasso\', \'text/javascript+lasso\')),\n \'LassoLexer\': (\'pygments.lexers.javascript\', \'Lasso\', (\'lasso\', \'lassoscript\'), (\'*.lasso\', \'*.lasso[89]\'), (\'text/x-lasso\',)),\n \'LassoXmlLexer\': (\'pygments.lexers.templates\', \'XML+Lasso\', (\'xml+lasso\',), (), (\'application/xml+lasso\',)),\n \'LeanLexer\': (\'pygments.lexers.theorem\', \'Lean\', (\'lean\',), (\'*.lean\',), (\'text/x-lean\',)),\n \'LessCssLexer\': (\'pygments.lexers.css\', \'LessCss\', (\'less\',), (\'*.less\',), (\'text/x-less-css\',)),\n \'LighttpdConfLexer\': (\'pygments.lexers.configs\', \'Lighttpd configuration file\', (\'lighty\', \'lighttpd\'), (), (\'text/x-lighttpd-conf\',)),\n \'LimboLexer\': (\'pygments.lexers.inferno\', \'Limbo\', (\'limbo\',), (\'*.b\',), (\'text/limbo\',)),\n \'LiquidLexer\': (\'pygments.lexers.templates\', \'liquid\', (\'liquid\',), (\'*.liquid\',), ()),\n \'LiterateAgdaLexer\': (\'pygments.lexers.haskell\', \'Literate Agda\', (\'lagda\', 
\'literate-agda\'), (\'*.lagda\',), (\'text/x-literate-agda\',)),\n \'LiterateCryptolLexer\': (\'pygments.lexers.haskell\', \'Literate Cryptol\', (\'lcry\', \'literate-cryptol\', \'lcryptol\'), (\'*.lcry\',), (\'text/x-literate-cryptol\',)),\n \'LiterateHaskellLexer\': (\'pygments.lexers.haskell\', \'Literate Haskell\', (\'lhs\', \'literate-haskell\', \'lhaskell\'), (\'*.lhs\',), (\'text/x-literate-haskell\',)),\n \'LiterateIdrisLexer\': (\'pygments.lexers.haskell\', \'Literate Idris\', (\'lidr\', \'literate-idris\', \'lidris\'), (\'*.lidr\',), (\'text/x-literate-idris\',)),\n \'LiveScriptLexer\': (\'pygments.lexers.javascript\', \'LiveScript\', (\'live-script\', \'livescript\'), (\'*.ls\',), (\'text/livescript\',)),\n \'LlvmLexer\': (\'pygments.lexers.asm\', \'LLVM\', (\'llvm\',), (\'*.ll\',), (\'text/x-llvm\',)),\n \'LlvmMirBodyLexer\': (\'pygments.lexers.asm\', \'LLVM-MIR Body\', (\'llvm-mir-body\',), (), ()),\n \'LlvmMirLexer\': (\'pygments.lexers.asm\', \'LLVM-MIR\', (\'llvm-mir\',), (\'*.mir\',), ()),\n \'LogosLexer\': (\'pygments.lexers.objective\', \'Logos\', (\'logos\',), (\'*.x\', \'*.xi\', \'*.xm\', \'*.xmi\'), (\'text/x-logos\',)),\n \'LogtalkLexer\': (\'pygments.lexers.prolog\', \'Logtalk\', (\'logtalk\',), (\'*.lgt\', \'*.logtalk\'), (\'text/x-logtalk\',)),\n \'LuaLexer\': (\'pygments.lexers.scripting\', \'Lua\', (\'lua\',), (\'*.lua\', \'*.wlua\'), (\'text/x-lua\', \'application/x-lua\')),\n \'MIMELexer\': (\'pygments.lexers.mime\', \'MIME\', (\'mime\',), (), (\'multipart/mixed\', \'multipart/related\', \'multipart/alternative\')),\n \'MOOCodeLexer\': (\'pygments.lexers.scripting\', \'MOOCode\', (\'moocode\', \'moo\'), (\'*.moo\',), (\'text/x-moocode\',)),\n \'MSDOSSessionLexer\': (\'pygments.lexers.shell\', \'MSDOS Session\', (\'doscon\',), (), ()),\n \'MakefileLexer\': (\'pygments.lexers.make\', \'Makefile\', (\'make\', \'makefile\', \'mf\', \'bsdmake\'), (\'*.mak\', \'*.mk\', \'Makefile\', \'makefile\', \'Makefile.*\', \'GNUmakefile\'), (\'text/x-makefile\',)),\n \'MakoCssLexer\': (\'pygments.lexers.templates\', \'CSS+Mako\', (\'css+mako\',), (), (\'text/css+mako\',)),\n \'MakoHtmlLexer\': (\'pygments.lexers.templates\', \'HTML+Mako\', (\'html+mako\',), (), (\'text/html+mako\',)),\n \'MakoJavascriptLexer\': (\'pygments.lexers.templates\', \'JavaScript+Mako\', (\'js+mako\', \'javascript+mako\'), (), (\'application/x-javascript+mako\', \'text/x-javascript+mako\', \'text/javascript+mako\')),\n \'MakoLexer\': (\'pygments.lexers.templates\', \'Mako\', (\'mako\',), (\'*.mao\',), (\'application/x-mako\',)),\n \'MakoXmlLexer\': (\'pygments.lexers.templates\', \'XML+Mako\', (\'xml+mako\',), (), (\'application/xml+mako\',)),\n \'MaqlLexer\': (\'pygments.lexers.business\', \'MAQL\', (\'maql\',), (\'*.maql\',), (\'text/x-gooddata-maql\', \'application/x-gooddata-maql\')),\n \'MarkdownLexer\': (\'pygments.lexers.markup\', \'markdown\', (\'md\',), (\'*.md\', \'*.markdown\'), (\'text/x-markdown\',)),\n \'MaskLexer\': (\'pygments.lexers.javascript\', \'Mask\', (\'mask\',), (\'*.mask\',), (\'text/x-mask\',)),\n \'MasonLexer\': (\'pygments.lexers.templates\', \'Mason\', (\'mason\',), (\'*.m\', \'*.mhtml\', \'*.mc\', \'*.mi\', \'autohandler\', \'dhandler\'), (\'application/x-mason\',)),\n \'MathematicaLexer\': (\'pygments.lexers.algebra\', \'Mathematica\', (\'mathematica\', \'mma\', \'nb\'), (\'*.nb\', \'*.cdf\', \'*.nbp\', \'*.ma\'), (\'application/mathematica\', \'application/vnd.wolfram.mathematica\', \'application/vnd.wolfram.mathematica.package\', \'application/vnd.wolfram.cdf\')),\n 
\'MatlabLexer\': (\'pygments.lexers.matlab\', \'Matlab\', (\'matlab\',), (\'*.m\',), (\'text/matlab\',)),\n \'MatlabSessionLexer\': (\'pygments.lexers.matlab\', \'Matlab session\', (\'matlabsession\',), (), ()),\n \'MiniDLexer\': (\'pygments.lexers.d\', \'MiniD\', (\'minid\',), (), (\'text/x-minidsrc\',)),\n \'MiniScriptLexer\': (\'pygments.lexers.scripting\', \'MiniScript\', (\'ms\', \'miniscript\'), (\'*.ms\',), (\'text/x-minicript\', \'application/x-miniscript\')),\n \'ModelicaLexer\': (\'pygments.lexers.modeling\', \'Modelica\', (\'modelica\',), (\'*.mo\',), (\'text/x-modelica\',)),\n \'Modula2Lexer\': (\'pygments.lexers.modula2\', \'Modula-2\', (\'modula2\', \'m2\'), (\'*.def\', \'*.mod\'), (\'text/x-modula2\',)),\n \'MoinWikiLexer\': (\'pygments.lexers.markup\', \'MoinMoin/Trac Wiki markup\', (\'trac-wiki\', \'moin\'), (), (\'text/x-trac-wiki\',)),\n \'MonkeyLexer\': (\'pygments.lexers.basic\', \'Monkey\', (\'monkey\',), (\'*.monkey\',), (\'text/x-monkey\',)),\n \'MonteLexer\': (\'pygments.lexers.monte\', \'Monte\', (\'monte\',), (\'*.mt\',), ()),\n \'MoonScriptLexer\': (\'pygments.lexers.scripting\', \'MoonScript\', (\'moon\', \'moonscript\'), (\'*.moon\',), (\'text/x-moonscript\', \'application/x-moonscript\')),\n \'MoselLexer\': (\'pygments.lexers.mosel\', \'Mosel\', (\'mosel\',), (\'*.mos\',), ()),\n \'MozPreprocCssLexer\': (\'pygments.lexers.markup\', \'CSS+mozpreproc\', (\'css+mozpreproc\',), (\'*.css.in\',), ()),\n \'MozPreprocHashLexer\': (\'pygments.lexers.markup\', \'mozhashpreproc\', (\'mozhashpreproc\',), (), ()),\n \'MozPreprocJavascriptLexer\': (\'pygments.lexers.markup\', \'Javascript+mozpreproc\', (\'javascript+mozpreproc\',), (\'*.js.in\',), ()),\n \'MozPreprocPercentLexer\': (\'pygments.lexers.markup\', \'mozpercentpreproc\', (\'mozpercentpreproc\',), (), ()),\n \'MozPreprocXulLexer\': (\'pygments.lexers.markup\', \'XUL+mozpreproc\', (\'xul+mozpreproc\',), (\'*.xul.in\',), ()),\n \'MqlLexer\': (\'pygments.lexers.c_like\', \'MQL\', (\'mql\', \'mq4\', \'mq5\', \'mql4\', \'mql5\'), (\'*.mq4\', \'*.mq5\', \'*.mqh\'), (\'text/x-mql\',)),\n \'MscgenLexer\': (\'pygments.lexers.dsls\', \'Mscgen\', (\'mscgen\', \'msc\'), (\'*.msc\',), ()),\n \'MuPADLexer\': (\'pygments.lexers.algebra\', \'MuPAD\', (\'mupad\',), (\'*.mu\',), ()),\n \'MxmlLexer\': (\'pygments.lexers.actionscript\', \'MXML\', (\'mxml\',), (\'*.mxml\',), ()),\n \'MySqlLexer\': (\'pygments.lexers.sql\', \'MySQL\', (\'mysql\',), (), (\'text/x-mysql\',)),\n \'MyghtyCssLexer\': (\'pygments.lexers.templates\', \'CSS+Myghty\', (\'css+myghty\',), (), (\'text/css+myghty\',)),\n \'MyghtyHtmlLexer\': (\'pygments.lexers.templates\', \'HTML+Myghty\', (\'html+myghty\',), (), (\'text/html+myghty\',)),\n \'MyghtyJavascriptLexer\': (\'pygments.lexers.templates\', \'JavaScript+Myghty\', (\'js+myghty\', \'javascript+myghty\'), (), (\'application/x-javascript+myghty\', \'text/x-javascript+myghty\', \'text/javascript+mygthy\')),\n \'MyghtyLexer\': (\'pygments.lexers.templates\', \'Myghty\', (\'myghty\',), (\'*.myt\', \'autodelegate\'), (\'application/x-myghty\',)),\n \'MyghtyXmlLexer\': (\'pygments.lexers.templates\', \'XML+Myghty\', (\'xml+myghty\',), (), (\'application/xml+myghty\',)),\n \'NCLLexer\': (\'pygments.lexers.ncl\', \'NCL\', (\'ncl\',), (\'*.ncl\',), (\'text/ncl\',)),\n \'NSISLexer\': (\'pygments.lexers.installers\', \'NSIS\', (\'nsis\', \'nsi\', \'nsh\'), (\'*.nsi\', \'*.nsh\'), (\'text/x-nsis\',)),\n \'NasmLexer\': (\'pygments.lexers.asm\', \'NASM\', (\'nasm\',), (\'*.asm\', \'*.ASM\'), (\'text/x-nasm\',)),\n 
\'NasmObjdumpLexer\': (\'pygments.lexers.asm\', \'objdump-nasm\', (\'objdump-nasm\',), (\'*.objdump-intel\',), (\'text/x-nasm-objdump\',)),\n \'NemerleLexer\': (\'pygments.lexers.dotnet\', \'Nemerle\', (\'nemerle\',), (\'*.n\',), (\'text/x-nemerle\',)),\n \'NesCLexer\': (\'pygments.lexers.c_like\', \'nesC\', (\'nesc\',), (\'*.nc\',), (\'text/x-nescsrc\',)),\n \'NewLispLexer\': (\'pygments.lexers.lisp\', \'NewLisp\', (\'newlisp\',), (\'*.lsp\', \'*.nl\', \'*.kif\'), (\'text/x-newlisp\', \'application/x-newlisp\')),\n \'NewspeakLexer\': (\'pygments.lexers.smalltalk\', \'Newspeak\', (\'newspeak\',), (\'*.ns2\',), (\'text/x-newspeak\',)),\n \'NginxConfLexer\': (\'pygments.lexers.configs\', \'Nginx configuration file\', (\'nginx\',), (\'nginx.conf\',), (\'text/x-nginx-conf\',)),\n \'NimrodLexer\': (\'pygments.lexers.nimrod\', \'Nimrod\', (\'nim\', \'nimrod\'), (\'*.nim\', \'*.nimrod\'), (\'text/x-nim\',)),\n \'NitLexer\': (\'pygments.lexers.nit\', \'Nit\', (\'nit\',), (\'*.nit\',), ()),\n \'NixLexer\': (\'pygments.lexers.nix\', \'Nix\', (\'nixos\', \'nix\'), (\'*.nix\',), (\'text/x-nix\',)),\n \'NotmuchLexer\': (\'pygments.lexers.textfmts\', \'Notmuch\', (\'notmuch\',), (), ()),\n \'NuSMVLexer\': (\'pygments.lexers.smv\', \'NuSMV\', (\'nusmv\',), (\'*.smv\',), ()),\n \'NumPyLexer\': (\'pygments.lexers.python\', \'NumPy\', (\'numpy\',), (), ()),\n \'ObjdumpLexer\': (\'pygments.lexers.asm\', \'objdump\', (\'objdump\',), (\'*.objdump\',), (\'text/x-objdump\',)),\n \'ObjectiveCLexer\': (\'pygments.lexers.objective\', \'Objective-C\', (\'objective-c\', \'objectivec\', \'obj-c\', \'objc\'), (\'*.m\', \'*.h\'), (\'text/x-objective-c\',)),\n \'ObjectiveCppLexer\': (\'pygments.lexers.objective\', \'Objective-C++\', (\'objective-c++\', \'objectivec++\', \'obj-c++\', \'objc++\'), (\'*.mm\', \'*.hh\'), (\'text/x-objective-c++\',)),\n \'ObjectiveJLexer\': (\'pygments.lexers.javascript\', \'Objective-J\', (\'objective-j\', \'objectivej\', \'obj-j\', \'objj\'), (\'*.j\',), (\'text/x-objective-j\',)),\n \'OcamlLexer\': (\'pygments.lexers.ml\', \'OCaml\', (\'ocaml\',), (\'*.ml\', \'*.mli\', \'*.mll\', \'*.mly\'), (\'text/x-ocaml\',)),\n \'OctaveLexer\': (\'pygments.lexers.matlab\', \'Octave\', (\'octave\',), (\'*.m\',), (\'text/octave\',)),\n \'OdinLexer\': (\'pygments.lexers.archetype\', \'ODIN\', (\'odin\',), (\'*.odin\',), (\'text/odin\',)),\n \'OocLexer\': (\'pygments.lexers.ooc\', \'Ooc\', (\'ooc\',), (\'*.ooc\',), (\'text/x-ooc\',)),\n \'OpaLexer\': (\'pygments.lexers.ml\', \'Opa\', (\'opa\',), (\'*.opa\',), (\'text/x-opa\',)),\n \'OpenEdgeLexer\': (\'pygments.lexers.business\', \'OpenEdge ABL\', (\'openedge\', \'abl\', \'progress\'), (\'*.p\', \'*.cls\'), (\'text/x-openedge\', \'application/x-openedge\')),\n \'PacmanConfLexer\': (\'pygments.lexers.configs\', \'PacmanConf\', (\'pacmanconf\',), (\'pacman.conf\',), ()),\n \'PanLexer\': (\'pygments.lexers.dsls\', \'Pan\', (\'pan\',), (\'*.pan\',), ()),\n \'ParaSailLexer\': (\'pygments.lexers.parasail\', \'ParaSail\', (\'parasail\',), (\'*.psi\', \'*.psl\'), (\'text/x-parasail\',)),\n \'PawnLexer\': (\'pygments.lexers.pawn\', \'Pawn\', (\'pawn\',), (\'*.p\', \'*.pwn\', \'*.inc\'), (\'text/x-pawn\',)),\n \'PegLexer\': (\'pygments.lexers.grammar_notation\', \'PEG\', (\'peg\',), (\'*.peg\',), (\'text/x-peg\',)),\n \'Perl6Lexer\': (\'pygments.lexers.perl\', \'Perl6\', (\'perl6\', \'pl6\', \'raku\'), (\'*.pl\', \'*.pm\', \'*.nqp\', \'*.p6\', \'*.6pl\', \'*.p6l\', \'*.pl6\', \'*.6pm\', \'*.p6m\', \'*.pm6\', \'*.t\', \'*.raku\', \'*.rakumod\', \'*.rakutest\', 
\'*.rakudoc\'), (\'text/x-perl6\', \'application/x-perl6\')),\n \'PerlLexer\': (\'pygments.lexers.perl\', \'Perl\', (\'perl\', \'pl\'), (\'*.pl\', \'*.pm\', \'*.t\', \'*.perl\'), (\'text/x-perl\', \'application/x-perl\')),\n \'PhpLexer\': (\'pygments.lexers.php\', \'PHP\', (\'php\', \'php3\', \'php4\', \'php5\'), (\'*.php\', \'*.php[345]\', \'*.inc\'), (\'text/x-php\',)),\n \'PigLexer\': (\'pygments.lexers.jvm\', \'Pig\', (\'pig\',), (\'*.pig\',), (\'text/x-pig\',)),\n \'PikeLexer\': (\'pygments.lexers.c_like\', \'Pike\', (\'pike\',), (\'*.pike\', \'*.pmod\'), (\'text/x-pike\',)),\n \'PkgConfigLexer\': (\'pygments.lexers.configs\', \'PkgConfig\', (\'pkgconfig\',), (\'*.pc\',), ()),\n \'PlPgsqlLexer\': (\'pygments.lexers.sql\', \'PL/pgSQL\', (\'plpgsql\',), (), (\'text/x-plpgsql\',)),\n \'PointlessLexer\': (\'pygments.lexers.pointless\', \'Pointless\', (\'pointless\',), (\'*.ptls\',), ()),\n \'PonyLexer\': (\'pygments.lexers.pony\', \'Pony\', (\'pony\',), (\'*.pony\',), ()),\n \'PostScriptLexer\': (\'pygments.lexers.graphics\', \'PostScript\', (\'postscript\', \'postscr\'), (\'*.ps\', \'*.eps\'), (\'application/postscript\',)),\n \'PostgresConsoleLexer\': (\'pygments.lexers.sql\', \'PostgreSQL console (psql)\', (\'psql\', \'postgresql-console\', \'postgres-console\'), (), (\'text/x-postgresql-psql\',)),\n \'PostgresLexer\': (\'pygments.lexers.sql\', \'PostgreSQL SQL dialect\', (\'postgresql\', \'postgres\'), (), (\'text/x-postgresql\',)),\n \'PovrayLexer\': (\'pygments.lexers.graphics\', \'POVRay\', (\'pov\',), (\'*.pov\', \'*.inc\'), (\'text/x-povray\',)),\n \'PowerShellLexer\': (\'pygments.lexers.shell\', \'PowerShell\', (\'powershell\', \'posh\', \'ps1\', \'psm1\'), (\'*.ps1\', \'*.psm1\'), (\'text/x-powershell\',)),\n \'PowerShellSessionLexer\': (\'pygments.lexers.shell\', \'PowerShell Session\', (\'ps1con\',), (), ()),\n \'PraatLexer\': (\'pygments.lexers.praat\', \'Praat\', (\'praat\',), (\'*.praat\', \'*.proc\', \'*.psc\'), ()),\n \'PrologLexer\': (\'pygments.lexers.prolog\', \'Prolog\', (\'prolog\',), (\'*.ecl\', \'*.prolog\', \'*.pro\', \'*.pl\'), (\'text/x-prolog\',)),\n \'PromQLLexer\': (\'pygments.lexers.promql\', \'PromQL\', (\'promql\',), (\'*.promql\',), ()),\n \'PropertiesLexer\': (\'pygments.lexers.configs\', \'Properties\', (\'properties\', \'jproperties\'), (\'*.properties\',), (\'text/x-java-properties\',)),\n \'ProtoBufLexer\': (\'pygments.lexers.dsls\', \'Protocol Buffer\', (\'protobuf\', \'proto\'), (\'*.proto\',), ()),\n \'PsyshConsoleLexer\': (\'pygments.lexers.php\', \'PsySH console session for PHP\', (\'psysh\',), (), ()),\n \'PugLexer\': (\'pygments.lexers.html\', \'Pug\', (\'pug\', \'jade\'), (\'*.pug\', \'*.jade\'), (\'text/x-pug\', \'text/x-jade\')),\n \'PuppetLexer\': (\'pygments.lexers.dsls\', \'Puppet\', (\'puppet\',), (\'*.pp\',), ()),\n \'PyPyLogLexer\': (\'pygments.lexers.console\', \'PyPy Log\', (\'pypylog\', \'pypy\'), (\'*.pypylog\',), (\'application/x-pypylog\',)),\n \'Python2Lexer\': (\'pygments.lexers.python\', \'Python 2.x\', (\'python2\', \'py2\'), (), (\'text/x-python2\', \'application/x-python2\')),\n \'Python2TracebackLexer\': (\'pygments.lexers.python\', \'Python 2.x Traceback\', (\'py2tb\',), (\'*.py2tb\',), (\'text/x-python2-traceback\',)),\n \'PythonConsoleLexer\': (\'pygments.lexers.python\', \'Python console session\', (\'pycon\',), (), (\'text/x-python-doctest\',)),\n \'PythonLexer\': (\'pygments.lexers.python\', \'Python\', (\'python\', \'py\', \'sage\', \'python3\', \'py3\'), (\'*.py\', \'*.pyw\', \'*.jy\', \'*.sage\', \'*.sc\', 
\'SConstruct\', \'SConscript\', \'*.bzl\', \'BUCK\', \'BUILD\', \'BUILD.bazel\', \'WORKSPACE\', \'*.tac\'), (\'text/x-python\', \'application/x-python\', \'text/x-python3\', \'application/x-python3\')),\n \'PythonTracebackLexer\': (\'pygments.lexers.python\', \'Python Traceback\', (\'pytb\', \'py3tb\'), (\'*.pytb\', \'*.py3tb\'), (\'text/x-python-traceback\', \'text/x-python3-traceback\')),\n \'QBasicLexer\': (\'pygments.lexers.basic\', \'QBasic\', (\'qbasic\', \'basic\'), (\'*.BAS\', \'*.bas\'), (\'text/basic\',)),\n \'QVToLexer\': (\'pygments.lexers.qvt\', \'QVTO\', (\'qvto\', \'qvt\'), (\'*.qvto\',), ()),\n \'QmlLexer\': (\'pygments.lexers.webmisc\', \'QML\', (\'qml\', \'qbs\'), (\'*.qml\', \'*.qbs\'), (\'application/x-qml\', \'application/x-qt.qbs+qml\')),\n \'RConsoleLexer\': (\'pygments.lexers.r\', \'RConsole\', (\'rconsole\', \'rout\'), (\'*.Rout\',), ()),\n \'RNCCompactLexer\': (\'pygments.lexers.rnc\', \'Relax-NG Compact\', (\'rnc\', \'rng-compact\'), (\'*.rnc\',), ()),\n \'RPMSpecLexer\': (\'pygments.lexers.installers\', \'RPMSpec\', (\'spec\',), (\'*.spec\',), (\'text/x-rpm-spec\',)),\n \'RacketLexer\': (\'pygments.lexers.lisp\', \'Racket\', (\'racket\', \'rkt\'), (\'*.rkt\', \'*.rktd\', \'*.rktl\'), (\'text/x-racket\', \'application/x-racket\')),\n \'RagelCLexer\': (\'pygments.lexers.parsers\', \'Ragel in C Host\', (\'ragel-c\',), (\'*.rl\',), ()),\n \'RagelCppLexer\': (\'pygments.lexers.parsers\', \'Ragel in CPP Host\', (\'ragel-cpp\',), (\'*.rl\',), ()),\n \'RagelDLexer\': (\'pygments.lexers.parsers\', \'Ragel in D Host\', (\'ragel-d\',), (\'*.rl\',), ()),\n \'RagelEmbeddedLexer\': (\'pygments.lexers.parsers\', \'Embedded Ragel\', (\'ragel-em\',), (\'*.rl\',), ()),\n \'RagelJavaLexer\': (\'pygments.lexers.parsers\', \'Ragel in Java Host\', (\'ragel-java\',), (\'*.rl\',), ()),\n \'RagelLexer\': (\'pygments.lexers.parsers\', \'Ragel\', (\'ragel\',), (), ()),\n \'RagelObjectiveCLexer\': (\'pygments.lexers.parsers\', \'Ragel in Objective C Host\', (\'ragel-objc\',), (\'*.rl\',), ()),\n \'RagelRubyLexer\': (\'pygments.lexers.parsers\', \'Ragel in Ruby Host\', (\'ragel-ruby\', \'ragel-rb\'), (\'*.rl\',), ()),\n \'RawTokenLexer\': (\'pygments.lexers.special\', \'Raw token data\', (\'raw\',), (), (\'application/x-pygments-tokens\',)),\n \'RdLexer\': (\'pygments.lexers.r\', \'Rd\', (\'rd\',), (\'*.Rd\',), (\'text/x-r-doc\',)),\n \'ReasonLexer\': (\'pygments.lexers.ml\', \'ReasonML\', (\'reason\', \'reasonml\'), (\'*.re\', \'*.rei\'), (\'text/x-reasonml\',)),\n \'RebolLexer\': (\'pygments.lexers.rebol\', \'REBOL\', (\'rebol\',), (\'*.r\', \'*.r3\', \'*.reb\'), (\'text/x-rebol\',)),\n \'RedLexer\': (\'pygments.lexers.rebol\', \'Red\', (\'red\', \'red/system\'), (\'*.red\', \'*.reds\'), (\'text/x-red\', \'text/x-red-system\')),\n \'RedcodeLexer\': (\'pygments.lexers.esoteric\', \'Redcode\', (\'redcode\',), (\'*.cw\',), ()),\n \'RegeditLexer\': (\'pygments.lexers.configs\', \'reg\', (\'registry\',), (\'*.reg\',), (\'text/x-windows-registry\',)),\n \'ResourceLexer\': (\'pygments.lexers.resource\', \'ResourceBundle\', (\'resource\', \'resourcebundle\'), (), ()),\n \'RexxLexer\': (\'pygments.lexers.scripting\', \'Rexx\', (\'rexx\', \'arexx\'), (\'*.rexx\', \'*.rex\', \'*.rx\', \'*.arexx\'), (\'text/x-rexx\',)),\n \'RhtmlLexer\': (\'pygments.lexers.templates\', \'RHTML\', (\'rhtml\', \'html+erb\', \'html+ruby\'), (\'*.rhtml\',), (\'text/html+ruby\',)),\n \'RideLexer\': (\'pygments.lexers.ride\', \'Ride\', (\'ride\',), (\'*.ride\',), (\'text/x-ride\',)),\n \'RoboconfGraphLexer\': 
(\'pygments.lexers.roboconf\', \'Roboconf Graph\', (\'roboconf-graph\',), (\'*.graph\',), ()),\n \'RoboconfInstancesLexer\': (\'pygments.lexers.roboconf\', \'Roboconf Instances\', (\'roboconf-instances\',), (\'*.instances\',), ()),\n \'RobotFrameworkLexer\': (\'pygments.lexers.robotframework\', \'RobotFramework\', (\'robotframework\',), (\'*.robot\',), (\'text/x-robotframework\',)),\n \'RqlLexer\': (\'pygments.lexers.sql\', \'RQL\', (\'rql\',), (\'*.rql\',), (\'text/x-rql\',)),\n \'RslLexer\': (\'pygments.lexers.dsls\', \'RSL\', (\'rsl\',), (\'*.rsl\',), (\'text/rsl\',)),\n \'RstLexer\': (\'pygments.lexers.markup\', \'reStructuredText\', (\'rst\', \'rest\', \'restructuredtext\'), (\'*.rst\', \'*.rest\'), (\'text/x-rst\', \'text/prs.fallenstein.rst\')),\n \'RtsLexer\': (\'pygments.lexers.trafficscript\', \'TrafficScript\', (\'rts\', \'trafficscript\'), (\'*.rts\',), ()),\n \'RubyConsoleLexer\': (\'pygments.lexers.ruby\', \'Ruby irb session\', (\'rbcon\', \'irb\'), (), (\'text/x-ruby-shellsession\',)),\n \'RubyLexer\': (\'pygments.lexers.ruby\', \'Ruby\', (\'rb\', \'ruby\', \'duby\'), (\'*.rb\', \'*.rbw\', \'Rakefile\', \'*.rake\', \'*.gemspec\', \'*.rbx\', \'*.duby\', \'Gemfile\'), (\'text/x-ruby\', \'application/x-ruby\')),\n \'RustLexer\': (\'pygments.lexers.rust\', \'Rust\', (\'rust\', \'rs\'), (\'*.rs\', \'*.rs.in\'), (\'text/rust\', \'text/x-rust\')),\n \'SASLexer\': (\'pygments.lexers.sas\', \'SAS\', (\'sas\',), (\'*.SAS\', \'*.sas\'), (\'text/x-sas\', \'text/sas\', \'application/x-sas\')),\n \'SLexer\': (\'pygments.lexers.r\', \'S\', (\'splus\', \'s\', \'r\'), (\'*.S\', \'*.R\', \'.Rhistory\', \'.Rprofile\', \'.Renviron\'), (\'text/S-plus\', \'text/S\', \'text/x-r-source\', \'text/x-r\', \'text/x-R\', \'text/x-r-history\', \'text/x-r-profile\')),\n \'SMLLexer\': (\'pygments.lexers.ml\', \'Standard ML\', (\'sml\',), (\'*.sml\', \'*.sig\', \'*.fun\'), (\'text/x-standardml\', \'application/x-standardml\')),\n \'SarlLexer\': (\'pygments.lexers.jvm\', \'SARL\', (\'sarl\',), (\'*.sarl\',), (\'text/x-sarl\',)),\n \'SassLexer\': (\'pygments.lexers.css\', \'Sass\', (\'sass\',), (\'*.sass\',), (\'text/x-sass\',)),\n \'ScalaLexer\': (\'pygments.lexers.jvm\', \'Scala\', (\'scala\',), (\'*.scala\',), (\'text/x-scala\',)),\n \'ScamlLexer\': (\'pygments.lexers.html\', \'Scaml\', (\'scaml\',), (\'*.scaml\',), (\'text/x-scaml\',)),\n \'ScdocLexer\': (\'pygments.lexers.scdoc\', \'scdoc\', (\'scdoc\', \'scd\'), (\'*.scd\', \'*.scdoc\'), ()),\n \'SchemeLexer\': (\'pygments.lexers.lisp\', \'Scheme\', (\'scheme\', \'scm\'), (\'*.scm\', \'*.ss\'), (\'text/x-scheme\', \'application/x-scheme\')),\n \'ScilabLexer\': (\'pygments.lexers.matlab\', \'Scilab\', (\'scilab\',), (\'*.sci\', \'*.sce\', \'*.tst\'), (\'text/scilab\',)),\n \'ScssLexer\': (\'pygments.lexers.css\', \'SCSS\', (\'scss\',), (\'*.scss\',), (\'text/x-scss\',)),\n \'ShExCLexer\': (\'pygments.lexers.rdf\', \'ShExC\', (\'shexc\', \'shex\'), (\'*.shex\',), (\'text/shex\',)),\n \'ShenLexer\': (\'pygments.lexers.lisp\', \'Shen\', (\'shen\',), (\'*.shen\',), (\'text/x-shen\', \'application/x-shen\')),\n \'SieveLexer\': (\'pygments.lexers.sieve\', \'Sieve\', (\'sieve\',), (\'*.siv\', \'*.sieve\'), ()),\n \'SilverLexer\': (\'pygments.lexers.verification\', \'Silver\', (\'silver\',), (\'*.sil\', \'*.vpr\'), ()),\n \'SingularityLexer\': (\'pygments.lexers.configs\', \'Singularity\', (\'singularity\',), (\'*.def\', \'Singularity\'), ()),\n \'SlashLexer\': (\'pygments.lexers.slash\', \'Slash\', (\'slash\',), (\'*.sl\',), ()),\n \'SlimLexer\': 
(\'pygments.lexers.webmisc\', \'Slim\', (\'slim\',), (\'*.slim\',), (\'text/x-slim\',)),\n \'SlurmBashLexer\': (\'pygments.lexers.shell\', \'Slurm\', (\'slurm\', \'sbatch\'), (\'*.sl\',), ()),\n \'SmaliLexer\': (\'pygments.lexers.dalvik\', \'Smali\', (\'smali\',), (\'*.smali\',), (\'text/smali\',)),\n \'SmalltalkLexer\': (\'pygments.lexers.smalltalk\', \'Smalltalk\', (\'smalltalk\', \'squeak\', \'st\'), (\'*.st\',), (\'text/x-smalltalk\',)),\n \'SmartGameFormatLexer\': (\'pygments.lexers.sgf\', \'SmartGameFormat\', (\'sgf\',), (\'*.sgf\',), ()),\n \'SmartyLexer\': (\'pygments.lexers.templates\', \'Smarty\', (\'smarty\',), (\'*.tpl\',), (\'application/x-smarty\',)),\n \'SnobolLexer\': (\'pygments.lexers.snobol\', \'Snobol\', (\'snobol\',), (\'*.snobol\',), (\'text/x-snobol\',)),\n \'SnowballLexer\': (\'pygments.lexers.dsls\', \'Snowball\', (\'snowball\',), (\'*.sbl\',), ()),\n \'SolidityLexer\': (\'pygments.lexers.solidity\', \'Solidity\', (\'solidity\',), (\'*.sol\',), ()),\n \'SourcePawnLexer\': (\'pygments.lexers.pawn\', \'SourcePawn\', (\'sp\',), (\'*.sp\',), (\'text/x-sourcepawn\',)),\n \'SourcesListLexer\': (\'pygments.lexers.installers\', \'Debian Sourcelist\', (\'sourceslist\', \'sources.list\', \'debsources\'), (\'sources.list\',), ()),\n \'SparqlLexer\': (\'pygments.lexers.rdf\', \'SPARQL\', (\'sparql\',), (\'*.rq\', \'*.sparql\'), (\'application/sparql-query\',)),\n \'SqlLexer\': (\'pygments.lexers.sql\', \'SQL\', (\'sql\',), (\'*.sql\',), (\'text/x-sql\',)),\n \'SqliteConsoleLexer\': (\'pygments.lexers.sql\', \'sqlite3con\', (\'sqlite3\',), (\'*.sqlite3-console\',), (\'text/x-sqlite3-console\',)),\n \'SquidConfLexer\': (\'pygments.lexers.configs\', \'SquidConf\', (\'squidconf\', \'squid.conf\', \'squid\'), (\'squid.conf\',), (\'text/x-squidconf\',)),\n \'SspLexer\': (\'pygments.lexers.templates\', \'Scalate Server Page\', (\'ssp\',), (\'*.ssp\',), (\'application/x-ssp\',)),\n \'StanLexer\': (\'pygments.lexers.modeling\', \'Stan\', (\'stan\',), (\'*.stan\',), ()),\n \'StataLexer\': (\'pygments.lexers.stata\', \'Stata\', (\'stata\', \'do\'), (\'*.do\', \'*.ado\'), (\'text/x-stata\', \'text/stata\', \'application/x-stata\')),\n \'SuperColliderLexer\': (\'pygments.lexers.supercollider\', \'SuperCollider\', (\'sc\', \'supercollider\'), (\'*.sc\', \'*.scd\'), (\'application/supercollider\', \'text/supercollider\')),\n \'SwiftLexer\': (\'pygments.lexers.objective\', \'Swift\', (\'swift\',), (\'*.swift\',), (\'text/x-swift\',)),\n \'SwigLexer\': (\'pygments.lexers.c_like\', \'SWIG\', (\'swig\',), (\'*.swg\', \'*.i\'), (\'text/swig\',)),\n \'SystemVerilogLexer\': (\'pygments.lexers.hdl\', \'systemverilog\', (\'systemverilog\', \'sv\'), (\'*.sv\', \'*.svh\'), (\'text/x-systemverilog\',)),\n \'TAPLexer\': (\'pygments.lexers.testing\', \'TAP\', (\'tap\',), (\'*.tap\',), ()),\n \'TNTLexer\': (\'pygments.lexers.tnt\', \'Typographic Number Theory\', (\'tnt\',), (\'*.tnt\',), ()),\n \'TOMLLexer\': (\'pygments.lexers.configs\', \'TOML\', (\'toml\',), (\'*.toml\', \'Pipfile\', \'poetry.lock\'), ()),\n \'Tads3Lexer\': (\'pygments.lexers.int_fiction\', \'TADS 3\', (\'tads3\',), (\'*.t\',), ()),\n \'TasmLexer\': (\'pygments.lexers.asm\', \'TASM\', (\'tasm\',), (\'*.asm\', \'*.ASM\', \'*.tasm\'), (\'text/x-tasm\',)),\n \'TclLexer\': (\'pygments.lexers.tcl\', \'Tcl\', (\'tcl\',), (\'*.tcl\', \'*.rvt\'), (\'text/x-tcl\', \'text/x-script.tcl\', \'application/x-tcl\')),\n \'TcshLexer\': (\'pygments.lexers.shell\', \'Tcsh\', (\'tcsh\', \'csh\'), (\'*.tcsh\', \'*.csh\'), (\'application/x-csh\',)),\n 
\'TcshSessionLexer\': (\'pygments.lexers.shell\', \'Tcsh Session\', (\'tcshcon\',), (), ()),\n \'TeaTemplateLexer\': (\'pygments.lexers.templates\', \'Tea\', (\'tea\',), (\'*.tea\',), (\'text/x-tea\',)),\n \'TeraTermLexer\': (\'pygments.lexers.teraterm\', \'Tera Term macro\', (\'ttl\', \'teraterm\', \'teratermmacro\'), (\'*.ttl\',), (\'text/x-teratermmacro\',)),\n \'TermcapLexer\': (\'pygments.lexers.configs\', \'Termcap\', (\'termcap\',), (\'termcap\', \'termcap.src\'), ()),\n \'TerminfoLexer\': (\'pygments.lexers.configs\', \'Terminfo\', (\'terminfo\',), (\'terminfo\', \'terminfo.src\'), ()),\n \'TerraformLexer\': (\'pygments.lexers.configs\', \'Terraform\', (\'terraform\', \'tf\'), (\'*.tf\',), (\'application/x-tf\', \'application/x-terraform\')),\n \'TexLexer\': (\'pygments.lexers.markup\', \'TeX\', (\'tex\', \'latex\'), (\'*.tex\', \'*.aux\', \'*.toc\'), (\'text/x-tex\', \'text/x-latex\')),\n \'TextLexer\': (\'pygments.lexers.special\', \'Text only\', (\'text\',), (\'*.txt\',), (\'text/plain\',)),\n \'ThriftLexer\': (\'pygments.lexers.dsls\', \'Thrift\', (\'thrift\',), (\'*.thrift\',), (\'application/x-thrift\',)),\n \'TiddlyWiki5Lexer\': (\'pygments.lexers.markup\', \'tiddler\', (\'tid\',), (\'*.tid\',), (\'text/vnd.tiddlywiki\',)),\n \'TodotxtLexer\': (\'pygments.lexers.textfmts\', \'Todotxt\', (\'todotxt\',), (\'todo.txt\', \'*.todotxt\'), (\'text/x-todo\',)),\n \'TransactSqlLexer\': (\'pygments.lexers.sql\', \'Transact-SQL\', (\'tsql\', \'t-sql\'), (\'*.sql\',), (\'text/x-tsql\',)),\n \'TreetopLexer\': (\'pygments.lexers.parsers\', \'Treetop\', (\'treetop\',), (\'*.treetop\', \'*.tt\'), ()),\n \'TurtleLexer\': (\'pygments.lexers.rdf\', \'Turtle\', (\'turtle\',), (\'*.ttl\',), (\'text/turtle\', \'application/x-turtle\')),\n \'TwigHtmlLexer\': (\'pygments.lexers.templates\', \'HTML+Twig\', (\'html+twig\',), (\'*.twig\',), (\'text/html+twig\',)),\n \'TwigLexer\': (\'pygments.lexers.templates\', \'Twig\', (\'twig\',), (), (\'application/x-twig\',)),\n \'TypeScriptLexer\': (\'pygments.lexers.javascript\', \'TypeScript\', (\'ts\', \'typescript\'), (\'*.ts\', \'*.tsx\'), (\'text/x-typescript\',)),\n \'TypoScriptCssDataLexer\': (\'pygments.lexers.typoscript\', \'TypoScriptCssData\', (\'typoscriptcssdata\',), (), ()),\n \'TypoScriptHtmlDataLexer\': (\'pygments.lexers.typoscript\', \'TypoScriptHtmlData\', (\'typoscripthtmldata\',), (), ()),\n \'TypoScriptLexer\': (\'pygments.lexers.typoscript\', \'TypoScript\', (\'typoscript\',), (\'*.typoscript\',), (\'text/x-typoscript\',)),\n \'UcodeLexer\': (\'pygments.lexers.unicon\', \'ucode\', (\'ucode\',), (\'*.u\', \'*.u1\', \'*.u2\'), ()),\n \'UniconLexer\': (\'pygments.lexers.unicon\', \'Unicon\', (\'unicon\',), (\'*.icn\',), (\'text/unicon\',)),\n \'UrbiscriptLexer\': (\'pygments.lexers.urbi\', \'UrbiScript\', (\'urbiscript\',), (\'*.u\',), (\'application/x-urbiscript\',)),\n \'UsdLexer\': (\'pygments.lexers.usd\', \'USD\', (\'usd\', \'usda\'), (\'*.usd\', \'*.usda\'), ()),\n \'VBScriptLexer\': (\'pygments.lexers.basic\', \'VBScript\', (\'vbscript\',), (\'*.vbs\', \'*.VBS\'), ()),\n \'VCLLexer\': (\'pygments.lexers.varnish\', \'VCL\', (\'vcl\',), (\'*.vcl\',), (\'text/x-vclsrc\',)),\n \'VCLSnippetLexer\': (\'pygments.lexers.varnish\', \'VCLSnippets\', (\'vclsnippets\', \'vclsnippet\'), (), (\'text/x-vclsnippet\',)),\n \'VCTreeStatusLexer\': (\'pygments.lexers.console\', \'VCTreeStatus\', (\'vctreestatus\',), (), ()),\n \'VGLLexer\': (\'pygments.lexers.dsls\', \'VGL\', (\'vgl\',), (\'*.rpf\',), ()),\n \'ValaLexer\': (\'pygments.lexers.c_like\', 
\'Vala\', (\'vala\', \'vapi\'), (\'*.vala\', \'*.vapi\'), (\'text/x-vala\',)),\n \'VbNetAspxLexer\': (\'pygments.lexers.dotnet\', \'aspx-vb\', (\'aspx-vb\',), (\'*.aspx\', \'*.asax\', \'*.ascx\', \'*.ashx\', \'*.asmx\', \'*.axd\'), ()),\n \'VbNetLexer\': (\'pygments.lexers.dotnet\', \'VB.net\', (\'vb.net\', \'vbnet\'), (\'*.vb\', \'*.bas\'), (\'text/x-vbnet\', \'text/x-vba\')),\n \'VelocityHtmlLexer\': (\'pygments.lexers.templates\', \'HTML+Velocity\', (\'html+velocity\',), (), (\'text/html+velocity\',)),\n \'VelocityLexer\': (\'pygments.lexers.templates\', \'Velocity\', (\'velocity\',), (\'*.vm\', \'*.fhtml\'), ()),\n \'VelocityXmlLexer\': (\'pygments.lexers.templates\', \'XML+Velocity\', (\'xml+velocity\',), (), (\'application/xml+velocity\',)),\n \'VerilogLexer\': (\'pygments.lexers.hdl\', \'verilog\', (\'verilog\', \'v\'), (\'*.v\',), (\'text/x-verilog\',)),\n \'VhdlLexer\': (\'pygments.lexers.hdl\', \'vhdl\', (\'vhdl\',), (\'*.vhdl\', \'*.vhd\'), (\'text/x-vhdl\',)),\n \'VimLexer\': (\'pygments.lexers.textedit\', \'VimL\', (\'vim\',), (\'*.vim\', \'.vimrc\', \'.exrc\', \'.gvimrc\', \'_vimrc\', \'_exrc\', \'_gvimrc\', \'vimrc\', \'gvimrc\'), (\'text/x-vim\',)),\n \'WDiffLexer\': (\'pygments.lexers.diff\', \'WDiff\', (\'wdiff\',), (\'*.wdiff\',), ()),\n \'WebIDLLexer\': (\'pygments.lexers.webidl\', \'Web IDL\', (\'webidl\',), (\'*.webidl\',), ()),\n \'WhileyLexer\': (\'pygments.lexers.whiley\', \'Whiley\', (\'whiley\',), (\'*.whiley\',), (\'text/x-whiley\',)),\n \'X10Lexer\': (\'pygments.lexers.x10\', \'X10\', (\'x10\', \'xten\'), (\'*.x10\',), (\'text/x-x10\',)),\n \'XQueryLexer\': (\'pygments.lexers.webmisc\', \'XQuery\', (\'xquery\', \'xqy\', \'xq\', \'xql\', \'xqm\'), (\'*.xqy\', \'*.xquery\', \'*.xq\', \'*.xql\', \'*.xqm\'), (\'text/xquery\', \'application/xquery\')),\n \'XmlDjangoLexer\': (\'pygments.lexers.templates\', \'XML+Django/Jinja\', (\'xml+django\', \'xml+jinja\'), (), (\'application/xml+django\', \'application/xml+jinja\')),\n \'XmlErbLexer\': (\'pygments.lexers.templates\', \'XML+Ruby\', (\'xml+erb\', \'xml+ruby\'), (), (\'application/xml+ruby\',)),\n \'XmlLexer\': (\'pygments.lexers.html\', \'XML\', (\'xml\',), (\'*.xml\', \'*.xsl\', \'*.rss\', \'*.xslt\', \'*.xsd\', \'*.wsdl\', \'*.wsf\'), (\'text/xml\', \'application/xml\', \'image/svg+xml\', \'application/rss+xml\', \'application/atom+xml\')),\n \'XmlPhpLexer\': (\'pygments.lexers.templates\', \'XML+PHP\', (\'xml+php\',), (), (\'application/xml+php\',)),\n \'XmlSmartyLexer\': (\'pygments.lexers.templates\', \'XML+Smarty\', (\'xml+smarty\',), (), (\'application/xml+smarty\',)),\n \'XorgLexer\': (\'pygments.lexers.xorg\', \'Xorg\', (\'xorg.conf\',), (\'xorg.conf\',), ()),\n \'XsltLexer\': (\'pygments.lexers.html\', \'XSLT\', (\'xslt\',), (\'*.xsl\', \'*.xslt\', \'*.xpl\'), (\'application/xsl+xml\', \'application/xslt+xml\')),\n \'XtendLexer\': (\'pygments.lexers.jvm\', \'Xtend\', (\'xtend\',), (\'*.xtend\',), (\'text/x-xtend\',)),\n \'XtlangLexer\': (\'pygments.lexers.lisp\', \'xtlang\', (\'extempore\',), (\'*.xtm\',), ()),\n \'YamlJinjaLexer\': (\'pygments.lexers.templates\', \'YAML+Jinja\', (\'yaml+jinja\', \'salt\', \'sls\'), (\'*.sls\',), (\'text/x-yaml+jinja\', \'text/x-sls\')),\n \'YamlLexer\': (\'pygments.lexers.data\', \'YAML\', (\'yaml\',), (\'*.yaml\', \'*.yml\'), (\'text/x-yaml\',)),\n \'YangLexer\': (\'pygments.lexers.yang\', \'YANG\', (\'yang\',), (\'*.yang\',), (\'application/yang\',)),\n \'ZeekLexer\': (\'pygments.lexers.dsls\', \'Zeek\', (\'zeek\', \'bro\'), (\'*.zeek\', \'*.bro\'), ()),\n 
\'ZephirLexer\': (\'pygments.lexers.php\', \'Zephir\', (\'zephir\',), (\'*.zep\',), ()),\n \'ZigLexer\': (\'pygments.lexers.zig\', \'Zig\', (\'zig\',), (\'*.zig\',), (\'text/zig\',)),\n}\n\nif __name__ == \'__main__\': # pragma: no cover\n import sys\n import os\n\n # lookup lexers\n found_lexers = []\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), \'..\', \'..\'))\n for root, dirs, files in os.walk(\'.\'):\n for filename in files:\n if filename.endswith(\'.py\') and not filename.startswith(\'_\'):\n module_name = \'pygments.lexers%s.%s\' % (\n root[1:].replace(\'/\', \'.\'), filename[:-3])\n print(module_name)\n module = __import__(module_name, None, None, [\'\'])\n for lexer_name in module.__all__:\n lexer = getattr(module, lexer_name)\n found_lexers.append(\n \'%r: %r\' % (lexer_name,\n (module_name,\n lexer.name,\n tuple(lexer.aliases),\n tuple(lexer.filenames),\n tuple(lexer.mimetypes))))\n # sort them to make the diff minimal\n found_lexers.sort()\n\n # extract useful sourcecode from this file\n with open(__file__) as fp:\n content = fp.read()\n # replace crnl to nl for Windows.\n #\n # Note that, originally, contributers should keep nl of master\n # repository, for example by using some kind of automatic\n # management EOL, like `EolExtension\n # `.\n content = content.replace("\\r\\n", "\\n")\n header = content[:content.find(\'LEXERS = {\')]\n footer = content[content.find("if __name__ == \'__main__\':"):]\n\n # write new file\n with open(__file__, \'w\') as fp:\n fp.write(header)\n fp.write(\'LEXERS = {\\n %s,\\n}\\n\\n\' % \',\\n \'.join(found_lexers))\n fp.write(footer)\n\n print (\'=== %d lexers processed.\' % len(found_lexers))\n') + __stickytape_write_module('pygments/modeline.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.modeline\n ~~~~~~~~~~~~~~~~~\n\n A simple modeline parser (based on pymodeline).\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nimport re\n\n__all__ = [\'get_filetype_from_buffer\']\n\n\nmodeline_re = re.compile(r\'\'\'\n (?: vi | vim | ex ) (?: [<=>]? \\d* )? 
:\n .* (?: ft | filetype | syn | syntax ) = ( [^:\\s]+ )\n\'\'\', re.VERBOSE)\n\n\ndef get_filetype_from_line(l):\n m = modeline_re.search(l)\n if m:\n return m.group(1)\n\n\ndef get_filetype_from_buffer(buf, max_lines=5):\n """\n Scan the buffer for modelines and return filetype if one is found.\n """\n lines = buf.splitlines()\n for l in lines[-1:-max_lines-1:-1]:\n ret = get_filetype_from_line(l)\n if ret:\n return ret\n for i in range(max_lines, -1, -1):\n if i < len(lines):\n ret = get_filetype_from_line(lines[i])\n if ret:\n return ret\n\n return None\n') + __stickytape_write_module('icecream/coloring.py', b"# -*- coding: utf-8 -*-\n\n#\n# IceCream - Never use print() to debug again\n#\n# Ansgar Grunseid\n# grunseid.com\n# grunseid@gmail.com\n#\n# License: MIT\n#\n\nfrom pygments.style import Style\nfrom pygments.token import (\n Text, Name, Error, Other, String, Number, Keyword, Generic, Literal,\n Comment, Operator, Whitespace, Punctuation)\n\n\n# Solarized: https://ethanschoonover.com/solarized/\nclass SolarizedDark(Style):\n\n BASE03 = '#002b36' # noqa\n BASE02 = '#073642' # noqa\n BASE01 = '#586e75' # noqa\n BASE00 = '#657b83' # noqa\n BASE0 = '#839496' # noqa\n BASE1 = '#93a1a1' # noqa\n BASE2 = '#eee8d5' # noqa\n BASE3 = '#fdf6e3' # noqa\n YELLOW = '#b58900' # noqa\n ORANGE = '#cb4b16' # noqa\n RED = '#dc322f' # noqa\n MAGENTA = '#d33682' # noqa\n VIOLET = '#6c71c4' # noqa\n BLUE = '#268bd2' # noqa\n CYAN = '#2aa198' # noqa\n GREEN = '#859900' # noqa\n\n styles = {\n Text: BASE0,\n Whitespace: BASE03,\n Error: RED,\n Other: BASE0,\n\n Name: BASE1,\n Name.Attribute: BASE0,\n Name.Builtin: BLUE,\n Name.Builtin.Pseudo: BLUE,\n Name.Class: BLUE,\n Name.Constant: YELLOW,\n Name.Decorator: ORANGE,\n Name.Entity: ORANGE,\n Name.Exception: ORANGE,\n Name.Function: BLUE,\n Name.Property: BLUE,\n Name.Label: BASE0,\n Name.Namespace: YELLOW,\n Name.Other: BASE0,\n Name.Tag: GREEN,\n Name.Variable: ORANGE,\n Name.Variable.Class: BLUE,\n Name.Variable.Global: BLUE,\n Name.Variable.Instance: BLUE,\n\n String: CYAN,\n String.Backtick: CYAN,\n String.Char: CYAN,\n String.Doc: CYAN,\n String.Double: CYAN,\n String.Escape: ORANGE,\n String.Heredoc: CYAN,\n String.Interpol: ORANGE,\n String.Other: CYAN,\n String.Regex: CYAN,\n String.Single: CYAN,\n String.Symbol: CYAN,\n\n Number: CYAN,\n Number.Float: CYAN,\n Number.Hex: CYAN,\n Number.Integer: CYAN,\n Number.Integer.Long: CYAN,\n Number.Oct: CYAN,\n\n Keyword: GREEN,\n Keyword.Constant: GREEN,\n Keyword.Declaration: GREEN,\n Keyword.Namespace: ORANGE,\n Keyword.Pseudo: ORANGE,\n Keyword.Reserved: GREEN,\n Keyword.Type: GREEN,\n\n Generic: BASE0,\n Generic.Deleted: BASE0,\n Generic.Emph: BASE0,\n Generic.Error: BASE0,\n Generic.Heading: BASE0,\n Generic.Inserted: BASE0,\n Generic.Output: BASE0,\n Generic.Prompt: BASE0,\n Generic.Strong: BASE0,\n Generic.Subheading: BASE0,\n Generic.Traceback: BASE0,\n\n Literal: BASE0,\n Literal.Date: BASE0,\n\n Comment: BASE01,\n Comment.Multiline: BASE01,\n Comment.Preproc: BASE01,\n Comment.Single: BASE01,\n Comment.Special: BASE01,\n\n Operator: BASE0,\n Operator.Word: GREEN,\n\n Punctuation: BASE0,\n }\n") + __stickytape_write_module('pygments/style.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.style\n ~~~~~~~~~~~~~~\n\n Basic style object.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nfrom pygments.token import Token, STANDARD_TYPES\n\n# Default mapping of ansixxx to RGB colors.\n_ansimap = {\n # dark\n \'ansiblack\': 
\'000000\',\n \'ansired\': \'7f0000\',\n \'ansigreen\': \'007f00\',\n \'ansiyellow\': \'7f7fe0\',\n \'ansiblue\': \'00007f\',\n \'ansimagenta\': \'7f007f\',\n \'ansicyan\': \'007f7f\',\n \'ansigray\': \'e5e5e5\',\n # normal\n \'ansibrightblack\': \'555555\',\n \'ansibrightred\': \'ff0000\',\n \'ansibrightgreen\': \'00ff00\',\n \'ansibrightyellow\': \'ffff00\',\n \'ansibrightblue\': \'0000ff\',\n \'ansibrightmagenta\': \'ff00ff\',\n \'ansibrightcyan\': \'00ffff\',\n \'ansiwhite\': \'ffffff\',\n}\n# mapping of deprecated #ansixxx colors to new color names\n_deprecated_ansicolors = {\n # dark\n \'#ansiblack\': \'ansiblack\',\n \'#ansidarkred\': \'ansired\',\n \'#ansidarkgreen\': \'ansigreen\',\n \'#ansibrown\': \'ansiyellow\',\n \'#ansidarkblue\': \'ansiblue\',\n \'#ansipurple\': \'ansimagenta\',\n \'#ansiteal\': \'ansicyan\',\n \'#ansilightgray\': \'ansigray\',\n # normal\n \'#ansidarkgray\': \'ansibrightblack\',\n \'#ansired\': \'ansibrightred\',\n \'#ansigreen\': \'ansibrightgreen\',\n \'#ansiyellow\': \'ansibrightyellow\',\n \'#ansiblue\': \'ansibrightblue\',\n \'#ansifuchsia\': \'ansibrightmagenta\',\n \'#ansiturquoise\': \'ansibrightcyan\',\n \'#ansiwhite\': \'ansiwhite\',\n}\nansicolors = set(_ansimap)\n\n\nclass StyleMeta(type):\n\n def __new__(mcs, name, bases, dct):\n obj = type.__new__(mcs, name, bases, dct)\n for token in STANDARD_TYPES:\n if token not in obj.styles:\n obj.styles[token] = \'\'\n\n def colorformat(text):\n if text in ansicolors:\n return text\n if text[0:1] == \'#\':\n col = text[1:]\n if len(col) == 6:\n return col\n elif len(col) == 3:\n return col[0] * 2 + col[1] * 2 + col[2] * 2\n elif text == \'\':\n return \'\'\n elif text.startswith(\'var\') or text.startswith(\'calc\'):\n return text\n assert False, "wrong color format %r" % text\n\n _styles = obj._styles = {}\n\n for ttype in obj.styles:\n for token in ttype.split():\n if token in _styles:\n continue\n ndef = _styles.get(token.parent, None)\n styledefs = obj.styles.get(token, \'\').split()\n if not ndef or token is None:\n ndef = [\'\', 0, 0, 0, \'\', \'\', 0, 0, 0]\n elif \'noinherit\' in styledefs and token is not Token:\n ndef = _styles[Token][:]\n else:\n ndef = ndef[:]\n _styles[token] = ndef\n for styledef in obj.styles.get(token, \'\').split():\n if styledef == \'noinherit\':\n pass\n elif styledef == \'bold\':\n ndef[1] = 1\n elif styledef == \'nobold\':\n ndef[1] = 0\n elif styledef == \'italic\':\n ndef[2] = 1\n elif styledef == \'noitalic\':\n ndef[2] = 0\n elif styledef == \'underline\':\n ndef[3] = 1\n elif styledef == \'nounderline\':\n ndef[3] = 0\n elif styledef[:3] == \'bg:\':\n ndef[4] = colorformat(styledef[3:])\n elif styledef[:7] == \'border:\':\n ndef[5] = colorformat(styledef[7:])\n elif styledef == \'roman\':\n ndef[6] = 1\n elif styledef == \'sans\':\n ndef[7] = 1\n elif styledef == \'mono\':\n ndef[8] = 1\n else:\n ndef[0] = colorformat(styledef)\n\n return obj\n\n def style_for_token(cls, token):\n t = cls._styles[token]\n ansicolor = bgansicolor = None\n color = t[0]\n if color in _deprecated_ansicolors:\n color = _deprecated_ansicolors[color]\n if color in ansicolors:\n ansicolor = color\n color = _ansimap[color]\n bgcolor = t[4]\n if bgcolor in _deprecated_ansicolors:\n bgcolor = _deprecated_ansicolors[color]\n if bgcolor in ansicolors:\n bgansicolor = bgcolor\n bgcolor = _ansimap[bgcolor]\n\n return {\n \'color\': color or None,\n \'bold\': bool(t[1]),\n \'italic\': bool(t[2]),\n \'underline\': bool(t[3]),\n \'bgcolor\': bgcolor or None,\n \'border\': t[5] or None,\n 
\'roman\': bool(t[6]) or None,\n \'sans\': bool(t[7]) or None,\n \'mono\': bool(t[8]) or None,\n \'ansicolor\': ansicolor,\n \'bgansicolor\': bgansicolor,\n }\n\n def list_styles(cls):\n return list(cls)\n\n def styles_token(cls, ttype):\n return ttype in cls._styles\n\n def __iter__(cls):\n for token in cls._styles:\n yield token, cls.style_for_token(token)\n\n def __len__(cls):\n return len(cls._styles)\n\n\nclass Style(metaclass=StyleMeta):\n\n #: overall background color (``None`` means transparent)\n background_color = \'#ffffff\'\n\n #: highlight background color\n highlight_color = \'#ffffcc\'\n\n #: line number font color\n line_number_color = \'#000000\'\n\n #: line number background color\n line_number_background_color = \'#f0f0f0\'\n\n #: special line number font color\n line_number_special_color = \'#000000\'\n\n #: special line number background color\n line_number_special_background_color = \'#ffffc0\'\n\n #: Style definitions for individual token types.\n styles = {}\n') + __stickytape_write_module('pygments/token.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.token\n ~~~~~~~~~~~~~~\n\n Basic token types and the standard tokens.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\n\nclass _TokenType(tuple):\n parent = None\n\n def split(self):\n buf = []\n node = self\n while node is not None:\n buf.append(node)\n node = node.parent\n buf.reverse()\n return buf\n\n def __init__(self, *args):\n # no need to call super.__init__\n self.subtypes = set()\n\n def __contains__(self, val):\n return self is val or (\n type(val) is self.__class__ and\n val[:len(self)] == self\n )\n\n def __getattr__(self, val):\n if not val or not val[0].isupper():\n return tuple.__getattribute__(self, val)\n new = _TokenType(self + (val,))\n setattr(self, val, new)\n self.subtypes.add(new)\n new.parent = self\n return new\n\n def __repr__(self):\n return \'Token\' + (self and \'.\' or \'\') + \'.\'.join(self)\n\n def __copy__(self):\n # These instances are supposed to be singletons\n return self\n\n def __deepcopy__(self, memo):\n # These instances are supposed to be singletons\n return self\n\n\nToken = _TokenType()\n\n# Special token types\nText = Token.Text\nWhitespace = Text.Whitespace\nEscape = Token.Escape\nError = Token.Error\n# Text that doesn\'t belong to this lexer (e.g. HTML in PHP)\nOther = Token.Other\n\n# Common token types for source code\nKeyword = Token.Keyword\nName = Token.Name\nLiteral = Token.Literal\nString = Literal.String\nNumber = Literal.Number\nPunctuation = Token.Punctuation\nOperator = Token.Operator\nComment = Token.Comment\n\n# Generic types for non-source code\nGeneric = Token.Generic\n\n# String and some others are not direct children of Token.\n# alias them:\nToken.Token = Token\nToken.String = String\nToken.Number = Number\n\n\ndef is_token_subtype(ttype, other):\n """\n Return True if ``ttype`` is a subtype of ``other``.\n\n exists for backwards compatibility. 
use ``ttype in other`` now.\n """\n return ttype in other\n\n\ndef string_to_tokentype(s):\n """\n Convert a string into a token type::\n\n >>> string_to_token(\'String.Double\')\n Token.Literal.String.Double\n >>> string_to_token(\'Token.Literal.Number\')\n Token.Literal.Number\n >>> string_to_token(\'\')\n Token\n\n Tokens that are already tokens are returned unchanged:\n\n >>> string_to_token(String)\n Token.Literal.String\n """\n if isinstance(s, _TokenType):\n return s\n if not s:\n return Token\n node = Token\n for item in s.split(\'.\'):\n node = getattr(node, item)\n return node\n\n\n# Map standard token types to short names, used in CSS class naming.\n# If you add a new item, please be sure to run this file to perform\n# a consistency check for duplicate values.\nSTANDARD_TYPES = {\n Token: \'\',\n\n Text: \'\',\n Whitespace: \'w\',\n Escape: \'esc\',\n Error: \'err\',\n Other: \'x\',\n\n Keyword: \'k\',\n Keyword.Constant: \'kc\',\n Keyword.Declaration: \'kd\',\n Keyword.Namespace: \'kn\',\n Keyword.Pseudo: \'kp\',\n Keyword.Reserved: \'kr\',\n Keyword.Type: \'kt\',\n\n Name: \'n\',\n Name.Attribute: \'na\',\n Name.Builtin: \'nb\',\n Name.Builtin.Pseudo: \'bp\',\n Name.Class: \'nc\',\n Name.Constant: \'no\',\n Name.Decorator: \'nd\',\n Name.Entity: \'ni\',\n Name.Exception: \'ne\',\n Name.Function: \'nf\',\n Name.Function.Magic: \'fm\',\n Name.Property: \'py\',\n Name.Label: \'nl\',\n Name.Namespace: \'nn\',\n Name.Other: \'nx\',\n Name.Tag: \'nt\',\n Name.Variable: \'nv\',\n Name.Variable.Class: \'vc\',\n Name.Variable.Global: \'vg\',\n Name.Variable.Instance: \'vi\',\n Name.Variable.Magic: \'vm\',\n\n Literal: \'l\',\n Literal.Date: \'ld\',\n\n String: \'s\',\n String.Affix: \'sa\',\n String.Backtick: \'sb\',\n String.Char: \'sc\',\n String.Delimiter: \'dl\',\n String.Doc: \'sd\',\n String.Double: \'s2\',\n String.Escape: \'se\',\n String.Heredoc: \'sh\',\n String.Interpol: \'si\',\n String.Other: \'sx\',\n String.Regex: \'sr\',\n String.Single: \'s1\',\n String.Symbol: \'ss\',\n\n Number: \'m\',\n Number.Bin: \'mb\',\n Number.Float: \'mf\',\n Number.Hex: \'mh\',\n Number.Integer: \'mi\',\n Number.Integer.Long: \'il\',\n Number.Oct: \'mo\',\n\n Operator: \'o\',\n Operator.Word: \'ow\',\n\n Punctuation: \'p\',\n\n Comment: \'c\',\n Comment.Hashbang: \'ch\',\n Comment.Multiline: \'cm\',\n Comment.Preproc: \'cp\',\n Comment.PreprocFile: \'cpf\',\n Comment.Single: \'c1\',\n Comment.Special: \'cs\',\n\n Generic: \'g\',\n Generic.Deleted: \'gd\',\n Generic.Emph: \'ge\',\n Generic.Error: \'gr\',\n Generic.Heading: \'gh\',\n Generic.Inserted: \'gi\',\n Generic.Output: \'go\',\n Generic.Prompt: \'gp\',\n Generic.Strong: \'gs\',\n Generic.Subheading: \'gu\',\n Generic.Traceback: \'gt\',\n}\n') + __stickytape_write_module('icecream/builtins.py', b"# -*- coding: utf-8 -*-\n\n#\n# IceCream - Never use print() to debug again\n#\n# Ansgar Grunseid\n# grunseid.com\n# grunseid@gmail.com\n#\n# License: MIT\n#\n\nimport icecream\n\n\ntry:\n builtins = __import__('__builtin__')\nexcept ImportError:\n builtins = __import__('builtins')\n\n\ndef install(ic='ic'):\n setattr(builtins, ic, icecream.ic)\n\n\ndef uninstall(ic='ic'):\n delattr(builtins, ic)\n") + __stickytape_write_module('pygments/formatters/terminal256.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.formatters.terminal256\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Formatter for 256-color terminal output with ANSI sequences.\n\n RGB-to-XTERM color conversion routines adapted from xterm256-conv\n tool 
(http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)\n by Wolfgang Frisch.\n\n Formatter version 1.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\n# TODO:\n# - Options to map style\'s bold/underline/italic/border attributes\n# to some ANSI attrbutes (something like \'italic=underline\')\n# - An option to output "style RGB to xterm RGB/index" conversion table\n# - An option to indicate that we are running in "reverse background"\n# xterm. This means that default colors are white-on-black, not\n# black-on-while, so colors like "white background" need to be converted\n# to "white background, black foreground", etc...\n\nimport sys\n\nfrom pygments.formatter import Formatter\nfrom pygments.console import codes\nfrom pygments.style import ansicolors\n\n\n__all__ = [\'Terminal256Formatter\', \'TerminalTrueColorFormatter\']\n\n\nclass EscapeSequence:\n def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):\n self.fg = fg\n self.bg = bg\n self.bold = bold\n self.underline = underline\n self.italic = italic\n\n def escape(self, attrs):\n if len(attrs):\n return "\\x1b[" + ";".join(attrs) + "m"\n return ""\n\n def color_string(self):\n attrs = []\n if self.fg is not None:\n if self.fg in ansicolors:\n esc = codes[self.fg.replace(\'ansi\',\'\')]\n if \';01m\' in esc:\n self.bold = True\n # extract fg color code.\n attrs.append(esc[2:4])\n else:\n attrs.extend(("38", "5", "%i" % self.fg))\n if self.bg is not None:\n if self.bg in ansicolors:\n esc = codes[self.bg.replace(\'ansi\',\'\')]\n # extract fg color code, add 10 for bg.\n attrs.append(str(int(esc[2:4])+10))\n else:\n attrs.extend(("48", "5", "%i" % self.bg))\n if self.bold:\n attrs.append("01")\n if self.underline:\n attrs.append("04")\n if self.italic:\n attrs.append("03")\n return self.escape(attrs)\n\n def true_color_string(self):\n attrs = []\n if self.fg:\n attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))\n if self.bg:\n attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))\n if self.bold:\n attrs.append("01")\n if self.underline:\n attrs.append("04")\n if self.italic:\n attrs.append("03")\n return self.escape(attrs)\n\n def reset_string(self):\n attrs = []\n if self.fg is not None:\n attrs.append("39")\n if self.bg is not None:\n attrs.append("49")\n if self.bold or self.underline or self.italic:\n attrs.append("00")\n return self.escape(attrs)\n\n\nclass Terminal256Formatter(Formatter):\n """\n Format tokens with ANSI color sequences, for output in a 256-color\n terminal or console. Like in `TerminalFormatter` color sequences\n are terminated at newlines, so that paging the output works correctly.\n\n The formatter takes colors from a style defined by the `style` option\n and converts them to nearest ANSI 256-color escape sequences. Bold and\n underline attributes from the style are preserved (and displayed).\n\n .. versionadded:: 0.9\n\n .. versionchanged:: 2.2\n If the used style defines foreground colors in the form ``#ansi*``, then\n `Terminal256Formatter` will map these to non extended foreground color.\n See :ref:`AnsiTerminalStyle` for more information.\n\n .. 
versionchanged:: 2.4\n The ANSI color names have been updated with names that are easier to\n understand and align with colornames of other projects and terminals.\n See :ref:`this table ` for more information.\n\n\n Options accepted:\n\n `style`\n The style to use, can be a string or a Style subclass (default:\n ``\'default\'``).\n """\n name = \'Terminal256\'\n aliases = [\'terminal256\', \'console256\', \'256\']\n filenames = []\n\n def __init__(self, **options):\n Formatter.__init__(self, **options)\n\n self.xterm_colors = []\n self.best_match = {}\n self.style_string = {}\n\n self.usebold = \'nobold\' not in options\n self.useunderline = \'nounderline\' not in options\n self.useitalic = \'noitalic\' not in options\n\n self._build_color_table() # build an RGB-to-256 color conversion table\n self._setup_styles() # convert selected style\'s colors to term. colors\n\n def _build_color_table(self):\n # colors 0..15: 16 basic colors\n\n self.xterm_colors.append((0x00, 0x00, 0x00)) # 0\n self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1\n self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2\n self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3\n self.xterm_colors.append((0x00, 0x00, 0xee)) # 4\n self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5\n self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6\n self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7\n self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8\n self.xterm_colors.append((0xff, 0x00, 0x00)) # 9\n self.xterm_colors.append((0x00, 0xff, 0x00)) # 10\n self.xterm_colors.append((0xff, 0xff, 0x00)) # 11\n self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12\n self.xterm_colors.append((0xff, 0x00, 0xff)) # 13\n self.xterm_colors.append((0x00, 0xff, 0xff)) # 14\n self.xterm_colors.append((0xff, 0xff, 0xff)) # 15\n\n # colors 16..232: the 6x6x6 color cube\n\n valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)\n\n for i in range(217):\n r = valuerange[(i // 36) % 6]\n g = valuerange[(i // 6) % 6]\n b = valuerange[i % 6]\n self.xterm_colors.append((r, g, b))\n\n # colors 233..253: grayscale\n\n for i in range(1, 22):\n v = 8 + i * 10\n self.xterm_colors.append((v, v, v))\n\n def _closest_color(self, r, g, b):\n distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)\n match = 0\n\n for i in range(0, 254):\n values = self.xterm_colors[i]\n\n rd = r - values[0]\n gd = g - values[1]\n bd = b - values[2]\n d = rd*rd + gd*gd + bd*bd\n\n if d < distance:\n match = i\n distance = d\n return match\n\n def _color_index(self, color):\n index = self.best_match.get(color, None)\n if color in ansicolors:\n # strip the `ansi/#ansi` part and look up code\n index = color\n self.best_match[color] = index\n if index is None:\n try:\n rgb = int(str(color), 16)\n except ValueError:\n rgb = 0\n\n r = (rgb >> 16) & 0xff\n g = (rgb >> 8) & 0xff\n b = rgb & 0xff\n index = self._closest_color(r, g, b)\n self.best_match[color] = index\n return index\n\n def _setup_styles(self):\n for ttype, ndef in self.style:\n escape = EscapeSequence()\n # get foreground from ansicolor if set\n if ndef[\'ansicolor\']:\n escape.fg = self._color_index(ndef[\'ansicolor\'])\n elif ndef[\'color\']:\n escape.fg = self._color_index(ndef[\'color\'])\n if ndef[\'bgansicolor\']:\n escape.bg = self._color_index(ndef[\'bgansicolor\'])\n elif ndef[\'bgcolor\']:\n escape.bg = self._color_index(ndef[\'bgcolor\'])\n if self.usebold and ndef[\'bold\']:\n escape.bold = True\n if self.useunderline and ndef[\'underline\']:\n escape.underline = True\n if self.useitalic and ndef[\'italic\']:\n escape.italic = 
True\n self.style_string[str(ttype)] = (escape.color_string(),\n escape.reset_string())\n\n def format(self, tokensource, outfile):\n return Formatter.format(self, tokensource, outfile)\n\n def format_unencoded(self, tokensource, outfile):\n for ttype, value in tokensource:\n not_found = True\n while ttype and not_found:\n try:\n # outfile.write( "<" + str(ttype) + ">" )\n on, off = self.style_string[str(ttype)]\n\n # Like TerminalFormatter, add "reset colors" escape sequence\n # on newline.\n spl = value.split(\'\\n\')\n for line in spl[:-1]:\n if line:\n outfile.write(on + line + off)\n outfile.write(\'\\n\')\n if spl[-1]:\n outfile.write(on + spl[-1] + off)\n\n not_found = False\n # outfile.write( \'#\' + str(ttype) + \'#\' )\n\n except KeyError:\n # ottype = ttype\n ttype = ttype[:-1]\n # outfile.write( \'!\' + str(ottype) + \'->\' + str(ttype) + \'!\' )\n\n if not_found:\n outfile.write(value)\n\n\nclass TerminalTrueColorFormatter(Terminal256Formatter):\n r"""\n Format tokens with ANSI color sequences, for output in a true-color\n terminal or console. Like in `TerminalFormatter` color sequences\n are terminated at newlines, so that paging the output works correctly.\n\n .. versionadded:: 2.1\n\n Options accepted:\n\n `style`\n The style to use, can be a string or a Style subclass (default:\n ``\'default\'``).\n """\n name = \'TerminalTrueColor\'\n aliases = [\'terminal16m\', \'console16m\', \'16m\']\n filenames = []\n\n def _build_color_table(self):\n pass\n\n def _color_tuple(self, color):\n try:\n rgb = int(str(color), 16)\n except ValueError:\n return None\n r = (rgb >> 16) & 0xff\n g = (rgb >> 8) & 0xff\n b = rgb & 0xff\n return (r, g, b)\n\n def _setup_styles(self):\n for ttype, ndef in self.style:\n escape = EscapeSequence()\n if ndef[\'color\']:\n escape.fg = self._color_tuple(ndef[\'color\'])\n if ndef[\'bgcolor\']:\n escape.bg = self._color_tuple(ndef[\'bgcolor\'])\n if self.usebold and ndef[\'bold\']:\n escape.bold = True\n if self.useunderline and ndef[\'underline\']:\n escape.underline = True\n if self.useitalic and ndef[\'italic\']:\n escape.italic = True\n self.style_string[str(ttype)] = (escape.true_color_string(),\n escape.reset_string())\n') + __stickytape_write_module('pygments/formatter.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.formatter\n ~~~~~~~~~~~~~~~~~~\n\n Base formatter class.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nimport codecs\n\nfrom pygments.util import get_bool_opt\nfrom pygments.styles import get_style_by_name\n\n__all__ = [\'Formatter\']\n\n\ndef _lookup_style(style):\n if isinstance(style, str):\n return get_style_by_name(style)\n return style\n\n\nclass Formatter:\n """\n Converts a token stream to text.\n\n Options accepted:\n\n ``style``\n The style to use, can be a string or a Style subclass\n (default: "default"). Not used by e.g. the\n TerminalFormatter.\n ``full``\n Tells the formatter to output a "full" document, i.e.\n a complete self-contained document. This doesn\'t have\n any effect for some formatters (default: false).\n ``title``\n If ``full`` is true, the title that should be used to\n caption the document (default: \'\').\n ``encoding``\n If given, must be an encoding name. This will be used to\n convert the Unicode token strings to byte strings in the\n output. 
If it is "" or None, Unicode strings will be written\n to the output file, which most file-like objects do not\n support (default: None).\n ``outencoding``\n Overrides ``encoding`` if given.\n """\n\n #: Name of the formatter\n name = None\n\n #: Shortcuts for the formatter\n aliases = []\n\n #: fn match rules\n filenames = []\n\n #: If True, this formatter outputs Unicode strings when no encoding\n #: option is given.\n unicodeoutput = True\n\n def __init__(self, **options):\n self.style = _lookup_style(options.get(\'style\', \'default\'))\n self.full = get_bool_opt(options, \'full\', False)\n self.title = options.get(\'title\', \'\')\n self.encoding = options.get(\'encoding\', None) or None\n if self.encoding in (\'guess\', \'chardet\'):\n # can happen for e.g. pygmentize -O encoding=guess\n self.encoding = \'utf-8\'\n self.encoding = options.get(\'outencoding\') or self.encoding\n self.options = options\n\n def get_style_defs(self, arg=\'\'):\n """\n Return the style definitions for the current style as a string.\n\n ``arg`` is an additional argument whose meaning depends on the\n formatter used. Note that ``arg`` can also be a list or tuple\n for some formatters like the html formatter.\n """\n return \'\'\n\n def format(self, tokensource, outfile):\n """\n Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``\n tuples and write it into ``outfile``.\n """\n if self.encoding:\n # wrap the outfile in a StreamWriter\n outfile = codecs.lookup(self.encoding)[3](outfile)\n return self.format_unencoded(tokensource, outfile)\n') + __stickytape_write_module('pygments/styles/__init__.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.styles\n ~~~~~~~~~~~~~~~\n\n Contains built-in styles.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nfrom pygments.plugin import find_plugin_styles\nfrom pygments.util import ClassNotFound\n\n\n#: Maps style names to \'submodule::classname\'.\nSTYLE_MAP = {\n \'default\': \'default::DefaultStyle\',\n \'emacs\': \'emacs::EmacsStyle\',\n \'friendly\': \'friendly::FriendlyStyle\',\n \'colorful\': \'colorful::ColorfulStyle\',\n \'autumn\': \'autumn::AutumnStyle\',\n \'murphy\': \'murphy::MurphyStyle\',\n \'manni\': \'manni::ManniStyle\',\n \'monokai\': \'monokai::MonokaiStyle\',\n \'perldoc\': \'perldoc::PerldocStyle\',\n \'pastie\': \'pastie::PastieStyle\',\n \'borland\': \'borland::BorlandStyle\',\n \'trac\': \'trac::TracStyle\',\n \'native\': \'native::NativeStyle\',\n \'fruity\': \'fruity::FruityStyle\',\n \'bw\': \'bw::BlackWhiteStyle\',\n \'vim\': \'vim::VimStyle\',\n \'vs\': \'vs::VisualStudioStyle\',\n \'tango\': \'tango::TangoStyle\',\n \'rrt\': \'rrt::RrtStyle\',\n \'xcode\': \'xcode::XcodeStyle\',\n \'igor\': \'igor::IgorStyle\',\n \'paraiso-light\': \'paraiso_light::ParaisoLightStyle\',\n \'paraiso-dark\': \'paraiso_dark::ParaisoDarkStyle\',\n \'lovelace\': \'lovelace::LovelaceStyle\',\n \'algol\': \'algol::AlgolStyle\',\n \'algol_nu\': \'algol_nu::Algol_NuStyle\',\n \'arduino\': \'arduino::ArduinoStyle\',\n \'rainbow_dash\': \'rainbow_dash::RainbowDashStyle\',\n \'abap\': \'abap::AbapStyle\',\n \'solarized-dark\': \'solarized::SolarizedDarkStyle\',\n \'solarized-light\': \'solarized::SolarizedLightStyle\',\n \'sas\': \'sas::SasStyle\',\n \'stata\': \'stata_light::StataLightStyle\',\n \'stata-light\': \'stata_light::StataLightStyle\',\n \'stata-dark\': \'stata_dark::StataDarkStyle\',\n \'inkpot\': \'inkpot::InkPotStyle\',\n}\n\n\ndef get_style_by_name(name):\n if name in 
STYLE_MAP:\n mod, cls = STYLE_MAP[name].split(\'::\')\n builtin = "yes"\n else:\n for found_name, style in find_plugin_styles():\n if name == found_name:\n return style\n # perhaps it got dropped into our styles package\n builtin = ""\n mod = name\n cls = name.title() + "Style"\n\n try:\n mod = __import__(\'pygments.styles.\' + mod, None, None, [cls])\n except ImportError:\n raise ClassNotFound("Could not find style module %r" % mod +\n (builtin and ", though it should be builtin") + ".")\n try:\n return getattr(mod, cls)\n except AttributeError:\n raise ClassNotFound("Could not find style class %r in style module." % cls)\n\n\ndef get_all_styles():\n """Return an generator for all styles by name,\n both builtin and plugin."""\n yield from STYLE_MAP\n for name, _ in find_plugin_styles():\n yield name\n') + __stickytape_write_module('pygments/console.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.console\n ~~~~~~~~~~~~~~~~\n\n Format colored console output.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nesc = "\\x1b["\n\ncodes = {}\ncodes[""] = ""\ncodes["reset"] = esc + "39;49;00m"\n\ncodes["bold"] = esc + "01m"\ncodes["faint"] = esc + "02m"\ncodes["standout"] = esc + "03m"\ncodes["underline"] = esc + "04m"\ncodes["blink"] = esc + "05m"\ncodes["overline"] = esc + "06m"\n\ndark_colors = ["black", "red", "green", "yellow", "blue",\n "magenta", "cyan", "gray"]\nlight_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",\n "brightmagenta", "brightcyan", "white"]\n\nx = 30\nfor d, l in zip(dark_colors, light_colors):\n codes[d] = esc + "%im" % x\n codes[l] = esc + "%im" % (60 + x)\n x += 1\n\ndel d, l, x\n\ncodes["white"] = codes["bold"]\n\n\ndef reset_color():\n return codes["reset"]\n\n\ndef colorize(color_key, text):\n return codes[color_key] + text + codes["reset"]\n\n\ndef ansiformat(attr, text):\n """\n Format ``text`` with a color and/or some attributes::\n\n color normal color\n *color* bold color\n _color_ underlined color\n +color+ blinking color\n """\n result = []\n if attr[:1] == attr[-1:] == \'+\':\n result.append(codes[\'blink\'])\n attr = attr[1:-1]\n if attr[:1] == attr[-1:] == \'*\':\n result.append(codes[\'bold\'])\n attr = attr[1:-1]\n if attr[:1] == attr[-1:] == \'_\':\n result.append(codes[\'underline\'])\n attr = attr[1:-1]\n result.append(codes[attr])\n result.append(text)\n result.append(codes[\'reset\'])\n return \'\'.join(result)\n') + __stickytape_write_module('pygments/lexers/python.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.lexers.python\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Lexers for Python and related languages.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nimport re\n\nfrom pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \\\n default, words, combined, do_insertions\nfrom pygments.util import get_bool_opt, shebang_matches\nfrom pygments.token import Text, Comment, Operator, Keyword, Name, String, \\\n Number, Punctuation, Generic, Other, Error\nfrom pygments import unistring as uni\n\n__all__ = [\'PythonLexer\', \'PythonConsoleLexer\', \'PythonTracebackLexer\',\n \'Python2Lexer\', \'Python2TracebackLexer\',\n \'CythonLexer\', \'DgLexer\', \'NumPyLexer\']\n\nline_re = re.compile(\'.*?\\n\')\n\n\nclass PythonLexer(RegexLexer):\n """\n For `Python `_ source code (version 3.x).\n\n .. versionadded:: 0.10\n\n .. versionchanged:: 2.5\n This is now the default ``PythonLexer``. 
It is still available as the\n alias ``Python3Lexer``.\n """\n\n name = \'Python\'\n aliases = [\'python\', \'py\', \'sage\', \'python3\', \'py3\']\n filenames = [\n \'*.py\',\n \'*.pyw\',\n # Jython\n \'*.jy\',\n # Sage\n \'*.sage\',\n # SCons\n \'*.sc\',\n \'SConstruct\',\n \'SConscript\',\n # Skylark/Starlark (used by Bazel, Buck, and Pants)\n \'*.bzl\',\n \'BUCK\',\n \'BUILD\',\n \'BUILD.bazel\',\n \'WORKSPACE\',\n # Twisted Application infrastructure\n \'*.tac\',\n ]\n mimetypes = [\'text/x-python\', \'application/x-python\',\n \'text/x-python3\', \'application/x-python3\']\n\n flags = re.MULTILINE | re.UNICODE\n\n uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)\n\n def innerstring_rules(ttype):\n return [\n # the old style \'%s\' % (...) string formatting (still valid in Py3)\n (r\'%(\\(\\w+\\))?[-#0 +]*([0-9]+|[*])?(\\.([0-9]+|[*]))?\'\n \'[hlL]?[E-GXc-giorsaux%]\', String.Interpol),\n # the new style \'{}\'.format(...) string formatting\n (r\'\\{\'\n r\'((\\w+)((\\.\\w+)|(\\[[^\\]]+\\]))*)?\' # field name\n r\'(\\![sra])?\' # conversion\n r\'(\\:(.?[<>=\\^])?[-+ ]?#?0?(\\d+)?,?(\\.\\d+)?[E-GXb-gnosx%]?)?\'\n r\'\\}\', String.Interpol),\n\n # backslashes, quotes and formatting signs must be parsed one at a time\n (r\'[^\\\\\\\'"%{\\n]+\', ttype),\n (r\'[\\\'"\\\\]\', ttype),\n # unhandled string formatting sign\n (r\'%|(\\{{1,2})\', ttype)\n # newlines are an error (use "nl" state)\n ]\n\n def fstring_rules(ttype):\n return [\n # Assuming that a \'}\' is the closing brace after format specifier.\n # Sadly, this means that we won\'t detect syntax error. But it\'s\n # more important to parse correct syntax correctly, than to\n # highlight invalid syntax.\n (r\'\\}\', String.Interpol),\n (r\'\\{\', String.Interpol, \'expr-inside-fstring\'),\n # backslashes, quotes and formatting signs must be parsed one at a time\n (r\'[^\\\\\\\'"{}\\n]+\', ttype),\n (r\'[\\\'"\\\\]\', ttype),\n # newlines are an error (use "nl" state)\n ]\n\n tokens = {\n \'root\': [\n (r\'\\n\', Text),\n (r\'^(\\s*)([rRuUbB]{,2})("""(?:.|\\n)*?""")\',\n bygroups(Text, String.Affix, String.Doc)),\n (r"^(\\s*)([rRuUbB]{,2})(\'\'\'(?:.|\\n)*?\'\'\')",\n bygroups(Text, String.Affix, String.Doc)),\n (r\'\\A#!.+$\', Comment.Hashbang),\n (r\'#.*$\', Comment.Single),\n (r\'\\\\\\n\', Text),\n (r\'\\\\\', Text),\n include(\'keywords\'),\n (r\'(def)((?:\\s|\\\\\\s)+)\', bygroups(Keyword, Text), \'funcname\'),\n (r\'(class)((?:\\s|\\\\\\s)+)\', bygroups(Keyword, Text), \'classname\'),\n (r\'(from)((?:\\s|\\\\\\s)+)\', bygroups(Keyword.Namespace, Text),\n \'fromimport\'),\n (r\'(import)((?:\\s|\\\\\\s)+)\', bygroups(Keyword.Namespace, Text),\n \'import\'),\n include(\'expr\'),\n ],\n \'expr\': [\n # raw f-strings\n (\'(?i)(rf|fr)(""")\',\n bygroups(String.Affix, String.Double), \'tdqf\'),\n ("(?i)(rf|fr)(\'\'\')",\n bygroups(String.Affix, String.Single), \'tsqf\'),\n (\'(?i)(rf|fr)(")\',\n bygroups(String.Affix, String.Double), \'dqf\'),\n ("(?i)(rf|fr)(\')",\n bygroups(String.Affix, String.Single), \'sqf\'),\n # non-raw f-strings\n (\'([fF])(""")\', bygroups(String.Affix, String.Double),\n combined(\'fstringescape\', \'tdqf\')),\n ("([fF])(\'\'\')", bygroups(String.Affix, String.Single),\n combined(\'fstringescape\', \'tsqf\')),\n (\'([fF])(")\', bygroups(String.Affix, String.Double),\n combined(\'fstringescape\', \'dqf\')),\n ("([fF])(\')", bygroups(String.Affix, String.Single),\n combined(\'fstringescape\', \'sqf\')),\n # raw strings\n (\'(?i)(rb|br|r)(""")\',\n bygroups(String.Affix, String.Double), \'tdqs\'),\n 
("(?i)(rb|br|r)(\'\'\')",\n bygroups(String.Affix, String.Single), \'tsqs\'),\n (\'(?i)(rb|br|r)(")\',\n bygroups(String.Affix, String.Double), \'dqs\'),\n ("(?i)(rb|br|r)(\')",\n bygroups(String.Affix, String.Single), \'sqs\'),\n # non-raw strings\n (\'([uUbB]?)(""")\', bygroups(String.Affix, String.Double),\n combined(\'stringescape\', \'tdqs\')),\n ("([uUbB]?)(\'\'\')", bygroups(String.Affix, String.Single),\n combined(\'stringescape\', \'tsqs\')),\n (\'([uUbB]?)(")\', bygroups(String.Affix, String.Double),\n combined(\'stringescape\', \'dqs\')),\n ("([uUbB]?)(\')", bygroups(String.Affix, String.Single),\n combined(\'stringescape\', \'sqs\')),\n (r\'[^\\S\\n]+\', Text),\n (r\'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]\', Operator),\n (r\'[]{}:(),;[]\', Punctuation),\n (r\'(in|is|and|or|not)\\b\', Operator.Word),\n include(\'expr-keywords\'),\n include(\'builtins\'),\n include(\'magicfuncs\'),\n include(\'magicvars\'),\n include(\'name\'),\n include(\'numbers\'),\n ],\n \'expr-inside-fstring\': [\n (r\'[{([]\', Punctuation, \'expr-inside-fstring-inner\'),\n # without format specifier\n (r\'(=\\s*)?\' # debug (https://bugs.python.org/issue36817)\n r\'(\\![sraf])?\' # conversion\n r\'\\}\', String.Interpol, \'#pop\'),\n # with format specifier\n # we\'ll catch the remaining \'}\' in the outer scope\n (r\'(=\\s*)?\' # debug (https://bugs.python.org/issue36817)\n r\'(\\![sraf])?\' # conversion\n r\':\', String.Interpol, \'#pop\'),\n (r\'\\s+\', Text), # allow new lines\n include(\'expr\'),\n ],\n \'expr-inside-fstring-inner\': [\n (r\'[{([]\', Punctuation, \'expr-inside-fstring-inner\'),\n (r\'[])}]\', Punctuation, \'#pop\'),\n (r\'\\s+\', Text), # allow new lines\n include(\'expr\'),\n ],\n \'expr-keywords\': [\n # Based on https://docs.python.org/3/reference/expressions.html\n (words((\n \'async for\', \'await\', \'else\', \'for\', \'if\', \'lambda\',\n \'yield\', \'yield from\'), suffix=r\'\\b\'),\n Keyword),\n (words((\'True\', \'False\', \'None\'), suffix=r\'\\b\'), Keyword.Constant),\n ],\n \'keywords\': [\n (words((\n \'assert\', \'async\', \'await\', \'break\', \'continue\', \'del\', \'elif\',\n \'else\', \'except\', \'finally\', \'for\', \'global\', \'if\', \'lambda\',\n \'pass\', \'raise\', \'nonlocal\', \'return\', \'try\', \'while\', \'yield\',\n \'yield from\', \'as\', \'with\'), suffix=r\'\\b\'),\n Keyword),\n (words((\'True\', \'False\', \'None\'), suffix=r\'\\b\'), Keyword.Constant),\n ],\n \'builtins\': [\n (words((\n \'__import__\', \'abs\', \'all\', \'any\', \'bin\', \'bool\', \'bytearray\',\n \'bytes\', \'chr\', \'classmethod\', \'compile\', \'complex\',\n \'delattr\', \'dict\', \'dir\', \'divmod\', \'enumerate\', \'eval\', \'filter\',\n \'float\', \'format\', \'frozenset\', \'getattr\', \'globals\', \'hasattr\',\n \'hash\', \'hex\', \'id\', \'input\', \'int\', \'isinstance\', \'issubclass\',\n \'iter\', \'len\', \'list\', \'locals\', \'map\', \'max\', \'memoryview\',\n \'min\', \'next\', \'object\', \'oct\', \'open\', \'ord\', \'pow\', \'print\',\n \'property\', \'range\', \'repr\', \'reversed\', \'round\', \'set\', \'setattr\',\n \'slice\', \'sorted\', \'staticmethod\', \'str\', \'sum\', \'super\', \'tuple\',\n \'type\', \'vars\', \'zip\'), prefix=r\'(?`_ source code.\n\n .. versionchanged:: 2.5\n This class has been renamed from ``PythonLexer``. ``PythonLexer`` now\n refers to the Python 3 variant. 
File name patterns like ``*.py`` have\n been moved to Python 3 as well.\n """\n\n name = \'Python 2.x\'\n aliases = [\'python2\', \'py2\']\n filenames = [] # now taken over by PythonLexer (3.x)\n mimetypes = [\'text/x-python2\', \'application/x-python2\']\n\n def innerstring_rules(ttype):\n return [\n # the old style \'%s\' % (...) string formatting\n (r\'%(\\(\\w+\\))?[-#0 +]*([0-9]+|[*])?(\\.([0-9]+|[*]))?\'\n \'[hlL]?[E-GXc-giorsux%]\', String.Interpol),\n # backslashes, quotes and formatting signs must be parsed one at a time\n (r\'[^\\\\\\\'"%\\n]+\', ttype),\n (r\'[\\\'"\\\\]\', ttype),\n # unhandled string formatting sign\n (r\'%\', ttype),\n # newlines are an error (use "nl" state)\n ]\n\n tokens = {\n \'root\': [\n (r\'\\n\', Text),\n (r\'^(\\s*)([rRuUbB]{,2})("""(?:.|\\n)*?""")\',\n bygroups(Text, String.Affix, String.Doc)),\n (r"^(\\s*)([rRuUbB]{,2})(\'\'\'(?:.|\\n)*?\'\'\')",\n bygroups(Text, String.Affix, String.Doc)),\n (r\'[^\\S\\n]+\', Text),\n (r\'\\A#!.+$\', Comment.Hashbang),\n (r\'#.*$\', Comment.Single),\n (r\'[]{}:(),;[]\', Punctuation),\n (r\'\\\\\\n\', Text),\n (r\'\\\\\', Text),\n (r\'(in|is|and|or|not)\\b\', Operator.Word),\n (r\'!=|==|<<|>>|[-~+/*%=<>&^|.]\', Operator),\n include(\'keywords\'),\n (r\'(def)((?:\\s|\\\\\\s)+)\', bygroups(Keyword, Text), \'funcname\'),\n (r\'(class)((?:\\s|\\\\\\s)+)\', bygroups(Keyword, Text), \'classname\'),\n (r\'(from)((?:\\s|\\\\\\s)+)\', bygroups(Keyword.Namespace, Text),\n \'fromimport\'),\n (r\'(import)((?:\\s|\\\\\\s)+)\', bygroups(Keyword.Namespace, Text),\n \'import\'),\n include(\'builtins\'),\n include(\'magicfuncs\'),\n include(\'magicvars\'),\n include(\'backtick\'),\n (\'([rR]|[uUbB][rR]|[rR][uUbB])(""")\',\n bygroups(String.Affix, String.Double), \'tdqs\'),\n ("([rR]|[uUbB][rR]|[rR][uUbB])(\'\'\')",\n bygroups(String.Affix, String.Single), \'tsqs\'),\n (\'([rR]|[uUbB][rR]|[rR][uUbB])(")\',\n bygroups(String.Affix, String.Double), \'dqs\'),\n ("([rR]|[uUbB][rR]|[rR][uUbB])(\')",\n bygroups(String.Affix, String.Single), \'sqs\'),\n (\'([uUbB]?)(""")\', bygroups(String.Affix, String.Double),\n combined(\'stringescape\', \'tdqs\')),\n ("([uUbB]?)(\'\'\')", bygroups(String.Affix, String.Single),\n combined(\'stringescape\', \'tsqs\')),\n (\'([uUbB]?)(")\', bygroups(String.Affix, String.Double),\n combined(\'stringescape\', \'dqs\')),\n ("([uUbB]?)(\')", bygroups(String.Affix, String.Single),\n combined(\'stringescape\', \'sqs\')),\n include(\'name\'),\n include(\'numbers\'),\n ],\n \'keywords\': [\n (words((\n \'assert\', \'break\', \'continue\', \'del\', \'elif\', \'else\', \'except\',\n \'exec\', \'finally\', \'for\', \'global\', \'if\', \'lambda\', \'pass\',\n \'print\', \'raise\', \'return\', \'try\', \'while\', \'yield\',\n \'yield from\', \'as\', \'with\'), suffix=r\'\\b\'),\n Keyword),\n ],\n \'builtins\': [\n (words((\n \'__import__\', \'abs\', \'all\', \'any\', \'apply\', \'basestring\', \'bin\',\n \'bool\', \'buffer\', \'bytearray\', \'bytes\', \'callable\', \'chr\', \'classmethod\',\n \'cmp\', \'coerce\', \'compile\', \'complex\', \'delattr\', \'dict\', \'dir\', \'divmod\',\n \'enumerate\', \'eval\', \'execfile\', \'exit\', \'file\', \'filter\', \'float\',\n \'frozenset\', \'getattr\', \'globals\', \'hasattr\', \'hash\', \'hex\', \'id\',\n \'input\', \'int\', \'intern\', \'isinstance\', \'issubclass\', \'iter\', \'len\',\n \'list\', \'locals\', \'long\', \'map\', \'max\', \'min\', \'next\', \'object\',\n \'oct\', \'open\', \'ord\', \'pow\', \'property\', \'range\', \'raw_input\', \'reduce\',\n \'reload\', 
\'repr\', \'reversed\', \'round\', \'set\', \'setattr\', \'slice\',\n \'sorted\', \'staticmethod\', \'str\', \'sum\', \'super\', \'tuple\', \'type\',\n \'unichr\', \'unicode\', \'vars\', \'xrange\', \'zip\'),\n prefix=r\'(?>> a = \'foo\'\n >>> print a\n foo\n >>> 1 / 0\n Traceback (most recent call last):\n File "", line 1, in \n ZeroDivisionError: integer division or modulo by zero\n\n Additional options:\n\n `python3`\n Use Python 3 lexer for code. Default is ``True``.\n\n .. versionadded:: 1.0\n .. versionchanged:: 2.5\n Now defaults to ``True``.\n """\n name = \'Python console session\'\n aliases = [\'pycon\']\n mimetypes = [\'text/x-python-doctest\']\n\n def __init__(self, **options):\n self.python3 = get_bool_opt(options, \'python3\', True)\n Lexer.__init__(self, **options)\n\n def get_tokens_unprocessed(self, text):\n if self.python3:\n pylexer = PythonLexer(**self.options)\n tblexer = PythonTracebackLexer(**self.options)\n else:\n pylexer = Python2Lexer(**self.options)\n tblexer = Python2TracebackLexer(**self.options)\n\n curcode = \'\'\n insertions = []\n curtb = \'\'\n tbindex = 0\n tb = 0\n for match in line_re.finditer(text):\n line = match.group()\n if line.startswith(\'>>> \') or line.startswith(\'... \'):\n tb = 0\n insertions.append((len(curcode),\n [(0, Generic.Prompt, line[:4])]))\n curcode += line[4:]\n elif line.rstrip() == \'...\' and not tb:\n # only a new >>> prompt can end an exception block\n # otherwise an ellipsis in place of the traceback frames\n # will be mishandled\n insertions.append((len(curcode),\n [(0, Generic.Prompt, \'...\')]))\n curcode += line[3:]\n else:\n if curcode:\n yield from do_insertions(\n insertions, pylexer.get_tokens_unprocessed(curcode))\n curcode = \'\'\n insertions = []\n if (line.startswith(\'Traceback (most recent call last):\') or\n re.match(\' File "[^"]+", line \\\\d+\\\\n$\', line)):\n tb = 1\n curtb = line\n tbindex = match.start()\n elif line == \'KeyboardInterrupt\\n\':\n yield match.start(), Name.Class, line\n elif tb:\n curtb += line\n if not (line.startswith(\' \') or line.strip() == \'...\'):\n tb = 0\n for i, t, v in tblexer.get_tokens_unprocessed(curtb):\n yield tbindex+i, t, v\n curtb = \'\'\n else:\n yield match.start(), Generic.Output, line\n if curcode:\n yield from do_insertions(insertions,\n pylexer.get_tokens_unprocessed(curcode))\n if curtb:\n for i, t, v in tblexer.get_tokens_unprocessed(curtb):\n yield tbindex+i, t, v\n\n\nclass PythonTracebackLexer(RegexLexer):\n """\n For Python 3.x tracebacks, with support for chained exceptions.\n\n .. versionadded:: 1.0\n\n .. versionchanged:: 2.5\n This is now the default ``PythonTracebackLexer``. 
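# --- Editor's sketch (not part of the bundle): highlighting a console session.
# PythonConsoleLexer splits the ``>>>``/``...`` prompts from the code and hands
# traceback blocks to PythonTracebackLexer, as described above.
from pygments import highlight
from pygments.lexers.python import PythonConsoleLexer
from pygments.formatters.terminal256 import Terminal256Formatter

session = (
    '>>> 1 / 0\n'
    'Traceback (most recent call last):\n'
    '  File "<stdin>", line 1, in <module>\n'
    'ZeroDivisionError: division by zero\n'
)
print(highlight(session, PythonConsoleLexer(python3=True), Terminal256Formatter()))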
It is still available\n as the alias ``Python3TracebackLexer``.\n """\n\n name = \'Python Traceback\'\n aliases = [\'pytb\', \'py3tb\']\n filenames = [\'*.pytb\', \'*.py3tb\']\n mimetypes = [\'text/x-python-traceback\', \'text/x-python3-traceback\']\n\n tokens = {\n \'root\': [\n (r\'\\n\', Text),\n (r\'^Traceback \\(most recent call last\\):\\n\', Generic.Traceback, \'intb\'),\n (r\'^During handling of the above exception, another \'\n r\'exception occurred:\\n\\n\', Generic.Traceback),\n (r\'^The above exception was the direct cause of the \'\n r\'following exception:\\n\\n\', Generic.Traceback),\n (r\'^(?= File "[^"]+", line \\d+)\', Generic.Traceback, \'intb\'),\n (r\'^.*\\n\', Other),\n ],\n \'intb\': [\n (r\'^( File )("[^"]+")(, line )(\\d+)(, in )(.+)(\\n)\',\n bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),\n (r\'^( File )("[^"]+")(, line )(\\d+)(\\n)\',\n bygroups(Text, Name.Builtin, Text, Number, Text)),\n (r\'^( )(.+)(\\n)\',\n bygroups(Text, using(PythonLexer), Text)),\n (r\'^([ \\t]*)(\\.\\.\\.)(\\n)\',\n bygroups(Text, Comment, Text)), # for doctests...\n (r\'^([^:]+)(: )(.+)(\\n)\',\n bygroups(Generic.Error, Text, Name, Text), \'#pop\'),\n (r\'^([a-zA-Z_]\\w*)(:?\\n)\',\n bygroups(Generic.Error, Text), \'#pop\')\n ],\n }\n\n\nPython3TracebackLexer = PythonTracebackLexer\n\n\nclass Python2TracebackLexer(RegexLexer):\n """\n For Python tracebacks.\n\n .. versionadded:: 0.7\n\n .. versionchanged:: 2.5\n This class has been renamed from ``PythonTracebackLexer``.\n ``PythonTracebackLexer`` now refers to the Python 3 variant.\n """\n\n name = \'Python 2.x Traceback\'\n aliases = [\'py2tb\']\n filenames = [\'*.py2tb\']\n mimetypes = [\'text/x-python2-traceback\']\n\n tokens = {\n \'root\': [\n # Cover both (most recent call last) and (innermost last)\n # The optional ^C allows us to catch keyboard interrupt signals.\n (r\'^(\\^C)?(Traceback.*\\n)\',\n bygroups(Text, Generic.Traceback), \'intb\'),\n # SyntaxError starts with this.\n (r\'^(?= File "[^"]+", line \\d+)\', Generic.Traceback, \'intb\'),\n (r\'^.*\\n\', Other),\n ],\n \'intb\': [\n (r\'^( File )("[^"]+")(, line )(\\d+)(, in )(.+)(\\n)\',\n bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),\n (r\'^( File )("[^"]+")(, line )(\\d+)(\\n)\',\n bygroups(Text, Name.Builtin, Text, Number, Text)),\n (r\'^( )(.+)(\\n)\',\n bygroups(Text, using(Python2Lexer), Text)),\n (r\'^([ \\t]*)(\\.\\.\\.)(\\n)\',\n bygroups(Text, Comment, Text)), # for doctests...\n (r\'^([^:]+)(: )(.+)(\\n)\',\n bygroups(Generic.Error, Text, Name, Text), \'#pop\'),\n (r\'^([a-zA-Z_]\\w*)(:?\\n)\',\n bygroups(Generic.Error, Text), \'#pop\')\n ],\n }\n\n\nclass CythonLexer(RegexLexer):\n """\n For Pyrex and `Cython `_ source code.\n\n .. 
versionadded:: 1.1\n """\n\n name = \'Cython\'\n aliases = [\'cython\', \'pyx\', \'pyrex\']\n filenames = [\'*.pyx\', \'*.pxd\', \'*.pxi\']\n mimetypes = [\'text/x-cython\', \'application/x-cython\']\n\n tokens = {\n \'root\': [\n (r\'\\n\', Text),\n (r\'^(\\s*)("""(?:.|\\n)*?""")\', bygroups(Text, String.Doc)),\n (r"^(\\s*)(\'\'\'(?:.|\\n)*?\'\'\')", bygroups(Text, String.Doc)),\n (r\'[^\\S\\n]+\', Text),\n (r\'#.*$\', Comment),\n (r\'[]{}:(),;[]\', Punctuation),\n (r\'\\\\\\n\', Text),\n (r\'\\\\\', Text),\n (r\'(in|is|and|or|not)\\b\', Operator.Word),\n (r\'(<)([a-zA-Z0-9.?]+)(>)\',\n bygroups(Punctuation, Keyword.Type, Punctuation)),\n (r\'!=|==|<<|>>|[-~+/*%=<>&^|.?]\', Operator),\n (r\'(from)(\\d+)(<=)(\\s+)(<)(\\d+)(:)\',\n bygroups(Keyword, Number.Integer, Operator, Name, Operator,\n Name, Punctuation)),\n include(\'keywords\'),\n (r\'(def|property)(\\s+)\', bygroups(Keyword, Text), \'funcname\'),\n (r\'(cp?def)(\\s+)\', bygroups(Keyword, Text), \'cdef\'),\n # (should actually start a block with only cdefs)\n (r\'(cdef)(:)\', bygroups(Keyword, Punctuation)),\n (r\'(class|struct)(\\s+)\', bygroups(Keyword, Text), \'classname\'),\n (r\'(from)(\\s+)\', bygroups(Keyword, Text), \'fromimport\'),\n (r\'(c?import)(\\s+)\', bygroups(Keyword, Text), \'import\'),\n include(\'builtins\'),\n include(\'backtick\'),\n (\'(?:[rR]|[uU][rR]|[rR][uU])"""\', String, \'tdqs\'),\n ("(?:[rR]|[uU][rR]|[rR][uU])\'\'\'", String, \'tsqs\'),\n (\'(?:[rR]|[uU][rR]|[rR][uU])"\', String, \'dqs\'),\n ("(?:[rR]|[uU][rR]|[rR][uU])\'", String, \'sqs\'),\n (\'[uU]?"""\', String, combined(\'stringescape\', \'tdqs\')),\n ("[uU]?\'\'\'", String, combined(\'stringescape\', \'tsqs\')),\n (\'[uU]?"\', String, combined(\'stringescape\', \'dqs\')),\n ("[uU]?\'", String, combined(\'stringescape\', \'sqs\')),\n include(\'name\'),\n include(\'numbers\'),\n ],\n \'keywords\': [\n (words((\n \'assert\', \'async\', \'await\', \'break\', \'by\', \'continue\', \'ctypedef\', \'del\', \'elif\',\n \'else\', \'except\', \'except?\', \'exec\', \'finally\', \'for\', \'fused\', \'gil\',\n \'global\', \'if\', \'include\', \'lambda\', \'nogil\', \'pass\', \'print\',\n \'raise\', \'return\', \'try\', \'while\', \'yield\', \'as\', \'with\'), suffix=r\'\\b\'),\n Keyword),\n (r\'(DEF|IF|ELIF|ELSE)\\b\', Comment.Preproc),\n ],\n \'builtins\': [\n (words((\n \'__import__\', \'abs\', \'all\', \'any\', \'apply\', \'basestring\', \'bin\',\n \'bool\', \'buffer\', \'bytearray\', \'bytes\', \'callable\', \'chr\',\n \'classmethod\', \'cmp\', \'coerce\', \'compile\', \'complex\', \'delattr\',\n \'dict\', \'dir\', \'divmod\', \'enumerate\', \'eval\', \'execfile\', \'exit\',\n \'file\', \'filter\', \'float\', \'frozenset\', \'getattr\', \'globals\',\n \'hasattr\', \'hash\', \'hex\', \'id\', \'input\', \'int\', \'intern\', \'isinstance\',\n \'issubclass\', \'iter\', \'len\', \'list\', \'locals\', \'long\', \'map\', \'max\',\n \'min\', \'next\', \'object\', \'oct\', \'open\', \'ord\', \'pow\', \'property\',\n \'range\', \'raw_input\', \'reduce\', \'reload\', \'repr\', \'reversed\',\n \'round\', \'set\', \'setattr\', \'slice\', \'sorted\', \'staticmethod\',\n \'str\', \'sum\', \'super\', \'tuple\', \'type\', \'unichr\', \'unicode\', \'unsigned\',\n \'vars\', \'xrange\', \'zip\'), prefix=r\'(?`_,\n a functional and object-oriented programming language\n running on the CPython 3 VM.\n\n .. 
versionadded:: 1.6\n """\n name = \'dg\'\n aliases = [\'dg\']\n filenames = [\'*.dg\']\n mimetypes = [\'text/x-dg\']\n\n tokens = {\n \'root\': [\n (r\'\\s+\', Text),\n (r\'#.*?$\', Comment.Single),\n\n (r\'(?i)0b[01]+\', Number.Bin),\n (r\'(?i)0o[0-7]+\', Number.Oct),\n (r\'(?i)0x[0-9a-f]+\', Number.Hex),\n (r\'(?i)[+-]?[0-9]+\\.[0-9]+(e[+-]?[0-9]+)?j?\', Number.Float),\n (r\'(?i)[+-]?[0-9]+e[+-]?\\d+j?\', Number.Float),\n (r\'(?i)[+-]?[0-9]+j?\', Number.Integer),\n\n (r"(?i)(br|r?b?)\'\'\'", String, combined(\'stringescape\', \'tsqs\', \'string\')),\n (r\'(?i)(br|r?b?)"""\', String, combined(\'stringescape\', \'tdqs\', \'string\')),\n (r"(?i)(br|r?b?)\'", String, combined(\'stringescape\', \'sqs\', \'string\')),\n (r\'(?i)(br|r?b?)"\', String, combined(\'stringescape\', \'dqs\', \'string\')),\n\n (r"`\\w+\'*`", Operator),\n (r\'\\b(and|in|is|or|where)\\b\', Operator.Word),\n (r\'[!$%&*+\\-./:<-@\\\\^|~;,]+\', Operator),\n\n (words((\n \'bool\', \'bytearray\', \'bytes\', \'classmethod\', \'complex\', \'dict\', \'dict\\\'\',\n \'float\', \'frozenset\', \'int\', \'list\', \'list\\\'\', \'memoryview\', \'object\',\n \'property\', \'range\', \'set\', \'set\\\'\', \'slice\', \'staticmethod\', \'str\',\n \'super\', \'tuple\', \'tuple\\\'\', \'type\'),\n prefix=r\'(?\' % (self.__class__.__name__,\n self.options)\n else:\n return \'\' % self.__class__.__name__\n\n def add_filter(self, filter_, **options):\n """\n Add a new stream filter to this lexer.\n """\n if not isinstance(filter_, Filter):\n filter_ = get_filter_by_name(filter_, **options)\n self.filters.append(filter_)\n\n def analyse_text(text):\n """\n Has to return a float between ``0`` and ``1`` that indicates\n if a lexer wants to highlight this text. Used by ``guess_lexer``.\n If this method returns ``0`` it won\'t highlight it in any case, if\n it returns ``1`` highlighting with this lexer is guaranteed.\n\n The `LexerMeta` metaclass automatically wraps this function so\n that it works like a static method (no ``self`` or ``cls``\n parameter) and the return value is automatically converted to\n `float`. If the return value is an object that is boolean `False`\n it\'s the same as if the return values was ``0.0``.\n """\n\n def get_tokens(self, text, unfiltered=False):\n """\n Return an iterable of (tokentype, value) pairs generated from\n `text`. If `unfiltered` is set to `True`, the filtering mechanism\n is bypassed even if filters are defined.\n\n Also preprocess the text, i.e. 
expand tabs and strip it if\n wanted and applies registered filters.\n """\n if not isinstance(text, str):\n if self.encoding == \'guess\':\n text, _ = guess_decode(text)\n elif self.encoding == \'chardet\':\n try:\n import chardet\n except ImportError as e:\n raise ImportError(\'To enable chardet encoding guessing, \'\n \'please install the chardet library \'\n \'from http://chardet.feedparser.org/\') from e\n # check for BOM first\n decoded = None\n for bom, encoding in _encoding_map:\n if text.startswith(bom):\n decoded = text[len(bom):].decode(encoding, \'replace\')\n break\n # no BOM found, so use chardet\n if decoded is None:\n enc = chardet.detect(text[:1024]) # Guess using first 1KB\n decoded = text.decode(enc.get(\'encoding\') or \'utf-8\',\n \'replace\')\n text = decoded\n else:\n text = text.decode(self.encoding)\n if text.startswith(\'\\ufeff\'):\n text = text[len(\'\\ufeff\'):]\n else:\n if text.startswith(\'\\ufeff\'):\n text = text[len(\'\\ufeff\'):]\n\n # text now *is* a unicode string\n text = text.replace(\'\\r\\n\', \'\\n\')\n text = text.replace(\'\\r\', \'\\n\')\n if self.stripall:\n text = text.strip()\n elif self.stripnl:\n text = text.strip(\'\\n\')\n if self.tabsize > 0:\n text = text.expandtabs(self.tabsize)\n if self.ensurenl and not text.endswith(\'\\n\'):\n text += \'\\n\'\n\n def streamer():\n for _, t, v in self.get_tokens_unprocessed(text):\n yield t, v\n stream = streamer()\n if not unfiltered:\n stream = apply_filters(stream, self.filters, self)\n return stream\n\n def get_tokens_unprocessed(self, text):\n """\n Return an iterable of (index, tokentype, value) pairs where "index"\n is the starting position of the token within the input text.\n\n In subclasses, implement this method as a generator to\n maximize effectiveness.\n """\n raise NotImplementedError\n\n\nclass DelegatingLexer(Lexer):\n """\n This lexer takes two lexer as arguments. A root lexer and\n a language lexer. 
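# --- Editor's sketch (not part of the bundle): the byte decoding and
# whitespace normalisation that Lexer.get_tokens performs above:
# ``encoding='guess'`` decodes via guess_decode, BOMs are stripped, newlines
# are normalised to '\n' and tabs expanded to ``tabsize``.
from pygments.lexers.python import PythonLexer

lexer = PythonLexer(encoding='guess', tabsize=4, stripnl=False)
tokens = list(lexer.get_tokens(b'\xef\xbb\xbfif x:\r\n\tpass\r\n'))  # UTF-8 BOM + CRLF input
print(tokens[0])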
First everything is scanned using the language\n lexer, afterwards all ``Other`` tokens are lexed using the root\n lexer.\n\n The lexers from the ``template`` lexer package use this base lexer.\n """\n\n def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):\n self.root_lexer = _root_lexer(**options)\n self.language_lexer = _language_lexer(**options)\n self.needle = _needle\n Lexer.__init__(self, **options)\n\n def get_tokens_unprocessed(self, text):\n buffered = \'\'\n insertions = []\n lng_buffer = []\n for i, t, v in self.language_lexer.get_tokens_unprocessed(text):\n if t is self.needle:\n if lng_buffer:\n insertions.append((len(buffered), lng_buffer))\n lng_buffer = []\n buffered += v\n else:\n lng_buffer.append((i, t, v))\n if lng_buffer:\n insertions.append((len(buffered), lng_buffer))\n return do_insertions(insertions,\n self.root_lexer.get_tokens_unprocessed(buffered))\n\n\n# ------------------------------------------------------------------------------\n# RegexLexer and ExtendedRegexLexer\n#\n\n\nclass include(str): # pylint: disable=invalid-name\n """\n Indicates that a state should include rules from another state.\n """\n pass\n\n\nclass _inherit:\n """\n Indicates the a state should inherit from its superclass.\n """\n def __repr__(self):\n return \'inherit\'\n\ninherit = _inherit() # pylint: disable=invalid-name\n\n\nclass combined(tuple): # pylint: disable=invalid-name\n """\n Indicates a state combined from multiple states.\n """\n\n def __new__(cls, *args):\n return tuple.__new__(cls, args)\n\n def __init__(self, *args):\n # tuple.__init__ doesn\'t do anything\n pass\n\n\nclass _PseudoMatch:\n """\n A pseudo match object constructed from a string.\n """\n\n def __init__(self, start, text):\n self._text = text\n self._start = start\n\n def start(self, arg=None):\n return self._start\n\n def end(self, arg=None):\n return self._start + len(self._text)\n\n def group(self, arg=None):\n if arg:\n raise IndexError(\'No such group\')\n return self._text\n\n def groups(self):\n return (self._text,)\n\n def groupdict(self):\n return {}\n\n\ndef bygroups(*args):\n """\n Callback that yields multiple actions for each group in the match.\n """\n def callback(lexer, match, ctx=None):\n for i, action in enumerate(args):\n if action is None:\n continue\n elif type(action) is _TokenType:\n data = match.group(i + 1)\n if data:\n yield match.start(i + 1), action, data\n else:\n data = match.group(i + 1)\n if data is not None:\n if ctx:\n ctx.pos = match.start(i + 1)\n for item in action(lexer,\n _PseudoMatch(match.start(i + 1), data), ctx):\n if item:\n yield item\n if ctx:\n ctx.pos = match.end()\n return callback\n\n\nclass _This:\n """\n Special singleton used for indicating the caller class.\n Used by ``using``.\n """\n\nthis = _This()\n\n\ndef using(_other, **kwargs):\n """\n Callback that processes the match with a different lexer.\n\n The keyword arguments are forwarded to the lexer, except `state` which\n is handled separately.\n\n `state` specifies the state that the new lexer will start in, and can\n be an enumerable such as (\'root\', \'inline\', \'string\') or a simple\n string which is assumed to be on top of the root state.\n\n Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.\n """\n gt_kwargs = {}\n if \'state\' in kwargs:\n s = kwargs.pop(\'state\')\n if isinstance(s, (list, tuple)):\n gt_kwargs[\'stack\'] = s\n else:\n gt_kwargs[\'stack\'] = (\'root\', s)\n\n if _other is this:\n def callback(lexer, match, ctx=None):\n # if keyword 
arguments are given the callback\n # function has to create a new lexer instance\n if kwargs:\n # XXX: cache that somehow\n kwargs.update(lexer.options)\n lx = lexer.__class__(**kwargs)\n else:\n lx = lexer\n s = match.start()\n for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):\n yield i + s, t, v\n if ctx:\n ctx.pos = match.end()\n else:\n def callback(lexer, match, ctx=None):\n # XXX: cache that somehow\n kwargs.update(lexer.options)\n lx = _other(**kwargs)\n\n s = match.start()\n for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):\n yield i + s, t, v\n if ctx:\n ctx.pos = match.end()\n return callback\n\n\nclass default:\n """\n Indicates a state or state action (e.g. #pop) to apply.\n For example default(\'#pop\') is equivalent to (\'\', Token, \'#pop\')\n Note that state tuples may be used as well.\n\n .. versionadded:: 2.0\n """\n def __init__(self, state):\n self.state = state\n\n\nclass words(Future):\n """\n Indicates a list of literal words that is transformed into an optimized\n regex that matches any of the words.\n\n .. versionadded:: 2.0\n """\n def __init__(self, words, prefix=\'\', suffix=\'\'):\n self.words = words\n self.prefix = prefix\n self.suffix = suffix\n\n def get(self):\n return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)\n\n\nclass RegexLexerMeta(LexerMeta):\n """\n Metaclass for RegexLexer, creates the self._tokens attribute from\n self.tokens on the first instantiation.\n """\n\n def _process_regex(cls, regex, rflags, state):\n """Preprocess the regular expression component of a token definition."""\n if isinstance(regex, Future):\n regex = regex.get()\n return re.compile(regex, rflags).match\n\n def _process_token(cls, token):\n """Preprocess the token component of a token definition."""\n assert type(token) is _TokenType or callable(token), \\\n \'token type must be simple type or callable, not %r\' % (token,)\n return token\n\n def _process_new_state(cls, new_state, unprocessed, processed):\n """Preprocess the state transition action of a token definition."""\n if isinstance(new_state, str):\n # an existing state\n if new_state == \'#pop\':\n return -1\n elif new_state in unprocessed:\n return (new_state,)\n elif new_state == \'#push\':\n return new_state\n elif new_state[:5] == \'#pop:\':\n return -int(new_state[5:])\n else:\n assert False, \'unknown new state %r\' % new_state\n elif isinstance(new_state, combined):\n # combine a new state from existing ones\n tmp_state = \'_tmp_%d\' % cls._tmpname\n cls._tmpname += 1\n itokens = []\n for istate in new_state:\n assert istate != new_state, \'circular state ref %r\' % istate\n itokens.extend(cls._process_state(unprocessed,\n processed, istate))\n processed[tmp_state] = itokens\n return (tmp_state,)\n elif isinstance(new_state, tuple):\n # push more than one state\n for istate in new_state:\n assert (istate in unprocessed or\n istate in (\'#pop\', \'#push\')), \\\n \'unknown new state \' + istate\n return new_state\n else:\n assert False, \'unknown new state def %r\' % new_state\n\n def _process_state(cls, unprocessed, processed, state):\n """Preprocess a single state definition."""\n assert type(state) is str, "wrong state name %r" % state\n assert state[0] != \'#\', "invalid state name %r" % state\n if state in processed:\n return processed[state]\n tokens = processed[state] = []\n rflags = cls.flags\n for tdef in unprocessed[state]:\n if isinstance(tdef, include):\n # it\'s a state reference\n assert tdef != state, "circular state reference %r" % 
state\n tokens.extend(cls._process_state(unprocessed, processed,\n str(tdef)))\n continue\n if isinstance(tdef, _inherit):\n # should be processed already, but may not in the case of:\n # 1. the state has no counterpart in any parent\n # 2. the state includes more than one \'inherit\'\n continue\n if isinstance(tdef, default):\n new_state = cls._process_new_state(tdef.state, unprocessed, processed)\n tokens.append((re.compile(\'\').match, None, new_state))\n continue\n\n assert type(tdef) is tuple, "wrong rule def %r" % tdef\n\n try:\n rex = cls._process_regex(tdef[0], rflags, state)\n except Exception as err:\n raise ValueError("uncompilable regex %r in state %r of %r: %s" %\n (tdef[0], state, cls, err)) from err\n\n token = cls._process_token(tdef[1])\n\n if len(tdef) == 2:\n new_state = None\n else:\n new_state = cls._process_new_state(tdef[2],\n unprocessed, processed)\n\n tokens.append((rex, token, new_state))\n return tokens\n\n def process_tokendef(cls, name, tokendefs=None):\n """Preprocess a dictionary of token definitions."""\n processed = cls._all_tokens[name] = {}\n tokendefs = tokendefs or cls.tokens[name]\n for state in list(tokendefs):\n cls._process_state(tokendefs, processed, state)\n return processed\n\n def get_tokendefs(cls):\n """\n Merge tokens from superclasses in MRO order, returning a single tokendef\n dictionary.\n\n Any state that is not defined by a subclass will be inherited\n automatically. States that *are* defined by subclasses will, by\n default, override that state in the superclass. If a subclass wishes to\n inherit definitions from a superclass, it can use the special value\n "inherit", which will cause the superclass\' state definition to be\n included at that point in the state.\n """\n tokens = {}\n inheritable = {}\n for c in cls.__mro__:\n toks = c.__dict__.get(\'tokens\', {})\n\n for state, items in toks.items():\n curitems = tokens.get(state)\n if curitems is None:\n # N.b. because this is assigned by reference, sufficiently\n # deep hierarchies are processed incrementally (e.g. for\n # A(B), B(C), C(RegexLexer), B will be premodified so X(B)\n # will not see any inherits in B).\n tokens[state] = items\n try:\n inherit_ndx = items.index(inherit)\n except ValueError:\n continue\n inheritable[state] = inherit_ndx\n continue\n\n inherit_ndx = inheritable.pop(state, None)\n if inherit_ndx is None:\n continue\n\n # Replace the "inherit" value with the items\n curitems[inherit_ndx:inherit_ndx+1] = items\n try:\n # N.b. 
this is the index in items (that is, the superclass\n # copy), so offset required when storing below.\n new_inh_ndx = items.index(inherit)\n except ValueError:\n pass\n else:\n inheritable[state] = inherit_ndx + new_inh_ndx\n\n return tokens\n\n def __call__(cls, *args, **kwds):\n """Instantiate cls after preprocessing its token definitions."""\n if \'_tokens\' not in cls.__dict__:\n cls._all_tokens = {}\n cls._tmpname = 0\n if hasattr(cls, \'token_variants\') and cls.token_variants:\n # don\'t process yet\n pass\n else:\n cls._tokens = cls.process_tokendef(\'\', cls.get_tokendefs())\n\n return type.__call__(cls, *args, **kwds)\n\n\nclass RegexLexer(Lexer, metaclass=RegexLexerMeta):\n """\n Base for simple stateful regular expression-based lexers.\n Simplifies the lexing process so that you need only\n provide a list of states and regular expressions.\n """\n\n #: Flags for compiling the regular expressions.\n #: Defaults to MULTILINE.\n flags = re.MULTILINE\n\n #: Dict of ``{\'state\': [(regex, tokentype, new_state), ...], ...}``\n #:\n #: The initial state is \'root\'.\n #: ``new_state`` can be omitted to signify no state transition.\n #: If it is a string, the state is pushed on the stack and changed.\n #: If it is a tuple of strings, all states are pushed on the stack and\n #: the current state will be the topmost.\n #: It can also be ``combined(\'state1\', \'state2\', ...)``\n #: to signify a new, anonymous state combined from the rules of two\n #: or more existing ones.\n #: Furthermore, it can be \'#pop\' to signify going back one step in\n #: the state stack, or \'#push\' to push the current state on the stack\n #: again.\n #:\n #: The tuple can also be replaced with ``include(\'state\')``, in which\n #: case the rules from the state named by the string are included in the\n #: current one.\n tokens = {}\n\n def get_tokens_unprocessed(self, text, stack=(\'root\',)):\n """\n Split ``text`` into (tokentype, text) pairs.\n\n ``stack`` is the inital stack (default: ``[\'root\']``)\n """\n pos = 0\n tokendefs = self._tokens\n statestack = list(stack)\n statetokens = tokendefs[statestack[-1]]\n while 1:\n for rexmatch, action, new_state in statetokens:\n m = rexmatch(text, pos)\n if m:\n if action is not None:\n if type(action) is _TokenType:\n yield pos, action, m.group()\n else:\n yield from action(self, m)\n pos = m.end()\n if new_state is not None:\n # state transition\n if isinstance(new_state, tuple):\n for state in new_state:\n if state == \'#pop\':\n if len(statestack) > 1:\n statestack.pop()\n elif state == \'#push\':\n statestack.append(statestack[-1])\n else:\n statestack.append(state)\n elif isinstance(new_state, int):\n # pop, but keep at least one state on the stack\n # (random code leading to unexpected pops should\n # not allow exceptions)\n if abs(new_state) >= len(statestack):\n del statestack[1:]\n else:\n del statestack[new_state:]\n elif new_state == \'#push\':\n statestack.append(statestack[-1])\n else:\n assert False, "wrong state def: %r" % new_state\n statetokens = tokendefs[statestack[-1]]\n break\n else:\n # We are here only if all state tokens have been considered\n # and there was not a match on any of them.\n try:\n if text[pos] == \'\\n\':\n # at EOL, reset state to "root"\n statestack = [\'root\']\n statetokens = tokendefs[\'root\']\n yield pos, Text, \'\\n\'\n pos += 1\n continue\n yield pos, Error, text[pos]\n pos += 1\n except IndexError:\n break\n\n\nclass LexerContext:\n """\n A helper object that holds lexer position data.\n """\n\n def 
__init__(self, text, pos, stack=None, end=None):\n self.text = text\n self.pos = pos\n self.end = end or len(text) # end=0 not supported ;-)\n self.stack = stack or [\'root\']\n\n def __repr__(self):\n return \'LexerContext(%r, %r, %r)\' % (\n self.text, self.pos, self.stack)\n\n\nclass ExtendedRegexLexer(RegexLexer):\n """\n A RegexLexer that uses a context object to store its state.\n """\n\n def get_tokens_unprocessed(self, text=None, context=None):\n """\n Split ``text`` into (tokentype, text) pairs.\n If ``context`` is given, use this lexer context instead.\n """\n tokendefs = self._tokens\n if not context:\n ctx = LexerContext(text, 0)\n statetokens = tokendefs[\'root\']\n else:\n ctx = context\n statetokens = tokendefs[ctx.stack[-1]]\n text = ctx.text\n while 1:\n for rexmatch, action, new_state in statetokens:\n m = rexmatch(text, ctx.pos, ctx.end)\n if m:\n if action is not None:\n if type(action) is _TokenType:\n yield ctx.pos, action, m.group()\n ctx.pos = m.end()\n else:\n yield from action(self, m, ctx)\n if not new_state:\n # altered the state stack?\n statetokens = tokendefs[ctx.stack[-1]]\n # CAUTION: callback must set ctx.pos!\n if new_state is not None:\n # state transition\n if isinstance(new_state, tuple):\n for state in new_state:\n if state == \'#pop\':\n if len(ctx.stack) > 1:\n ctx.stack.pop()\n elif state == \'#push\':\n ctx.stack.append(ctx.stack[-1])\n else:\n ctx.stack.append(state)\n elif isinstance(new_state, int):\n # see RegexLexer for why this check is made\n if abs(new_state) >= len(ctx.stack):\n del ctx.state[1:]\n else:\n del ctx.stack[new_state:]\n elif new_state == \'#push\':\n ctx.stack.append(ctx.stack[-1])\n else:\n assert False, "wrong state def: %r" % new_state\n statetokens = tokendefs[ctx.stack[-1]]\n break\n else:\n try:\n if ctx.pos >= ctx.end:\n break\n if text[ctx.pos] == \'\\n\':\n # at EOL, reset state to "root"\n ctx.stack = [\'root\']\n statetokens = tokendefs[\'root\']\n yield ctx.pos, Text, \'\\n\'\n ctx.pos += 1\n continue\n yield ctx.pos, Error, text[ctx.pos]\n ctx.pos += 1\n except IndexError:\n break\n\n\ndef do_insertions(insertions, tokens):\n """\n Helper for lexers which must combine the results of several\n sublexers.\n\n ``insertions`` is a list of ``(index, itokens)`` pairs.\n Each ``itokens`` iterable should be inserted at position\n ``index`` into the token stream given by the ``tokens``\n argument.\n\n The result is a combined token stream.\n\n TODO: clean up the code here.\n """\n insertions = iter(insertions)\n try:\n index, itokens = next(insertions)\n except StopIteration:\n # no insertions\n yield from tokens\n return\n\n realpos = None\n insleft = True\n\n # iterate over the token stream where we want to insert\n # the tokens from the insertion list.\n for i, t, v in tokens:\n # first iteration. 
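# --- Editor's sketch (not part of the bundle): a tiny lexer built on the
# RegexLexer state machine described above, showing a state push and '#pop'.
from pygments.lexer import RegexLexer
from pygments.token import Name, String, Text

class DemoLexer(RegexLexer):
    name = 'Demo'
    tokens = {
        'root': [
            (r'"', String, 'string'),   # push the 'string' state
            (r'\w+', Name),
            (r'\s+', Text),
        ],
        'string': [
            (r'[^"\n]+', String),
            (r'"', String, '#pop'),     # back to 'root'
        ],
    }

print(list(DemoLexer().get_tokens('say "hi"')))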
store the postition of first item\n if realpos is None:\n realpos = i\n oldi = 0\n while insleft and i + len(v) >= index:\n tmpval = v[oldi:index - i]\n yield realpos, t, tmpval\n realpos += len(tmpval)\n for it_index, it_token, it_value in itokens:\n yield realpos, it_token, it_value\n realpos += len(it_value)\n oldi = index - i\n try:\n index, itokens = next(insertions)\n except StopIteration:\n insleft = False\n break # not strictly necessary\n yield realpos, t, v[oldi:]\n realpos += len(v) - oldi\n\n # leftover tokens\n while insleft:\n # no normal tokens, set realpos to zero\n realpos = realpos or 0\n for p, t, v in itokens:\n yield realpos, t, v\n realpos += len(v)\n try:\n index, itokens = next(insertions)\n except StopIteration:\n insleft = False\n break # not strictly necessary\n\n\nclass ProfilingRegexLexerMeta(RegexLexerMeta):\n """Metaclass for ProfilingRegexLexer, collects regex timing info."""\n\n def _process_regex(cls, regex, rflags, state):\n if isinstance(regex, words):\n rex = regex_opt(regex.words, prefix=regex.prefix,\n suffix=regex.suffix)\n else:\n rex = regex\n compiled = re.compile(rex, rflags)\n\n def match_func(text, pos, endpos=sys.maxsize):\n info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])\n t0 = time.time()\n res = compiled.match(text, pos, endpos)\n t1 = time.time()\n info[0] += 1\n info[1] += t1 - t0\n return res\n return match_func\n\n\nclass ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):\n """Drop-in replacement for RegexLexer that does profiling of its regexes."""\n\n _prof_data = []\n _prof_sort_index = 4 # defaults to time per call\n\n def get_tokens_unprocessed(self, text, stack=(\'root\',)):\n # this needs to be a stack, since using(this) will produce nested calls\n self.__class__._prof_data.append({})\n yield from RegexLexer.get_tokens_unprocessed(self, text, stack)\n rawdata = self.__class__._prof_data.pop()\n data = sorted(((s, repr(r).strip(\'u\\\'\').replace(\'\\\\\\\\\', \'\\\\\')[:65],\n n, 1000 * t, 1000 * t / n)\n for ((s, r), (n, t)) in rawdata.items()),\n key=lambda x: x[self._prof_sort_index],\n reverse=True)\n sum_total = sum(x[3] for x in data)\n\n print()\n print(\'Profiling result for %s lexing %d chars in %.3f ms\' %\n (self.__class__.__name__, len(text), sum_total))\n print(\'=\' * 110)\n print(\'%-20s %-64s ncalls tottime percall\' % (\'state\', \'regex\'))\n print(\'-\' * 110)\n for d in data:\n print(\'%-20s %-65s %5d %8.4f %8.4f\' % d)\n print(\'=\' * 110)\n') + __stickytape_write_module('pygments/filter.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.filter\n ~~~~~~~~~~~~~~~\n\n Module that implements the default filter.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\n\ndef apply_filters(stream, filters, lexer=None):\n """\n Use this method to apply an iterable of filters to\n a stream. If lexer is given it\'s forwarded to the\n filter, otherwise the filter receives `None`.\n """\n def _apply(filter_, stream):\n yield from filter_.filter(lexer, stream)\n for filter_ in filters:\n stream = _apply(filter_, stream)\n return stream\n\n\ndef simplefilter(f):\n """\n Decorator that converts a function into a filter::\n\n @simplefilter\n def lowercase(self, lexer, stream, options):\n for ttype, value in stream:\n yield ttype, value.lower()\n """\n return type(f.__name__, (FunctionFilter,), {\n \'__module__\': getattr(f, \'__module__\'),\n \'__doc__\': f.__doc__,\n \'function\': f,\n })\n\n\nclass Filter:\n """\n Default filter. 
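# --- Editor's sketch (not part of the bundle): defining a filter with the
# ``simplefilter`` decorator documented above and attaching it to a lexer.
from pygments.filter import simplefilter
from pygments.lexers.python import PythonLexer

@simplefilter
def shout(self, lexer, stream, options):
    # FunctionFilter.filter forwards (lexer, stream, options) to this function
    for ttype, value in stream:
        yield ttype, value.upper()

lex = PythonLexer()
lex.add_filter(shout())  # instantiate: simplefilter returned a Filter subclass
print(''.join(v for _, v in lex.get_tokens('print("hi")\n')))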
Subclass this class or use the `simplefilter`\n decorator to create own filters.\n """\n\n def __init__(self, **options):\n self.options = options\n\n def filter(self, lexer, stream):\n raise NotImplementedError()\n\n\nclass FunctionFilter(Filter):\n """\n Abstract class used by `simplefilter` to create simple\n function filters on the fly. The `simplefilter` decorator\n automatically creates subclasses of this class for\n functions passed to it.\n """\n function = None\n\n def __init__(self, **options):\n if not hasattr(self, \'function\'):\n raise TypeError(\'%r used without bound function\' %\n self.__class__.__name__)\n Filter.__init__(self, **options)\n\n def filter(self, lexer, stream):\n # pylint: disable=not-callable\n yield from self.function(lexer, stream, self.options)\n') + __stickytape_write_module('pygments/filters/__init__.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.filters\n ~~~~~~~~~~~~~~~~\n\n Module containing filter lookup functions and default\n filters.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nimport re\n\nfrom pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \\\n string_to_tokentype\nfrom pygments.filter import Filter\nfrom pygments.util import get_list_opt, get_int_opt, get_bool_opt, \\\n get_choice_opt, ClassNotFound, OptionError\nfrom pygments.plugin import find_plugin_filters\n\n\ndef find_filter_class(filtername):\n """Lookup a filter by name. Return None if not found."""\n if filtername in FILTERS:\n return FILTERS[filtername]\n for name, cls in find_plugin_filters():\n if name == filtername:\n return cls\n return None\n\n\ndef get_filter_by_name(filtername, **options):\n """Return an instantiated filter.\n\n Options are passed to the filter initializer if wanted.\n Raise a ClassNotFound if not found.\n """\n cls = find_filter_class(filtername)\n if cls:\n return cls(**options)\n else:\n raise ClassNotFound(\'filter %r not found\' % filtername)\n\n\ndef get_all_filters():\n """Return a generator of all filter names."""\n yield from FILTERS\n for name, _ in find_plugin_filters():\n yield name\n\n\ndef _replace_special(ttype, value, regex, specialttype,\n replacefunc=lambda x: x):\n last = 0\n for match in regex.finditer(value):\n start, end = match.start(), match.end()\n if start != last:\n yield ttype, value[last:start]\n yield specialttype, replacefunc(value[start:end])\n last = end\n if last != len(value):\n yield ttype, value[last:]\n\n\nclass CodeTagFilter(Filter):\n """Highlight special code tags in comments and docstrings.\n\n Options accepted:\n\n `codetags` : list of strings\n A list of strings that are flagged as code tags. 
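# --- Editor's sketch (not part of the bundle): looking filters up by their
# registered names, as get_filter_by_name above does; 'codetagify' is the
# registered name of CodeTagFilter in this Pygments version.
from pygments.filters import get_all_filters
from pygments.lexers.python import PythonLexer

print(sorted(get_all_filters()))  # builtin filter names plus any plugins
lex = PythonLexer()
lex.add_filter('codetagify', codetags=['TODO', 'FIXME'])  # string -> get_filter_by_name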
The default is to\n highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.\n """\n\n def __init__(self, **options):\n Filter.__init__(self, **options)\n tags = get_list_opt(options, \'codetags\',\n [\'XXX\', \'TODO\', \'BUG\', \'NOTE\'])\n self.tag_re = re.compile(r\'\\b(%s)\\b\' % \'|\'.join([\n re.escape(tag) for tag in tags if tag\n ]))\n\n def filter(self, lexer, stream):\n regex = self.tag_re\n for ttype, value in stream:\n if ttype in String.Doc or \\\n ttype in Comment and \\\n ttype not in Comment.Preproc:\n yield from _replace_special(ttype, value, regex, Comment.Special)\n else:\n yield ttype, value\n\n\nclass SymbolFilter(Filter):\n """Convert mathematical symbols such as \\\\ in Isabelle\n or \\\\longrightarrow in LaTeX into Unicode characters.\n\n This is mostly useful for HTML or console output when you want to\n approximate the source rendering you\'d see in an IDE.\n\n Options accepted:\n\n `lang` : string\n The symbol language. Must be one of ``\'isabelle\'`` or\n ``\'latex\'``. The default is ``\'isabelle\'``.\n """\n\n latex_symbols = {\n \'\\\\alpha\' : \'\\U000003b1\',\n \'\\\\beta\' : \'\\U000003b2\',\n \'\\\\gamma\' : \'\\U000003b3\',\n \'\\\\delta\' : \'\\U000003b4\',\n \'\\\\varepsilon\' : \'\\U000003b5\',\n \'\\\\zeta\' : \'\\U000003b6\',\n \'\\\\eta\' : \'\\U000003b7\',\n \'\\\\vartheta\' : \'\\U000003b8\',\n \'\\\\iota\' : \'\\U000003b9\',\n \'\\\\kappa\' : \'\\U000003ba\',\n \'\\\\lambda\' : \'\\U000003bb\',\n \'\\\\mu\' : \'\\U000003bc\',\n \'\\\\nu\' : \'\\U000003bd\',\n \'\\\\xi\' : \'\\U000003be\',\n \'\\\\pi\' : \'\\U000003c0\',\n \'\\\\varrho\' : \'\\U000003c1\',\n \'\\\\sigma\' : \'\\U000003c3\',\n \'\\\\tau\' : \'\\U000003c4\',\n \'\\\\upsilon\' : \'\\U000003c5\',\n \'\\\\varphi\' : \'\\U000003c6\',\n \'\\\\chi\' : \'\\U000003c7\',\n \'\\\\psi\' : \'\\U000003c8\',\n \'\\\\omega\' : \'\\U000003c9\',\n \'\\\\Gamma\' : \'\\U00000393\',\n \'\\\\Delta\' : \'\\U00000394\',\n \'\\\\Theta\' : \'\\U00000398\',\n \'\\\\Lambda\' : \'\\U0000039b\',\n \'\\\\Xi\' : \'\\U0000039e\',\n \'\\\\Pi\' : \'\\U000003a0\',\n \'\\\\Sigma\' : \'\\U000003a3\',\n \'\\\\Upsilon\' : \'\\U000003a5\',\n \'\\\\Phi\' : \'\\U000003a6\',\n \'\\\\Psi\' : \'\\U000003a8\',\n \'\\\\Omega\' : \'\\U000003a9\',\n \'\\\\leftarrow\' : \'\\U00002190\',\n \'\\\\longleftarrow\' : \'\\U000027f5\',\n \'\\\\rightarrow\' : \'\\U00002192\',\n \'\\\\longrightarrow\' : \'\\U000027f6\',\n \'\\\\Leftarrow\' : \'\\U000021d0\',\n \'\\\\Longleftarrow\' : \'\\U000027f8\',\n \'\\\\Rightarrow\' : \'\\U000021d2\',\n \'\\\\Longrightarrow\' : \'\\U000027f9\',\n \'\\\\leftrightarrow\' : \'\\U00002194\',\n \'\\\\longleftrightarrow\' : \'\\U000027f7\',\n \'\\\\Leftrightarrow\' : \'\\U000021d4\',\n \'\\\\Longleftrightarrow\' : \'\\U000027fa\',\n \'\\\\mapsto\' : \'\\U000021a6\',\n \'\\\\longmapsto\' : \'\\U000027fc\',\n \'\\\\relbar\' : \'\\U00002500\',\n \'\\\\Relbar\' : \'\\U00002550\',\n \'\\\\hookleftarrow\' : \'\\U000021a9\',\n \'\\\\hookrightarrow\' : \'\\U000021aa\',\n \'\\\\leftharpoondown\' : \'\\U000021bd\',\n \'\\\\rightharpoondown\' : \'\\U000021c1\',\n \'\\\\leftharpoonup\' : \'\\U000021bc\',\n \'\\\\rightharpoonup\' : \'\\U000021c0\',\n \'\\\\rightleftharpoons\' : \'\\U000021cc\',\n \'\\\\leadsto\' : \'\\U0000219d\',\n \'\\\\downharpoonleft\' : \'\\U000021c3\',\n \'\\\\downharpoonright\' : \'\\U000021c2\',\n \'\\\\upharpoonleft\' : \'\\U000021bf\',\n \'\\\\upharpoonright\' : \'\\U000021be\',\n \'\\\\restriction\' : \'\\U000021be\',\n \'\\\\uparrow\' : \'\\U00002191\',\n \'\\\\Uparrow\' : \'\\U000021d1\',\n 
\'\\\\downarrow\' : \'\\U00002193\',\n \'\\\\Downarrow\' : \'\\U000021d3\',\n \'\\\\updownarrow\' : \'\\U00002195\',\n \'\\\\Updownarrow\' : \'\\U000021d5\',\n \'\\\\langle\' : \'\\U000027e8\',\n \'\\\\rangle\' : \'\\U000027e9\',\n \'\\\\lceil\' : \'\\U00002308\',\n \'\\\\rceil\' : \'\\U00002309\',\n \'\\\\lfloor\' : \'\\U0000230a\',\n \'\\\\rfloor\' : \'\\U0000230b\',\n \'\\\\flqq\' : \'\\U000000ab\',\n \'\\\\frqq\' : \'\\U000000bb\',\n \'\\\\bot\' : \'\\U000022a5\',\n \'\\\\top\' : \'\\U000022a4\',\n \'\\\\wedge\' : \'\\U00002227\',\n \'\\\\bigwedge\' : \'\\U000022c0\',\n \'\\\\vee\' : \'\\U00002228\',\n \'\\\\bigvee\' : \'\\U000022c1\',\n \'\\\\forall\' : \'\\U00002200\',\n \'\\\\exists\' : \'\\U00002203\',\n \'\\\\nexists\' : \'\\U00002204\',\n \'\\\\neg\' : \'\\U000000ac\',\n \'\\\\Box\' : \'\\U000025a1\',\n \'\\\\Diamond\' : \'\\U000025c7\',\n \'\\\\vdash\' : \'\\U000022a2\',\n \'\\\\models\' : \'\\U000022a8\',\n \'\\\\dashv\' : \'\\U000022a3\',\n \'\\\\surd\' : \'\\U0000221a\',\n \'\\\\le\' : \'\\U00002264\',\n \'\\\\ge\' : \'\\U00002265\',\n \'\\\\ll\' : \'\\U0000226a\',\n \'\\\\gg\' : \'\\U0000226b\',\n \'\\\\lesssim\' : \'\\U00002272\',\n \'\\\\gtrsim\' : \'\\U00002273\',\n \'\\\\lessapprox\' : \'\\U00002a85\',\n \'\\\\gtrapprox\' : \'\\U00002a86\',\n \'\\\\in\' : \'\\U00002208\',\n \'\\\\notin\' : \'\\U00002209\',\n \'\\\\subset\' : \'\\U00002282\',\n \'\\\\supset\' : \'\\U00002283\',\n \'\\\\subseteq\' : \'\\U00002286\',\n \'\\\\supseteq\' : \'\\U00002287\',\n \'\\\\sqsubset\' : \'\\U0000228f\',\n \'\\\\sqsupset\' : \'\\U00002290\',\n \'\\\\sqsubseteq\' : \'\\U00002291\',\n \'\\\\sqsupseteq\' : \'\\U00002292\',\n \'\\\\cap\' : \'\\U00002229\',\n \'\\\\bigcap\' : \'\\U000022c2\',\n \'\\\\cup\' : \'\\U0000222a\',\n \'\\\\bigcup\' : \'\\U000022c3\',\n \'\\\\sqcup\' : \'\\U00002294\',\n \'\\\\bigsqcup\' : \'\\U00002a06\',\n \'\\\\sqcap\' : \'\\U00002293\',\n \'\\\\Bigsqcap\' : \'\\U00002a05\',\n \'\\\\setminus\' : \'\\U00002216\',\n \'\\\\propto\' : \'\\U0000221d\',\n \'\\\\uplus\' : \'\\U0000228e\',\n \'\\\\bigplus\' : \'\\U00002a04\',\n \'\\\\sim\' : \'\\U0000223c\',\n \'\\\\doteq\' : \'\\U00002250\',\n \'\\\\simeq\' : \'\\U00002243\',\n \'\\\\approx\' : \'\\U00002248\',\n \'\\\\asymp\' : \'\\U0000224d\',\n \'\\\\cong\' : \'\\U00002245\',\n \'\\\\equiv\' : \'\\U00002261\',\n \'\\\\Join\' : \'\\U000022c8\',\n \'\\\\bowtie\' : \'\\U00002a1d\',\n \'\\\\prec\' : \'\\U0000227a\',\n \'\\\\succ\' : \'\\U0000227b\',\n \'\\\\preceq\' : \'\\U0000227c\',\n \'\\\\succeq\' : \'\\U0000227d\',\n \'\\\\parallel\' : \'\\U00002225\',\n \'\\\\mid\' : \'\\U000000a6\',\n \'\\\\pm\' : \'\\U000000b1\',\n \'\\\\mp\' : \'\\U00002213\',\n \'\\\\times\' : \'\\U000000d7\',\n \'\\\\div\' : \'\\U000000f7\',\n \'\\\\cdot\' : \'\\U000022c5\',\n \'\\\\star\' : \'\\U000022c6\',\n \'\\\\circ\' : \'\\U00002218\',\n \'\\\\dagger\' : \'\\U00002020\',\n \'\\\\ddagger\' : \'\\U00002021\',\n \'\\\\lhd\' : \'\\U000022b2\',\n \'\\\\rhd\' : \'\\U000022b3\',\n \'\\\\unlhd\' : \'\\U000022b4\',\n \'\\\\unrhd\' : \'\\U000022b5\',\n \'\\\\triangleleft\' : \'\\U000025c3\',\n \'\\\\triangleright\' : \'\\U000025b9\',\n \'\\\\triangle\' : \'\\U000025b3\',\n \'\\\\triangleq\' : \'\\U0000225c\',\n \'\\\\oplus\' : \'\\U00002295\',\n \'\\\\bigoplus\' : \'\\U00002a01\',\n \'\\\\otimes\' : \'\\U00002297\',\n \'\\\\bigotimes\' : \'\\U00002a02\',\n \'\\\\odot\' : \'\\U00002299\',\n \'\\\\bigodot\' : \'\\U00002a00\',\n \'\\\\ominus\' : \'\\U00002296\',\n \'\\\\oslash\' : \'\\U00002298\',\n \'\\\\dots\' : \'\\U00002026\',\n \'\\\\cdots\' : 
\'\\U000022ef\',\n \'\\\\sum\' : \'\\U00002211\',\n \'\\\\prod\' : \'\\U0000220f\',\n \'\\\\coprod\' : \'\\U00002210\',\n \'\\\\infty\' : \'\\U0000221e\',\n \'\\\\int\' : \'\\U0000222b\',\n \'\\\\oint\' : \'\\U0000222e\',\n \'\\\\clubsuit\' : \'\\U00002663\',\n \'\\\\diamondsuit\' : \'\\U00002662\',\n \'\\\\heartsuit\' : \'\\U00002661\',\n \'\\\\spadesuit\' : \'\\U00002660\',\n \'\\\\aleph\' : \'\\U00002135\',\n \'\\\\emptyset\' : \'\\U00002205\',\n \'\\\\nabla\' : \'\\U00002207\',\n \'\\\\partial\' : \'\\U00002202\',\n \'\\\\flat\' : \'\\U0000266d\',\n \'\\\\natural\' : \'\\U0000266e\',\n \'\\\\sharp\' : \'\\U0000266f\',\n \'\\\\angle\' : \'\\U00002220\',\n \'\\\\copyright\' : \'\\U000000a9\',\n \'\\\\textregistered\' : \'\\U000000ae\',\n \'\\\\textonequarter\' : \'\\U000000bc\',\n \'\\\\textonehalf\' : \'\\U000000bd\',\n \'\\\\textthreequarters\' : \'\\U000000be\',\n \'\\\\textordfeminine\' : \'\\U000000aa\',\n \'\\\\textordmasculine\' : \'\\U000000ba\',\n \'\\\\euro\' : \'\\U000020ac\',\n \'\\\\pounds\' : \'\\U000000a3\',\n \'\\\\yen\' : \'\\U000000a5\',\n \'\\\\textcent\' : \'\\U000000a2\',\n \'\\\\textcurrency\' : \'\\U000000a4\',\n \'\\\\textdegree\' : \'\\U000000b0\',\n }\n\n isabelle_symbols = {\n \'\\\\\' : \'\\U0001d7ec\',\n \'\\\\\' : \'\\U0001d7ed\',\n \'\\\\\' : \'\\U0001d7ee\',\n \'\\\\\' : \'\\U0001d7ef\',\n \'\\\\\' : \'\\U0001d7f0\',\n \'\\\\\' : \'\\U0001d7f1\',\n \'\\\\\' : \'\\U0001d7f2\',\n \'\\\\\' : \'\\U0001d7f3\',\n \'\\\\\' : \'\\U0001d7f4\',\n \'\\\\\' : \'\\U0001d7f5\',\n \'\\\\\' : \'\\U0001d49c\',\n \'\\\\\' : \'\\U0000212c\',\n \'\\\\\' : \'\\U0001d49e\',\n \'\\\\\' : \'\\U0001d49f\',\n \'\\\\\' : \'\\U00002130\',\n \'\\\\\' : \'\\U00002131\',\n \'\\\\\' : \'\\U0001d4a2\',\n \'\\\\\' : \'\\U0000210b\',\n \'\\\\\' : \'\\U00002110\',\n \'\\\\\' : \'\\U0001d4a5\',\n \'\\\\\' : \'\\U0001d4a6\',\n \'\\\\\' : \'\\U00002112\',\n \'\\\\\' : \'\\U00002133\',\n \'\\\\\' : \'\\U0001d4a9\',\n \'\\\\\' : \'\\U0001d4aa\',\n \'\\\\
\' : \'\\U0001d5c9\',\n \'\\\\\' : \'\\U0001d5ca\',\n \'\\\\\' : \'\\U0001d5cb\',\n \'\\\\\' : \'\\U0001d5cc\',\n \'\\\\\' : \'\\U0001d5cd\',\n \'\\\\\' : \'\\U0001d5ce\',\n \'\\\\\' : \'\\U0001d5cf\',\n \'\\\\\' : \'\\U0001d5d0\',\n \'\\\\\' : \'\\U0001d5d1\',\n \'\\\\\' : \'\\U0001d5d2\',\n \'\\\\\' : \'\\U0001d5d3\',\n \'\\\\\' : \'\\U0001d504\',\n \'\\\\\' : \'\\U0001d505\',\n \'\\\\\' : \'\\U0000212d\',\n \'\\\\
\' : \'\\U0001d507\',\n [... ~290 isabelle_symbols entries elided: \'\\\\<name>\' keys (remaining fraktur letters, Greek letters, double-struck letters, arrows, brackets, logical, set-theoretic and arithmetic operators, punctuation, currency and accent symbols), each mapped to a \'\\UXXXXXXXX\' code point as above; the \'<name>\' parts of the keys are garbled to \'\\\\\' in this copy ...]\n \'\\\\<^sub>\' : \'\\U000021e9\',\n \'\\\\<^sup>\' : \'\\U000021e7\',\n \'\\\\<^bold>\' : \'\\U00002759\',\n \'\\\\<^bsub>\' : \'\\U000021d8\',\n \'\\\\<^esub>\' : \'\\U000021d9\',\n \'\\\\<^bsup>\' : \'\\U000021d7\',\n \'\\\\<^esup>\' : \'\\U000021d6\',\n }\n\n lang_map = {\'isabelle\' : isabelle_symbols, \'latex\' : latex_symbols}\n\n def __init__(self, **options):\n Filter.__init__(self, **options)\n lang = get_choice_opt(options, \'lang\',\n [\'isabelle\', \'latex\'], \'isabelle\')\n self.symbols = self.lang_map[lang]\n\n def filter(self, lexer, stream):\n for ttype, value in stream:\n if value in self.symbols:\n yield ttype, self.symbols[value]\n else:\n yield ttype, value\n\n\nclass KeywordCaseFilter(Filter):\n """Convert keywords to lowercase or uppercase or capitalize them, which\n means first letter uppercase, rest lowercase.\n\n This can be useful e.g. if you highlight Pascal code and want to adapt the\n code to your styleguide.\n\n Options accepted:\n\n `case` : string\n The casing to convert keywords to. Must be one of ``\'lower\'``,\n ``\'upper\'`` or ``\'capitalize\'``. The default is ``\'lower\'``.\n """\n\n def __init__(self, **options):\n Filter.__init__(self, **options)\n case = get_choice_opt(options, \'case\',\n [\'lower\', \'upper\', \'capitalize\'], \'lower\')\n self.convert = getattr(str, case)\n\n def filter(self, lexer, stream):\n for ttype, value in stream:\n if ttype in Keyword:\n yield ttype, self.convert(value)\n else:\n yield ttype, value\n\n\nclass NameHighlightFilter(Filter):\n """Highlight a normal Name (and Name.*) token with a different token type.\n\n Example::\n\n filter = NameHighlightFilter(\n names=[\'foo\', \'bar\', \'baz\'],\n tokentype=Name.Function,\n )\n\n This would highlight the names "foo", "bar" and "baz"\n as functions. `Name.Function` is the default token type.\n\n Options accepted:\n\n `names` : list of strings\n A list of names that should be given the different token type.\n There is no default.\n `tokentype` : TokenType or string\n A token type or a string containing a token type name that is\n used for highlighting the strings in `names`. 
The default is\n `Name.Function`.\n """\n\n def __init__(self, **options):\n Filter.__init__(self, **options)\n self.names = set(get_list_opt(options, \'names\', []))\n tokentype = options.get(\'tokentype\')\n if tokentype:\n self.tokentype = string_to_tokentype(tokentype)\n else:\n self.tokentype = Name.Function\n\n def filter(self, lexer, stream):\n for ttype, value in stream:\n if ttype in Name and value in self.names:\n yield self.tokentype, value\n else:\n yield ttype, value\n\n\nclass ErrorToken(Exception):\n pass\n\n\nclass RaiseOnErrorTokenFilter(Filter):\n """Raise an exception when the lexer generates an error token.\n\n Options accepted:\n\n `excclass` : Exception class\n The exception class to raise.\n The default is `pygments.filters.ErrorToken`.\n\n .. versionadded:: 0.8\n """\n\n def __init__(self, **options):\n Filter.__init__(self, **options)\n self.exception = options.get(\'excclass\', ErrorToken)\n try:\n # issubclass() will raise TypeError if first argument is not a class\n if not issubclass(self.exception, Exception):\n raise TypeError\n except TypeError:\n raise OptionError(\'excclass option is not an exception class\')\n\n def filter(self, lexer, stream):\n for ttype, value in stream:\n if ttype is Error:\n raise self.exception(value)\n yield ttype, value\n\n\nclass VisibleWhitespaceFilter(Filter):\n """Convert tabs, newlines and/or spaces to visible characters.\n\n Options accepted:\n\n `spaces` : string or bool\n If this is a one-character string, spaces will be replaced by this string.\n If it is another true value, spaces will be replaced by ``\xc2\xb7`` (unicode\n MIDDLE DOT). If it is a false value, spaces will not be replaced. The\n default is ``False``.\n `tabs` : string or bool\n The same as for `spaces`, but the default replacement character is ``\xc2\xbb``\n (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value\n is ``False``. Note: this will not work if the `tabsize` option for the\n lexer is nonzero, as tabs will already have been expanded then.\n `tabsize` : int\n If tabs are to be replaced by this filter (see the `tabs` option), this\n is the total number of characters that a tab should be expanded to.\n The default is ``8``.\n `newlines` : string or bool\n The same as for `spaces`, but the default replacement character is ``\xc2\xb6``\n (unicode PILCROW SIGN). The default value is ``False``.\n `wstokentype` : bool\n If true, give whitespace the special `Whitespace` token type. This allows\n styling the visible whitespace differently (e.g. greyed out), but it can\n disrupt background colors. The default is ``True``.\n\n .. 
versionadded:: 0.8\n """\n\n def __init__(self, **options):\n Filter.__init__(self, **options)\n for name, default in [(\'spaces\', \'\xc2\xb7\'),\n (\'tabs\', \'\xc2\xbb\'),\n (\'newlines\', \'\xc2\xb6\')]:\n opt = options.get(name, False)\n if isinstance(opt, str) and len(opt) == 1:\n setattr(self, name, opt)\n else:\n setattr(self, name, (opt and default or \'\'))\n tabsize = get_int_opt(options, \'tabsize\', 8)\n if self.tabs:\n self.tabs += \' \' * (tabsize - 1)\n if self.newlines:\n self.newlines += \'\\n\'\n self.wstt = get_bool_opt(options, \'wstokentype\', True)\n\n def filter(self, lexer, stream):\n if self.wstt:\n spaces = self.spaces or \' \'\n tabs = self.tabs or \'\\t\'\n newlines = self.newlines or \'\\n\'\n regex = re.compile(r\'\\s\')\n\n def replacefunc(wschar):\n if wschar == \' \':\n return spaces\n elif wschar == \'\\t\':\n return tabs\n elif wschar == \'\\n\':\n return newlines\n return wschar\n\n for ttype, value in stream:\n yield from _replace_special(ttype, value, regex, Whitespace,\n replacefunc)\n else:\n spaces, tabs, newlines = self.spaces, self.tabs, self.newlines\n # simpler processing\n for ttype, value in stream:\n if spaces:\n value = value.replace(\' \', spaces)\n if tabs:\n value = value.replace(\'\\t\', tabs)\n if newlines:\n value = value.replace(\'\\n\', newlines)\n yield ttype, value\n\n\nclass GobbleFilter(Filter):\n """Gobbles source code lines (eats initial characters).\n\n This filter drops the first ``n`` characters off every line of code. This\n may be useful when the source code fed to the lexer is indented by a fixed\n amount of space that isn\'t desired in the output.\n\n Options accepted:\n\n `n` : int\n The number of characters to gobble.\n\n .. versionadded:: 1.2\n """\n def __init__(self, **options):\n Filter.__init__(self, **options)\n self.n = get_int_opt(options, \'n\', 0)\n\n def gobble(self, value, left):\n if left < len(value):\n return value[left:], 0\n else:\n return \'\', left - len(value)\n\n def filter(self, lexer, stream):\n n = self.n\n left = n # How many characters left to gobble.\n for ttype, value in stream:\n # Remove ``left`` tokens from first line, ``n`` from all others.\n parts = value.split(\'\\n\')\n (parts[0], left) = self.gobble(parts[0], left)\n for i in range(1, len(parts)):\n (parts[i], left) = self.gobble(parts[i], n)\n value = \'\\n\'.join(parts)\n\n if value != \'\':\n yield ttype, value\n\n\nclass TokenMergeFilter(Filter):\n """Merges consecutive tokens with the same token type in the output\n stream of a lexer.\n\n .. 
versionadded:: 1.2\n """\n def __init__(self, **options):\n Filter.__init__(self, **options)\n\n def filter(self, lexer, stream):\n current_type = None\n current_value = None\n for ttype, value in stream:\n if ttype is current_type:\n current_value += value\n else:\n if current_type is not None:\n yield current_type, current_value\n current_type = ttype\n current_value = value\n if current_type is not None:\n yield current_type, current_value\n\n\nFILTERS = {\n \'codetagify\': CodeTagFilter,\n \'keywordcase\': KeywordCaseFilter,\n \'highlight\': NameHighlightFilter,\n \'raiseonerror\': RaiseOnErrorTokenFilter,\n \'whitespace\': VisibleWhitespaceFilter,\n \'gobble\': GobbleFilter,\n \'tokenmerge\': TokenMergeFilter,\n \'symbols\': SymbolFilter,\n}\n') + __stickytape_write_module('pygments/regexopt.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.regexopt\n ~~~~~~~~~~~~~~~~~\n\n An algorithm that generates optimized regexes for matching long lists of\n literal strings.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nimport re\nfrom re import escape\nfrom os.path import commonprefix\nfrom itertools import groupby\nfrom operator import itemgetter\n\nCS_ESCAPE = re.compile(r\'[\\^\\\\\\-\\]]\')\nFIRST_ELEMENT = itemgetter(0)\n\n\ndef make_charset(letters):\n return \'[\' + CS_ESCAPE.sub(lambda m: \'\\\\\' + m.group(), \'\'.join(letters)) + \']\'\n\n\ndef regex_opt_inner(strings, open_paren):\n """Return a regex that matches any string in the sorted list of strings."""\n close_paren = open_paren and \')\' or \'\'\n # print strings, repr(open_paren)\n if not strings:\n # print \'-> nothing left\'\n return \'\'\n first = strings[0]\n if len(strings) == 1:\n # print \'-> only 1 string\'\n return open_paren + escape(first) + close_paren\n if not first:\n # print \'-> first string empty\'\n return open_paren + regex_opt_inner(strings[1:], \'(?:\') \\\n + \'?\' + close_paren\n if len(first) == 1:\n # multiple one-char strings? make a charset\n oneletter = []\n rest = []\n for s in strings:\n if len(s) == 1:\n oneletter.append(s)\n else:\n rest.append(s)\n if len(oneletter) > 1: # do we have more than one oneletter string?\n if rest:\n # print \'-> 1-character + rest\'\n return open_paren + regex_opt_inner(rest, \'\') + \'|\' \\\n + make_charset(oneletter) + close_paren\n # print \'-> only 1-character\'\n return open_paren + make_charset(oneletter) + close_paren\n prefix = commonprefix(strings)\n if prefix:\n plen = len(prefix)\n # we have a prefix for all strings\n # print \'-> prefix:\', prefix\n return open_paren + escape(prefix) \\\n + regex_opt_inner([s[plen:] for s in strings], \'(?:\') \\\n + close_paren\n # is there a suffix?\n strings_rev = [s[::-1] for s in strings]\n suffix = commonprefix(strings_rev)\n if suffix:\n slen = len(suffix)\n # print \'-> suffix:\', suffix[::-1]\n return open_paren \\\n + regex_opt_inner(sorted(s[:-slen] for s in strings), \'(?:\') \\\n + escape(suffix[::-1]) + close_paren\n # recurse on common 1-string prefixes\n # print \'-> last resort\'\n return open_paren + \\\n \'|\'.join(regex_opt_inner(list(group[1]), \'\')\n for group in groupby(strings, lambda s: s[0] == first[0])) \\\n + close_paren\n\n\ndef regex_opt(strings, prefix=\'\', suffix=\'\'):\n """Return a compiled regex that matches any string in the given list.\n\n The strings to match must be literal strings, not regexes. 
They will be\n regex-escaped.\n\n *prefix* and *suffix* are pre- and appended to the final regex.\n """\n strings = sorted(strings)\n return prefix + regex_opt_inner(strings, \'(\') + suffix\n') + __stickytape_write_module('pygments/unistring.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.unistring\n ~~~~~~~~~~~~~~~~~~\n\n Strings of all Unicode characters of a certain category.\n Used for matching in Unicode-aware languages. Run to regenerate.\n\n Inspired by chartypes_create.py from the MoinMoin project.\n\n :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nimport sys\n\nCc = \'\\x00-\\x1f\\x7f-\\x9f\'\n\nCf = \'\\xad\\u0600-\\u0605\\u061c\\u06dd\\u070f\\u08e2\\u180e\\u200b-\\u200f\\u202a-\\u202e\\u2060-\\u2064\\u2066-\\u206f\\ufeff\\ufff9-\\ufffb\\U000110bd\\U000110cd\\U0001bca0-\\U0001bca3\\U0001d173-\\U0001d17a\\U000e0001\\U000e0020-\\U000e007f\'\n\nCn = \'\\u0378-\\u0379\\u0380-\\u0383\\u038b\\u038d\\u03a2\\u0530\\u0557-\\u0558\\u058b-\\u058c\\u0590\\u05c8-\\u05cf\\u05eb-\\u05ee\\u05f5-\\u05ff\\u061d\\u070e\\u074b-\\u074c\\u07b2-\\u07bf\\u07fb-\\u07fc\\u082e-\\u082f\\u083f\\u085c-\\u085d\\u085f\\u086b-\\u089f\\u08b5\\u08be-\\u08d2\\u0984\\u098d-\\u098e\\u0991-\\u0992\\u09a9\\u09b1\\u09b3-\\u09b5\\u09ba-\\u09bb\\u09c5-\\u09c6\\u09c9-\\u09ca\\u09cf-\\u09d6\\u09d8-\\u09db\\u09de\\u09e4-\\u09e5\\u09ff-\\u0a00\\u0a04\\u0a0b-\\u0a0e\\u0a11-\\u0a12\\u0a29\\u0a31\\u0a34\\u0a37\\u0a3a-\\u0a3b\\u0a3d\\u0a43-\\u0a46\\u0a49-\\u0a4a\\u0a4e-\\u0a50\\u0a52-\\u0a58\\u0a5d\\u0a5f-\\u0a65\\u0a77-\\u0a80\\u0a84\\u0a8e\\u0a92\\u0aa9\\u0ab1\\u0ab4\\u0aba-\\u0abb\\u0ac6\\u0aca\\u0ace-\\u0acf\\u0ad1-\\u0adf\\u0ae4-\\u0ae5\\u0af2-\\u0af8\\u0b00\\u0b04\\u0b0d-\\u0b0e\\u0b11-\\u0b12\\u0b29\\u0b31\\u0b34\\u0b3a-\\u0b3b\\u0b45-\\u0b46\\u0b49-\\u0b4a\\u0b4e-\\u0b55\\u0b58-\\u0b5b\\u0b5e\\u0b64-\\u0b65\\u0b78-\\u0b81\\u0b84\\u0b8b-\\u0b8d\\u0b91\\u0b96-\\u0b98\\u0b9b\\u0b9d\\u0ba0-\\u0ba2\\u0ba5-\\u0ba7\\u0bab-\\u0bad\\u0bba-\\u0bbd\\u0bc3-\\u0bc5\\u0bc9\\u0bce-\\u0bcf\\u0bd1-\\u0bd6\\u0bd8-\\u0be5\\u0bfb-\\u0bff\\u0c0d\\u0c11\\u0c29\\u0c3a-\\u0c3c\\u0c45\\u0c49\\u0c4e-\\u0c54\\u0c57\\u0c5b-\\u0c5f\\u0c64-\\u0c65\\u0c70-\\u0c77\\u0c8d\\u0c91\\u0ca9\\u0cb4\\u0cba-\\u0cbb\\u0cc5\\u0cc9\\u0cce-\\u0cd4\\u0cd7-\\u0cdd\\u0cdf\\u0ce4-\\u0ce5\\u0cf0\\u0cf3-\\u0cff\\u0d04\\u0d0d\\u0d11\\u0d45\\u0d49\\u0d50-\\u0d53\\u0d64-\\u0d65\\u0d80-\\u0d81\\u0d84\\u0d97-\\u0d99\\u0db2\\u0dbc\\u0dbe-\\u0dbf\\u0dc7-\\u0dc9\\u0dcb-\\u0dce\\u0dd5\\u0dd7\\u0de0-\\u0de5\\u0df0-\\u0df1\\u0df5-\\u0e00\\u0e3b-\\u0e3e\\u0e5c-\\u0e80\\u0e83\\u0e85-\\u0e86\\u0e89\\u0e8b-\\u0e8c\\u0e8e-\\u0e93\\u0e98\\u0ea0\\u0ea4\\u0ea6\\u0ea8-\\u0ea9\\u0eac\\u0eba\\u0ebe-\\u0ebf\\u0ec5\\u0ec7\\u0ece-\\u0ecf\\u0eda-\\u0edb\\u0ee0-\\u0eff\\u0f48\\u0f6d-\\u0f70\\u0f98\\u0fbd\\u0fcd\\u0fdb-\\u0fff\\u10c6\\u10c8-\\u10cc\\u10ce-\\u10cf\\u1249\\u124e-\\u124f\\u1257\\u1259\\u125e-\\u125f\\u1289\\u128e-\\u128f\\u12b1\\u12b6-\\u12b7\\u12bf\\u12c1\\u12c6-\\u12c7\\u12d7\\u1311\\u1316-\\u1317\\u135b-\\u135c\\u137d-\\u137f\\u139a-\\u139f\\u13f6-\\u13f7\\u13fe-\\u13ff\\u169d-\\u169f\\u16f9-\\u16ff\\u170d\\u1715-\\u171f\\u1737-\\u173f\\u1754-\\u175f\\u176d\\u1771\\u1774-\\u177f\\u17de-\\u17df\\u17ea-\\u17ef\\u17fa-\\u17ff\\u180f\\u181a-\\u181f\\u1879-\\u187f\\u18ab-\\u18af\\u18f6-\\u18ff\\u191f\\u192c-\\u192f\\u193c-\\u193f\\u1941-\\u1943\\u196e-\\u196f\\u1975-\\u197f\\u19ac-\\u19af\\u19ca-\\u19cf\\u19db-\\u19dd\\u1a1c-\\u1a1d\\u1a5f\\u1a7d-\\u1a7e\\u1a8a-\\u1a8f\\u1a9a-\\u1a9f\\u1aae-\\u1aaf\\u1abf-\\u1aff\\u1b4
c-\\u1b4f\\u1b7d-\\u1b7f\\u1bf4-\\u1bfb\\u1c38-\\u1c3a\\u1c4a-\\u1c4c\\u1c89-\\u1c8f\\u1cbb-\\u1cbc\\u1cc8-\\u1ccf\\u1cfa-\\u1cff\\u1dfa\\u1f16-\\u1f17\\u1f1e-\\u1f1f\\u1f46-\\u1f47\\u1f4e-\\u1f4f\\u1f58\\u1f5a\\u1f5c\\u1f5e\\u1f7e-\\u1f7f\\u1fb5\\u1fc5\\u1fd4-\\u1fd5\\u1fdc\\u1ff0-\\u1ff1\\u1ff5\\u1fff\\u2065\\u2072-\\u2073\\u208f\\u209d-\\u209f\\u20c0-\\u20cf\\u20f1-\\u20ff\\u218c-\\u218f\\u2427-\\u243f\\u244b-\\u245f\\u2b74-\\u2b75\\u2b96-\\u2b97\\u2bc9\\u2bff\\u2c2f\\u2c5f\\u2cf4-\\u2cf8\\u2d26\\u2d28-\\u2d2c\\u2d2e-\\u2d2f\\u2d68-\\u2d6e\\u2d71-\\u2d7e\\u2d97-\\u2d9f\\u2da7\\u2daf\\u2db7\\u2dbf\\u2dc7\\u2dcf\\u2dd7\\u2ddf\\u2e4f-\\u2e7f\\u2e9a\\u2ef4-\\u2eff\\u2fd6-\\u2fef\\u2ffc-\\u2fff\\u3040\\u3097-\\u3098\\u3100-\\u3104\\u3130\\u318f\\u31bb-\\u31bf\\u31e4-\\u31ef\\u321f\\u32ff\\u4db6-\\u4dbf\\u9ff0-\\u9fff\\ua48d-\\ua48f\\ua4c7-\\ua4cf\\ua62c-\\ua63f\\ua6f8-\\ua6ff\\ua7ba-\\ua7f6\\ua82c-\\ua82f\\ua83a-\\ua83f\\ua878-\\ua87f\\ua8c6-\\ua8cd\\ua8da-\\ua8df\\ua954-\\ua95e\\ua97d-\\ua97f\\ua9ce\\ua9da-\\ua9dd\\ua9ff\\uaa37-\\uaa3f\\uaa4e-\\uaa4f\\uaa5a-\\uaa5b\\uaac3-\\uaada\\uaaf7-\\uab00\\uab07-\\uab08\\uab0f-\\uab10\\uab17-\\uab1f\\uab27\\uab2f\\uab66-\\uab6f\\uabee-\\uabef\\uabfa-\\uabff\\ud7a4-\\ud7af\\ud7c7-\\ud7ca\\ud7fc-\\ud7ff\\ufa6e-\\ufa6f\\ufada-\\ufaff\\ufb07-\\ufb12\\ufb18-\\ufb1c\\ufb37\\ufb3d\\ufb3f\\ufb42\\ufb45\\ufbc2-\\ufbd2\\ufd40-\\ufd4f\\ufd90-\\ufd91\\ufdc8-\\ufdef\\ufdfe-\\ufdff\\ufe1a-\\ufe1f\\ufe53\\ufe67\\ufe6c-\\ufe6f\\ufe75\\ufefd-\\ufefe\\uff00\\uffbf-\\uffc1\\uffc8-\\uffc9\\uffd0-\\uffd1\\uffd8-\\uffd9\\uffdd-\\uffdf\\uffe7\\uffef-\\ufff8\\ufffe-\\uffff\\U0001000c\\U00010027\\U0001003b\\U0001003e\\U0001004e-\\U0001004f\\U0001005e-\\U0001007f\\U000100fb-\\U000100ff\\U00010103-\\U00010106\\U00010134-\\U00010136\\U0001018f\\U0001019c-\\U0001019f\\U000101a1-\\U000101cf\\U000101fe-\\U0001027f\\U0001029d-\\U0001029f\\U000102d1-\\U000102df\\U000102fc-\\U000102ff\\U00010324-\\U0001032c\\U0001034b-\\U0001034f\\U0001037b-\\U0001037f\\U0001039e\\U000103c4-\\U000103c7\\U000103d6-\\U000103ff\\U0001049e-\\U0001049f\\U000104aa-\\U000104af\\U000104d4-\\U000104d7\\U000104fc-\\U000104ff\\U00010528-\\U0001052f\\U00010564-\\U0001056e\\U00010570-\\U000105ff\\U00010737-\\U0001073f\\U00010756-\\U0001075f\\U00010768-\\U000107ff\\U00010806-\\U00010807\\U00010809\\U00010836\\U00010839-\\U0001083b\\U0001083d-\\U0001083e\\U00010856\\U0001089f-\\U000108a6\\U000108b0-\\U000108df\\U000108f3\\U000108f6-\\U000108fa\\U0001091c-\\U0001091e\\U0001093a-\\U0001093e\\U00010940-\\U0001097f\\U000109b8-\\U000109bb\\U000109d0-\\U000109d1\\U00010a04\\U00010a07-\\U00010a0b\\U00010a14\\U00010a18\\U00010a36-\\U00010a37\\U00010a3b-\\U00010a3e\\U00010a49-\\U00010a4f\\U00010a59-\\U00010a5f\\U00010aa0-\\U00010abf\\U00010ae7-\\U00010aea\\U00010af7-\\U00010aff\\U00010b36-\\U00010b38\\U00010b56-\\U00010b57\\U00010b73-\\U00010b77\\U00010b92-\\U00010b98\\U00010b9d-\\U00010ba8\\U00010bb0-\\U00010bff\\U00010c49-\\U00010c7f\\U00010cb3-\\U00010cbf\\U00010cf3-\\U00010cf9\\U00010d28-\\U00010d2f\\U00010d3a-\\U00010e5f\\U00010e7f-\\U00010eff\\U00010f28-\\U00010f2f\\U00010f5a-\\U00010fff\\U0001104e-\\U00011051\\U00011070-\\U0001107e\\U000110c2-\\U000110cc\\U000110ce-\\U000110cf\\U000110e9-\\U000110ef\\U000110fa-\\U000110ff\\U00011135\\U00011147-\\U0001114f\\U00011177-\\U0001117f\\U000111ce-\\U000111cf\\U000111e0\\U000111f5-\\U000111ff\\U00011212\\U0001123f-\\U0001127f\\U00011287\\U00011289\\U0001128e\\U0001129e\\U000112aa-\\U000112af\\U000112eb-\\U000112ef\\U000112fa-\\U000112ff\\U00011304\\U0001130d-\\U0001130e\\U00011
311-\\U00011312\\U00011329\\U00011331\\U00011334\\U0001133a\\U00011345-\\U00011346\\U00011349-\\U0001134a\\U0001134e-\\U0001134f\\U00011351-\\U00011356\\U00011358-\\U0001135c\\U00011364-\\U00011365\\U0001136d-\\U0001136f\\U00011375-\\U000113ff\\U0001145a\\U0001145c\\U0001145f-\\U0001147f\\U000114c8-\\U000114cf\\U000114da-\\U0001157f\\U000115b6-\\U000115b7\\U000115de-\\U000115ff\\U00011645-\\U0001164f\\U0001165a-\\U0001165f\\U0001166d-\\U0001167f\\U000116b8-\\U000116bf\\U000116ca-\\U000116ff\\U0001171b-\\U0001171c\\U0001172c-\\U0001172f\\U00011740-\\U000117ff\\U0001183c-\\U0001189f\\U000118f3-\\U000118fe\\U00011900-\\U000119ff\\U00011a48-\\U00011a4f\\U00011a84-\\U00011a85\\U00011aa3-\\U00011abf\\U00011af9-\\U00011bff\\U00011c09\\U00011c37\\U00011c46-\\U00011c4f\\U00011c6d-\\U00011c6f\\U00011c90-\\U00011c91\\U00011ca8\\U00011cb7-\\U00011cff\\U00011d07\\U00011d0a\\U00011d37-\\U00011d39\\U00011d3b\\U00011d3e\\U00011d48-\\U00011d4f\\U00011d5a-\\U00011d5f\\U00011d66\\U00011d69\\U00011d8f\\U00011d92\\U00011d99-\\U00011d9f\\U00011daa-\\U00011edf\\U00011ef9-\\U00011fff\\U0001239a-\\U000123ff\\U0001246f\\U00012475-\\U0001247f\\U00012544-\\U00012fff\\U0001342f-\\U000143ff\\U00014647-\\U000167ff\\U00016a39-\\U00016a3f\\U00016a5f\\U00016a6a-\\U00016a6d\\U00016a70-\\U00016acf\\U00016aee-\\U00016aef\\U00016af6-\\U00016aff\\U00016b46-\\U00016b4f\\U00016b5a\\U00016b62\\U00016b78-\\U00016b7c\\U00016b90-\\U00016e3f\\U00016e9b-\\U00016eff\\U00016f45-\\U00016f4f\\U00016f7f-\\U00016f8e\\U00016fa0-\\U00016fdf\\U00016fe2-\\U00016fff\\U000187f2-\\U000187ff\\U00018af3-\\U0001afff\\U0001b11f-\\U0001b16f\\U0001b2fc-\\U0001bbff\\U0001bc6b-\\U0001bc6f\\U0001bc7d-\\U0001bc7f\\U0001bc89-\\U0001bc8f\\U0001bc9a-\\U0001bc9b\\U0001bca4-\\U0001cfff\\U0001d0f6-\\U0001d0ff\\U0001d127-\\U0001d128\\U0001d1e9-\\U0001d1ff\\U0001d246-\\U0001d2df\\U0001d2f4-\\U0001d2ff\\U0001d357-\\U0001d35f\\U0001d379-\\U0001d3ff\\U0001d455\\U0001d49d\\U0001d4a0-\\U0001d4a1\\U0001d4a3-\\U0001d4a4\\U0001d4a7-\\U0001d4a8\\U0001d4ad\\U0001d4ba\\U0001d4bc\\U0001d4c4\\U0001d506\\U0001d50b-\\U0001d50c\\U0001d515\\U0001d51d\\U0001d53a\\U0001d53f\\U0001d545\\U0001d547-\\U0001d549\\U0001d551\\U0001d6a6-\\U0001d6a7\\U0001d7cc-\\U0001d7cd\\U0001da8c-\\U0001da9a\\U0001daa0\\U0001dab0-\\U0001dfff\\U0001e007\\U0001e019-\\U0001e01a\\U0001e022\\U0001e025\\U0001e02b-\\U0001e7ff\\U0001e8c5-\\U0001e8c6\\U0001e8d7-\\U0001e8ff\\U0001e94b-\\U0001e94f\\U0001e95a-\\U0001e95d\\U0001e960-\\U0001ec70\\U0001ecb5-\\U0001edff\\U0001ee04\\U0001ee20\\U0001ee23\\U0001ee25-\\U0001ee26\\U0001ee28\\U0001ee33\\U0001ee38\\U0001ee3a\\U0001ee3c-\\U0001ee41\\U0001ee43-\\U0001ee46\\U0001ee48\\U0001ee4a\\U0001ee4c\\U0001ee50\\U0001ee53\\U0001ee55-\\U0001ee56\\U0001ee58\\U0001ee5a\\U0001ee5c\\U0001ee5e\\U0001ee60\\U0001ee63\\U0001ee65-\\U0001ee66\\U0001ee6b\\U0001ee73\\U0001ee78\\U0001ee7d\\U0001ee7f\\U0001ee8a\\U0001ee9c-\\U0001eea0\\U0001eea4\\U0001eeaa\\U0001eebc-\\U0001eeef\\U0001eef2-\\U0001efff\\U0001f02c-\\U0001f02f\\U0001f094-\\U0001f09f\\U0001f0af-\\U0001f0b0\\U0001f0c0\\U0001f0d0\\U0001f0f6-\\U0001f0ff\\U0001f10d-\\U0001f10f\\U0001f16c-\\U0001f16f\\U0001f1ad-\\U0001f1e5\\U0001f203-\\U0001f20f\\U0001f23c-\\U0001f23f\\U0001f249-\\U0001f24f\\U0001f252-\\U0001f25f\\U0001f266-\\U0001f2ff\\U0001f6d5-\\U0001f6df\\U0001f6ed-\\U0001f6ef\\U0001f6fa-\\U0001f6ff\\U0001f774-\\U0001f77f\\U0001f7d9-\\U0001f7ff\\U0001f80c-\\U0001f80f\\U0001f848-\\U0001f84f\\U0001f85a-\\U0001f85f\\U0001f888-\\U0001f88f\\U0001f8ae-\\U0001f8ff\\U0001f90c-\\U0001f90f\\U0001f93f\\U0001f971-\\U0001f972\\U0001f977-\\U0001f
979\\U0001f97b\\U0001f9a3-\\U0001f9af\\U0001f9ba-\\U0001f9bf\\U0001f9c3-\\U0001f9cf\\U0001fa00-\\U0001fa5f\\U0001fa6e-\\U0001ffff\\U0002a6d7-\\U0002a6ff\\U0002b735-\\U0002b73f\\U0002b81e-\\U0002b81f\\U0002cea2-\\U0002ceaf\\U0002ebe1-\\U0002f7ff\\U0002fa1e-\\U000e0000\\U000e0002-\\U000e001f\\U000e0080-\\U000e00ff\\U000e01f0-\\U000effff\\U000ffffe-\\U000fffff\\U0010fffe-\\U0010ffff\'\n\nCo = \'\\ue000-\\uf8ff\\U000f0000-\\U000ffffd\\U00100000-\\U0010fffd\'\n\nCs = \'\\ud800-\\udbff\\\\\\udc00\\udc01-\\udfff\'\n\nLl = \'a-z\\xb5\\xdf-\\xf6\\xf8-\\xff\\u0101\\u0103\\u0105\\u0107\\u0109\\u010b\\u010d\\u010f\\u0111\\u0113\\u0115\\u0117\\u0119\\u011b\\u011d\\u011f\\u0121\\u0123\\u0125\\u0127\\u0129\\u012b\\u012d\\u012f\\u0131\\u0133\\u0135\\u0137-\\u0138\\u013a\\u013c\\u013e\\u0140\\u0142\\u0144\\u0146\\u0148-\\u0149\\u014b\\u014d\\u014f\\u0151\\u0153\\u0155\\u0157\\u0159\\u015b\\u015d\\u015f\\u0161\\u0163\\u0165\\u0167\\u0169\\u016b\\u016d\\u016f\\u0171\\u0173\\u0175\\u0177\\u017a\\u017c\\u017e-\\u0180\\u0183\\u0185\\u0188\\u018c-\\u018d\\u0192\\u0195\\u0199-\\u019b\\u019e\\u01a1\\u01a3\\u01a5\\u01a8\\u01aa-\\u01ab\\u01ad\\u01b0\\u01b4\\u01b6\\u01b9-\\u01ba\\u01bd-\\u01bf\\u01c6\\u01c9\\u01cc\\u01ce\\u01d0\\u01d2\\u01d4\\u01d6\\u01d8\\u01da\\u01dc-\\u01dd\\u01df\\u01e1\\u01e3\\u01e5\\u01e7\\u01e9\\u01eb\\u01ed\\u01ef-\\u01f0\\u01f3\\u01f5\\u01f9\\u01fb\\u01fd\\u01ff\\u0201\\u0203\\u0205\\u0207\\u0209\\u020b\\u020d\\u020f\\u0211\\u0213\\u0215\\u0217\\u0219\\u021b\\u021d\\u021f\\u0221\\u0223\\u0225\\u0227\\u0229\\u022b\\u022d\\u022f\\u0231\\u0233-\\u0239\\u023c\\u023f-\\u0240\\u0242\\u0247\\u0249\\u024b\\u024d\\u024f-\\u0293\\u0295-\\u02af\\u0371\\u0373\\u0377\\u037b-\\u037d\\u0390\\u03ac-\\u03ce\\u03d0-\\u03d1\\u03d5-\\u03d7\\u03d9\\u03db\\u03dd\\u03df\\u03e1\\u03e3\\u03e5\\u03e7\\u03e9\\u03eb\\u03ed\\u03ef-\\u03f3\\u03f5\\u03f8\\u03fb-\\u03fc\\u0430-\\u045f\\u0461\\u0463\\u0465\\u0467\\u0469\\u046b\\u046d\\u046f\\u0471\\u0473\\u0475\\u0477\\u0479\\u047b\\u047d\\u047f\\u0481\\u048b\\u048d\\u048f\\u0491\\u0493\\u0495\\u0497\\u0499\\u049b\\u049d\\u049f\\u04a1\\u04a3\\u04a5\\u04a7\\u04a9\\u04ab\\u04ad\\u04af\\u04b1\\u04b3\\u04b5\\u04b7\\u04b9\\u04bb\\u04bd\\u04bf\\u04c2\\u04c4\\u04c6\\u04c8\\u04ca\\u04cc\\u04ce-\\u04cf\\u04d1\\u04d3\\u04d5\\u04d7\\u04d9\\u04db\\u04dd\\u04df\\u04e1\\u04e3\\u04e5\\u04e7\\u04e9\\u04eb\\u04ed\\u04ef\\u04f1\\u04f3\\u04f5\\u04f7\\u04f9\\u04fb\\u04fd\\u04ff\\u0501\\u0503\\u0505\\u0507\\u0509\\u050b\\u050d\\u050f\\u0511\\u0513\\u0515\\u0517\\u0519\\u051b\\u051d\\u051f\\u0521\\u0523\\u0525\\u0527\\u0529\\u052b\\u052d\\u052f\\u0560-\\u0588\\u10d0-\\u10fa\\u10fd-\\u10ff\\u13f8-\\u13fd\\u1c80-\\u1c88\\u1d00-\\u1d2b\\u1d6b-\\u1d77\\u1d79-\\u1d9a\\u1e01\\u1e03\\u1e05\\u1e07\\u1e09\\u1e0b\\u1e0d\\u1e0f\\u1e11\\u1e13\\u1e15\\u1e17\\u1e19\\u1e1b\\u1e1d\\u1e1f\\u1e21\\u1e23\\u1e25\\u1e27\\u1e29\\u1e2b\\u1e2d\\u1e2f\\u1e31\\u1e33\\u1e35\\u1e37\\u1e39\\u1e3b\\u1e3d\\u1e3f\\u1e41\\u1e43\\u1e45\\u1e47\\u1e49\\u1e4b\\u1e4d\\u1e4f\\u1e51\\u1e53\\u1e55\\u1e57\\u1e59\\u1e5b\\u1e5d\\u1e5f\\u1e61\\u1e63\\u1e65\\u1e67\\u1e69\\u1e6b\\u1e6d\\u1e6f\\u1e71\\u1e73\\u1e75\\u1e77\\u1e79\\u1e7b\\u1e7d\\u1e7f\\u1e81\\u1e83\\u1e85\\u1e87\\u1e89\\u1e8b\\u1e8d\\u1e8f\\u1e91\\u1e93\\u1e95-\\u1e9d\\u1e9f\\u1ea1\\u1ea3\\u1ea5\\u1ea7\\u1ea9\\u1eab\\u1ead\\u1eaf\\u1eb1\\u1eb3\\u1eb5\\u1eb7\\u1eb9\\u1ebb\\u1ebd\\u1ebf\\u1ec1\\u1ec3\\u1ec5\\u1ec7\\u1ec9\\u1ecb\\u1ecd\\u1ecf\\u1ed1\\u1ed3\\u1ed5\\u1ed7\\u1ed9\\u1edb\\u1edd\\u1edf\\u1ee1\\u1ee3\\u1ee5\\u1ee7\\u1ee9\\u1eeb\\u1eed\\u1eef\\u1ef1\\u1ef3\\u1ef5\\u1ef7\\u
1ef9\\u1efb\\u1efd\\u1eff-\\u1f07\\u1f10-\\u1f15\\u1f20-\\u1f27\\u1f30-\\u1f37\\u1f40-\\u1f45\\u1f50-\\u1f57\\u1f60-\\u1f67\\u1f70-\\u1f7d\\u1f80-\\u1f87\\u1f90-\\u1f97\\u1fa0-\\u1fa7\\u1fb0-\\u1fb4\\u1fb6-\\u1fb7\\u1fbe\\u1fc2-\\u1fc4\\u1fc6-\\u1fc7\\u1fd0-\\u1fd3\\u1fd6-\\u1fd7\\u1fe0-\\u1fe7\\u1ff2-\\u1ff4\\u1ff6-\\u1ff7\\u210a\\u210e-\\u210f\\u2113\\u212f\\u2134\\u2139\\u213c-\\u213d\\u2146-\\u2149\\u214e\\u2184\\u2c30-\\u2c5e\\u2c61\\u2c65-\\u2c66\\u2c68\\u2c6a\\u2c6c\\u2c71\\u2c73-\\u2c74\\u2c76-\\u2c7b\\u2c81\\u2c83\\u2c85\\u2c87\\u2c89\\u2c8b\\u2c8d\\u2c8f\\u2c91\\u2c93\\u2c95\\u2c97\\u2c99\\u2c9b\\u2c9d\\u2c9f\\u2ca1\\u2ca3\\u2ca5\\u2ca7\\u2ca9\\u2cab\\u2cad\\u2caf\\u2cb1\\u2cb3\\u2cb5\\u2cb7\\u2cb9\\u2cbb\\u2cbd\\u2cbf\\u2cc1\\u2cc3\\u2cc5\\u2cc7\\u2cc9\\u2ccb\\u2ccd\\u2ccf\\u2cd1\\u2cd3\\u2cd5\\u2cd7\\u2cd9\\u2cdb\\u2cdd\\u2cdf\\u2ce1\\u2ce3-\\u2ce4\\u2cec\\u2cee\\u2cf3\\u2d00-\\u2d25\\u2d27\\u2d2d\\ua641\\ua643\\ua645\\ua647\\ua649\\ua64b\\ua64d\\ua64f\\ua651\\ua653\\ua655\\ua657\\ua659\\ua65b\\ua65d\\ua65f\\ua661\\ua663\\ua665\\ua667\\ua669\\ua66b\\ua66d\\ua681\\ua683\\ua685\\ua687\\ua689\\ua68b\\ua68d\\ua68f\\ua691\\ua693\\ua695\\ua697\\ua699\\ua69b\\ua723\\ua725\\ua727\\ua729\\ua72b\\ua72d\\ua72f-\\ua731\\ua733\\ua735\\ua737\\ua739\\ua73b\\ua73d\\ua73f\\ua741\\ua743\\ua745\\ua747\\ua749\\ua74b\\ua74d\\ua74f\\ua751\\ua753\\ua755\\ua757\\ua759\\ua75b\\ua75d\\ua75f\\ua761\\ua763\\ua765\\ua767\\ua769\\ua76b\\ua76d\\ua76f\\ua771-\\ua778\\ua77a\\ua77c\\ua77f\\ua781\\ua783\\ua785\\ua787\\ua78c\\ua78e\\ua791\\ua793-\\ua795\\ua797\\ua799\\ua79b\\ua79d\\ua79f\\ua7a1\\ua7a3\\ua7a5\\ua7a7\\ua7a9\\ua7af\\ua7b5\\ua7b7\\ua7b9\\ua7fa\\uab30-\\uab5a\\uab60-\\uab65\\uab70-\\uabbf\\ufb00-\\ufb06\\ufb13-\\ufb17\\uff41-\\uff5a\\U00010428-\\U0001044f\\U000104d8-\\U000104fb\\U00010cc0-\\U00010cf2\\U000118c0-\\U000118df\\U00016e60-\\U00016e7f\\U0001d41a-\\U0001d433\\U0001d44e-\\U0001d454\\U0001d456-\\U0001d467\\U0001d482-\\U0001d49b\\U0001d4b6-\\U0001d4b9\\U0001d4bb\\U0001d4bd-\\U0001d4c3\\U0001d4c5-\\U0001d4cf\\U0001d4ea-\\U0001d503\\U0001d51e-\\U0001d537\\U0001d552-\\U0001d56b\\U0001d586-\\U0001d59f\\U0001d5ba-\\U0001d5d3\\U0001d5ee-\\U0001d607\\U0001d622-\\U0001d63b\\U0001d656-\\U0001d66f\\U0001d68a-\\U0001d6a5\\U0001d6c2-\\U0001d6da\\U0001d6dc-\\U0001d6e1\\U0001d6fc-\\U0001d714\\U0001d716-\\U0001d71b\\U0001d736-\\U0001d74e\\U0001d750-\\U0001d755\\U0001d770-\\U0001d788\\U0001d78a-\\U0001d78f\\U0001d7aa-\\U0001d7c2\\U0001d7c4-\\U0001d7c9\\U0001d7cb\\U0001e922-\\U0001e943\'\n\nLm = \'\\u02b0-\\u02c1\\u02c6-\\u02d1\\u02e0-\\u02e4\\u02ec\\u02ee\\u0374\\u037a\\u0559\\u0640\\u06e5-\\u06e6\\u07f4-\\u07f5\\u07fa\\u081a\\u0824\\u0828\\u0971\\u0e46\\u0ec6\\u10fc\\u17d7\\u1843\\u1aa7\\u1c78-\\u1c7d\\u1d2c-\\u1d6a\\u1d78\\u1d9b-\\u1dbf\\u2071\\u207f\\u2090-\\u209c\\u2c7c-\\u2c7d\\u2d6f\\u2e2f\\u3005\\u3031-\\u3035\\u303b\\u309d-\\u309e\\u30fc-\\u30fe\\ua015\\ua4f8-\\ua4fd\\ua60c\\ua67f\\ua69c-\\ua69d\\ua717-\\ua71f\\ua770\\ua788\\ua7f8-\\ua7f9\\ua9cf\\ua9e6\\uaa70\\uaadd\\uaaf3-\\uaaf4\\uab5c-\\uab5f\\uff70\\uff9e-\\uff9f\\U00016b40-\\U00016b43\\U00016f93-\\U00016f9f\\U00016fe0-\\U00016fe1\'\n\nLo = 
\'\\xaa\\xba\\u01bb\\u01c0-\\u01c3\\u0294\\u05d0-\\u05ea\\u05ef-\\u05f2\\u0620-\\u063f\\u0641-\\u064a\\u066e-\\u066f\\u0671-\\u06d3\\u06d5\\u06ee-\\u06ef\\u06fa-\\u06fc\\u06ff\\u0710\\u0712-\\u072f\\u074d-\\u07a5\\u07b1\\u07ca-\\u07ea\\u0800-\\u0815\\u0840-\\u0858\\u0860-\\u086a\\u08a0-\\u08b4\\u08b6-\\u08bd\\u0904-\\u0939\\u093d\\u0950\\u0958-\\u0961\\u0972-\\u0980\\u0985-\\u098c\\u098f-\\u0990\\u0993-\\u09a8\\u09aa-\\u09b0\\u09b2\\u09b6-\\u09b9\\u09bd\\u09ce\\u09dc-\\u09dd\\u09df-\\u09e1\\u09f0-\\u09f1\\u09fc\\u0a05-\\u0a0a\\u0a0f-\\u0a10\\u0a13-\\u0a28\\u0a2a-\\u0a30\\u0a32-\\u0a33\\u0a35-\\u0a36\\u0a38-\\u0a39\\u0a59-\\u0a5c\\u0a5e\\u0a72-\\u0a74\\u0a85-\\u0a8d\\u0a8f-\\u0a91\\u0a93-\\u0aa8\\u0aaa-\\u0ab0\\u0ab2-\\u0ab3\\u0ab5-\\u0ab9\\u0abd\\u0ad0\\u0ae0-\\u0ae1\\u0af9\\u0b05-\\u0b0c\\u0b0f-\\u0b10\\u0b13-\\u0b28\\u0b2a-\\u0b30\\u0b32-\\u0b33\\u0b35-\\u0b39\\u0b3d\\u0b5c-\\u0b5d\\u0b5f-\\u0b61\\u0b71\\u0b83\\u0b85-\\u0b8a\\u0b8e-\\u0b90\\u0b92-\\u0b95\\u0b99-\\u0b9a\\u0b9c\\u0b9e-\\u0b9f\\u0ba3-\\u0ba4\\u0ba8-\\u0baa\\u0bae-\\u0bb9\\u0bd0\\u0c05-\\u0c0c\\u0c0e-\\u0c10\\u0c12-\\u0c28\\u0c2a-\\u0c39\\u0c3d\\u0c58-\\u0c5a\\u0c60-\\u0c61\\u0c80\\u0c85-\\u0c8c\\u0c8e-\\u0c90\\u0c92-\\u0ca8\\u0caa-\\u0cb3\\u0cb5-\\u0cb9\\u0cbd\\u0cde\\u0ce0-\\u0ce1\\u0cf1-\\u0cf2\\u0d05-\\u0d0c\\u0d0e-\\u0d10\\u0d12-\\u0d3a\\u0d3d\\u0d4e\\u0d54-\\u0d56\\u0d5f-\\u0d61\\u0d7a-\\u0d7f\\u0d85-\\u0d96\\u0d9a-\\u0db1\\u0db3-\\u0dbb\\u0dbd\\u0dc0-\\u0dc6\\u0e01-\\u0e30\\u0e32-\\u0e33\\u0e40-\\u0e45\\u0e81-\\u0e82\\u0e84\\u0e87-\\u0e88\\u0e8a\\u0e8d\\u0e94-\\u0e97\\u0e99-\\u0e9f\\u0ea1-\\u0ea3\\u0ea5\\u0ea7\\u0eaa-\\u0eab\\u0ead-\\u0eb0\\u0eb2-\\u0eb3\\u0ebd\\u0ec0-\\u0ec4\\u0edc-\\u0edf\\u0f00\\u0f40-\\u0f47\\u0f49-\\u0f6c\\u0f88-\\u0f8c\\u1000-\\u102a\\u103f\\u1050-\\u1055\\u105a-\\u105d\\u1061\\u1065-\\u1066\\u106e-\\u1070\\u1075-\\u1081\\u108e\\u1100-\\u1248\\u124a-\\u124d\\u1250-\\u1256\\u1258\\u125a-\\u125d\\u1260-\\u1288\\u128a-\\u128d\\u1290-\\u12b0\\u12b2-\\u12b5\\u12b8-\\u12be\\u12c0\\u12c2-\\u12c5\\u12c8-\\u12d6\\u12d8-\\u1310\\u1312-\\u1315\\u1318-\\u135a\\u1380-\\u138f\\u1401-\\u166c\\u166f-\\u167f\\u1681-\\u169a\\u16a0-\\u16ea\\u16f1-\\u16f8\\u1700-\\u170c\\u170e-\\u1711\\u1720-\\u1731\\u1740-\\u1751\\u1760-\\u176c\\u176e-\\u1770\\u1780-\\u17b3\\u17dc\\u1820-\\u1842\\u1844-\\u1878\\u1880-\\u1884\\u1887-\\u18a8\\u18aa\\u18b0-\\u18f5\\u1900-\\u191e\\u1950-\\u196d\\u1970-\\u1974\\u1980-\\u19ab\\u19b0-\\u19c9\\u1a00-\\u1a16\\u1a20-\\u1a54\\u1b05-\\u1b33\\u1b45-\\u1b4b\\u1b83-\\u1ba0\\u1bae-\\u1baf\\u1bba-\\u1be5\\u1c00-\\u1c23\\u1c4d-\\u1c4f\\u1c5a-\\u1c77\\u1ce9-\\u1cec\\u1cee-\\u1cf1\\u1cf5-\\u1cf6\\u2135-\\u2138\\u2d30-\\u2d67\\u2d80-\\u2d96\\u2da0-\\u2da6\\u2da8-\\u2dae\\u2db0-\\u2db6\\u2db8-\\u2dbe\\u2dc0-\\u2dc6\\u2dc8-\\u2dce\\u2dd0-\\u2dd6\\u2dd8-\\u2dde\\u3006\\u303c\\u3041-\\u3096\\u309f\\u30a1-\\u30fa\\u30ff\\u3105-\\u312f\\u3131-\\u318e\\u31a0-\\u31ba\\u31f0-\\u31ff\\u3400-\\u4db5\\u4e00-\\u9fef\\ua000-\\ua014\\ua016-\\ua48c\\ua4d0-\\ua4f7\\ua500-\\ua60b\\ua610-\\ua61f\\ua62a-\\ua62b\\ua66e\\ua6a0-\\ua6e5\\ua78f\\ua7f7\\ua7fb-\\ua801\\ua803-\\ua805\\ua807-\\ua80a\\ua80c-\\ua822\\ua840-\\ua873\\ua882-\\ua8b3\\ua8f2-\\ua8f7\\ua8fb\\ua8fd-\\ua8fe\\ua90a-\\ua925\\ua930-\\ua946\\ua960-\\ua97c\\ua984-\\ua9b2\\ua9e0-\\ua9e4\\ua9e7-\\ua9ef\\ua9fa-\\ua9fe\\uaa00-\\uaa28\\uaa40-\\uaa42\\uaa44-\\uaa4b\\uaa60-\\uaa6f\\uaa71-\\uaa76\\uaa7a\\uaa7e-\\uaaaf\\uaab1\\uaab5-\\uaab6\\uaab9-\\uaabd\\uaac0\\uaac2\\uaadb-\\uaadc\\uaae0-\\uaaea\\uaaf2\\uab01-\\uab06\\uab09-\\uab0e\\uab11-\\uab16\\uab20-\\uab26\\uab28-
\\uab2e\\uabc0-\\uabe2\\uac00-\\ud7a3\\ud7b0-\\ud7c6\\ud7cb-\\ud7fb\\uf900-\\ufa6d\\ufa70-\\ufad9\\ufb1d\\ufb1f-\\ufb28\\ufb2a-\\ufb36\\ufb38-\\ufb3c\\ufb3e\\ufb40-\\ufb41\\ufb43-\\ufb44\\ufb46-\\ufbb1\\ufbd3-\\ufd3d\\ufd50-\\ufd8f\\ufd92-\\ufdc7\\ufdf0-\\ufdfb\\ufe70-\\ufe74\\ufe76-\\ufefc\\uff66-\\uff6f\\uff71-\\uff9d\\uffa0-\\uffbe\\uffc2-\\uffc7\\uffca-\\uffcf\\uffd2-\\uffd7\\uffda-\\uffdc\\U00010000-\\U0001000b\\U0001000d-\\U00010026\\U00010028-\\U0001003a\\U0001003c-\\U0001003d\\U0001003f-\\U0001004d\\U00010050-\\U0001005d\\U00010080-\\U000100fa\\U00010280-\\U0001029c\\U000102a0-\\U000102d0\\U00010300-\\U0001031f\\U0001032d-\\U00010340\\U00010342-\\U00010349\\U00010350-\\U00010375\\U00010380-\\U0001039d\\U000103a0-\\U000103c3\\U000103c8-\\U000103cf\\U00010450-\\U0001049d\\U00010500-\\U00010527\\U00010530-\\U00010563\\U00010600-\\U00010736\\U00010740-\\U00010755\\U00010760-\\U00010767\\U00010800-\\U00010805\\U00010808\\U0001080a-\\U00010835\\U00010837-\\U00010838\\U0001083c\\U0001083f-\\U00010855\\U00010860-\\U00010876\\U00010880-\\U0001089e\\U000108e0-\\U000108f2\\U000108f4-\\U000108f5\\U00010900-\\U00010915\\U00010920-\\U00010939\\U00010980-\\U000109b7\\U000109be-\\U000109bf\\U00010a00\\U00010a10-\\U00010a13\\U00010a15-\\U00010a17\\U00010a19-\\U00010a35\\U00010a60-\\U00010a7c\\U00010a80-\\U00010a9c\\U00010ac0-\\U00010ac7\\U00010ac9-\\U00010ae4\\U00010b00-\\U00010b35\\U00010b40-\\U00010b55\\U00010b60-\\U00010b72\\U00010b80-\\U00010b91\\U00010c00-\\U00010c48\\U00010d00-\\U00010d23\\U00010f00-\\U00010f1c\\U00010f27\\U00010f30-\\U00010f45\\U00011003-\\U00011037\\U00011083-\\U000110af\\U000110d0-\\U000110e8\\U00011103-\\U00011126\\U00011144\\U00011150-\\U00011172\\U00011176\\U00011183-\\U000111b2\\U000111c1-\\U000111c4\\U000111da\\U000111dc\\U00011200-\\U00011211\\U00011213-\\U0001122b\\U00011280-\\U00011286\\U00011288\\U0001128a-\\U0001128d\\U0001128f-\\U0001129d\\U0001129f-\\U000112a8\\U000112b0-\\U000112de\\U00011305-\\U0001130c\\U0001130f-\\U00011310\\U00011313-\\U00011328\\U0001132a-\\U00011330\\U00011332-\\U00011333\\U00011335-\\U00011339\\U0001133d\\U00011350\\U0001135d-\\U00011361\\U00011400-\\U00011434\\U00011447-\\U0001144a\\U00011480-\\U000114af\\U000114c4-\\U000114c5\\U000114c7\\U00011580-\\U000115ae\\U000115d8-\\U000115db\\U00011600-\\U0001162f\\U00011644\\U00011680-\\U000116aa\\U00011700-\\U0001171a\\U00011800-\\U0001182b\\U000118ff\\U00011a00\\U00011a0b-\\U00011a32\\U00011a3a\\U00011a50\\U00011a5c-\\U00011a83\\U00011a86-\\U00011a89\\U00011a9d\\U00011ac0-\\U00011af8\\U00011c00-\\U00011c08\\U00011c0a-\\U00011c2e\\U00011c40\\U00011c72-\\U00011c8f\\U00011d00-\\U00011d06\\U00011d08-\\U00011d09\\U00011d0b-\\U00011d30\\U00011d46\\U00011d60-\\U00011d65\\U00011d67-\\U00011d68\\U00011d6a-\\U00011d89\\U00011d98\\U00011ee0-\\U00011ef2\\U00012000-\\U00012399\\U00012480-\\U00012543\\U00013000-\\U0001342e\\U00014400-\\U00014646\\U00016800-\\U00016a38\\U00016a40-\\U00016a5e\\U00016ad0-\\U00016aed\\U00016b00-\\U00016b2f\\U00016b63-\\U00016b77\\U00016b7d-\\U00016b8f\\U00016f00-\\U00016f44\\U00016f50\\U00017000-\\U000187f1\\U00018800-\\U00018af2\\U0001b000-\\U0001b11e\\U0001b170-\\U0001b2fb\\U0001bc00-\\U0001bc6a\\U0001bc70-\\U0001bc7c\\U0001bc80-\\U0001bc88\\U0001bc90-\\U0001bc99\\U0001e800-\\U0001e8c4\\U0001ee00-\\U0001ee03\\U0001ee05-\\U0001ee1f\\U0001ee21-\\U0001ee22\\U0001ee24\\U0001ee27\\U0001ee29-\\U0001ee32\\U0001ee34-\\U0001ee37\\U0001ee39\\U0001ee3b\\U0001ee42\\U0001ee47\\U0001ee49\\U0001ee4b\\U0001ee4d-\\U0001ee4f\\U0001ee51-\\U0001ee52\\U0001ee54\\U0001ee57\\U0001ee59\\U0001ee5b\\
U0001ee5d\\U0001ee5f\\U0001ee61-\\U0001ee62\\U0001ee64\\U0001ee67-\\U0001ee6a\\U0001ee6c-\\U0001ee72\\U0001ee74-\\U0001ee77\\U0001ee79-\\U0001ee7c\\U0001ee7e\\U0001ee80-\\U0001ee89\\U0001ee8b-\\U0001ee9b\\U0001eea1-\\U0001eea3\\U0001eea5-\\U0001eea9\\U0001eeab-\\U0001eebb\\U00020000-\\U0002a6d6\\U0002a700-\\U0002b734\\U0002b740-\\U0002b81d\\U0002b820-\\U0002cea1\\U0002ceb0-\\U0002ebe0\\U0002f800-\\U0002fa1d\'\n\nLt = \'\\u01c5\\u01c8\\u01cb\\u01f2\\u1f88-\\u1f8f\\u1f98-\\u1f9f\\u1fa8-\\u1faf\\u1fbc\\u1fcc\\u1ffc\'\n\nLu = \'A-Z\\xc0-\\xd6\\xd8-\\xde\\u0100\\u0102\\u0104\\u0106\\u0108\\u010a\\u010c\\u010e\\u0110\\u0112\\u0114\\u0116\\u0118\\u011a\\u011c\\u011e\\u0120\\u0122\\u0124\\u0126\\u0128\\u012a\\u012c\\u012e\\u0130\\u0132\\u0134\\u0136\\u0139\\u013b\\u013d\\u013f\\u0141\\u0143\\u0145\\u0147\\u014a\\u014c\\u014e\\u0150\\u0152\\u0154\\u0156\\u0158\\u015a\\u015c\\u015e\\u0160\\u0162\\u0164\\u0166\\u0168\\u016a\\u016c\\u016e\\u0170\\u0172\\u0174\\u0176\\u0178-\\u0179\\u017b\\u017d\\u0181-\\u0182\\u0184\\u0186-\\u0187\\u0189-\\u018b\\u018e-\\u0191\\u0193-\\u0194\\u0196-\\u0198\\u019c-\\u019d\\u019f-\\u01a0\\u01a2\\u01a4\\u01a6-\\u01a7\\u01a9\\u01ac\\u01ae-\\u01af\\u01b1-\\u01b3\\u01b5\\u01b7-\\u01b8\\u01bc\\u01c4\\u01c7\\u01ca\\u01cd\\u01cf\\u01d1\\u01d3\\u01d5\\u01d7\\u01d9\\u01db\\u01de\\u01e0\\u01e2\\u01e4\\u01e6\\u01e8\\u01ea\\u01ec\\u01ee\\u01f1\\u01f4\\u01f6-\\u01f8\\u01fa\\u01fc\\u01fe\\u0200\\u0202\\u0204\\u0206\\u0208\\u020a\\u020c\\u020e\\u0210\\u0212\\u0214\\u0216\\u0218\\u021a\\u021c\\u021e\\u0220\\u0222\\u0224\\u0226\\u0228\\u022a\\u022c\\u022e\\u0230\\u0232\\u023a-\\u023b\\u023d-\\u023e\\u0241\\u0243-\\u0246\\u0248\\u024a\\u024c\\u024e\\u0370\\u0372\\u0376\\u037f\\u0386\\u0388-\\u038a\\u038c\\u038e-\\u038f\\u0391-\\u03a1\\u03a3-\\u03ab\\u03cf\\u03d2-\\u03d4\\u03d8\\u03da\\u03dc\\u03de\\u03e0\\u03e2\\u03e4\\u03e6\\u03e8\\u03ea\\u03ec\\u03ee\\u03f4\\u03f7\\u03f9-\\u03fa\\u03fd-\\u042f\\u0460\\u0462\\u0464\\u0466\\u0468\\u046a\\u046c\\u046e\\u0470\\u0472\\u0474\\u0476\\u0478\\u047a\\u047c\\u047e\\u0480\\u048a\\u048c\\u048e\\u0490\\u0492\\u0494\\u0496\\u0498\\u049a\\u049c\\u049e\\u04a0\\u04a2\\u04a4\\u04a6\\u04a8\\u04aa\\u04ac\\u04ae\\u04b0\\u04b2\\u04b4\\u04b6\\u04b8\\u04ba\\u04bc\\u04be\\u04c0-\\u04c1\\u04c3\\u04c5\\u04c7\\u04c9\\u04cb\\u04cd\\u04d0\\u04d2\\u04d4\\u04d6\\u04d8\\u04da\\u04dc\\u04de\\u04e0\\u04e2\\u04e4\\u04e6\\u04e8\\u04ea\\u04ec\\u04ee\\u04f0\\u04f2\\u04f4\\u04f6\\u04f8\\u04fa\\u04fc\\u04fe\\u0500\\u0502\\u0504\\u0506\\u0508\\u050a\\u050c\\u050e\\u0510\\u0512\\u0514\\u0516\\u0518\\u051a\\u051c\\u051e\\u0520\\u0522\\u0524\\u0526\\u0528\\u052a\\u052c\\u052e\\u0531-\\u0556\\u10a0-\\u10c5\\u10c7\\u10cd\\u13a0-\\u13f5\\u1c90-\\u1cba\\u1cbd-\\u1cbf\\u1e00\\u1e02\\u1e04\\u1e06\\u1e08\\u1e0a\\u1e0c\\u1e0e\\u1e10\\u1e12\\u1e14\\u1e16\\u1e18\\u1e1a\\u1e1c\\u1e1e\\u1e20\\u1e22\\u1e24\\u1e26\\u1e28\\u1e2a\\u1e2c\\u1e2e\\u1e30\\u1e32\\u1e34\\u1e36\\u1e38\\u1e3a\\u1e3c\\u1e3e\\u1e40\\u1e42\\u1e44\\u1e46\\u1e48\\u1e4a\\u1e4c\\u1e4e\\u1e50\\u1e52\\u1e54\\u1e56\\u1e58\\u1e5a\\u1e5c\\u1e5e\\u1e60\\u1e62\\u1e64\\u1e66\\u1e68\\u1e6a\\u1e6c\\u1e6e\\u1e70\\u1e72\\u1e74\\u1e76\\u1e78\\u1e7a\\u1e7c\\u1e7e\\u1e80\\u1e82\\u1e84\\u1e86\\u1e88\\u1e8a\\u1e8c\\u1e8e\\u1e90\\u1e92\\u1e94\\u1e9e\\u1ea0\\u1ea2\\u1ea4\\u1ea6\\u1ea8\\u1eaa\\u1eac\\u1eae\\u1eb0\\u1eb2\\u1eb4\\u1eb6\\u1eb8\\u1eba\\u1ebc\\u1ebe\\u1ec0\\u1ec2\\u1ec4\\u1ec6\\u1ec8\\u1eca\\u1ecc\\u1ece\\u1ed0\\u1ed2\\u1ed4\\u1ed6\\u1ed8\\u1eda\\u1edc\\u1ede\\u1ee0\\u1ee2\\u1ee4\\u1ee6\\u1ee8\\u1eea\\u1eec\\u1eee\\u1ef0\\u1ef2\\u1ef4\\
u1ef6\\u1ef8\\u1efa\\u1efc\\u1efe\\u1f08-\\u1f0f\\u1f18-\\u1f1d\\u1f28-\\u1f2f\\u1f38-\\u1f3f\\u1f48-\\u1f4d\\u1f59\\u1f5b\\u1f5d\\u1f5f\\u1f68-\\u1f6f\\u1fb8-\\u1fbb\\u1fc8-\\u1fcb\\u1fd8-\\u1fdb\\u1fe8-\\u1fec\\u1ff8-\\u1ffb\\u2102\\u2107\\u210b-\\u210d\\u2110-\\u2112\\u2115\\u2119-\\u211d\\u2124\\u2126\\u2128\\u212a-\\u212d\\u2130-\\u2133\\u213e-\\u213f\\u2145\\u2183\\u2c00-\\u2c2e\\u2c60\\u2c62-\\u2c64\\u2c67\\u2c69\\u2c6b\\u2c6d-\\u2c70\\u2c72\\u2c75\\u2c7e-\\u2c80\\u2c82\\u2c84\\u2c86\\u2c88\\u2c8a\\u2c8c\\u2c8e\\u2c90\\u2c92\\u2c94\\u2c96\\u2c98\\u2c9a\\u2c9c\\u2c9e\\u2ca0\\u2ca2\\u2ca4\\u2ca6\\u2ca8\\u2caa\\u2cac\\u2cae\\u2cb0\\u2cb2\\u2cb4\\u2cb6\\u2cb8\\u2cba\\u2cbc\\u2cbe\\u2cc0\\u2cc2\\u2cc4\\u2cc6\\u2cc8\\u2cca\\u2ccc\\u2cce\\u2cd0\\u2cd2\\u2cd4\\u2cd6\\u2cd8\\u2cda\\u2cdc\\u2cde\\u2ce0\\u2ce2\\u2ceb\\u2ced\\u2cf2\\ua640\\ua642\\ua644\\ua646\\ua648\\ua64a\\ua64c\\ua64e\\ua650\\ua652\\ua654\\ua656\\ua658\\ua65a\\ua65c\\ua65e\\ua660\\ua662\\ua664\\ua666\\ua668\\ua66a\\ua66c\\ua680\\ua682\\ua684\\ua686\\ua688\\ua68a\\ua68c\\ua68e\\ua690\\ua692\\ua694\\ua696\\ua698\\ua69a\\ua722\\ua724\\ua726\\ua728\\ua72a\\ua72c\\ua72e\\ua732\\ua734\\ua736\\ua738\\ua73a\\ua73c\\ua73e\\ua740\\ua742\\ua744\\ua746\\ua748\\ua74a\\ua74c\\ua74e\\ua750\\ua752\\ua754\\ua756\\ua758\\ua75a\\ua75c\\ua75e\\ua760\\ua762\\ua764\\ua766\\ua768\\ua76a\\ua76c\\ua76e\\ua779\\ua77b\\ua77d-\\ua77e\\ua780\\ua782\\ua784\\ua786\\ua78b\\ua78d\\ua790\\ua792\\ua796\\ua798\\ua79a\\ua79c\\ua79e\\ua7a0\\ua7a2\\ua7a4\\ua7a6\\ua7a8\\ua7aa-\\ua7ae\\ua7b0-\\ua7b4\\ua7b6\\ua7b8\\uff21-\\uff3a\\U00010400-\\U00010427\\U000104b0-\\U000104d3\\U00010c80-\\U00010cb2\\U000118a0-\\U000118bf\\U00016e40-\\U00016e5f\\U0001d400-\\U0001d419\\U0001d434-\\U0001d44d\\U0001d468-\\U0001d481\\U0001d49c\\U0001d49e-\\U0001d49f\\U0001d4a2\\U0001d4a5-\\U0001d4a6\\U0001d4a9-\\U0001d4ac\\U0001d4ae-\\U0001d4b5\\U0001d4d0-\\U0001d4e9\\U0001d504-\\U0001d505\\U0001d507-\\U0001d50a\\U0001d50d-\\U0001d514\\U0001d516-\\U0001d51c\\U0001d538-\\U0001d539\\U0001d53b-\\U0001d53e\\U0001d540-\\U0001d544\\U0001d546\\U0001d54a-\\U0001d550\\U0001d56c-\\U0001d585\\U0001d5a0-\\U0001d5b9\\U0001d5d4-\\U0001d5ed\\U0001d608-\\U0001d621\\U0001d63c-\\U0001d655\\U0001d670-\\U0001d689\\U0001d6a8-\\U0001d6c0\\U0001d6e2-\\U0001d6fa\\U0001d71c-\\U0001d734\\U0001d756-\\U0001d76e\\U0001d790-\\U0001d7a8\\U0001d7ca\\U0001e900-\\U0001e921\'\n\nMc = 
\'\\u0903\\u093b\\u093e-\\u0940\\u0949-\\u094c\\u094e-\\u094f\\u0982-\\u0983\\u09be-\\u09c0\\u09c7-\\u09c8\\u09cb-\\u09cc\\u09d7\\u0a03\\u0a3e-\\u0a40\\u0a83\\u0abe-\\u0ac0\\u0ac9\\u0acb-\\u0acc\\u0b02-\\u0b03\\u0b3e\\u0b40\\u0b47-\\u0b48\\u0b4b-\\u0b4c\\u0b57\\u0bbe-\\u0bbf\\u0bc1-\\u0bc2\\u0bc6-\\u0bc8\\u0bca-\\u0bcc\\u0bd7\\u0c01-\\u0c03\\u0c41-\\u0c44\\u0c82-\\u0c83\\u0cbe\\u0cc0-\\u0cc4\\u0cc7-\\u0cc8\\u0cca-\\u0ccb\\u0cd5-\\u0cd6\\u0d02-\\u0d03\\u0d3e-\\u0d40\\u0d46-\\u0d48\\u0d4a-\\u0d4c\\u0d57\\u0d82-\\u0d83\\u0dcf-\\u0dd1\\u0dd8-\\u0ddf\\u0df2-\\u0df3\\u0f3e-\\u0f3f\\u0f7f\\u102b-\\u102c\\u1031\\u1038\\u103b-\\u103c\\u1056-\\u1057\\u1062-\\u1064\\u1067-\\u106d\\u1083-\\u1084\\u1087-\\u108c\\u108f\\u109a-\\u109c\\u17b6\\u17be-\\u17c5\\u17c7-\\u17c8\\u1923-\\u1926\\u1929-\\u192b\\u1930-\\u1931\\u1933-\\u1938\\u1a19-\\u1a1a\\u1a55\\u1a57\\u1a61\\u1a63-\\u1a64\\u1a6d-\\u1a72\\u1b04\\u1b35\\u1b3b\\u1b3d-\\u1b41\\u1b43-\\u1b44\\u1b82\\u1ba1\\u1ba6-\\u1ba7\\u1baa\\u1be7\\u1bea-\\u1bec\\u1bee\\u1bf2-\\u1bf3\\u1c24-\\u1c2b\\u1c34-\\u1c35\\u1ce1\\u1cf2-\\u1cf3\\u1cf7\\u302e-\\u302f\\ua823-\\ua824\\ua827\\ua880-\\ua881\\ua8b4-\\ua8c3\\ua952-\\ua953\\ua983\\ua9b4-\\ua9b5\\ua9ba-\\ua9bb\\ua9bd-\\ua9c0\\uaa2f-\\uaa30\\uaa33-\\uaa34\\uaa4d\\uaa7b\\uaa7d\\uaaeb\\uaaee-\\uaaef\\uaaf5\\uabe3-\\uabe4\\uabe6-\\uabe7\\uabe9-\\uabea\\uabec\\U00011000\\U00011002\\U00011082\\U000110b0-\\U000110b2\\U000110b7-\\U000110b8\\U0001112c\\U00011145-\\U00011146\\U00011182\\U000111b3-\\U000111b5\\U000111bf-\\U000111c0\\U0001122c-\\U0001122e\\U00011232-\\U00011233\\U00011235\\U000112e0-\\U000112e2\\U00011302-\\U00011303\\U0001133e-\\U0001133f\\U00011341-\\U00011344\\U00011347-\\U00011348\\U0001134b-\\U0001134d\\U00011357\\U00011362-\\U00011363\\U00011435-\\U00011437\\U00011440-\\U00011441\\U00011445\\U000114b0-\\U000114b2\\U000114b9\\U000114bb-\\U000114be\\U000114c1\\U000115af-\\U000115b1\\U000115b8-\\U000115bb\\U000115be\\U00011630-\\U00011632\\U0001163b-\\U0001163c\\U0001163e\\U000116ac\\U000116ae-\\U000116af\\U000116b6\\U00011720-\\U00011721\\U00011726\\U0001182c-\\U0001182e\\U00011838\\U00011a39\\U00011a57-\\U00011a58\\U00011a97\\U00011c2f\\U00011c3e\\U00011ca9\\U00011cb1\\U00011cb4\\U00011d8a-\\U00011d8e\\U00011d93-\\U00011d94\\U00011d96\\U00011ef5-\\U00011ef6\\U00016f51-\\U00016f7e\\U0001d165-\\U0001d166\\U0001d16d-\\U0001d172\'\n\nMe = \'\\u0488-\\u0489\\u1abe\\u20dd-\\u20e0\\u20e2-\\u20e4\\ua670-\\ua672\'\n\nMn = 
\'\\u0300-\\u036f\\u0483-\\u0487\\u0591-\\u05bd\\u05bf\\u05c1-\\u05c2\\u05c4-\\u05c5\\u05c7\\u0610-\\u061a\\u064b-\\u065f\\u0670\\u06d6-\\u06dc\\u06df-\\u06e4\\u06e7-\\u06e8\\u06ea-\\u06ed\\u0711\\u0730-\\u074a\\u07a6-\\u07b0\\u07eb-\\u07f3\\u07fd\\u0816-\\u0819\\u081b-\\u0823\\u0825-\\u0827\\u0829-\\u082d\\u0859-\\u085b\\u08d3-\\u08e1\\u08e3-\\u0902\\u093a\\u093c\\u0941-\\u0948\\u094d\\u0951-\\u0957\\u0962-\\u0963\\u0981\\u09bc\\u09c1-\\u09c4\\u09cd\\u09e2-\\u09e3\\u09fe\\u0a01-\\u0a02\\u0a3c\\u0a41-\\u0a42\\u0a47-\\u0a48\\u0a4b-\\u0a4d\\u0a51\\u0a70-\\u0a71\\u0a75\\u0a81-\\u0a82\\u0abc\\u0ac1-\\u0ac5\\u0ac7-\\u0ac8\\u0acd\\u0ae2-\\u0ae3\\u0afa-\\u0aff\\u0b01\\u0b3c\\u0b3f\\u0b41-\\u0b44\\u0b4d\\u0b56\\u0b62-\\u0b63\\u0b82\\u0bc0\\u0bcd\\u0c00\\u0c04\\u0c3e-\\u0c40\\u0c46-\\u0c48\\u0c4a-\\u0c4d\\u0c55-\\u0c56\\u0c62-\\u0c63\\u0c81\\u0cbc\\u0cbf\\u0cc6\\u0ccc-\\u0ccd\\u0ce2-\\u0ce3\\u0d00-\\u0d01\\u0d3b-\\u0d3c\\u0d41-\\u0d44\\u0d4d\\u0d62-\\u0d63\\u0dca\\u0dd2-\\u0dd4\\u0dd6\\u0e31\\u0e34-\\u0e3a\\u0e47-\\u0e4e\\u0eb1\\u0eb4-\\u0eb9\\u0ebb-\\u0ebc\\u0ec8-\\u0ecd\\u0f18-\\u0f19\\u0f35\\u0f37\\u0f39\\u0f71-\\u0f7e\\u0f80-\\u0f84\\u0f86-\\u0f87\\u0f8d-\\u0f97\\u0f99-\\u0fbc\\u0fc6\\u102d-\\u1030\\u1032-\\u1037\\u1039-\\u103a\\u103d-\\u103e\\u1058-\\u1059\\u105e-\\u1060\\u1071-\\u1074\\u1082\\u1085-\\u1086\\u108d\\u109d\\u135d-\\u135f\\u1712-\\u1714\\u1732-\\u1734\\u1752-\\u1753\\u1772-\\u1773\\u17b4-\\u17b5\\u17b7-\\u17bd\\u17c6\\u17c9-\\u17d3\\u17dd\\u180b-\\u180d\\u1885-\\u1886\\u18a9\\u1920-\\u1922\\u1927-\\u1928\\u1932\\u1939-\\u193b\\u1a17-\\u1a18\\u1a1b\\u1a56\\u1a58-\\u1a5e\\u1a60\\u1a62\\u1a65-\\u1a6c\\u1a73-\\u1a7c\\u1a7f\\u1ab0-\\u1abd\\u1b00-\\u1b03\\u1b34\\u1b36-\\u1b3a\\u1b3c\\u1b42\\u1b6b-\\u1b73\\u1b80-\\u1b81\\u1ba2-\\u1ba5\\u1ba8-\\u1ba9\\u1bab-\\u1bad\\u1be6\\u1be8-\\u1be9\\u1bed\\u1bef-\\u1bf1\\u1c2c-\\u1c33\\u1c36-\\u1c37\\u1cd0-\\u1cd2\\u1cd4-\\u1ce0\\u1ce2-\\u1ce8\\u1ced\\u1cf4\\u1cf8-\\u1cf9\\u1dc0-\\u1df9\\u1dfb-\\u1dff\\u20d0-\\u20dc\\u20e1\\u20e5-\\u20f0\\u2cef-\\u2cf1\\u2d7f\\u2de0-\\u2dff\\u302a-\\u302d\\u3099-\\u309a\\ua66f\\ua674-\\ua67d\\ua69e-\\ua69f\\ua6f0-\\ua6f1\\ua802\\ua806\\ua80b\\ua825-\\ua826\\ua8c4-\\ua8c5\\ua8e0-\\ua8f1\\ua8ff\\ua926-\\ua92d\\ua947-\\ua951\\ua980-\\ua982\\ua9b3\\ua9b6-\\ua9b9\\ua9bc\\ua9e5\\uaa29-\\uaa2e\\uaa31-\\uaa32\\uaa35-\\uaa36\\uaa43\\uaa4c\\uaa7c\\uaab0\\uaab2-\\uaab4\\uaab7-\\uaab8\\uaabe-\\uaabf\\uaac1\\uaaec-\\uaaed\\uaaf6\\uabe5\\uabe8\\uabed\\ufb1e\\ufe00-\\ufe0f\\ufe20-\\ufe2f\\U000101fd\\U000102e0\\U00010376-\\U0001037a\\U00010a01-\\U00010a03\\U00010a05-\\U00010a06\\U00010a0c-\\U00010a0f\\U00010a38-\\U00010a3a\\U00010a3f\\U00010ae5-\\U00010ae6\\U00010d24-\\U00010d27\\U00010f46-\\U00010f50\\U00011001\\U00011038-\\U00011046\\U0001107f-\\U00011081\\U000110b3-\\U000110b6\\U000110b9-\\U000110ba\\U00011100-\\U00011102\\U00011127-\\U0001112b\\U0001112d-\\U00011134\\U00011173\\U00011180-\\U00011181\\U000111b6-\\U000111be\\U000111c9-\\U000111cc\\U0001122f-\\U00011231\\U00011234\\U00011236-\\U00011237\\U0001123e\\U000112df\\U000112e3-\\U000112ea\\U00011300-\\U00011301\\U0001133b-\\U0001133c\\U00011340\\U00011366-\\U0001136c\\U00011370-\\U00011374\\U00011438-\\U0001143f\\U00011442-\\U00011444\\U00011446\\U0001145e\\U000114b3-\\U000114b8\\U000114ba\\U000114bf-\\U000114c0\\U000114c2-\\U000114c3\\U000115b2-\\U000115b5\\U000115bc-\\U000115bd\\U000115bf-\\U000115c0\\U000115dc-\\U000115dd\\U00011633-\\U0001163a\\U0001163d\\U0001163f-\\U00011640\\U000116ab\\U000116ad\\U000116b0-\\U000116b5\\U000116b7\\U0001171d-\\U0001171f\\U00011722-\\U
00011725\\U00011727-\\U0001172b\\U0001182f-\\U00011837\\U00011839-\\U0001183a\\U00011a01-\\U00011a0a\\U00011a33-\\U00011a38\\U00011a3b-\\U00011a3e\\U00011a47\\U00011a51-\\U00011a56\\U00011a59-\\U00011a5b\\U00011a8a-\\U00011a96\\U00011a98-\\U00011a99\\U00011c30-\\U00011c36\\U00011c38-\\U00011c3d\\U00011c3f\\U00011c92-\\U00011ca7\\U00011caa-\\U00011cb0\\U00011cb2-\\U00011cb3\\U00011cb5-\\U00011cb6\\U00011d31-\\U00011d36\\U00011d3a\\U00011d3c-\\U00011d3d\\U00011d3f-\\U00011d45\\U00011d47\\U00011d90-\\U00011d91\\U00011d95\\U00011d97\\U00011ef3-\\U00011ef4\\U00016af0-\\U00016af4\\U00016b30-\\U00016b36\\U00016f8f-\\U00016f92\\U0001bc9d-\\U0001bc9e\\U0001d167-\\U0001d169\\U0001d17b-\\U0001d182\\U0001d185-\\U0001d18b\\U0001d1aa-\\U0001d1ad\\U0001d242-\\U0001d244\\U0001da00-\\U0001da36\\U0001da3b-\\U0001da6c\\U0001da75\\U0001da84\\U0001da9b-\\U0001da9f\\U0001daa1-\\U0001daaf\\U0001e000-\\U0001e006\\U0001e008-\\U0001e018\\U0001e01b-\\U0001e021\\U0001e023-\\U0001e024\\U0001e026-\\U0001e02a\\U0001e8d0-\\U0001e8d6\\U0001e944-\\U0001e94a\\U000e0100-\\U000e01ef\'\n\nNd = \'0-9\\u0660-\\u0669\\u06f0-\\u06f9\\u07c0-\\u07c9\\u0966-\\u096f\\u09e6-\\u09ef\\u0a66-\\u0a6f\\u0ae6-\\u0aef\\u0b66-\\u0b6f\\u0be6-\\u0bef\\u0c66-\\u0c6f\\u0ce6-\\u0cef\\u0d66-\\u0d6f\\u0de6-\\u0def\\u0e50-\\u0e59\\u0ed0-\\u0ed9\\u0f20-\\u0f29\\u1040-\\u1049\\u1090-\\u1099\\u17e0-\\u17e9\\u1810-\\u1819\\u1946-\\u194f\\u19d0-\\u19d9\\u1a80-\\u1a89\\u1a90-\\u1a99\\u1b50-\\u1b59\\u1bb0-\\u1bb9\\u1c40-\\u1c49\\u1c50-\\u1c59\\ua620-\\ua629\\ua8d0-\\ua8d9\\ua900-\\ua909\\ua9d0-\\ua9d9\\ua9f0-\\ua9f9\\uaa50-\\uaa59\\uabf0-\\uabf9\\uff10-\\uff19\\U000104a0-\\U000104a9\\U00010d30-\\U00010d39\\U00011066-\\U0001106f\\U000110f0-\\U000110f9\\U00011136-\\U0001113f\\U000111d0-\\U000111d9\\U000112f0-\\U000112f9\\U00011450-\\U00011459\\U000114d0-\\U000114d9\\U00011650-\\U00011659\\U000116c0-\\U000116c9\\U00011730-\\U00011739\\U000118e0-\\U000118e9\\U00011c50-\\U00011c59\\U00011d50-\\U00011d59\\U00011da0-\\U00011da9\\U00016a60-\\U00016a69\\U00016b50-\\U00016b59\\U0001d7ce-\\U0001d7ff\\U0001e950-\\U0001e959\'\n\nNl = \'\\u16ee-\\u16f0\\u2160-\\u2182\\u2185-\\u2188\\u3007\\u3021-\\u3029\\u3038-\\u303a\\ua6e6-\\ua6ef\\U00010140-\\U00010174\\U00010341\\U0001034a\\U000103d1-\\U000103d5\\U00012400-\\U0001246e\'\n\nNo = 
\'\\xb2-\\xb3\\xb9\\xbc-\\xbe\\u09f4-\\u09f9\\u0b72-\\u0b77\\u0bf0-\\u0bf2\\u0c78-\\u0c7e\\u0d58-\\u0d5e\\u0d70-\\u0d78\\u0f2a-\\u0f33\\u1369-\\u137c\\u17f0-\\u17f9\\u19da\\u2070\\u2074-\\u2079\\u2080-\\u2089\\u2150-\\u215f\\u2189\\u2460-\\u249b\\u24ea-\\u24ff\\u2776-\\u2793\\u2cfd\\u3192-\\u3195\\u3220-\\u3229\\u3248-\\u324f\\u3251-\\u325f\\u3280-\\u3289\\u32b1-\\u32bf\\ua830-\\ua835\\U00010107-\\U00010133\\U00010175-\\U00010178\\U0001018a-\\U0001018b\\U000102e1-\\U000102fb\\U00010320-\\U00010323\\U00010858-\\U0001085f\\U00010879-\\U0001087f\\U000108a7-\\U000108af\\U000108fb-\\U000108ff\\U00010916-\\U0001091b\\U000109bc-\\U000109bd\\U000109c0-\\U000109cf\\U000109d2-\\U000109ff\\U00010a40-\\U00010a48\\U00010a7d-\\U00010a7e\\U00010a9d-\\U00010a9f\\U00010aeb-\\U00010aef\\U00010b58-\\U00010b5f\\U00010b78-\\U00010b7f\\U00010ba9-\\U00010baf\\U00010cfa-\\U00010cff\\U00010e60-\\U00010e7e\\U00010f1d-\\U00010f26\\U00010f51-\\U00010f54\\U00011052-\\U00011065\\U000111e1-\\U000111f4\\U0001173a-\\U0001173b\\U000118ea-\\U000118f2\\U00011c5a-\\U00011c6c\\U00016b5b-\\U00016b61\\U00016e80-\\U00016e96\\U0001d2e0-\\U0001d2f3\\U0001d360-\\U0001d378\\U0001e8c7-\\U0001e8cf\\U0001ec71-\\U0001ecab\\U0001ecad-\\U0001ecaf\\U0001ecb1-\\U0001ecb4\\U0001f100-\\U0001f10c\'\n\nPc = \'_\\u203f-\\u2040\\u2054\\ufe33-\\ufe34\\ufe4d-\\ufe4f\\uff3f\'\n\nPd = \'\\\\-\\u058a\\u05be\\u1400\\u1806\\u2010-\\u2015\\u2e17\\u2e1a\\u2e3a-\\u2e3b\\u2e40\\u301c\\u3030\\u30a0\\ufe31-\\ufe32\\ufe58\\ufe63\\uff0d\'\n\nPe = \')\\\\]}\\u0f3b\\u0f3d\\u169c\\u2046\\u207e\\u208e\\u2309\\u230b\\u232a\\u2769\\u276b\\u276d\\u276f\\u2771\\u2773\\u2775\\u27c6\\u27e7\\u27e9\\u27eb\\u27ed\\u27ef\\u2984\\u2986\\u2988\\u298a\\u298c\\u298e\\u2990\\u2992\\u2994\\u2996\\u2998\\u29d9\\u29db\\u29fd\\u2e23\\u2e25\\u2e27\\u2e29\\u3009\\u300b\\u300d\\u300f\\u3011\\u3015\\u3017\\u3019\\u301b\\u301e-\\u301f\\ufd3e\\ufe18\\ufe36\\ufe38\\ufe3a\\ufe3c\\ufe3e\\ufe40\\ufe42\\ufe44\\ufe48\\ufe5a\\ufe5c\\ufe5e\\uff09\\uff3d\\uff5d\\uff60\\uff63\'\n\nPf = \'\\xbb\\u2019\\u201d\\u203a\\u2e03\\u2e05\\u2e0a\\u2e0d\\u2e1d\\u2e21\'\n\nPi = \'\\xab\\u2018\\u201b-\\u201c\\u201f\\u2039\\u2e02\\u2e04\\u2e09\\u2e0c\\u2e1c\\u2e20\'\n\nPo = 
"!-#%-\'*,.-/:-;?-@\\\\\\\\\\xa1\\xa7\\xb6-\\xb7\\xbf\\u037e\\u0387\\u055a-\\u055f\\u0589\\u05c0\\u05c3\\u05c6\\u05f3-\\u05f4\\u0609-\\u060a\\u060c-\\u060d\\u061b\\u061e-\\u061f\\u066a-\\u066d\\u06d4\\u0700-\\u070d\\u07f7-\\u07f9\\u0830-\\u083e\\u085e\\u0964-\\u0965\\u0970\\u09fd\\u0a76\\u0af0\\u0c84\\u0df4\\u0e4f\\u0e5a-\\u0e5b\\u0f04-\\u0f12\\u0f14\\u0f85\\u0fd0-\\u0fd4\\u0fd9-\\u0fda\\u104a-\\u104f\\u10fb\\u1360-\\u1368\\u166d-\\u166e\\u16eb-\\u16ed\\u1735-\\u1736\\u17d4-\\u17d6\\u17d8-\\u17da\\u1800-\\u1805\\u1807-\\u180a\\u1944-\\u1945\\u1a1e-\\u1a1f\\u1aa0-\\u1aa6\\u1aa8-\\u1aad\\u1b5a-\\u1b60\\u1bfc-\\u1bff\\u1c3b-\\u1c3f\\u1c7e-\\u1c7f\\u1cc0-\\u1cc7\\u1cd3\\u2016-\\u2017\\u2020-\\u2027\\u2030-\\u2038\\u203b-\\u203e\\u2041-\\u2043\\u2047-\\u2051\\u2053\\u2055-\\u205e\\u2cf9-\\u2cfc\\u2cfe-\\u2cff\\u2d70\\u2e00-\\u2e01\\u2e06-\\u2e08\\u2e0b\\u2e0e-\\u2e16\\u2e18-\\u2e19\\u2e1b\\u2e1e-\\u2e1f\\u2e2a-\\u2e2e\\u2e30-\\u2e39\\u2e3c-\\u2e3f\\u2e41\\u2e43-\\u2e4e\\u3001-\\u3003\\u303d\\u30fb\\ua4fe-\\ua4ff\\ua60d-\\ua60f\\ua673\\ua67e\\ua6f2-\\ua6f7\\ua874-\\ua877\\ua8ce-\\ua8cf\\ua8f8-\\ua8fa\\ua8fc\\ua92e-\\ua92f\\ua95f\\ua9c1-\\ua9cd\\ua9de-\\ua9df\\uaa5c-\\uaa5f\\uaade-\\uaadf\\uaaf0-\\uaaf1\\uabeb\\ufe10-\\ufe16\\ufe19\\ufe30\\ufe45-\\ufe46\\ufe49-\\ufe4c\\ufe50-\\ufe52\\ufe54-\\ufe57\\ufe5f-\\ufe61\\ufe68\\ufe6a-\\ufe6b\\uff01-\\uff03\\uff05-\\uff07\\uff0a\\uff0c\\uff0e-\\uff0f\\uff1a-\\uff1b\\uff1f-\\uff20\\uff3c\\uff61\\uff64-\\uff65\\U00010100-\\U00010102\\U0001039f\\U000103d0\\U0001056f\\U00010857\\U0001091f\\U0001093f\\U00010a50-\\U00010a58\\U00010a7f\\U00010af0-\\U00010af6\\U00010b39-\\U00010b3f\\U00010b99-\\U00010b9c\\U00010f55-\\U00010f59\\U00011047-\\U0001104d\\U000110bb-\\U000110bc\\U000110be-\\U000110c1\\U00011140-\\U00011143\\U00011174-\\U00011175\\U000111c5-\\U000111c8\\U000111cd\\U000111db\\U000111dd-\\U000111df\\U00011238-\\U0001123d\\U000112a9\\U0001144b-\\U0001144f\\U0001145b\\U0001145d\\U000114c6\\U000115c1-\\U000115d7\\U00011641-\\U00011643\\U00011660-\\U0001166c\\U0001173c-\\U0001173e\\U0001183b\\U00011a3f-\\U00011a46\\U00011a9a-\\U00011a9c\\U00011a9e-\\U00011aa2\\U00011c41-\\U00011c45\\U00011c70-\\U00011c71\\U00011ef7-\\U00011ef8\\U00012470-\\U00012474\\U00016a6e-\\U00016a6f\\U00016af5\\U00016b37-\\U00016b3b\\U00016b44\\U00016e97-\\U00016e9a\\U0001bc9f\\U0001da87-\\U0001da8b\\U0001e95e-\\U0001e95f"\n\nPs = \'(\\\\[{\\u0f3a\\u0f3c\\u169b\\u201a\\u201e\\u2045\\u207d\\u208d\\u2308\\u230a\\u2329\\u2768\\u276a\\u276c\\u276e\\u2770\\u2772\\u2774\\u27c5\\u27e6\\u27e8\\u27ea\\u27ec\\u27ee\\u2983\\u2985\\u2987\\u2989\\u298b\\u298d\\u298f\\u2991\\u2993\\u2995\\u2997\\u29d8\\u29da\\u29fc\\u2e22\\u2e24\\u2e26\\u2e28\\u2e42\\u3008\\u300a\\u300c\\u300e\\u3010\\u3014\\u3016\\u3018\\u301a\\u301d\\ufd3f\\ufe17\\ufe35\\ufe37\\ufe39\\ufe3b\\ufe3d\\ufe3f\\ufe41\\ufe43\\ufe47\\ufe59\\ufe5b\\ufe5d\\uff08\\uff3b\\uff5b\\uff5f\\uff62\'\n\nSc = \'$\\xa2-\\xa5\\u058f\\u060b\\u07fe-\\u07ff\\u09f2-\\u09f3\\u09fb\\u0af1\\u0bf9\\u0e3f\\u17db\\u20a0-\\u20bf\\ua838\\ufdfc\\ufe69\\uff04\\uffe0-\\uffe1\\uffe5-\\uffe6\\U0001ecb0\'\n\nSk = \'\\\\^`\\xa8\\xaf\\xb4\\xb8\\u02c2-\\u02c5\\u02d2-\\u02df\\u02e5-\\u02eb\\u02ed\\u02ef-\\u02ff\\u0375\\u0384-\\u0385\\u1fbd\\u1fbf-\\u1fc1\\u1fcd-\\u1fcf\\u1fdd-\\u1fdf\\u1fed-\\u1fef\\u1ffd-\\u1ffe\\u309b-\\u309c\\ua700-\\ua716\\ua720-\\ua721\\ua789-\\ua78a\\uab5b\\ufbb2-\\ufbc1\\uff3e\\uff40\\uffe3\\U0001f3fb-\\U0001f3ff\'\n\nSm = 
\'+<->|~\\xac\\xb1\\xd7\\xf7\\u03f6\\u0606-\\u0608\\u2044\\u2052\\u207a-\\u207c\\u208a-\\u208c\\u2118\\u2140-\\u2144\\u214b\\u2190-\\u2194\\u219a-\\u219b\\u21a0\\u21a3\\u21a6\\u21ae\\u21ce-\\u21cf\\u21d2\\u21d4\\u21f4-\\u22ff\\u2320-\\u2321\\u237c\\u239b-\\u23b3\\u23dc-\\u23e1\\u25b7\\u25c1\\u25f8-\\u25ff\\u266f\\u27c0-\\u27c4\\u27c7-\\u27e5\\u27f0-\\u27ff\\u2900-\\u2982\\u2999-\\u29d7\\u29dc-\\u29fb\\u29fe-\\u2aff\\u2b30-\\u2b44\\u2b47-\\u2b4c\\ufb29\\ufe62\\ufe64-\\ufe66\\uff0b\\uff1c-\\uff1e\\uff5c\\uff5e\\uffe2\\uffe9-\\uffec\\U0001d6c1\\U0001d6db\\U0001d6fb\\U0001d715\\U0001d735\\U0001d74f\\U0001d76f\\U0001d789\\U0001d7a9\\U0001d7c3\\U0001eef0-\\U0001eef1\'\n\nSo = \'\\xa6\\xa9\\xae\\xb0\\u0482\\u058d-\\u058e\\u060e-\\u060f\\u06de\\u06e9\\u06fd-\\u06fe\\u07f6\\u09fa\\u0b70\\u0bf3-\\u0bf8\\u0bfa\\u0c7f\\u0d4f\\u0d79\\u0f01-\\u0f03\\u0f13\\u0f15-\\u0f17\\u0f1a-\\u0f1f\\u0f34\\u0f36\\u0f38\\u0fbe-\\u0fc5\\u0fc7-\\u0fcc\\u0fce-\\u0fcf\\u0fd5-\\u0fd8\\u109e-\\u109f\\u1390-\\u1399\\u1940\\u19de-\\u19ff\\u1b61-\\u1b6a\\u1b74-\\u1b7c\\u2100-\\u2101\\u2103-\\u2106\\u2108-\\u2109\\u2114\\u2116-\\u2117\\u211e-\\u2123\\u2125\\u2127\\u2129\\u212e\\u213a-\\u213b\\u214a\\u214c-\\u214d\\u214f\\u218a-\\u218b\\u2195-\\u2199\\u219c-\\u219f\\u21a1-\\u21a2\\u21a4-\\u21a5\\u21a7-\\u21ad\\u21af-\\u21cd\\u21d0-\\u21d1\\u21d3\\u21d5-\\u21f3\\u2300-\\u2307\\u230c-\\u231f\\u2322-\\u2328\\u232b-\\u237b\\u237d-\\u239a\\u23b4-\\u23db\\u23e2-\\u2426\\u2440-\\u244a\\u249c-\\u24e9\\u2500-\\u25b6\\u25b8-\\u25c0\\u25c2-\\u25f7\\u2600-\\u266e\\u2670-\\u2767\\u2794-\\u27bf\\u2800-\\u28ff\\u2b00-\\u2b2f\\u2b45-\\u2b46\\u2b4d-\\u2b73\\u2b76-\\u2b95\\u2b98-\\u2bc8\\u2bca-\\u2bfe\\u2ce5-\\u2cea\\u2e80-\\u2e99\\u2e9b-\\u2ef3\\u2f00-\\u2fd5\\u2ff0-\\u2ffb\\u3004\\u3012-\\u3013\\u3020\\u3036-\\u3037\\u303e-\\u303f\\u3190-\\u3191\\u3196-\\u319f\\u31c0-\\u31e3\\u3200-\\u321e\\u322a-\\u3247\\u3250\\u3260-\\u327f\\u328a-\\u32b0\\u32c0-\\u32fe\\u3300-\\u33ff\\u4dc0-\\u4dff\\ua490-\\ua4c6\\ua828-\\ua82b\\ua836-\\ua837\\ua839\\uaa77-\\uaa79\\ufdfd\\uffe4\\uffe8\\uffed-\\uffee\\ufffc-\\ufffd\\U00010137-\\U0001013f\\U00010179-\\U00010189\\U0001018c-\\U0001018e\\U00010190-\\U0001019b\\U000101a0\\U000101d0-\\U000101fc\\U00010877-\\U00010878\\U00010ac8\\U0001173f\\U00016b3c-\\U00016b3f\\U00016b45\\U0001bc9c\\U0001d000-\\U0001d0f5\\U0001d100-\\U0001d126\\U0001d129-\\U0001d164\\U0001d16a-\\U0001d16c\\U0001d183-\\U0001d184\\U0001d18c-\\U0001d1a9\\U0001d1ae-\\U0001d1e8\\U0001d200-\\U0001d241\\U0001d245\\U0001d300-\\U0001d356\\U0001d800-\\U0001d9ff\\U0001da37-\\U0001da3a\\U0001da6d-\\U0001da74\\U0001da76-\\U0001da83\\U0001da85-\\U0001da86\\U0001ecac\\U0001f000-\\U0001f02b\\U0001f030-\\U0001f093\\U0001f0a0-\\U0001f0ae\\U0001f0b1-\\U0001f0bf\\U0001f0c1-\\U0001f0cf\\U0001f0d1-\\U0001f0f5\\U0001f110-\\U0001f16b\\U0001f170-\\U0001f1ac\\U0001f1e6-\\U0001f202\\U0001f210-\\U0001f23b\\U0001f240-\\U0001f248\\U0001f250-\\U0001f251\\U0001f260-\\U0001f265\\U0001f300-\\U0001f3fa\\U0001f400-\\U0001f6d4\\U0001f6e0-\\U0001f6ec\\U0001f6f0-\\U0001f6f9\\U0001f700-\\U0001f773\\U0001f780-\\U0001f7d8\\U0001f800-\\U0001f80b\\U0001f810-\\U0001f847\\U0001f850-\\U0001f859\\U0001f860-\\U0001f887\\U0001f890-\\U0001f8ad\\U0001f900-\\U0001f90b\\U0001f910-\\U0001f93e\\U0001f940-\\U0001f970\\U0001f973-\\U0001f976\\U0001f97a\\U0001f97c-\\U0001f9a2\\U0001f9b0-\\U0001f9b9\\U0001f9c0-\\U0001f9c2\\U0001f9d0-\\U0001f9ff\\U0001fa60-\\U0001fa6d\'\n\nZl = \'\\u2028\'\n\nZp = \'\\u2029\'\n\nZs = \' \\xa0\\u1680\\u2000-\\u200a\\u202f\\u205f\\u3000\'\n\nxid_continue = 
\'0-9A-Z_a-z\\xaa\\xb5\\xb7\\xba\\xc0-\\xd6\\xd8-\\xf6\\xf8-\\u02c1\\u02c6-\\u02d1\\u02e0-\\u02e4\\u02ec\\u02ee\\u0300-\\u0374\\u0376-\\u0377\\u037b-\\u037d\\u037f\\u0386-\\u038a\\u038c\\u038e-\\u03a1\\u03a3-\\u03f5\\u03f7-\\u0481\\u0483-\\u0487\\u048a-\\u052f\\u0531-\\u0556\\u0559\\u0560-\\u0588\\u0591-\\u05bd\\u05bf\\u05c1-\\u05c2\\u05c4-\\u05c5\\u05c7\\u05d0-\\u05ea\\u05ef-\\u05f2\\u0610-\\u061a\\u0620-\\u0669\\u066e-\\u06d3\\u06d5-\\u06dc\\u06df-\\u06e8\\u06ea-\\u06fc\\u06ff\\u0710-\\u074a\\u074d-\\u07b1\\u07c0-\\u07f5\\u07fa\\u07fd\\u0800-\\u082d\\u0840-\\u085b\\u0860-\\u086a\\u08a0-\\u08b4\\u08b6-\\u08bd\\u08d3-\\u08e1\\u08e3-\\u0963\\u0966-\\u096f\\u0971-\\u0983\\u0985-\\u098c\\u098f-\\u0990\\u0993-\\u09a8\\u09aa-\\u09b0\\u09b2\\u09b6-\\u09b9\\u09bc-\\u09c4\\u09c7-\\u09c8\\u09cb-\\u09ce\\u09d7\\u09dc-\\u09dd\\u09df-\\u09e3\\u09e6-\\u09f1\\u09fc\\u09fe\\u0a01-\\u0a03\\u0a05-\\u0a0a\\u0a0f-\\u0a10\\u0a13-\\u0a28\\u0a2a-\\u0a30\\u0a32-\\u0a33\\u0a35-\\u0a36\\u0a38-\\u0a39\\u0a3c\\u0a3e-\\u0a42\\u0a47-\\u0a48\\u0a4b-\\u0a4d\\u0a51\\u0a59-\\u0a5c\\u0a5e\\u0a66-\\u0a75\\u0a81-\\u0a83\\u0a85-\\u0a8d\\u0a8f-\\u0a91\\u0a93-\\u0aa8\\u0aaa-\\u0ab0\\u0ab2-\\u0ab3\\u0ab5-\\u0ab9\\u0abc-\\u0ac5\\u0ac7-\\u0ac9\\u0acb-\\u0acd\\u0ad0\\u0ae0-\\u0ae3\\u0ae6-\\u0aef\\u0af9-\\u0aff\\u0b01-\\u0b03\\u0b05-\\u0b0c\\u0b0f-\\u0b10\\u0b13-\\u0b28\\u0b2a-\\u0b30\\u0b32-\\u0b33\\u0b35-\\u0b39\\u0b3c-\\u0b44\\u0b47-\\u0b48\\u0b4b-\\u0b4d\\u0b56-\\u0b57\\u0b5c-\\u0b5d\\u0b5f-\\u0b63\\u0b66-\\u0b6f\\u0b71\\u0b82-\\u0b83\\u0b85-\\u0b8a\\u0b8e-\\u0b90\\u0b92-\\u0b95\\u0b99-\\u0b9a\\u0b9c\\u0b9e-\\u0b9f\\u0ba3-\\u0ba4\\u0ba8-\\u0baa\\u0bae-\\u0bb9\\u0bbe-\\u0bc2\\u0bc6-\\u0bc8\\u0bca-\\u0bcd\\u0bd0\\u0bd7\\u0be6-\\u0bef\\u0c00-\\u0c0c\\u0c0e-\\u0c10\\u0c12-\\u0c28\\u0c2a-\\u0c39\\u0c3d-\\u0c44\\u0c46-\\u0c48\\u0c4a-\\u0c4d\\u0c55-\\u0c56\\u0c58-\\u0c5a\\u0c60-\\u0c63\\u0c66-\\u0c6f\\u0c80-\\u0c83\\u0c85-\\u0c8c\\u0c8e-\\u0c90\\u0c92-\\u0ca8\\u0caa-\\u0cb3\\u0cb5-\\u0cb9\\u0cbc-\\u0cc4\\u0cc6-\\u0cc8\\u0cca-\\u0ccd\\u0cd5-\\u0cd6\\u0cde\\u0ce0-\\u0ce3\\u0ce6-\\u0cef\\u0cf1-\\u0cf2\\u0d00-\\u0d03\\u0d05-\\u0d0c\\u0d0e-\\u0d10\\u0d12-\\u0d44\\u0d46-\\u0d48\\u0d4a-\\u0d4e\\u0d54-\\u0d57\\u0d5f-\\u0d63\\u0d66-\\u0d6f\\u0d7a-\\u0d7f\\u0d82-\\u0d83\\u0d85-\\u0d96\\u0d9a-\\u0db1\\u0db3-\\u0dbb\\u0dbd\\u0dc0-\\u0dc6\\u0dca\\u0dcf-\\u0dd4\\u0dd6\\u0dd8-\\u0ddf\\u0de6-\\u0def\\u0df2-\\u0df3\\u0e01-\\u0e3a\\u0e40-\\u0e4e\\u0e50-\\u0e59\\u0e81-\\u0e82\\u0e84\\u0e87-\\u0e88\\u0e8a\\u0e8d\\u0e94-\\u0e97\\u0e99-\\u0e9f\\u0ea1-\\u0ea3\\u0ea5\\u0ea7\\u0eaa-\\u0eab\\u0ead-\\u0eb9\\u0ebb-\\u0ebd\\u0ec0-\\u0ec4\\u0ec6\\u0ec8-\\u0ecd\\u0ed0-\\u0ed9\\u0edc-\\u0edf\\u0f00\\u0f18-\\u0f19\\u0f20-\\u0f29\\u0f35\\u0f37\\u0f39\\u0f3e-\\u0f47\\u0f49-\\u0f6c\\u0f71-\\u0f84\\u0f86-\\u0f97\\u0f99-\\u0fbc\\u0fc6\\u1000-\\u1049\\u1050-\\u109d\\u10a0-\\u10c5\\u10c7\\u10cd\\u10d0-\\u10fa\\u10fc-\\u1248\\u124a-\\u124d\\u1250-\\u1256\\u1258\\u125a-\\u125d\\u1260-\\u1288\\u128a-\\u128d\\u1290-\\u12b0\\u12b2-\\u12b5\\u12b8-\\u12be\\u12c0\\u12c2-\\u12c5\\u12c8-\\u12d6\\u12d8-\\u1310\\u1312-\\u1315\\u1318-\\u135a\\u135d-\\u135f\\u1369-\\u1371\\u1380-\\u138f\\u13a0-\\u13f5\\u13f8-\\u13fd\\u1401-\\u166c\\u166f-\\u167f\\u1681-\\u169a\\u16a0-\\u16ea\\u16ee-\\u16f8\\u1700-\\u170c\\u170e-\\u1714\\u1720-\\u1734\\u1740-\\u1753\\u1760-\\u176c\\u176e-\\u1770\\u1772-\\u1773\\u1780-\\u17d3\\u17d7\\u17dc-\\u17dd\\u17e0-\\u17e9\\u180b-\\u180d\\u1810-\\u1819\\u1820-\\u1878\\u1880-\\u18aa\\u18b0-\\u18f5\\u1900-\\u191e\\u1920-\\u192b\\u1930-\\u193b\\u1946-\\u196d\\u1970-\\u197
4\\u1980-\\u19ab\\u19b0-\\u19c9\\u19d0-\\u19da\\u1a00-\\u1a1b\\u1a20-\\u1a5e\\u1a60-\\u1a7c\\u1a7f-\\u1a89\\u1a90-\\u1a99\\u1aa7\\u1ab0-\\u1abd\\u1b00-\\u1b4b\\u1b50-\\u1b59\\u1b6b-\\u1b73\\u1b80-\\u1bf3\\u1c00-\\u1c37\\u1c40-\\u1c49\\u1c4d-\\u1c7d\\u1c80-\\u1c88\\u1c90-\\u1cba\\u1cbd-\\u1cbf\\u1cd0-\\u1cd2\\u1cd4-\\u1cf9\\u1d00-\\u1df9\\u1dfb-\\u1f15\\u1f18-\\u1f1d\\u1f20-\\u1f45\\u1f48-\\u1f4d\\u1f50-\\u1f57\\u1f59\\u1f5b\\u1f5d\\u1f5f-\\u1f7d\\u1f80-\\u1fb4\\u1fb6-\\u1fbc\\u1fbe\\u1fc2-\\u1fc4\\u1fc6-\\u1fcc\\u1fd0-\\u1fd3\\u1fd6-\\u1fdb\\u1fe0-\\u1fec\\u1ff2-\\u1ff4\\u1ff6-\\u1ffc\\u203f-\\u2040\\u2054\\u2071\\u207f\\u2090-\\u209c\\u20d0-\\u20dc\\u20e1\\u20e5-\\u20f0\\u2102\\u2107\\u210a-\\u2113\\u2115\\u2118-\\u211d\\u2124\\u2126\\u2128\\u212a-\\u2139\\u213c-\\u213f\\u2145-\\u2149\\u214e\\u2160-\\u2188\\u2c00-\\u2c2e\\u2c30-\\u2c5e\\u2c60-\\u2ce4\\u2ceb-\\u2cf3\\u2d00-\\u2d25\\u2d27\\u2d2d\\u2d30-\\u2d67\\u2d6f\\u2d7f-\\u2d96\\u2da0-\\u2da6\\u2da8-\\u2dae\\u2db0-\\u2db6\\u2db8-\\u2dbe\\u2dc0-\\u2dc6\\u2dc8-\\u2dce\\u2dd0-\\u2dd6\\u2dd8-\\u2dde\\u2de0-\\u2dff\\u3005-\\u3007\\u3021-\\u302f\\u3031-\\u3035\\u3038-\\u303c\\u3041-\\u3096\\u3099-\\u309a\\u309d-\\u309f\\u30a1-\\u30fa\\u30fc-\\u30ff\\u3105-\\u312f\\u3131-\\u318e\\u31a0-\\u31ba\\u31f0-\\u31ff\\u3400-\\u4db5\\u4e00-\\u9fef\\ua000-\\ua48c\\ua4d0-\\ua4fd\\ua500-\\ua60c\\ua610-\\ua62b\\ua640-\\ua66f\\ua674-\\ua67d\\ua67f-\\ua6f1\\ua717-\\ua71f\\ua722-\\ua788\\ua78b-\\ua7b9\\ua7f7-\\ua827\\ua840-\\ua873\\ua880-\\ua8c5\\ua8d0-\\ua8d9\\ua8e0-\\ua8f7\\ua8fb\\ua8fd-\\ua92d\\ua930-\\ua953\\ua960-\\ua97c\\ua980-\\ua9c0\\ua9cf-\\ua9d9\\ua9e0-\\ua9fe\\uaa00-\\uaa36\\uaa40-\\uaa4d\\uaa50-\\uaa59\\uaa60-\\uaa76\\uaa7a-\\uaac2\\uaadb-\\uaadd\\uaae0-\\uaaef\\uaaf2-\\uaaf6\\uab01-\\uab06\\uab09-\\uab0e\\uab11-\\uab16\\uab20-\\uab26\\uab28-\\uab2e\\uab30-\\uab5a\\uab5c-\\uab65\\uab70-\\uabea\\uabec-\\uabed\\uabf0-\\uabf9\\uac00-\\ud7a3\\ud7b0-\\ud7c6\\ud7cb-\\ud7fb\\uf900-\\ufa6d\\ufa70-\\ufad9\\ufb00-\\ufb06\\ufb13-\\ufb17\\ufb1d-\\ufb28\\ufb2a-\\ufb36\\ufb38-\\ufb3c\\ufb3e\\ufb40-\\ufb41\\ufb43-\\ufb44\\ufb46-\\ufbb1\\ufbd3-\\ufc5d\\ufc64-\\ufd3d\\ufd50-\\ufd8f\\ufd92-\\ufdc7\\ufdf0-\\ufdf9\\ufe00-\\ufe0f\\ufe20-\\ufe2f\\ufe33-\\ufe34\\ufe4d-\\ufe4f\\ufe71\\ufe73\\ufe77\\ufe79\\ufe7b\\ufe7d\\ufe7f-\\ufefc\\uff10-\\uff19\\uff21-\\uff3a\\uff3f\\uff41-\\uff5a\\uff66-\\uffbe\\uffc2-\\uffc7\\uffca-\\uffcf\\uffd2-\\uffd7\\uffda-\\uffdc\\U00010000-\\U0001000b\\U0001000d-\\U00010026\\U00010028-\\U0001003a\\U0001003c-\\U0001003d\\U0001003f-\\U0001004d\\U00010050-\\U0001005d\\U00010080-\\U000100fa\\U00010140-\\U00010174\\U000101fd\\U00010280-\\U0001029c\\U000102a0-\\U000102d0\\U000102e0\\U00010300-\\U0001031f\\U0001032d-\\U0001034a\\U00010350-\\U0001037a\\U00010380-\\U0001039d\\U000103a0-\\U000103c3\\U000103c8-\\U000103cf\\U000103d1-\\U000103d5\\U00010400-\\U0001049d\\U000104a0-\\U000104a9\\U000104b0-\\U000104d3\\U000104d8-\\U000104fb\\U00010500-\\U00010527\\U00010530-\\U00010563\\U00010600-\\U00010736\\U00010740-\\U00010755\\U00010760-\\U00010767\\U00010800-\\U00010805\\U00010808\\U0001080a-\\U00010835\\U00010837-\\U00010838\\U0001083c\\U0001083f-\\U00010855\\U00010860-\\U00010876\\U00010880-\\U0001089e\\U000108e0-\\U000108f2\\U000108f4-\\U000108f5\\U00010900-\\U00010915\\U00010920-\\U00010939\\U00010980-\\U000109b7\\U000109be-\\U000109bf\\U00010a00-\\U00010a03\\U00010a05-\\U00010a06\\U00010a0c-\\U00010a13\\U00010a15-\\U00010a17\\U00010a19-\\U00010a35\\U00010a38-\\U00010a3a\\U00010a3f\\U00010a60-\\U00010a7c\\U00010a80-\\U00010a9c\\U00010ac0-\\U00010ac7\\
U00010ac9-\\U00010ae6\\U00010b00-\\U00010b35\\U00010b40-\\U00010b55\\U00010b60-\\U00010b72\\U00010b80-\\U00010b91\\U00010c00-\\U00010c48\\U00010c80-\\U00010cb2\\U00010cc0-\\U00010cf2\\U00010d00-\\U00010d27\\U00010d30-\\U00010d39\\U00010f00-\\U00010f1c\\U00010f27\\U00010f30-\\U00010f50\\U00011000-\\U00011046\\U00011066-\\U0001106f\\U0001107f-\\U000110ba\\U000110d0-\\U000110e8\\U000110f0-\\U000110f9\\U00011100-\\U00011134\\U00011136-\\U0001113f\\U00011144-\\U00011146\\U00011150-\\U00011173\\U00011176\\U00011180-\\U000111c4\\U000111c9-\\U000111cc\\U000111d0-\\U000111da\\U000111dc\\U00011200-\\U00011211\\U00011213-\\U00011237\\U0001123e\\U00011280-\\U00011286\\U00011288\\U0001128a-\\U0001128d\\U0001128f-\\U0001129d\\U0001129f-\\U000112a8\\U000112b0-\\U000112ea\\U000112f0-\\U000112f9\\U00011300-\\U00011303\\U00011305-\\U0001130c\\U0001130f-\\U00011310\\U00011313-\\U00011328\\U0001132a-\\U00011330\\U00011332-\\U00011333\\U00011335-\\U00011339\\U0001133b-\\U00011344\\U00011347-\\U00011348\\U0001134b-\\U0001134d\\U00011350\\U00011357\\U0001135d-\\U00011363\\U00011366-\\U0001136c\\U00011370-\\U00011374\\U00011400-\\U0001144a\\U00011450-\\U00011459\\U0001145e\\U00011480-\\U000114c5\\U000114c7\\U000114d0-\\U000114d9\\U00011580-\\U000115b5\\U000115b8-\\U000115c0\\U000115d8-\\U000115dd\\U00011600-\\U00011640\\U00011644\\U00011650-\\U00011659\\U00011680-\\U000116b7\\U000116c0-\\U000116c9\\U00011700-\\U0001171a\\U0001171d-\\U0001172b\\U00011730-\\U00011739\\U00011800-\\U0001183a\\U000118a0-\\U000118e9\\U000118ff\\U00011a00-\\U00011a3e\\U00011a47\\U00011a50-\\U00011a83\\U00011a86-\\U00011a99\\U00011a9d\\U00011ac0-\\U00011af8\\U00011c00-\\U00011c08\\U00011c0a-\\U00011c36\\U00011c38-\\U00011c40\\U00011c50-\\U00011c59\\U00011c72-\\U00011c8f\\U00011c92-\\U00011ca7\\U00011ca9-\\U00011cb6\\U00011d00-\\U00011d06\\U00011d08-\\U00011d09\\U00011d0b-\\U00011d36\\U00011d3a\\U00011d3c-\\U00011d3d\\U00011d3f-\\U00011d47\\U00011d50-\\U00011d59\\U00011d60-\\U00011d65\\U00011d67-\\U00011d68\\U00011d6a-\\U00011d8e\\U00011d90-\\U00011d91\\U00011d93-\\U00011d98\\U00011da0-\\U00011da9\\U00011ee0-\\U00011ef6\\U00012000-\\U00012399\\U00012400-\\U0001246e\\U00012480-\\U00012543\\U00013000-\\U0001342e\\U00014400-\\U00014646\\U00016800-\\U00016a38\\U00016a40-\\U00016a5e\\U00016a60-\\U00016a69\\U00016ad0-\\U00016aed\\U00016af0-\\U00016af4\\U00016b00-\\U00016b36\\U00016b40-\\U00016b43\\U00016b50-\\U00016b59\\U00016b63-\\U00016b77\\U00016b7d-\\U00016b8f\\U00016e40-\\U00016e7f\\U00016f00-\\U00016f44\\U00016f50-\\U00016f7e\\U00016f8f-\\U00016f9f\\U00016fe0-\\U00016fe1\\U00017000-\\U000187f1\\U00018800-\\U00018af2\\U0001b000-\\U0001b11e\\U0001b170-\\U0001b2fb\\U0001bc00-\\U0001bc6a\\U0001bc70-\\U0001bc7c\\U0001bc80-\\U0001bc88\\U0001bc90-\\U0001bc99\\U0001bc9d-\\U0001bc9e\\U0001d165-\\U0001d169\\U0001d16d-\\U0001d172\\U0001d17b-\\U0001d182\\U0001d185-\\U0001d18b\\U0001d1aa-\\U0001d1ad\\U0001d242-\\U0001d244\\U0001d400-\\U0001d454\\U0001d456-\\U0001d49c\\U0001d49e-\\U0001d49f\\U0001d4a2\\U0001d4a5-\\U0001d4a6\\U0001d4a9-\\U0001d4ac\\U0001d4ae-\\U0001d4b9\\U0001d4bb\\U0001d4bd-\\U0001d4c3\\U0001d4c5-\\U0001d505\\U0001d507-\\U0001d50a\\U0001d50d-\\U0001d514\\U0001d516-\\U0001d51c\\U0001d51e-\\U0001d539\\U0001d53b-\\U0001d53e\\U0001d540-\\U0001d544\\U0001d546\\U0001d54a-\\U0001d550\\U0001d552-\\U0001d6a5\\U0001d6a8-\\U0001d6c0\\U0001d6c2-\\U0001d6da\\U0001d6dc-\\U0001d6fa\\U0001d6fc-\\U0001d714\\U0001d716-\\U0001d734\\U0001d736-\\U0001d74e\\U0001d750-\\U0001d76e\\U0001d770-\\U0001d788\\U0001d78a-\\U0001d7a8\\U0001d7aa-\\U0001d7c2\\U0001d7c4-
\\U0001d7cb\\U0001d7ce-\\U0001d7ff\\U0001da00-\\U0001da36\\U0001da3b-\\U0001da6c\\U0001da75\\U0001da84\\U0001da9b-\\U0001da9f\\U0001daa1-\\U0001daaf\\U0001e000-\\U0001e006\\U0001e008-\\U0001e018\\U0001e01b-\\U0001e021\\U0001e023-\\U0001e024\\U0001e026-\\U0001e02a\\U0001e800-\\U0001e8c4\\U0001e8d0-\\U0001e8d6\\U0001e900-\\U0001e94a\\U0001e950-\\U0001e959\\U0001ee00-\\U0001ee03\\U0001ee05-\\U0001ee1f\\U0001ee21-\\U0001ee22\\U0001ee24\\U0001ee27\\U0001ee29-\\U0001ee32\\U0001ee34-\\U0001ee37\\U0001ee39\\U0001ee3b\\U0001ee42\\U0001ee47\\U0001ee49\\U0001ee4b\\U0001ee4d-\\U0001ee4f\\U0001ee51-\\U0001ee52\\U0001ee54\\U0001ee57\\U0001ee59\\U0001ee5b\\U0001ee5d\\U0001ee5f\\U0001ee61-\\U0001ee62\\U0001ee64\\U0001ee67-\\U0001ee6a\\U0001ee6c-\\U0001ee72\\U0001ee74-\\U0001ee77\\U0001ee79-\\U0001ee7c\\U0001ee7e\\U0001ee80-\\U0001ee89\\U0001ee8b-\\U0001ee9b\\U0001eea1-\\U0001eea3\\U0001eea5-\\U0001eea9\\U0001eeab-\\U0001eebb\\U00020000-\\U0002a6d6\\U0002a700-\\U0002b734\\U0002b740-\\U0002b81d\\U0002b820-\\U0002cea1\\U0002ceb0-\\U0002ebe0\\U0002f800-\\U0002fa1d\\U000e0100-\\U000e01ef\'\n\nxid_start = \'A-Z_a-z\\xaa\\xb5\\xba\\xc0-\\xd6\\xd8-\\xf6\\xf8-\\u02c1\\u02c6-\\u02d1\\u02e0-\\u02e4\\u02ec\\u02ee\\u0370-\\u0374\\u0376-\\u0377\\u037b-\\u037d\\u037f\\u0386\\u0388-\\u038a\\u038c\\u038e-\\u03a1\\u03a3-\\u03f5\\u03f7-\\u0481\\u048a-\\u052f\\u0531-\\u0556\\u0559\\u0560-\\u0588\\u05d0-\\u05ea\\u05ef-\\u05f2\\u0620-\\u064a\\u066e-\\u066f\\u0671-\\u06d3\\u06d5\\u06e5-\\u06e6\\u06ee-\\u06ef\\u06fa-\\u06fc\\u06ff\\u0710\\u0712-\\u072f\\u074d-\\u07a5\\u07b1\\u07ca-\\u07ea\\u07f4-\\u07f5\\u07fa\\u0800-\\u0815\\u081a\\u0824\\u0828\\u0840-\\u0858\\u0860-\\u086a\\u08a0-\\u08b4\\u08b6-\\u08bd\\u0904-\\u0939\\u093d\\u0950\\u0958-\\u0961\\u0971-\\u0980\\u0985-\\u098c\\u098f-\\u0990\\u0993-\\u09a8\\u09aa-\\u09b0\\u09b2\\u09b6-\\u09b9\\u09bd\\u09ce\\u09dc-\\u09dd\\u09df-\\u09e1\\u09f0-\\u09f1\\u09fc\\u0a05-\\u0a0a\\u0a0f-\\u0a10\\u0a13-\\u0a28\\u0a2a-\\u0a30\\u0a32-\\u0a33\\u0a35-\\u0a36\\u0a38-\\u0a39\\u0a59-\\u0a5c\\u0a5e\\u0a72-\\u0a74\\u0a85-\\u0a8d\\u0a8f-\\u0a91\\u0a93-\\u0aa8\\u0aaa-\\u0ab0\\u0ab2-\\u0ab3\\u0ab5-\\u0ab9\\u0abd\\u0ad0\\u0ae0-\\u0ae1\\u0af9\\u0b05-\\u0b0c\\u0b0f-\\u0b10\\u0b13-\\u0b28\\u0b2a-\\u0b30\\u0b32-\\u0b33\\u0b35-\\u0b39\\u0b3d\\u0b5c-\\u0b5d\\u0b5f-\\u0b61\\u0b71\\u0b83\\u0b85-\\u0b8a\\u0b8e-\\u0b90\\u0b92-\\u0b95\\u0b99-\\u0b9a\\u0b9c\\u0b9e-\\u0b9f\\u0ba3-\\u0ba4\\u0ba8-\\u0baa\\u0bae-\\u0bb9\\u0bd0\\u0c05-\\u0c0c\\u0c0e-\\u0c10\\u0c12-\\u0c28\\u0c2a-\\u0c39\\u0c3d\\u0c58-\\u0c5a\\u0c60-\\u0c61\\u0c80\\u0c85-\\u0c8c\\u0c8e-\\u0c90\\u0c92-\\u0ca8\\u0caa-\\u0cb3\\u0cb5-\\u0cb9\\u0cbd\\u0cde\\u0ce0-\\u0ce1\\u0cf1-\\u0cf2\\u0d05-\\u0d0c\\u0d0e-\\u0d10\\u0d12-\\u0d3a\\u0d3d\\u0d4e\\u0d54-\\u0d56\\u0d5f-\\u0d61\\u0d7a-\\u0d7f\\u0d85-\\u0d96\\u0d9a-\\u0db1\\u0db3-\\u0dbb\\u0dbd\\u0dc0-\\u0dc6\\u0e01-\\u0e30\\u0e32\\u0e40-\\u0e46\\u0e81-\\u0e82\\u0e84\\u0e87-\\u0e88\\u0e8a\\u0e8d\\u0e94-\\u0e97\\u0e99-\\u0e9f\\u0ea1-\\u0ea3\\u0ea5\\u0ea7\\u0eaa-\\u0eab\\u0ead-\\u0eb0\\u0eb2\\u0ebd\\u0ec0-\\u0ec4\\u0ec6\\u0edc-\\u0edf\\u0f00\\u0f40-\\u0f47\\u0f49-\\u0f6c\\u0f88-\\u0f8c\\u1000-\\u102a\\u103f\\u1050-\\u1055\\u105a-\\u105d\\u1061\\u1065-\\u1066\\u106e-\\u1070\\u1075-\\u1081\\u108e\\u10a0-\\u10c5\\u10c7\\u10cd\\u10d0-\\u10fa\\u10fc-\\u1248\\u124a-\\u124d\\u1250-\\u1256\\u1258\\u125a-\\u125d\\u1260-\\u1288\\u128a-\\u128d\\u1290-\\u12b0\\u12b2-\\u12b5\\u12b8-\\u12be\\u12c0\\u12c2-\\u12c5\\u12c8-\\u12d6\\u12d8-\\u1310\\u1312-\\u1315\\u1318-\\u135a\\u1380-\\u138f\\u13a0-\\u13f5\\u13f8-\\u13fd\\u1401-\\u166
c\\u166f-\\u167f\\u1681-\\u169a\\u16a0-\\u16ea\\u16ee-\\u16f8\\u1700-\\u170c\\u170e-\\u1711\\u1720-\\u1731\\u1740-\\u1751\\u1760-\\u176c\\u176e-\\u1770\\u1780-\\u17b3\\u17d7\\u17dc\\u1820-\\u1878\\u1880-\\u18a8\\u18aa\\u18b0-\\u18f5\\u1900-\\u191e\\u1950-\\u196d\\u1970-\\u1974\\u1980-\\u19ab\\u19b0-\\u19c9\\u1a00-\\u1a16\\u1a20-\\u1a54\\u1aa7\\u1b05-\\u1b33\\u1b45-\\u1b4b\\u1b83-\\u1ba0\\u1bae-\\u1baf\\u1bba-\\u1be5\\u1c00-\\u1c23\\u1c4d-\\u1c4f\\u1c5a-\\u1c7d\\u1c80-\\u1c88\\u1c90-\\u1cba\\u1cbd-\\u1cbf\\u1ce9-\\u1cec\\u1cee-\\u1cf1\\u1cf5-\\u1cf6\\u1d00-\\u1dbf\\u1e00-\\u1f15\\u1f18-\\u1f1d\\u1f20-\\u1f45\\u1f48-\\u1f4d\\u1f50-\\u1f57\\u1f59\\u1f5b\\u1f5d\\u1f5f-\\u1f7d\\u1f80-\\u1fb4\\u1fb6-\\u1fbc\\u1fbe\\u1fc2-\\u1fc4\\u1fc6-\\u1fcc\\u1fd0-\\u1fd3\\u1fd6-\\u1fdb\\u1fe0-\\u1fec\\u1ff2-\\u1ff4\\u1ff6-\\u1ffc\\u2071\\u207f\\u2090-\\u209c\\u2102\\u2107\\u210a-\\u2113\\u2115\\u2118-\\u211d\\u2124\\u2126\\u2128\\u212a-\\u2139\\u213c-\\u213f\\u2145-\\u2149\\u214e\\u2160-\\u2188\\u2c00-\\u2c2e\\u2c30-\\u2c5e\\u2c60-\\u2ce4\\u2ceb-\\u2cee\\u2cf2-\\u2cf3\\u2d00-\\u2d25\\u2d27\\u2d2d\\u2d30-\\u2d67\\u2d6f\\u2d80-\\u2d96\\u2da0-\\u2da6\\u2da8-\\u2dae\\u2db0-\\u2db6\\u2db8-\\u2dbe\\u2dc0-\\u2dc6\\u2dc8-\\u2dce\\u2dd0-\\u2dd6\\u2dd8-\\u2dde\\u3005-\\u3007\\u3021-\\u3029\\u3031-\\u3035\\u3038-\\u303c\\u3041-\\u3096\\u309d-\\u309f\\u30a1-\\u30fa\\u30fc-\\u30ff\\u3105-\\u312f\\u3131-\\u318e\\u31a0-\\u31ba\\u31f0-\\u31ff\\u3400-\\u4db5\\u4e00-\\u9fef\\ua000-\\ua48c\\ua4d0-\\ua4fd\\ua500-\\ua60c\\ua610-\\ua61f\\ua62a-\\ua62b\\ua640-\\ua66e\\ua67f-\\ua69d\\ua6a0-\\ua6ef\\ua717-\\ua71f\\ua722-\\ua788\\ua78b-\\ua7b9\\ua7f7-\\ua801\\ua803-\\ua805\\ua807-\\ua80a\\ua80c-\\ua822\\ua840-\\ua873\\ua882-\\ua8b3\\ua8f2-\\ua8f7\\ua8fb\\ua8fd-\\ua8fe\\ua90a-\\ua925\\ua930-\\ua946\\ua960-\\ua97c\\ua984-\\ua9b2\\ua9cf\\ua9e0-\\ua9e4\\ua9e6-\\ua9ef\\ua9fa-\\ua9fe\\uaa00-\\uaa28\\uaa40-\\uaa42\\uaa44-\\uaa4b\\uaa60-\\uaa76\\uaa7a\\uaa7e-\\uaaaf\\uaab1\\uaab5-\\uaab6\\uaab9-\\uaabd\\uaac0\\uaac2\\uaadb-\\uaadd\\uaae0-\\uaaea\\uaaf2-\\uaaf4\\uab01-\\uab06\\uab09-\\uab0e\\uab11-\\uab16\\uab20-\\uab26\\uab28-\\uab2e\\uab30-\\uab5a\\uab5c-\\uab65\\uab70-\\uabe2\\uac00-\\ud7a3\\ud7b0-\\ud7c6\\ud7cb-\\ud7fb\\uf900-\\ufa6d\\ufa70-\\ufad9\\ufb00-\\ufb06\\ufb13-\\ufb17\\ufb1d\\ufb1f-\\ufb28\\ufb2a-\\ufb36\\ufb38-\\ufb3c\\ufb3e\\ufb40-\\ufb41\\ufb43-\\ufb44\\ufb46-\\ufbb1\\ufbd3-\\ufc5d\\ufc64-\\ufd3d\\ufd50-\\ufd8f\\ufd92-\\ufdc7\\ufdf0-\\ufdf9\\ufe71\\ufe73\\ufe77\\ufe79\\ufe7b\\ufe7d\\ufe7f-\\ufefc\\uff21-\\uff3a\\uff41-\\uff5a\\uff66-\\uff9d\\uffa0-\\uffbe\\uffc2-\\uffc7\\uffca-\\uffcf\\uffd2-\\uffd7\\uffda-\\uffdc\\U00010000-\\U0001000b\\U0001000d-\\U00010026\\U00010028-\\U0001003a\\U0001003c-\\U0001003d\\U0001003f-\\U0001004d\\U00010050-\\U0001005d\\U00010080-\\U000100fa\\U00010140-\\U00010174\\U00010280-\\U0001029c\\U000102a0-\\U000102d0\\U00010300-\\U0001031f\\U0001032d-\\U0001034a\\U00010350-\\U00010375\\U00010380-\\U0001039d\\U000103a0-\\U000103c3\\U000103c8-\\U000103cf\\U000103d1-\\U000103d5\\U00010400-\\U0001049d\\U000104b0-\\U000104d3\\U000104d8-\\U000104fb\\U00010500-\\U00010527\\U00010530-\\U00010563\\U00010600-\\U00010736\\U00010740-\\U00010755\\U00010760-\\U00010767\\U00010800-\\U00010805\\U00010808\\U0001080a-\\U00010835\\U00010837-\\U00010838\\U0001083c\\U0001083f-\\U00010855\\U00010860-\\U00010876\\U00010880-\\U0001089e\\U000108e0-\\U000108f2\\U000108f4-\\U000108f5\\U00010900-\\U00010915\\U00010920-\\U00010939\\U00010980-\\U000109b7\\U000109be-\\U000109bf\\U00010a00\\U00010a10-\\U00010a13\\U00010a15-\\U00010a17\
\U00010a19-\\U00010a35\\U00010a60-\\U00010a7c\\U00010a80-\\U00010a9c\\U00010ac0-\\U00010ac7\\U00010ac9-\\U00010ae4\\U00010b00-\\U00010b35\\U00010b40-\\U00010b55\\U00010b60-\\U00010b72\\U00010b80-\\U00010b91\\U00010c00-\\U00010c48\\U00010c80-\\U00010cb2\\U00010cc0-\\U00010cf2\\U00010d00-\\U00010d23\\U00010f00-\\U00010f1c\\U00010f27\\U00010f30-\\U00010f45\\U00011003-\\U00011037\\U00011083-\\U000110af\\U000110d0-\\U000110e8\\U00011103-\\U00011126\\U00011144\\U00011150-\\U00011172\\U00011176\\U00011183-\\U000111b2\\U000111c1-\\U000111c4\\U000111da\\U000111dc\\U00011200-\\U00011211\\U00011213-\\U0001122b\\U00011280-\\U00011286\\U00011288\\U0001128a-\\U0001128d\\U0001128f-\\U0001129d\\U0001129f-\\U000112a8\\U000112b0-\\U000112de\\U00011305-\\U0001130c\\U0001130f-\\U00011310\\U00011313-\\U00011328\\U0001132a-\\U00011330\\U00011332-\\U00011333\\U00011335-\\U00011339\\U0001133d\\U00011350\\U0001135d-\\U00011361\\U00011400-\\U00011434\\U00011447-\\U0001144a\\U00011480-\\U000114af\\U000114c4-\\U000114c5\\U000114c7\\U00011580-\\U000115ae\\U000115d8-\\U000115db\\U00011600-\\U0001162f\\U00011644\\U00011680-\\U000116aa\\U00011700-\\U0001171a\\U00011800-\\U0001182b\\U000118a0-\\U000118df\\U000118ff\\U00011a00\\U00011a0b-\\U00011a32\\U00011a3a\\U00011a50\\U00011a5c-\\U00011a83\\U00011a86-\\U00011a89\\U00011a9d\\U00011ac0-\\U00011af8\\U00011c00-\\U00011c08\\U00011c0a-\\U00011c2e\\U00011c40\\U00011c72-\\U00011c8f\\U00011d00-\\U00011d06\\U00011d08-\\U00011d09\\U00011d0b-\\U00011d30\\U00011d46\\U00011d60-\\U00011d65\\U00011d67-\\U00011d68\\U00011d6a-\\U00011d89\\U00011d98\\U00011ee0-\\U00011ef2\\U00012000-\\U00012399\\U00012400-\\U0001246e\\U00012480-\\U00012543\\U00013000-\\U0001342e\\U00014400-\\U00014646\\U00016800-\\U00016a38\\U00016a40-\\U00016a5e\\U00016ad0-\\U00016aed\\U00016b00-\\U00016b2f\\U00016b40-\\U00016b43\\U00016b63-\\U00016b77\\U00016b7d-\\U00016b8f\\U00016e40-\\U00016e7f\\U00016f00-\\U00016f44\\U00016f50\\U00016f93-\\U00016f9f\\U00016fe0-\\U00016fe1\\U00017000-\\U000187f1\\U00018800-\\U00018af2\\U0001b000-\\U0001b11e\\U0001b170-\\U0001b2fb\\U0001bc00-\\U0001bc6a\\U0001bc70-\\U0001bc7c\\U0001bc80-\\U0001bc88\\U0001bc90-\\U0001bc99\\U0001d400-\\U0001d454\\U0001d456-\\U0001d49c\\U0001d49e-\\U0001d49f\\U0001d4a2\\U0001d4a5-\\U0001d4a6\\U0001d4a9-\\U0001d4ac\\U0001d4ae-\\U0001d4b9\\U0001d4bb\\U0001d4bd-\\U0001d4c3\\U0001d4c5-\\U0001d505\\U0001d507-\\U0001d50a\\U0001d50d-\\U0001d514\\U0001d516-\\U0001d51c\\U0001d51e-\\U0001d539\\U0001d53b-\\U0001d53e\\U0001d540-\\U0001d544\\U0001d546\\U0001d54a-\\U0001d550\\U0001d552-\\U0001d6a5\\U0001d6a8-\\U0001d6c0\\U0001d6c2-\\U0001d6da\\U0001d6dc-\\U0001d6fa\\U0001d6fc-\\U0001d714\\U0001d716-\\U0001d734\\U0001d736-\\U0001d74e\\U0001d750-\\U0001d76e\\U0001d770-\\U0001d788\\U0001d78a-\\U0001d7a8\\U0001d7aa-\\U0001d7c2\\U0001d7c4-\\U0001d7cb\\U0001e800-\\U0001e8c4\\U0001e900-\\U0001e943\\U0001ee00-\\U0001ee03\\U0001ee05-\\U0001ee1f\\U0001ee21-\\U0001ee22\\U0001ee24\\U0001ee27\\U0001ee29-\\U0001ee32\\U0001ee34-\\U0001ee37\\U0001ee39\\U0001ee3b\\U0001ee42\\U0001ee47\\U0001ee49\\U0001ee4b\\U0001ee4d-\\U0001ee4f\\U0001ee51-\\U0001ee52\\U0001ee54\\U0001ee57\\U0001ee59\\U0001ee5b\\U0001ee5d\\U0001ee5f\\U0001ee61-\\U0001ee62\\U0001ee64\\U0001ee67-\\U0001ee6a\\U0001ee6c-\\U0001ee72\\U0001ee74-\\U0001ee77\\U0001ee79-\\U0001ee7c\\U0001ee7e\\U0001ee80-\\U0001ee89\\U0001ee8b-\\U0001ee9b\\U0001eea1-\\U0001eea3\\U0001eea5-\\U0001eea9\\U0001eeab-\\U0001eebb\\U00020000-\\U0002a6d6\\U0002a700-\\U0002b734\\U0002b740-\\U0002b81d\\U0002b820-\\U0002cea1\\U0002ceb0-\\U0002ebe0\\U0002f80
0-\\U0002fa1d\'\n\ncats = [\'Cc\', \'Cf\', \'Cn\', \'Co\', \'Cs\', \'Ll\', \'Lm\', \'Lo\', \'Lt\', \'Lu\', \'Mc\', \'Me\', \'Mn\', \'Nd\', \'Nl\', \'No\', \'Pc\', \'Pd\', \'Pe\', \'Pf\', \'Pi\', \'Po\', \'Ps\', \'Sc\', \'Sk\', \'Sm\', \'So\', \'Zl\', \'Zp\', \'Zs\']\n\n# Generated from unidata 11.0.0\n\ndef combine(*args):\n return \'\'.join(globals()[cat] for cat in args)\n\n\ndef allexcept(*args):\n newcats = cats[:]\n for arg in args:\n newcats.remove(arg)\n return \'\'.join(globals()[cat] for cat in newcats)\n\n\ndef _handle_runs(char_list): # pragma: no cover\n buf = []\n for c in char_list:\n if len(c) == 1:\n if buf and buf[-1][1] == chr(ord(c)-1):\n buf[-1] = (buf[-1][0], c)\n else:\n buf.append((c, c))\n else:\n buf.append((c, c))\n for a, b in buf:\n if a == b:\n yield a\n else:\n yield \'%s-%s\' % (a, b)\n\n\nif __name__ == \'__main__\': # pragma: no cover\n import unicodedata\n\n categories = {\'xid_start\': [], \'xid_continue\': []}\n\n with open(__file__) as fp:\n content = fp.read()\n\n header = content[:content.find(\'Cc =\')]\n footer = content[content.find("def combine("):]\n\n for code in range(0x110000):\n c = chr(code)\n cat = unicodedata.category(c)\n if ord(c) == 0xdc00:\n # Hack to avoid combining this combining with the preceeding high\n # surrogate, 0xdbff, when doing a repr.\n c = \'\\\\\' + c\n elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):\n # Escape regex metachars.\n c = \'\\\\\' + c\n categories.setdefault(cat, []).append(c)\n # XID_START and XID_CONTINUE are special categories used for matching\n # identifiers in Python 3.\n if c.isidentifier():\n categories[\'xid_start\'].append(c)\n if (\'a\' + c).isidentifier():\n categories[\'xid_continue\'].append(c)\n\n with open(__file__, \'w\') as fp:\n fp.write(header)\n\n for cat in sorted(categories):\n val = \'\'.join(_handle_runs(categories[cat]))\n fp.write(\'%s = %a\\n\\n\' % (cat, val))\n\n cats = sorted(categories)\n cats.remove(\'xid_start\')\n cats.remove(\'xid_continue\')\n fp.write(\'cats = %r\\n\\n\' % cats)\n\n fp.write(\'# Generated from unidata %s\\n\\n\' % (unicodedata.unidata_version,))\n\n fp.write(footer)\n') + __stickytape_write_module('icecream/__version__.py', b"# -*- coding: utf-8 -*-\n\n#\n# IceCream - Never use print() to debug again\n#\n# Ansgar Grunseid\n# grunseid.com\n# grunseid@gmail.com\n#\n# License: MIT\n#\n\n__title__ = 'icecream'\n__license__ = 'MIT'\n__version__ = '2.1.0'\n__author__ = 'Ansgar Grunseid'\n__contact__ = 'grunseid@gmail.com'\n__url__ = 'https://github.com/gruns/icecream'\n__description__ = (\n 'Never use print() to debug again; inspect variables, expressions, and '\n 'program execution with a single, simple function call.')\n") + # After this module is imported and exits, stickytape deletes + # the temporary directory containing the files + # Necessary modules need to be imported before then + # asttokens is only imported dynamically by executing + import asttokens + from icecream import * + \ No newline at end of file diff --git a/single_file/site_packages_path.py b/single_file/site_packages_path.py new file mode 100644 index 0000000..dec3374 --- /dev/null +++ b/single_file/site_packages_path.py @@ -0,0 +1,9 @@ +import executing + +path = executing.__file__ +suffix = "/executing/__init__.py" +assert path.endswith(suffix) +path = path[:-len(suffix)] + +if __name__ == '__main__': + print(path) diff --git a/single_file/stickytape_entry.py b/single_file/stickytape_entry.py new file mode 100644 index 0000000..8dcca3d --- /dev/null +++ 
b/single_file/stickytape_entry.py @@ -0,0 +1,6 @@ +# After this module is imported and exits, stickytape deletes +# the temporary directory containing the files +# Necessary modules need to be imported before then +# asttokens is only imported dynamically by executing +import asttokens +from icecream import * diff --git a/single_file/test_single_file_icecream.py b/single_file/test_single_file_icecream.py new file mode 100644 index 0000000..f834c93 --- /dev/null +++ b/single_file/test_single_file_icecream.py @@ -0,0 +1,9 @@ +import sys +import site_packages_path +sys.path.remove(site_packages_path.path) +del sys.modules["executing"] +del sys.modules["executing.executing"] + +from single_file_icecream import ic + +ic(ic(1+2)) From 0b4722fc5cd288cd546a778be7696ab582f64e41 Mon Sep 17 00:00:00 2001 From: Alex Hall Date: Fri, 2 Apr 2021 16:00:42 +0200 Subject: [PATCH 2/4] Package different files for different versions --- .gitignore | 2 + single_file/compare_versions.py | 24 +++++++ single_file/icecream_py27.py | 90 ++++++++++++++++++++++++ single_file/icecream_py35.py | 85 ++++++++++++++++++++++ single_file/icecream_py36.py | 85 ++++++++++++++++++++++ single_file/icecream_py37.py | 85 ++++++++++++++++++++++ single_file/icecream_py38.py | 85 ++++++++++++++++++++++ single_file/icecream_py39.py | 85 ++++++++++++++++++++++ single_file/icecream_pypy27.py | 90 ++++++++++++++++++++++++ single_file/icecream_pypy35.py | 85 ++++++++++++++++++++++ single_file/icecream_pypy36.py | 85 ++++++++++++++++++++++ single_file/package.sh | 7 +- single_file/single_file_icecream.py | 86 ---------------------- single_file/site_packages_path.py | 4 +- single_file/test_single_file_icecream.py | 1 + tox.ini | 4 +- 16 files changed, 812 insertions(+), 91 deletions(-) create mode 100644 single_file/compare_versions.py create mode 100644 single_file/icecream_py27.py create mode 100644 single_file/icecream_py35.py create mode 100644 single_file/icecream_py36.py create mode 100644 single_file/icecream_py37.py create mode 100644 single_file/icecream_py38.py create mode 100644 single_file/icecream_py39.py create mode 100644 single_file/icecream_pypy27.py create mode 100644 single_file/icecream_pypy35.py create mode 100644 single_file/icecream_pypy36.py delete mode 100644 single_file/single_file_icecream.py diff --git a/.gitignore b/.gitignore index 5a8edc0..d0f9438 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,5 @@ build/ *.pyo *.egg *.egg-info +single_file/single_file_icecream.py +single_file/icecream_pypy.py diff --git a/single_file/compare_versions.py b/single_file/compare_versions.py new file mode 100644 index 0000000..be7b1b3 --- /dev/null +++ b/single_file/compare_versions.py @@ -0,0 +1,24 @@ +""" +Utility to see which version files differ.
+""" + +from collections import defaultdict +from glob import glob +from pathlib import Path + +result = defaultdict(list) + +for filename in glob("icecream_*.py"): + group = result[hash(Path(filename).read_text())] + group.append(filename.replace("icecream_", "").replace(".py", "")) + group.sort() + +for group in sorted(result.values()): + print(group) + +""" +The result is that the files fall into these groups: +['py27', 'pypy27'] +['py35', 'pypy35'] +['py36', 'py37', 'py38', 'py39', 'pypy36'] +""" diff --git a/single_file/icecream_py27.py b/single_file/icecream_py27.py new file mode 100644 index 0000000..a039c0b --- /dev/null +++ b/single_file/icecream_py27.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python +import contextlib as __stickytape_contextlib + +@__stickytape_contextlib.contextmanager +def __stickytape_temporary_dir(): + import tempfile + import shutil + dir_path = tempfile.mkdtemp() + try: + yield dir_path + finally: + shutil.rmtree(dir_path) + +with __stickytape_temporary_dir() as __stickytape_working_dir: + def __stickytape_write_module(path, contents): + import os, os.path + + def make_package(path): + parts = path.split("/") + partial_path = __stickytape_working_dir + for part in parts: + partial_path = os.path.join(partial_path, part) + if not os.path.exists(partial_path): + os.mkdir(partial_path) + with open(os.path.join(partial_path, "__init__.py"), "wb") as f: + f.write(b"\n") + + make_package(os.path.dirname(path)) + + full_path = os.path.join(__stickytape_working_dir, path) + with open(full_path, "wb") as module_file: + module_file.write(contents) + + import sys as __stickytape_sys + __stickytape_sys.path.insert(0, __stickytape_working_dir) + + __stickytape_write_module('asttokens/__init__.py', b'# Copyright 2016 Grist Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"""\nThis module enhances the Python AST tree with token and source code information, sufficent to\ndetect the source text of each AST node. 
This is helpful for tools that make source code\ntransformations.\n"""\n\nfrom .line_numbers import LineNumbers\nfrom .asttokens import ASTTokens\n') + __stickytape_write_module('asttokens/line_numbers.py', b'# Copyright 2016 Grist Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport bisect\nimport re\n\n_line_start_re = re.compile(r\'^\', re.M)\n\nclass LineNumbers(object):\n """\n Class to convert between character offsets in a text string, and pairs (line, column) of 1-based\n line and 0-based column numbers, as used by tokens and AST nodes.\n\n This class expects unicode for input and stores positions in unicode. But it supports\n translating to and from utf8 offsets, which are used by ast parsing.\n """\n def __init__(self, text):\n # A list of character offsets of each line\'s first character.\n self._line_offsets = [m.start(0) for m in _line_start_re.finditer(text)]\n self._text = text\n self._text_len = len(text)\n self._utf8_offset_cache = {} # maps line num to list of char offset for each byte in line\n\n def from_utf8_col(self, line, utf8_column):\n """\n Given a 1-based line number and 0-based utf8 column, returns a 0-based unicode column.\n """\n offsets = self._utf8_offset_cache.get(line)\n if offsets is None:\n end_offset = self._line_offsets[line] if line < len(self._line_offsets) else self._text_len\n line_text = self._text[self._line_offsets[line - 1] : end_offset]\n\n offsets = [i for i,c in enumerate(line_text) for byte in c.encode(\'utf8\')]\n offsets.append(len(line_text))\n self._utf8_offset_cache[line] = offsets\n\n return offsets[max(0, min(len(offsets)-1, utf8_column))]\n\n def line_to_offset(self, line, column):\n """\n Converts 1-based line number and 0-based column to 0-based character offset into text.\n """\n line -= 1\n if line >= len(self._line_offsets):\n return self._text_len\n elif line < 0:\n return 0\n else:\n return min(self._line_offsets[line] + max(0, column), self._text_len)\n\n def offset_to_line(self, offset):\n """\n Converts 0-based character offset to pair (line, col) of 1-based line and 0-based column\n numbers.\n """\n offset = max(0, min(self._text_len, offset))\n line_index = bisect.bisect_right(self._line_offsets, offset) - 1\n return (line_index + 1, offset - self._line_offsets[line_index])\n\n\n') + __stickytape_write_module('asttokens/asttokens.py', b'# Copyright 2016 Grist Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ast\nimport bisect\nimport token\nimport tokenize\nimport io\nimport six\nfrom 
six.moves import xrange # pylint: disable=redefined-builtin\nfrom .line_numbers import LineNumbers\nfrom .util import Token, match_token, is_non_coding_token\nfrom .mark_tokens import MarkTokens\n\nclass ASTTokens(object):\n """\n ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and\n as tokens, and is used to mark and access token and position information.\n\n ``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember\n that all offsets you\'ll get are to the unicode text, which is available as the ``.text``\n property.\n\n If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting\n tree marked with token info and made available as the ``.tree`` property.\n\n If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In\n addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced\n using ``astroid`` library .\n\n If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST\n tree created separately.\n """\n def __init__(self, source_text, parse=False, tree=None, filename=\'\'):\n self._filename = filename\n self._tree = ast.parse(source_text, filename) if parse else tree\n\n # Decode source after parsing to let Python 2 handle coding declarations.\n # (If the encoding was not utf-8 compatible, then even if it parses correctly,\n # we\'ll fail with a unicode error here.)\n if isinstance(source_text, six.binary_type):\n source_text = source_text.decode(\'utf8\')\n\n self._text = source_text\n self._line_numbers = LineNumbers(source_text)\n\n # Tokenize the code.\n self._tokens = list(self._generate_tokens(source_text))\n\n # Extract the start positions of all tokens, so that we can quickly map positions to tokens.\n self._token_offsets = [tok.startpos for tok in self._tokens]\n\n if self._tree:\n self.mark_tokens(self._tree)\n\n\n def mark_tokens(self, root_node):\n """\n Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking\n them with token and position information by adding ``.first_token`` and\n ``.last_token``attributes. This is done automatically in the constructor when ``parse`` or\n ``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.\n """\n # The hard work of this class is done by MarkTokens\n MarkTokens(self).visit_tree(root_node)\n\n\n def _generate_tokens(self, text):\n """\n Generates tokens for the given code.\n """\n # This is technically an undocumented API for Python3, but allows us to use the same API as for\n # Python2. 
See http://stackoverflow.com/a/4952291/328565.\n for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):\n tok_type, tok_str, start, end, line = tok\n yield Token(tok_type, tok_str, start, end, line, index,\n self._line_numbers.line_to_offset(start[0], start[1]),\n self._line_numbers.line_to_offset(end[0], end[1]))\n\n @property\n def text(self):\n """The source code passed into the constructor."""\n return self._text\n\n @property\n def tokens(self):\n """The list of tokens corresponding to the source code from the constructor."""\n return self._tokens\n\n @property\n def tree(self):\n """The root of the AST tree passed into the constructor or parsed from the source code."""\n return self._tree\n\n @property\n def filename(self):\n """The filename that was parsed"""\n return self._filename\n\n def get_token_from_offset(self, offset):\n """\n Returns the token containing the given character offset (0-based position in source text),\n or the preceeding token if the position is between tokens.\n """\n return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]\n\n def get_token(self, lineno, col_offset):\n """\n Returns the token containing the given (lineno, col_offset) position, or the preceeding token\n if the position is between tokens.\n """\n # TODO: add test for multibyte unicode. We need to translate offsets from ast module (which\n # are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets\n # but isn\'t explicit.\n return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))\n\n def get_token_from_utf8(self, lineno, col_offset):\n """\n Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.\n """\n return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))\n\n def next_token(self, tok, include_extra=False):\n """\n Returns the next token after the given one. If include_extra is True, includes non-coding\n tokens from the tokenize module, such as NL and COMMENT.\n """\n i = tok.index + 1\n if not include_extra:\n while is_non_coding_token(self._tokens[i].type):\n i += 1\n return self._tokens[i]\n\n def prev_token(self, tok, include_extra=False):\n """\n Returns the previous token before the given one. If include_extra is True, includes non-coding\n tokens from the tokenize module, such as NL and COMMENT.\n """\n i = tok.index - 1\n if not include_extra:\n while is_non_coding_token(self._tokens[i].type):\n i -= 1\n return self._tokens[i]\n\n def find_token(self, start_token, tok_type, tok_str=None, reverse=False):\n """\n Looks for the first token, starting at start_token, that matches tok_type and, if given, the\n token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you\n can check it with `token.ISEOF(t.type)`.\n """\n t = start_token\n advance = self.prev_token if reverse else self.next_token\n while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):\n t = advance(t, include_extra=True)\n return t\n\n def token_range(self, first_token, last_token, include_extra=False):\n """\n Yields all tokens in order from first_token through and including last_token. 
If\n include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.\n """\n for i in xrange(first_token.index, last_token.index + 1):\n if include_extra or not is_non_coding_token(self._tokens[i].type):\n yield self._tokens[i]\n\n def get_tokens(self, node, include_extra=False):\n """\n Yields all tokens making up the given node. If include_extra is True, includes non-coding\n tokens such as tokenize.NL and .COMMENT.\n """\n return self.token_range(node.first_token, node.last_token, include_extra=include_extra)\n\n def get_text_range(self, node):\n """\n After mark_tokens() has been called, returns the (startpos, endpos) positions in source text\n corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don\'t correspond\n to any particular text.\n """\n if not hasattr(node, \'first_token\'):\n return (0, 0)\n\n start = node.first_token.startpos\n if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):\n # Multi-line nodes would be invalid unless we keep the indentation of the first node.\n start = self._text.rfind(\'\\n\', 0, start) + 1\n\n return (start, node.last_token.endpos)\n\n def get_text(self, node):\n """\n After mark_tokens() has been called, returns the text corresponding to the given node. Returns\n \'\' for nodes (like `Load`) that don\'t correspond to any particular text.\n """\n start, end = self.get_text_range(node)\n return self._text[start : end]\n') + __stickytape_write_module('six.py', b'# Copyright (c) 2010-2020 Benjamin Peterson\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the "Software"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n"""Utilities for writing code that runs on Python 2 and 3"""\n\nfrom __future__ import absolute_import\n\nimport functools\nimport itertools\nimport operator\nimport sys\nimport types\n\n__author__ = "Benjamin Peterson "\n__version__ = "1.15.0"\n\n\n# Useful for very coarse version differentiation.\nPY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\nPY34 = sys.version_info[0:2] >= (3, 4)\n\nif PY3:\n string_types = str,\n integer_types = int,\n class_types = type,\n text_type = str\n binary_type = bytes\n\n MAXSIZE = sys.maxsize\nelse:\n string_types = basestring,\n integer_types = (int, long)\n class_types = (type, types.ClassType)\n text_type = unicode\n binary_type = str\n\n if sys.platform.startswith("java"):\n # Jython always uses 32 bits.\n MAXSIZE = int((1 << 31) - 1)\n else:\n # It\'s possible to have sizeof(long) != sizeof(Py_ssize_t).\n class X(object):\n\n def __len__(self):\n return 1 << 31\n try:\n len(X())\n except OverflowError:\n # 32-bit\n MAXSIZE = int((1 << 31) - 1)\n else:\n # 64-bit\n MAXSIZE = int((1 << 63) - 1)\n del X\n\n\ndef _add_doc(func, doc):\n """Add documentation to a function."""\n func.__doc__ = doc\n\n\ndef _import_module(name):\n """Import module, returning the module after the last dot."""\n __import__(name)\n return sys.modules[name]\n\n\nclass _LazyDescr(object):\n\n def __init__(self, name):\n self.name = name\n\n def __get__(self, obj, tp):\n result = self._resolve()\n setattr(obj, self.name, result) # Invokes __set__.\n try:\n # This is a bit ugly, but it avoids running this again by\n # removing this descriptor.\n delattr(obj.__class__, self.name)\n except AttributeError:\n pass\n return result\n\n\nclass MovedModule(_LazyDescr):\n\n def __init__(self, name, old, new=None):\n super(MovedModule, self).__init__(name)\n if PY3:\n if new is None:\n new = name\n self.mod = new\n else:\n self.mod = old\n\n def _resolve(self):\n return _import_module(self.mod)\n\n def __getattr__(self, attr):\n _module = self._resolve()\n value = getattr(_module, attr)\n setattr(self, attr, value)\n return value\n\n\nclass _LazyModule(types.ModuleType):\n\n def __init__(self, name):\n super(_LazyModule, self).__init__(name)\n self.__doc__ = self.__class__.__doc__\n\n def __dir__(self):\n attrs = ["__doc__", "__name__"]\n attrs += [attr.name for attr in self._moved_attributes]\n return attrs\n\n # Subclasses should override this\n _moved_attributes = []\n\n\nclass MovedAttribute(_LazyDescr):\n\n def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):\n super(MovedAttribute, self).__init__(name)\n if PY3:\n if new_mod is None:\n new_mod = name\n self.mod = new_mod\n if new_attr is None:\n if old_attr is None:\n new_attr = name\n else:\n new_attr = old_attr\n self.attr = new_attr\n else:\n self.mod = old_mod\n if old_attr is None:\n old_attr = name\n self.attr = old_attr\n\n def _resolve(self):\n module = _import_module(self.mod)\n return getattr(module, self.attr)\n\n\nclass _SixMetaPathImporter(object):\n\n """\n A meta path importer to import six.moves and its submodules.\n\n This class implements a PEP302 finder and loader. 
It should be compatible\n with Python 2.5 and all existing versions of Python3\n """\n\n def __init__(self, six_module_name):\n self.name = six_module_name\n self.known_modules = {}\n\n def _add_module(self, mod, *fullnames):\n for fullname in fullnames:\n self.known_modules[self.name + "." + fullname] = mod\n\n def _get_module(self, fullname):\n return self.known_modules[self.name + "." + fullname]\n\n def find_module(self, fullname, path=None):\n if fullname in self.known_modules:\n return self\n return None\n\n def __get_module(self, fullname):\n try:\n return self.known_modules[fullname]\n except KeyError:\n raise ImportError("This loader does not know module " + fullname)\n\n def load_module(self, fullname):\n try:\n # in case of a reload\n return sys.modules[fullname]\n except KeyError:\n pass\n mod = self.__get_module(fullname)\n if isinstance(mod, MovedModule):\n mod = mod._resolve()\n else:\n mod.__loader__ = self\n sys.modules[fullname] = mod\n return mod\n\n def is_package(self, fullname):\n """\n Return true, if the named module is a package.\n\n We need this method to get correct spec objects with\n Python 3.4 (see PEP451)\n """\n return hasattr(self.__get_module(fullname), "__path__")\n\n def get_code(self, fullname):\n """Return None\n\n Required, if is_package is implemented"""\n self.__get_module(fullname) # eventually raises ImportError\n return None\n get_source = get_code # same as get_code\n\n_importer = _SixMetaPathImporter(__name__)\n\n\nclass _MovedItems(_LazyModule):\n\n """Lazy loading of moved objects"""\n __path__ = [] # mark as package\n\n\n_moved_attributes = [\n MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),\n MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),\n MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),\n MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),\n MovedAttribute("intern", "__builtin__", "sys"),\n MovedAttribute("map", "itertools", "builtins", "imap", "map"),\n MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),\n MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),\n MovedAttribute("getoutput", "commands", "subprocess"),\n MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),\n MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),\n MovedAttribute("reduce", "__builtin__", "functools"),\n MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),\n MovedAttribute("StringIO", "StringIO", "io"),\n MovedAttribute("UserDict", "UserDict", "collections"),\n MovedAttribute("UserList", "UserList", "collections"),\n MovedAttribute("UserString", "UserString", "collections"),\n MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),\n MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),\n MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),\n MovedModule("builtins", "__builtin__"),\n MovedModule("configparser", "ConfigParser"),\n MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),\n MovedModule("copyreg", "copy_reg"),\n MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),\n MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),\n MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),\n MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),\n MovedModule("http_cookies", "Cookie", "http.cookies"),\n MovedModule("html_entities", 
"htmlentitydefs", "html.entities"),\n MovedModule("html_parser", "HTMLParser", "html.parser"),\n MovedModule("http_client", "httplib", "http.client"),\n MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),\n MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),\n MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),\n MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),\n MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),\n MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),\n MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),\n MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),\n MovedModule("cPickle", "cPickle", "pickle"),\n MovedModule("queue", "Queue"),\n MovedModule("reprlib", "repr"),\n MovedModule("socketserver", "SocketServer"),\n MovedModule("_thread", "thread", "_thread"),\n MovedModule("tkinter", "Tkinter"),\n MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),\n MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),\n MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),\n MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),\n MovedModule("tkinter_tix", "Tix", "tkinter.tix"),\n MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),\n MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),\n MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),\n MovedModule("tkinter_colorchooser", "tkColorChooser",\n "tkinter.colorchooser"),\n MovedModule("tkinter_commondialog", "tkCommonDialog",\n "tkinter.commondialog"),\n MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),\n MovedModule("tkinter_font", "tkFont", "tkinter.font"),\n MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),\n MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",\n "tkinter.simpledialog"),\n MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),\n MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),\n MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),\n MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),\n MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),\n MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),\n]\n# Add windows specific modules.\nif sys.platform == "win32":\n _moved_attributes += [\n MovedModule("winreg", "_winreg"),\n ]\n\nfor attr in _moved_attributes:\n setattr(_MovedItems, attr.name, attr)\n if isinstance(attr, MovedModule):\n _importer._add_module(attr, "moves." 
+ attr.name)\ndel attr\n\n_MovedItems._moved_attributes = _moved_attributes\n\nmoves = _MovedItems(__name__ + ".moves")\n_importer._add_module(moves, "moves")\n\n\nclass Module_six_moves_urllib_parse(_LazyModule):\n\n """Lazy loading of moved objects in six.moves.urllib_parse"""\n\n\n_urllib_parse_moved_attributes = [\n MovedAttribute("ParseResult", "urlparse", "urllib.parse"),\n MovedAttribute("SplitResult", "urlparse", "urllib.parse"),\n MovedAttribute("parse_qs", "urlparse", "urllib.parse"),\n MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),\n MovedAttribute("urldefrag", "urlparse", "urllib.parse"),\n MovedAttribute("urljoin", "urlparse", "urllib.parse"),\n MovedAttribute("urlparse", "urlparse", "urllib.parse"),\n MovedAttribute("urlsplit", "urlparse", "urllib.parse"),\n MovedAttribute("urlunparse", "urlparse", "urllib.parse"),\n MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),\n MovedAttribute("quote", "urllib", "urllib.parse"),\n MovedAttribute("quote_plus", "urllib", "urllib.parse"),\n MovedAttribute("unquote", "urllib", "urllib.parse"),\n MovedAttribute("unquote_plus", "urllib", "urllib.parse"),\n MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),\n MovedAttribute("urlencode", "urllib", "urllib.parse"),\n MovedAttribute("splitquery", "urllib", "urllib.parse"),\n MovedAttribute("splittag", "urllib", "urllib.parse"),\n MovedAttribute("splituser", "urllib", "urllib.parse"),\n MovedAttribute("splitvalue", "urllib", "urllib.parse"),\n MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),\n MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),\n MovedAttribute("uses_params", "urlparse", "urllib.parse"),\n MovedAttribute("uses_query", "urlparse", "urllib.parse"),\n MovedAttribute("uses_relative", "urlparse", "urllib.parse"),\n]\nfor attr in _urllib_parse_moved_attributes:\n setattr(Module_six_moves_urllib_parse, attr.name, attr)\ndel attr\n\nModule_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes\n\n_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),\n "moves.urllib_parse", "moves.urllib.parse")\n\n\nclass Module_six_moves_urllib_error(_LazyModule):\n\n """Lazy loading of moved objects in six.moves.urllib_error"""\n\n\n_urllib_error_moved_attributes = [\n MovedAttribute("URLError", "urllib2", "urllib.error"),\n MovedAttribute("HTTPError", "urllib2", "urllib.error"),\n MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),\n]\nfor attr in _urllib_error_moved_attributes:\n setattr(Module_six_moves_urllib_error, attr.name, attr)\ndel attr\n\nModule_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes\n\n_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),\n "moves.urllib_error", "moves.urllib.error")\n\n\nclass Module_six_moves_urllib_request(_LazyModule):\n\n """Lazy loading of moved objects in six.moves.urllib_request"""\n\n\n_urllib_request_moved_attributes = [\n MovedAttribute("urlopen", "urllib2", "urllib.request"),\n MovedAttribute("install_opener", "urllib2", "urllib.request"),\n MovedAttribute("build_opener", "urllib2", "urllib.request"),\n MovedAttribute("pathname2url", "urllib", "urllib.request"),\n MovedAttribute("url2pathname", "urllib", "urllib.request"),\n MovedAttribute("getproxies", "urllib", "urllib.request"),\n MovedAttribute("Request", "urllib2", "urllib.request"),\n MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),\n MovedAttribute("HTTPDefaultErrorHandler", 
"urllib2", "urllib.request"),\n MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),\n MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),\n MovedAttribute("BaseHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),\n MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),\n MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),\n MovedAttribute("FileHandler", "urllib2", "urllib.request"),\n MovedAttribute("FTPHandler", "urllib2", "urllib.request"),\n MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),\n MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),\n MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),\n MovedAttribute("urlretrieve", "urllib", "urllib.request"),\n MovedAttribute("urlcleanup", "urllib", "urllib.request"),\n MovedAttribute("URLopener", "urllib", "urllib.request"),\n MovedAttribute("FancyURLopener", "urllib", "urllib.request"),\n MovedAttribute("proxy_bypass", "urllib", "urllib.request"),\n MovedAttribute("parse_http_list", "urllib2", "urllib.request"),\n MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),\n]\nfor attr in _urllib_request_moved_attributes:\n setattr(Module_six_moves_urllib_request, attr.name, attr)\ndel attr\n\nModule_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes\n\n_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),\n "moves.urllib_request", "moves.urllib.request")\n\n\nclass Module_six_moves_urllib_response(_LazyModule):\n\n """Lazy loading of moved objects in six.moves.urllib_response"""\n\n\n_urllib_response_moved_attributes = [\n MovedAttribute("addbase", "urllib", "urllib.response"),\n MovedAttribute("addclosehook", "urllib", "urllib.response"),\n MovedAttribute("addinfo", "urllib", "urllib.response"),\n MovedAttribute("addinfourl", "urllib", "urllib.response"),\n]\nfor attr in _urllib_response_moved_attributes:\n setattr(Module_six_moves_urllib_response, attr.name, attr)\ndel attr\n\nModule_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes\n\n_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),\n "moves.urllib_response", "moves.urllib.response")\n\n\nclass Module_six_moves_urllib_robotparser(_LazyModule):\n\n """Lazy loading of moved objects in six.moves.urllib_robotparser"""\n\n\n_urllib_robotparser_moved_attributes = [\n MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),\n]\nfor attr in _urllib_robotparser_moved_attributes:\n setattr(Module_six_moves_urllib_robotparser, attr.name, attr)\ndel attr\n\nModule_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes\n\n_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),\n "moves.urllib_robotparser", "moves.urllib.robotparser")\n\n\nclass 
Module_six_moves_urllib(types.ModuleType):\n\n """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""\n __path__ = [] # mark as package\n parse = _importer._get_module("moves.urllib_parse")\n error = _importer._get_module("moves.urllib_error")\n request = _importer._get_module("moves.urllib_request")\n response = _importer._get_module("moves.urllib_response")\n robotparser = _importer._get_module("moves.urllib_robotparser")\n\n def __dir__(self):\n return [\'parse\', \'error\', \'request\', \'response\', \'robotparser\']\n\n_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),\n "moves.urllib")\n\n\ndef add_move(move):\n """Add an item to six.moves."""\n setattr(_MovedItems, move.name, move)\n\n\ndef remove_move(name):\n """Remove item from six.moves."""\n try:\n delattr(_MovedItems, name)\n except AttributeError:\n try:\n del moves.__dict__[name]\n except KeyError:\n raise AttributeError("no such move, %r" % (name,))\n\n\nif PY3:\n _meth_func = "__func__"\n _meth_self = "__self__"\n\n _func_closure = "__closure__"\n _func_code = "__code__"\n _func_defaults = "__defaults__"\n _func_globals = "__globals__"\nelse:\n _meth_func = "im_func"\n _meth_self = "im_self"\n\n _func_closure = "func_closure"\n _func_code = "func_code"\n _func_defaults = "func_defaults"\n _func_globals = "func_globals"\n\n\ntry:\n advance_iterator = next\nexcept NameError:\n def advance_iterator(it):\n return it.next()\nnext = advance_iterator\n\n\ntry:\n callable = callable\nexcept NameError:\n def callable(obj):\n return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)\n\n\nif PY3:\n def get_unbound_function(unbound):\n return unbound\n\n create_bound_method = types.MethodType\n\n def create_unbound_method(func, cls):\n return func\n\n Iterator = object\nelse:\n def get_unbound_function(unbound):\n return unbound.im_func\n\n def create_bound_method(func, obj):\n return types.MethodType(func, obj, obj.__class__)\n\n def create_unbound_method(func, cls):\n return types.MethodType(func, None, cls)\n\n class Iterator(object):\n\n def next(self):\n return type(self).__next__(self)\n\n callable = callable\n_add_doc(get_unbound_function,\n """Get the function out of a possibly unbound function""")\n\n\nget_method_function = operator.attrgetter(_meth_func)\nget_method_self = operator.attrgetter(_meth_self)\nget_function_closure = operator.attrgetter(_func_closure)\nget_function_code = operator.attrgetter(_func_code)\nget_function_defaults = operator.attrgetter(_func_defaults)\nget_function_globals = operator.attrgetter(_func_globals)\n\n\nif PY3:\n def iterkeys(d, **kw):\n return iter(d.keys(**kw))\n\n def itervalues(d, **kw):\n return iter(d.values(**kw))\n\n def iteritems(d, **kw):\n return iter(d.items(**kw))\n\n def iterlists(d, **kw):\n return iter(d.lists(**kw))\n\n viewkeys = operator.methodcaller("keys")\n\n viewvalues = operator.methodcaller("values")\n\n viewitems = operator.methodcaller("items")\nelse:\n def iterkeys(d, **kw):\n return d.iterkeys(**kw)\n\n def itervalues(d, **kw):\n return d.itervalues(**kw)\n\n def iteritems(d, **kw):\n return d.iteritems(**kw)\n\n def iterlists(d, **kw):\n return d.iterlists(**kw)\n\n viewkeys = operator.methodcaller("viewkeys")\n\n viewvalues = operator.methodcaller("viewvalues")\n\n viewitems = operator.methodcaller("viewitems")\n\n_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")\n_add_doc(itervalues, "Return an iterator over the values of a dictionary.")\n_add_doc(iteritems,\n "Return an 
iterator over the (key, value) pairs of a dictionary.")\n_add_doc(iterlists,\n "Return an iterator over the (key, [values]) pairs of a dictionary.")\n\n\nif PY3:\n def b(s):\n return s.encode("latin-1")\n\n def u(s):\n return s\n unichr = chr\n import struct\n int2byte = struct.Struct(">B").pack\n del struct\n byte2int = operator.itemgetter(0)\n indexbytes = operator.getitem\n iterbytes = iter\n import io\n StringIO = io.StringIO\n BytesIO = io.BytesIO\n del io\n _assertCountEqual = "assertCountEqual"\n if sys.version_info[1] <= 1:\n _assertRaisesRegex = "assertRaisesRegexp"\n _assertRegex = "assertRegexpMatches"\n _assertNotRegex = "assertNotRegexpMatches"\n else:\n _assertRaisesRegex = "assertRaisesRegex"\n _assertRegex = "assertRegex"\n _assertNotRegex = "assertNotRegex"\nelse:\n def b(s):\n return s\n # Workaround for standalone backslash\n\n def u(s):\n return unicode(s.replace(r\'\\\\\', r\'\\\\\\\\\'), "unicode_escape")\n unichr = unichr\n int2byte = chr\n\n def byte2int(bs):\n return ord(bs[0])\n\n def indexbytes(buf, i):\n return ord(buf[i])\n iterbytes = functools.partial(itertools.imap, ord)\n import StringIO\n StringIO = BytesIO = StringIO.StringIO\n _assertCountEqual = "assertItemsEqual"\n _assertRaisesRegex = "assertRaisesRegexp"\n _assertRegex = "assertRegexpMatches"\n _assertNotRegex = "assertNotRegexpMatches"\n_add_doc(b, """Byte literal""")\n_add_doc(u, """Text literal""")\n\n\ndef assertCountEqual(self, *args, **kwargs):\n return getattr(self, _assertCountEqual)(*args, **kwargs)\n\n\ndef assertRaisesRegex(self, *args, **kwargs):\n return getattr(self, _assertRaisesRegex)(*args, **kwargs)\n\n\ndef assertRegex(self, *args, **kwargs):\n return getattr(self, _assertRegex)(*args, **kwargs)\n\n\ndef assertNotRegex(self, *args, **kwargs):\n return getattr(self, _assertNotRegex)(*args, **kwargs)\n\n\nif PY3:\n exec_ = getattr(moves.builtins, "exec")\n\n def reraise(tp, value, tb=None):\n try:\n if value is None:\n value = tp()\n if value.__traceback__ is not tb:\n raise value.with_traceback(tb)\n raise value\n finally:\n value = None\n tb = None\n\nelse:\n def exec_(_code_, _globs_=None, _locs_=None):\n """Execute code in a namespace."""\n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if _locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec("""exec _code_ in _globs_, _locs_""")\n\n exec_("""def reraise(tp, value, tb=None):\n try:\n raise tp, value, tb\n finally:\n tb = None\n""")\n\n\nif sys.version_info[:2] > (3,):\n exec_("""def raise_from(value, from_value):\n try:\n raise value from from_value\n finally:\n value = None\n""")\nelse:\n def raise_from(value, from_value):\n raise value\n\n\nprint_ = getattr(moves.builtins, "print", None)\nif print_ is None:\n def print_(*args, **kwargs):\n """The new-style print function for Python 2.4 and 2.5."""\n fp = kwargs.pop("file", sys.stdout)\n if fp is None:\n return\n\n def write(data):\n if not isinstance(data, basestring):\n data = str(data)\n # If the file has an encoding, encode unicode with it.\n if (isinstance(fp, file) and\n isinstance(data, unicode) and\n fp.encoding is not None):\n errors = getattr(fp, "errors", None)\n if errors is None:\n errors = "strict"\n data = data.encode(fp.encoding, errors)\n fp.write(data)\n want_unicode = False\n sep = kwargs.pop("sep", None)\n if sep is not None:\n if isinstance(sep, unicode):\n want_unicode = True\n elif not isinstance(sep, str):\n raise TypeError("sep must be None or a string")\n end = 
kwargs.pop("end", None)\n if end is not None:\n if isinstance(end, unicode):\n want_unicode = True\n elif not isinstance(end, str):\n raise TypeError("end must be None or a string")\n if kwargs:\n raise TypeError("invalid keyword arguments to print()")\n if not want_unicode:\n for arg in args:\n if isinstance(arg, unicode):\n want_unicode = True\n break\n if want_unicode:\n newline = unicode("\\n")\n space = unicode(" ")\n else:\n newline = "\\n"\n space = " "\n if sep is None:\n sep = space\n if end is None:\n end = newline\n for i, arg in enumerate(args):\n if i:\n write(sep)\n write(arg)\n write(end)\nif sys.version_info[:2] < (3, 3):\n _print = print_\n\n def print_(*args, **kwargs):\n fp = kwargs.get("file", sys.stdout)\n flush = kwargs.pop("flush", False)\n _print(*args, **kwargs)\n if flush and fp is not None:\n fp.flush()\n\n_add_doc(reraise, """Reraise an exception.""")\n\nif sys.version_info[0:2] < (3, 4):\n # This does exactly the same what the :func:`py3:functools.update_wrapper`\n # function does on Python versions after 3.2. It sets the ``__wrapped__``\n # attribute on ``wrapper`` object and it doesn\'t raise an error if any of\n # the attributes mentioned in ``assigned`` and ``updated`` are missing on\n # ``wrapped`` object.\n def _update_wrapper(wrapper, wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES):\n for attr in assigned:\n try:\n value = getattr(wrapped, attr)\n except AttributeError:\n continue\n else:\n setattr(wrapper, attr, value)\n for attr in updated:\n getattr(wrapper, attr).update(getattr(wrapped, attr, {}))\n wrapper.__wrapped__ = wrapped\n return wrapper\n _update_wrapper.__doc__ = functools.update_wrapper.__doc__\n\n def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES):\n return functools.partial(_update_wrapper, wrapped=wrapped,\n assigned=assigned, updated=updated)\n wraps.__doc__ = functools.wraps.__doc__\n\nelse:\n wraps = functools.wraps\n\n\ndef with_metaclass(meta, *bases):\n """Create a base class with a metaclass."""\n # This requires a bit of explanation: the basic idea is to make a dummy\n # metaclass for one level of class instantiation that replaces itself with\n # the actual metaclass.\n class metaclass(type):\n\n def __new__(cls, name, this_bases, d):\n if sys.version_info[:2] >= (3, 7):\n # This version introduced PEP 560 that requires a bit\n # of extra care (we mimic what is done by __build_class__).\n resolved_bases = types.resolve_bases(bases)\n if resolved_bases is not bases:\n d[\'__orig_bases__\'] = bases\n else:\n resolved_bases = bases\n return meta(name, resolved_bases, d)\n\n @classmethod\n def __prepare__(cls, name, this_bases):\n return meta.__prepare__(name, bases)\n return type.__new__(metaclass, \'temporary_class\', (), {})\n\n\ndef add_metaclass(metaclass):\n """Class decorator for creating a class with a metaclass."""\n def wrapper(cls):\n orig_vars = cls.__dict__.copy()\n slots = orig_vars.get(\'__slots__\')\n if slots is not None:\n if isinstance(slots, str):\n slots = [slots]\n for slots_var in slots:\n orig_vars.pop(slots_var)\n orig_vars.pop(\'__dict__\', None)\n orig_vars.pop(\'__weakref__\', None)\n if hasattr(cls, \'__qualname__\'):\n orig_vars[\'__qualname__\'] = cls.__qualname__\n return metaclass(cls.__name__, cls.__bases__, orig_vars)\n return wrapper\n\n\ndef ensure_binary(s, encoding=\'utf-8\', errors=\'strict\'):\n """Coerce **s** to six.binary_type.\n\n For Python 2:\n - `unicode` -> encoded to `str`\n - `str` -> `str`\n\n For 
Python 3:\n - `str` -> encoded to `bytes`\n - `bytes` -> `bytes`\n """\n if isinstance(s, binary_type):\n return s\n if isinstance(s, text_type):\n return s.encode(encoding, errors)\n raise TypeError("not expecting type \'%s\'" % type(s))\n\n\ndef ensure_str(s, encoding=\'utf-8\', errors=\'strict\'):\n """Coerce *s* to `str`.\n\n For Python 2:\n - `unicode` -> encoded to `str`\n - `str` -> `str`\n\n For Python 3:\n - `str` -> `str`\n - `bytes` -> decoded to `str`\n """\n # Optimization: Fast return for the common case.\n if type(s) is str:\n return s\n if PY2 and isinstance(s, text_type):\n return s.encode(encoding, errors)\n elif PY3 and isinstance(s, binary_type):\n return s.decode(encoding, errors)\n elif not isinstance(s, (text_type, binary_type)):\n raise TypeError("not expecting type \'%s\'" % type(s))\n return s\n\n\ndef ensure_text(s, encoding=\'utf-8\', errors=\'strict\'):\n """Coerce *s* to six.text_type.\n\n For Python 2:\n - `unicode` -> `unicode`\n - `str` -> `unicode`\n\n For Python 3:\n - `str` -> `str`\n - `bytes` -> decoded to `str`\n """\n if isinstance(s, binary_type):\n return s.decode(encoding, errors)\n elif isinstance(s, text_type):\n return s\n else:\n raise TypeError("not expecting type \'%s\'" % type(s))\n\n\ndef python_2_unicode_compatible(klass):\n """\n A class decorator that defines __unicode__ and __str__ methods under Python 2.\n Under Python 3 it does nothing.\n\n To support Python 2 and 3 with a single code base, define a __str__ method\n returning text and apply this decorator to the class.\n """\n if PY2:\n if \'__str__\' not in klass.__dict__:\n raise ValueError("@python_2_unicode_compatible cannot be applied "\n "to %s because it doesn\'t define __str__()." %\n klass.__name__)\n klass.__unicode__ = klass.__str__\n klass.__str__ = lambda self: self.__unicode__().encode(\'utf-8\')\n return klass\n\n\n# Complete the moves implementation.\n# This code is at the end of this module to speed up module loading.\n# Turn this module into a package.\n__path__ = [] # required for PEP 302 and PEP 451\n__package__ = __name__ # see PEP 366 @ReservedAssignment\nif globals().get("__spec__") is not None:\n __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable\n# Remove other six meta path importers, since they cause problems. This can\n# happen if six is removed from sys.modules and then reloaded. (Setuptools does\n# this for some reason.)\nif sys.meta_path:\n for i, importer in enumerate(sys.meta_path):\n # Here\'s some real nastiness: Another "instance" of the six module might\n # be floating around. 
Therefore, we can\'t use isinstance() to check for\n # the six meta path importer, since the other six instance will have\n # inserted an importer with different class.\n if (type(importer).__name__ == "_SixMetaPathImporter" and\n importer.name == __name__):\n del sys.meta_path[i]\n break\n del i, importer\n# Finally, add the importer to the meta path import hook.\nsys.meta_path.append(_importer)\n') + __stickytape_write_module('asttokens/util.py', b'# Copyright 2016 Grist Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ast\nimport collections\nimport token\nfrom six import iteritems\n\n\ndef token_repr(tok_type, string):\n """Returns a human-friendly representation of a token with the given type and string."""\n # repr() prefixes unicode with \'u\' on Python2 but not Python3; strip it out for consistency.\n return \'%s:%s\' % (token.tok_name[tok_type], repr(string).lstrip(\'u\'))\n\n\nclass Token(collections.namedtuple(\'Token\', \'type string start end line index startpos endpos\')):\n """\n TokenInfo is an 8-tuple containing the same 5 fields as the tokens produced by the tokenize\n module, and 3 additional ones useful for this module:\n\n - [0] .type Token type (see token.py)\n - [1] .string Token (a string)\n - [2] .start Starting (row, column) indices of the token (a 2-tuple of ints)\n - [3] .end Ending (row, column) indices of the token (a 2-tuple of ints)\n - [4] .line Original line (string)\n - [5] .index Index of the token in the list of tokens that it belongs to.\n - [6] .startpos Starting character offset into the input text.\n - [7] .endpos Ending character offset into the input text.\n """\n def __str__(self):\n return token_repr(self.type, self.string)\n\n\ndef match_token(token, tok_type, tok_str=None):\n """Returns true if token is of the given type and, if a string is given, has that string."""\n return token.type == tok_type and (tok_str is None or token.string == tok_str)\n\n\ndef expect_token(token, tok_type, tok_str=None):\n """\n Verifies that the given token is of the expected type. If tok_str is given, the token string\n is verified too. If the token doesn\'t match, raises an informative ValueError.\n """\n if not match_token(token, tok_type, tok_str):\n raise ValueError("Expected token %s, got %s on line %s col %s" % (\n token_repr(tok_type, tok_str), str(token),\n token.start[0], token.start[1] + 1))\n\n# These were previously defined in tokenize.py and distinguishable by being greater than\n# token.N_TOKEN. 
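(Illustrative aside, not from the original source: older ``tokenize`` modules defined\n# ``COMMENT = N_TOKENS`` and ``NL = N_TOKENS + 1``, which is what the ``>= token.N_TOKENS``\n# fallback below relies on.)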
As of python3.7, they are in token.py, and we check for them explicitly.\nif hasattr(token, \'ENCODING\'):\n def is_non_coding_token(token_type):\n """\n These are considered non-coding tokens, as they don\'t affect the syntax tree.\n """\n return token_type in (token.NL, token.COMMENT, token.ENCODING)\nelse:\n def is_non_coding_token(token_type):\n """\n These are considered non-coding tokens, as they don\'t affect the syntax tree.\n """\n return token_type >= token.N_TOKENS\n\n\ndef iter_children_func(node):\n """\n Returns a function which yields all direct children of an AST node,\n skipping children that are singleton nodes.\n The function depends on whether ``node`` is from ``ast`` or from the ``astroid`` module.\n """\n return iter_children_astroid if hasattr(node, \'get_children\') else iter_children_ast\n\n\ndef iter_children_astroid(node):\n # Don\'t attempt to process children of JoinedStr nodes, which we can\'t fully handle yet.\n if is_joined_str(node):\n return []\n\n return node.get_children()\n\n\nSINGLETONS = {c for n, c in iteritems(ast.__dict__) if isinstance(c, type) and\n issubclass(c, (ast.expr_context, ast.boolop, ast.operator, ast.unaryop, ast.cmpop))}\n\ndef iter_children_ast(node):\n # Don\'t attempt to process children of JoinedStr nodes, which we can\'t fully handle yet.\n if is_joined_str(node):\n return\n\n if isinstance(node, ast.Dict):\n # override the iteration order: instead of <all keys>, <all values>,\n # yield keys and values in source order (key1, value1, key2, value2, ...)\n for (key, value) in zip(node.keys, node.values):\n if key is not None:\n yield key\n yield value\n return\n\n for child in ast.iter_child_nodes(node):\n # Skip singleton children; they don\'t reflect particular positions in the code and break the\n # assumptions about the tree consisting of distinct nodes. Note that collecting classes\n # beforehand and checking them in a set is faster than using isinstance each time.\n if child.__class__ not in SINGLETONS:\n yield child\n\n\nstmt_class_names = {n for n, c in iteritems(ast.__dict__)\n if isinstance(c, type) and issubclass(c, ast.stmt)}\nexpr_class_names = ({n for n, c in iteritems(ast.__dict__)\n if isinstance(c, type) and issubclass(c, ast.expr)} |\n {\'AssignName\', \'DelName\', \'Const\', \'AssignAttr\', \'DelAttr\'})\n\n# These feel hacky compared to isinstance() but allow us to work with both ast and astroid nodes\n# in the same way, and without even importing astroid.\ndef is_expr(node):\n """Returns whether node is an expression node."""\n return node.__class__.__name__ in expr_class_names\n\ndef is_stmt(node):\n """Returns whether node is a statement node."""\n return node.__class__.__name__ in stmt_class_names\n\ndef is_module(node):\n """Returns whether node is a module node."""\n return node.__class__.__name__ == \'Module\'\n\ndef is_joined_str(node):\n """Returns whether node is a JoinedStr node, used to represent f-strings."""\n # At the moment, nodes below JoinedStr have wrong line/col info, and trying to process them only\n # leads to errors.\n return node.__class__.__name__ == \'JoinedStr\'\n\n\ndef is_slice(node):\n """Returns whether node represents a slice, e.g. 
`1:2` in `x[1:2]`"""\n # Before 3.9, a tuple containing a slice is an ExtSlice,\n # but this was removed in https://bugs.python.org/issue34822\n return (\n node.__class__.__name__ in (\'Slice\', \'ExtSlice\')\n or (\n node.__class__.__name__ == \'Tuple\'\n and any(map(is_slice, node.elts))\n )\n )\n\n\n# Sentinel value used by visit_tree().\n_PREVISIT = object()\n\ndef visit_tree(node, previsit, postvisit):\n """\n Scans the tree under the node depth-first using an explicit stack. It avoids implicit recursion\n via the function call stack to avoid hitting \'maximum recursion depth exceeded\' error.\n\n It calls ``previsit()`` and ``postvisit()`` as follows:\n\n * ``previsit(node, par_value)`` - should return ``(par_value, value)``\n ``par_value`` is as returned from ``previsit()`` of the parent.\n\n * ``postvisit(node, par_value, value)`` - should return ``value``\n ``par_value`` is as returned from ``previsit()`` of the parent, and ``value`` is as\n returned from ``previsit()`` of this node itself. The return ``value`` is ignored except\n the one for the root node, which is returned from the overall ``visit_tree()`` call.\n\n For the initial node, ``par_value`` is None. ``postvisit`` may be None.\n """\n if not postvisit:\n postvisit = lambda node, pvalue, value: None\n\n iter_children = iter_children_func(node)\n done = set()\n ret = None\n stack = [(node, None, _PREVISIT)]\n while stack:\n current, par_value, value = stack.pop()\n if value is _PREVISIT:\n assert current not in done # protect against infinite loop in case of a bad tree.\n done.add(current)\n\n pvalue, post_value = previsit(current, par_value)\n stack.append((current, par_value, post_value))\n\n # Insert all children in reverse order (so that first child ends up on top of the stack).\n ins = len(stack)\n for n in iter_children(current):\n stack.insert(ins, (n, pvalue, _PREVISIT))\n else:\n ret = postvisit(current, par_value, value)\n return ret\n\n\n\ndef walk(node):\n """\n Recursively yield all descendant nodes in the tree starting at ``node`` (including ``node``\n itself), using depth-first pre-order traversal (yielding parents before their children).\n\n This is similar to ``ast.walk()``, but with a different order, and it works for both ``ast`` and\n ``astroid`` trees. Also, like ``iter_children()``, it skips singleton nodes generated by ``ast``.\n """\n iter_children = iter_children_func(node)\n done = set()\n stack = [node]\n while stack:\n current = stack.pop()\n assert current not in done # protect against infinite loop in case of a bad tree.\n done.add(current)\n\n yield current\n\n # Insert all children in reverse order (so that first child ends up on top of the stack).\n # This is faster than building a list and reversing it.\n ins = len(stack)\n for c in iter_children(current):\n stack.insert(ins, c)\n\n\ndef replace(text, replacements):\n """\n Replaces multiple slices of text with new values. This is a convenience method for making code\n modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. 
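(Added usage sketch, not in the original docstring: typically ``atok = asttokens.ASTTokens(source, parse=True)``\n and ``start, end = atok.get_text_range(node)`` supply the slice bounds passed here.)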
Replacements is\n an iterable of ``(start, end, new_text)`` tuples.\n\n For example, ``replace("this is a test", [(0, 4, "X"), (8, 9, "THE")])`` produces\n ``"X is THE test"``.\n """\n p = 0\n parts = []\n for (start, end, new_text) in sorted(replacements):\n parts.append(text[p:start])\n parts.append(new_text)\n p = end\n parts.append(text[p:])\n return \'\'.join(parts)\n\n\nclass NodeMethods(object):\n """\n Helper to get `visit_{node_type}` methods given a node\'s class and cache the results.\n """\n def __init__(self):\n self._cache = {}\n\n def get(self, obj, cls):\n """\n Using the lowercase name of the class as node_type, returns `obj.visit_{node_type}`,\n or `obj.visit_default` if the type-specific method is not found.\n """\n method = self._cache.get(cls)\n if not method:\n name = "visit_" + cls.__name__.lower()\n method = getattr(obj, name, obj.visit_default)\n self._cache[cls] = method\n return method\n') + __stickytape_write_module('asttokens/mark_tokens.py', b'# Copyright 2016 Grist Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numbers\nimport sys\nimport token\n\nimport six\n\nfrom . import util\n\n# Mapping of matching braces. To find a token here, look up token[:2].\n_matching_pairs_left = {\n (token.OP, \'(\'): (token.OP, \')\'),\n (token.OP, \'[\'): (token.OP, \']\'),\n (token.OP, \'{\'): (token.OP, \'}\'),\n}\n\n_matching_pairs_right = {\n (token.OP, \')\'): (token.OP, \'(\'),\n (token.OP, \']\'): (token.OP, \'[\'),\n (token.OP, \'}\'): (token.OP, \'{\'),\n}\n\n\nclass MarkTokens(object):\n """\n Helper that visits all nodes in the AST tree and assigns .first_token and .last_token attributes\n to each of them. This is the heart of the token-marking logic.\n """\n def __init__(self, code):\n self._code = code\n self._methods = util.NodeMethods()\n self._iter_children = None\n\n def visit_tree(self, node):\n self._iter_children = util.iter_children_func(node)\n util.visit_tree(node, self._visit_before_children, self._visit_after_children)\n\n def _visit_before_children(self, node, parent_token):\n col = getattr(node, \'col_offset\', None)\n token = self._code.get_token_from_utf8(node.lineno, col) if col is not None else None\n\n if not token and util.is_module(node):\n # We\'ll assume that a Module node starts at the start of the source code.\n token = self._code.get_token(1, 0)\n\n # Use our own token, or our parent\'s if we don\'t have one, to pass to child calls as\n # parent_token argument. The second value becomes the token argument of _visit_after_children.\n return (token or parent_token, token)\n\n def _visit_after_children(self, node, parent_token, token):\n # This processes the node generically first, after all children have been processed.\n\n # Get the first and last tokens that belong to children. Note how this doesn\'t assume that we\n # iterate through children in order that corresponds to occurrence in source code. This\n # assumption can fail (e.g. 
with return annotations).\n first = token\n last = None\n for child in self._iter_children(node):\n if not first or child.first_token.index < first.index:\n first = child.first_token\n if not last or child.last_token.index > last.index:\n last = child.last_token\n\n # If we don\'t have a first token from _visit_before_children, and there were no children, then\n # use the parent\'s token as the first token.\n first = first or parent_token\n\n # If no children, set last token to the first one.\n last = last or first\n\n # Statements continue to before NEWLINE. This helps cover a few different cases at once.\n if util.is_stmt(node):\n last = self._find_last_in_stmt(last)\n\n # Capture any unmatched brackets.\n first, last = self._expand_to_matching_pairs(first, last, node)\n\n # Give a chance to node-specific methods to adjust.\n nfirst, nlast = self._methods.get(self, node.__class__)(node, first, last)\n\n if (nfirst, nlast) != (first, last):\n # If anything changed, expand again to capture any unmatched brackets.\n nfirst, nlast = self._expand_to_matching_pairs(nfirst, nlast, node)\n\n node.first_token = nfirst\n node.last_token = nlast\n\n def _find_last_in_stmt(self, start_token):\n t = start_token\n while (not util.match_token(t, token.NEWLINE) and\n not util.match_token(t, token.OP, \';\') and\n not token.ISEOF(t.type)):\n t = self._code.next_token(t, include_extra=True)\n return self._code.prev_token(t)\n\n def _expand_to_matching_pairs(self, first_token, last_token, node):\n """\n Scan tokens in [first_token, last_token] range that are between node\'s children, and for any\n unmatched brackets, adjust first/last tokens to include the closing pair.\n """\n # We look for opening parens/braces among non-child tokens (i.e. tokens between our actual\n # child nodes). If we find any closing ones, we match them to the opens.\n to_match_right = []\n to_match_left = []\n for tok in self._code.token_range(first_token, last_token):\n tok_info = tok[:2]\n if to_match_right and tok_info == to_match_right[-1]:\n to_match_right.pop()\n elif tok_info in _matching_pairs_left:\n to_match_right.append(_matching_pairs_left[tok_info])\n elif tok_info in _matching_pairs_right:\n to_match_left.append(_matching_pairs_right[tok_info])\n\n # Once done, extend `last_token` to match any unclosed parens/braces.\n for match in reversed(to_match_right):\n last = self._code.next_token(last_token)\n # Allow for trailing commas or colons (allowed in subscripts) before the closing delimiter\n while any(util.match_token(last, token.OP, x) for x in (\',\', \':\')):\n last = self._code.next_token(last)\n # Now check for the actual closing delimiter.\n if util.match_token(last, *match):\n last_token = last\n\n # And extend `first_token` to match any unclosed opening parens/braces.\n for match in to_match_left:\n first = self._code.prev_token(first_token)\n if util.match_token(first, *match):\n first_token = first\n\n return (first_token, last_token)\n\n #----------------------------------------------------------------------\n # Node visitors. 
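(Added note: these are dispatched via ``util.NodeMethods``, which looks up ``visit_`` + the\n # lowercased node class name and falls back to ``visit_default``.)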
Each takes a preliminary first and last tokens, and returns the adjusted pair\n # that will actually be assigned.\n\n def visit_default(self, node, first_token, last_token):\n # pylint: disable=no-self-use\n # By default, we don\'t need to adjust the token we computed earlier.\n return (first_token, last_token)\n\n def handle_comp(self, open_brace, node, first_token, last_token):\n # For list/set/dict comprehensions, we only get the token of the first child, so adjust it to\n # include the opening brace (the closing brace will be matched automatically).\n before = self._code.prev_token(first_token)\n util.expect_token(before, token.OP, open_brace)\n return (before, last_token)\n\n # Python 3.8 fixed the starting position of list comprehensions:\n # https://bugs.python.org/issue31241\n if sys.version_info < (3, 8):\n def visit_listcomp(self, node, first_token, last_token):\n return self.handle_comp(\'[\', node, first_token, last_token)\n\n if six.PY2:\n # We shouldn\'t do this on PY3 because its SetComp/DictComp already have a correct start.\n def visit_setcomp(self, node, first_token, last_token):\n return self.handle_comp(\'{\', node, first_token, last_token)\n\n def visit_dictcomp(self, node, first_token, last_token):\n return self.handle_comp(\'{\', node, first_token, last_token)\n\n def visit_comprehension(self, node, first_token, last_token):\n # The \'comprehension\' node starts with \'for\' but we only get first child; we search backwards\n # to find the \'for\' keyword.\n first = self._code.find_token(first_token, token.NAME, \'for\', reverse=True)\n return (first, last_token)\n\n def visit_if(self, node, first_token, last_token):\n while first_token.string not in (\'if\', \'elif\'):\n first_token = self._code.prev_token(first_token)\n return first_token, last_token\n\n def handle_attr(self, node, first_token, last_token):\n # Attribute node has ".attr" (2 tokens) after the last child.\n dot = self._code.find_token(last_token, token.OP, \'.\')\n name = self._code.next_token(dot)\n util.expect_token(name, token.NAME)\n return (first_token, name)\n\n visit_attribute = handle_attr\n visit_assignattr = handle_attr\n visit_delattr = handle_attr\n\n def handle_def(self, node, first_token, last_token):\n # With astroid, nodes that start with a doc-string can have an empty body, in which case we\n # need to adjust the last token to include the doc string.\n if not node.body and getattr(node, \'doc\', None):\n last_token = self._code.find_token(last_token, token.STRING)\n\n # Include @ from decorator\n if first_token.index > 0:\n prev = self._code.prev_token(first_token)\n if util.match_token(prev, token.OP, \'@\'):\n first_token = prev\n return (first_token, last_token)\n\n visit_classdef = handle_def\n visit_functiondef = handle_def\n\n def handle_following_brackets(self, node, last_token, opening_bracket):\n # This is for calls and subscripts, which have a pair of brackets\n # at the end which may contain no nodes, e.g. 
foo() or bar[:].\n # We look for the opening bracket and then let the matching pair be found automatically\n # Remember that last_token is at the end of all children,\n # so we are not worried about encountering a bracket that belongs to a child.\n first_child = next(self._iter_children(node))\n call_start = self._code.find_token(first_child.last_token, token.OP, opening_bracket)\n if call_start.index > last_token.index:\n last_token = call_start\n return last_token\n\n def visit_call(self, node, first_token, last_token):\n last_token = self.handle_following_brackets(node, last_token, \'(\')\n\n # Handling a python bug with decorators with empty parens, e.g.\n # @deco()\n # def ...\n if util.match_token(first_token, token.OP, \'@\'):\n first_token = self._code.next_token(first_token)\n return (first_token, last_token)\n\n def visit_subscript(self, node, first_token, last_token):\n last_token = self.handle_following_brackets(node, last_token, \'[\')\n return (first_token, last_token)\n\n def handle_bare_tuple(self, node, first_token, last_token):\n # A bare tuple doesn\'t include parens; if there is a trailing comma, make it part of the tuple.\n maybe_comma = self._code.next_token(last_token)\n if util.match_token(maybe_comma, token.OP, \',\'):\n last_token = maybe_comma\n return (first_token, last_token)\n\n if sys.version_info >= (3, 8):\n # In Python3.8 parsed tuples include parentheses when present.\n def handle_tuple_nonempty(self, node, first_token, last_token):\n # It\'s a bare tuple if the first token belongs to the first child. The first child may\n # include extraneous parentheses (which don\'t create new nodes), so account for those too.\n child = node.elts[0]\n child_first, child_last = self._gobble_parens(child.first_token, child.last_token, True)\n if first_token == child_first:\n return self.handle_bare_tuple(node, first_token, last_token)\n return (first_token, last_token)\n else:\n # Before python 3.8, parsed tuples do not include parens.\n def handle_tuple_nonempty(self, node, first_token, last_token):\n (first_token, last_token) = self.handle_bare_tuple(node, first_token, last_token)\n return self._gobble_parens(first_token, last_token, False)\n\n def visit_tuple(self, node, first_token, last_token):\n if not node.elts:\n # An empty tuple is just "()", and we need no further info.\n return (first_token, last_token)\n return self.handle_tuple_nonempty(node, first_token, last_token)\n\n def _gobble_parens(self, first_token, last_token, include_all=False):\n # Expands a range of tokens to include one or all pairs of surrounding parentheses, and\n # returns (first, last) tokens that include these parens.\n while first_token.index > 0:\n prev = self._code.prev_token(first_token)\n next = self._code.next_token(last_token)\n if util.match_token(prev, token.OP, \'(\') and util.match_token(next, token.OP, \')\'):\n first_token, last_token = prev, next\n if include_all:\n continue\n break\n return (first_token, last_token)\n\n def visit_str(self, node, first_token, last_token):\n return self.handle_str(first_token, last_token)\n\n def visit_joinedstr(self, node, first_token, last_token):\n return self.handle_str(first_token, last_token)\n\n def visit_bytes(self, node, first_token, last_token):\n return self.handle_str(first_token, last_token)\n\n def handle_str(self, first_token, last_token):\n # Multiple adjacent STRING tokens form a single string.\n last = self._code.next_token(last_token)\n while util.match_token(last, token.STRING):\n last_token = last\n last = 
self._code.next_token(last_token)\n return (first_token, last_token)\n\n def handle_num(self, node, value, first_token, last_token):\n # A constant like \'-1\' gets turned into two tokens; this will skip the \'-\'.\n while util.match_token(last_token, token.OP):\n last_token = self._code.next_token(last_token)\n\n if isinstance(value, complex):\n # A complex number like -2j cannot be compared directly to 0\n # A complex number like 1-2j is expressed as a binary operation\n # so we don\'t need to worry about it\n value = value.imag\n\n # This makes sure that the - is included\n if value < 0 and first_token.type == token.NUMBER:\n first_token = self._code.prev_token(first_token)\n return (first_token, last_token)\n\n def visit_num(self, node, first_token, last_token):\n return self.handle_num(node, node.n, first_token, last_token)\n\n # In Astroid, the Num and Str nodes are replaced by Const.\n def visit_const(self, node, first_token, last_token):\n if isinstance(node.value, numbers.Number):\n return self.handle_num(node, node.value, first_token, last_token)\n elif isinstance(node.value, (six.text_type, six.binary_type)):\n return self.visit_str(node, first_token, last_token)\n return (first_token, last_token)\n\n # In Python >= 3.6, there is a similar class \'Constant\' for literals\n # In 3.8 it became the type produced by ast.parse\n # https://bugs.python.org/issue32892\n visit_constant = visit_const\n\n def visit_keyword(self, node, first_token, last_token):\n # Until python 3.9 (https://bugs.python.org/issue40141),\n # ast.keyword nodes didn\'t have line info. Astroid has lineno None.\n if node.arg is not None and getattr(node, \'lineno\', None) is None:\n equals = self._code.find_token(first_token, token.OP, \'=\', reverse=True)\n name = self._code.prev_token(equals)\n util.expect_token(name, token.NAME, node.arg)\n first_token = name\n return (first_token, last_token)\n\n def visit_starred(self, node, first_token, last_token):\n # Astroid has \'Starred\' nodes (for "foo(*bar)" type args), but they need to be adjusted.\n if not util.match_token(first_token, token.OP, \'*\'):\n star = self._code.prev_token(first_token)\n if util.match_token(star, token.OP, \'*\'):\n first_token = star\n return (first_token, last_token)\n\n def visit_assignname(self, node, first_token, last_token):\n # Astroid may turn \'except\' clause into AssignName, but we need to adjust it.\n if util.match_token(first_token, token.NAME, \'except\'):\n colon = self._code.find_token(last_token, token.OP, \':\')\n first_token = last_token = self._code.prev_token(colon)\n return (first_token, last_token)\n\n if six.PY2:\n # No need for this on Python3, which already handles \'with\' nodes correctly.\n def visit_with(self, node, first_token, last_token):\n first = self._code.find_token(first_token, token.NAME, \'with\', reverse=True)\n return (first, last_token)\n\n # Async nodes should typically start with the word \'async\'\n # but Python < 3.7 doesn\'t put the col_offset there\n # AsyncFunctionDef is slightly different because it might have\n # decorators before that, which visit_functiondef handles\n def handle_async(self, node, first_token, last_token):\n if not first_token.string == \'async\':\n first_token = self._code.prev_token(first_token)\n return (first_token, last_token)\n\n visit_asyncfor = handle_async\n visit_asyncwith = handle_async\n\n def visit_asyncfunctiondef(self, node, first_token, last_token):\n if util.match_token(first_token, token.NAME, \'def\'):\n # Include the \'async\' token\n first_token = 
self._code.prev_token(first_token)\n return self.visit_functiondef(node, first_token, last_token)\n') + __stickytape_write_module('icecream/__init__.py', b"# -*- coding: utf-8 -*-\n\n#\n# IceCream - Never use print() to debug again\n#\n# Ansgar Grunseid\n# grunseid.com\n# grunseid@gmail.com\n#\n# License: MIT\n#\n\nfrom os.path import dirname, join as pjoin\n\nfrom .icecream import * # noqa\nfrom .builtins import install, uninstall\n\n# Import all variables in __version__.py without explicit imports.\nmeta = {}\nwith open(pjoin(dirname(__file__), '__version__.py')) as f:\n exec(f.read(), meta)\nglobals().update(dict((k, v) for k, v in meta.items() if k not in globals()))\n") + __stickytape_write_module('icecream/icecream.py', b'#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# IceCream - Never use print() to debug again\n#\n# Ansgar Grunseid\n# grunseid.com\n# grunseid@gmail.com\n#\n# License: MIT\n#\n\nfrom __future__ import print_function\n\nimport ast\nimport inspect\nimport pprint\nimport sys\nfrom datetime import datetime\nfrom contextlib import contextmanager\nfrom os.path import basename\nfrom textwrap import dedent\n\nimport colorama\nimport executing\nfrom pygments import highlight\n# See https://gist.github.com/XVilka/8346728 for color support in various\n# terminals and thus whether to use Terminal256Formatter or\n# TerminalTrueColorFormatter.\nfrom pygments.formatters import Terminal256Formatter\nfrom pygments.lexers import PythonLexer as PyLexer, Python3Lexer as Py3Lexer\n\nfrom .coloring import SolarizedDark\n\n\nPYTHON2 = (sys.version_info[0] == 2)\n\n\n_absent = object()\n\n\ndef bindStaticVariable(name, value):\n def decorator(fn):\n setattr(fn, name, value)\n return fn\n return decorator\n\n\n@bindStaticVariable(\'formatter\', Terminal256Formatter(style=SolarizedDark))\n@bindStaticVariable(\n \'lexer\', PyLexer(ensurenl=False) if PYTHON2 else Py3Lexer(ensurenl=False))\ndef colorize(s):\n self = colorize\n return highlight(s, self.lexer, self.formatter)\n\n\n@contextmanager\ndef supportTerminalColorsInWindows():\n # Filter and replace ANSI escape sequences on Windows with equivalent Win32\n # API calls. This code does nothing on non-Windows systems.\n colorama.init()\n yield\n colorama.deinit()\n\n\ndef stderrPrint(*args):\n print(*args, file=sys.stderr)\n\n\ndef isLiteral(s):\n try:\n ast.literal_eval(s)\n except Exception:\n return False\n return True\n\n\ndef colorizedStderrPrint(s):\n colored = colorize(s)\n with supportTerminalColorsInWindows():\n stderrPrint(colored)\n\n\nDEFAULT_PREFIX = \'ic| \'\nDEFAULT_LINE_WRAP_WIDTH = 70 # Characters.\nDEFAULT_CONTEXT_DELIMITER = \'- \'\nDEFAULT_OUTPUT_FUNCTION = colorizedStderrPrint\nDEFAULT_ARG_TO_STRING_FUNCTION = pprint.pformat\n\n\nclass NoSourceAvailableError(OSError):\n """\n Raised when icecream fails to find or access source code that\'s\n required to parse and analyze. This can happen, for example, when\n\n - ic() is invoked inside an interactive shell, e.g. python -i.\n\n - The source code is mangled and/or packaged, e.g. with a project\n freezer like PyInstaller.\n\n - The underlying source code changed during execution. See\n https://stackoverflow.com/a/33175832.\n """\n infoMessage = (\n \'Failed to access the underlying source code for analysis. Was ic() \'\n \'invoked in an interpreter (e.g. python -i), a frozen application \'\n \'(e.g. 
packaged with PyInstaller), or did the underlying source code \'\n \'change during execution?\')\n\n\ndef callOrValue(obj):\n return obj() if callable(obj) else obj\n\n\nclass Source(executing.Source):\n def get_text_with_indentation(self, node):\n result = self.asttokens().get_text(node)\n if \'\\n\' in result:\n result = \' \' * node.first_token.start[1] + result\n result = dedent(result)\n result = result.strip()\n return result\n\n\ndef prefixLinesAfterFirst(prefix, s):\n lines = s.splitlines(True)\n\n for i in range(1, len(lines)):\n lines[i] = prefix + lines[i]\n\n return \'\'.join(lines)\n\n\ndef indented_lines(prefix, string):\n lines = string.splitlines()\n return [prefix + lines[0]] + [\n \' \' * len(prefix) + line\n for line in lines[1:]\n ]\n\n\ndef format_pair(prefix, arg, value):\n arg_lines = indented_lines(prefix, arg)\n value_prefix = arg_lines[-1] + \': \'\n\n looksLikeAString = value[0] + value[-1] in ["\'\'", \'""\']\n if looksLikeAString: # Align the start of multiline strings.\n value = prefixLinesAfterFirst(\' \', value)\n\n value_lines = indented_lines(value_prefix, value)\n lines = arg_lines[:-1] + value_lines\n return \'\\n\'.join(lines)\n\n\ndef argumentToString(obj):\n s = DEFAULT_ARG_TO_STRING_FUNCTION(obj)\n s = s.replace(\'\\\\n\', \'\\n\') # Preserve string newlines in output.\n return s\n\n\nclass IceCreamDebugger:\n _pairDelimiter = \', \' # Used by the tests in tests/.\n lineWrapWidth = DEFAULT_LINE_WRAP_WIDTH\n contextDelimiter = DEFAULT_CONTEXT_DELIMITER\n\n def __init__(self, prefix=DEFAULT_PREFIX,\n outputFunction=DEFAULT_OUTPUT_FUNCTION,\n argToStringFunction=argumentToString, includeContext=False):\n self.enabled = True\n self.prefix = prefix\n self.includeContext = includeContext\n self.outputFunction = outputFunction\n self.argToStringFunction = argToStringFunction\n\n def __call__(self, *args):\n if self.enabled:\n callFrame = inspect.currentframe().f_back\n try:\n out = self._format(callFrame, *args)\n except NoSourceAvailableError as err:\n prefix = callOrValue(self.prefix)\n out = prefix + \'Error: \' + err.infoMessage\n self.outputFunction(out)\n\n if not args: # E.g. ic().\n passthrough = None\n elif len(args) == 1: # E.g. ic(1).\n passthrough = args[0]\n else: # E.g. 
ic(1, 2, 3).\n passthrough = args\n\n return passthrough\n\n def format(self, *args):\n callFrame = inspect.currentframe().f_back\n out = self._format(callFrame, *args)\n return out\n\n def _format(self, callFrame, *args):\n prefix = callOrValue(self.prefix)\n\n callNode = Source.executing(callFrame).node\n if callNode is None:\n raise NoSourceAvailableError()\n\n context = self._formatContext(callFrame, callNode)\n if not args:\n time = self._formatTime()\n out = prefix + context + time\n else:\n if not self.includeContext:\n context = \'\'\n out = self._formatArgs(\n callFrame, callNode, prefix, context, args)\n\n return out\n\n def _formatArgs(self, callFrame, callNode, prefix, context, args):\n source = Source.for_frame(callFrame)\n sanitizedArgStrs = [\n source.get_text_with_indentation(arg)\n for arg in callNode.args]\n\n pairs = list(zip(sanitizedArgStrs, args))\n\n out = self._constructArgumentOutput(prefix, context, pairs)\n return out\n\n def _constructArgumentOutput(self, prefix, context, pairs):\n def argPrefix(arg):\n return \'%s: \' % arg\n\n pairs = [(arg, self.argToStringFunction(val)) for arg, val in pairs]\n # For cleaner output, if <arg> is a literal, eg 3, "string", b\'bytes\',\n # etc, only output the value, not the argument and the value, as the\n # argument and the value will be identical or nigh identical. Ex: with\n # ic("hello"), just output\n #\n # ic| \'hello\',\n #\n # instead of\n #\n # ic| "hello": \'hello\'.\n #\n pairStrs = [\n val if isLiteral(arg) else (argPrefix(arg) + val)\n for arg, val in pairs]\n\n allArgsOnOneLine = self._pairDelimiter.join(pairStrs)\n multilineArgs = len(allArgsOnOneLine.splitlines()) > 1\n\n contextDelimiter = self.contextDelimiter if context else \'\'\n allPairs = prefix + context + contextDelimiter + allArgsOnOneLine\n firstLineTooLong = len(allPairs.splitlines()[0]) > self.lineWrapWidth\n\n if multilineArgs or firstLineTooLong:\n # ic| foo.py:11 in foo()\n # multilineStr: \'line1\n # line2\'\n #\n # ic| foo.py:11 in foo()\n # a: 11111111111111111111\n # b: 22222222222222222222\n if context:\n lines = [prefix + context] + [\n format_pair(len(prefix) * \' \', arg, value)\n for arg, value in pairs\n ]\n # ic| multilineStr: \'line1\n # line2\'\n #\n # ic| a: 11111111111111111111\n # b: 22222222222222222222\n else:\n arg_lines = [\n format_pair(\'\', arg, value)\n for arg, value in pairs\n ]\n lines = indented_lines(prefix, \'\\n\'.join(arg_lines))\n # ic| foo.py:11 in foo()- a: 1, b: 2\n # ic| a: 1, b: 2, c: 3\n else:\n lines = [prefix + context + contextDelimiter + allArgsOnOneLine]\n\n return \'\\n\'.join(lines)\n\n def _formatContext(self, callFrame, callNode):\n filename, lineNumber, parentFunction = self._getContext(\n callFrame, callNode)\n\n if parentFunction != \'\':\n parentFunction = \'%s()\' % parentFunction\n\n context = \'%s:%s in %s\' % (filename, lineNumber, parentFunction)\n return context\n\n def _formatTime(self):\n now = datetime.now()\n formatted = now.strftime(\'%H:%M:%S.%f\')[:-3]\n return \' at %s\' % formatted\n\n def _getContext(self, callFrame, callNode):\n lineNumber = callNode.lineno\n frameInfo = inspect.getframeinfo(callFrame)\n parentFunction = frameInfo.function\n filename = basename(frameInfo.filename)\n\n return filename, lineNumber, parentFunction\n\n def enable(self):\n self.enabled = True\n\n def disable(self):\n self.enabled = False\n\n def configureOutput(self, prefix=_absent, outputFunction=_absent,\n argToStringFunction=_absent, includeContext=_absent):\n if prefix is not _absent:\n self.prefix = 
prefix\n\n if outputFunction is not _absent:\n self.outputFunction = outputFunction\n\n if argToStringFunction is not _absent:\n self.argToStringFunction = argToStringFunction\n\n if includeContext is not _absent:\n self.includeContext = includeContext\n\n\nic = IceCreamDebugger()\n') + __stickytape_write_module('colorama/__init__.py', b"# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.\nfrom .initialise import init, deinit, reinit, colorama_text\nfrom .ansi import Fore, Back, Style, Cursor\nfrom .ansitowin32 import AnsiToWin32\n\n__version__ = '0.4.4'\n") + __stickytape_write_module('colorama/initialise.py', b"# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.\nimport atexit\nimport contextlib\nimport sys\n\nfrom .ansitowin32 import AnsiToWin32\n\n\norig_stdout = None\norig_stderr = None\n\nwrapped_stdout = None\nwrapped_stderr = None\n\natexit_done = False\n\n\ndef reset_all():\n if AnsiToWin32 is not None: # Issue #74: objects might become None at exit\n AnsiToWin32(orig_stdout).reset_all()\n\n\ndef init(autoreset=False, convert=None, strip=None, wrap=True):\n\n if not wrap and any([autoreset, convert, strip]):\n raise ValueError('wrap=False conflicts with any other arg=True')\n\n global wrapped_stdout, wrapped_stderr\n global orig_stdout, orig_stderr\n\n orig_stdout = sys.stdout\n orig_stderr = sys.stderr\n\n if sys.stdout is None:\n wrapped_stdout = None\n else:\n sys.stdout = wrapped_stdout = \\\n wrap_stream(orig_stdout, convert, strip, autoreset, wrap)\n if sys.stderr is None:\n wrapped_stderr = None\n else:\n sys.stderr = wrapped_stderr = \\\n wrap_stream(orig_stderr, convert, strip, autoreset, wrap)\n\n global atexit_done\n if not atexit_done:\n atexit.register(reset_all)\n atexit_done = True\n\n\ndef deinit():\n if orig_stdout is not None:\n sys.stdout = orig_stdout\n if orig_stderr is not None:\n sys.stderr = orig_stderr\n\n\n@contextlib.contextmanager\ndef colorama_text(*args, **kwargs):\n init(*args, **kwargs)\n try:\n yield\n finally:\n deinit()\n\n\ndef reinit():\n if wrapped_stdout is not None:\n sys.stdout = wrapped_stdout\n if wrapped_stderr is not None:\n sys.stderr = wrapped_stderr\n\n\ndef wrap_stream(stream, convert, strip, autoreset, wrap):\n if wrap:\n wrapper = AnsiToWin32(stream,\n convert=convert, strip=strip, autoreset=autoreset)\n if wrapper.should_wrap():\n stream = wrapper.stream\n return stream\n") + __stickytape_write_module('colorama/ansitowin32.py', b'# Copyright Jonathan Hartley 2013. 
BSD 3-Clause license, see LICENSE file.\nimport re\nimport sys\nimport os\n\nfrom .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL\nfrom .winterm import WinTerm, WinColor, WinStyle\nfrom .win32 import windll, winapi_test\n\n\nwinterm = None\nif windll is not None:\n winterm = WinTerm()\n\n\nclass StreamWrapper(object):\n \'\'\'\n Wraps a stream (such as stdout), acting as a transparent proxy for all\n attribute access apart from method \'write()\', which is delegated to our\n Converter instance.\n \'\'\'\n def __init__(self, wrapped, converter):\n # double-underscore everything to prevent clashes with names of\n # attributes on the wrapped stream object.\n self.__wrapped = wrapped\n self.__convertor = converter\n\n def __getattr__(self, name):\n return getattr(self.__wrapped, name)\n\n def __enter__(self, *args, **kwargs):\n # special method lookup bypasses __getattr__/__getattribute__, see\n # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit\n # thus, contextlib magic methods are not proxied via __getattr__\n return self.__wrapped.__enter__(*args, **kwargs)\n\n def __exit__(self, *args, **kwargs):\n return self.__wrapped.__exit__(*args, **kwargs)\n\n def write(self, text):\n self.__convertor.write(text)\n\n def isatty(self):\n stream = self.__wrapped\n if \'PYCHARM_HOSTED\' in os.environ:\n if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):\n return True\n try:\n stream_isatty = stream.isatty\n except AttributeError:\n return False\n else:\n return stream_isatty()\n\n @property\n def closed(self):\n stream = self.__wrapped\n try:\n return stream.closed\n except AttributeError:\n return True\n\n\nclass AnsiToWin32(object):\n \'\'\'\n Implements a \'write()\' method which, on Windows, will strip ANSI character\n sequences from the text, and if outputting to a tty, will convert them into\n win32 function calls.\n \'\'\'\n ANSI_CSI_RE = re.compile(\'\\001?\\033\\\\[((?:\\\\d|;)*)([a-zA-Z])\\002?\') # Control Sequence Introducer\n ANSI_OSC_RE = re.compile(\'\\001?\\033\\\\]([^\\a]*)(\\a)\\002?\') # Operating System Command\n\n def __init__(self, wrapped, convert=None, strip=None, autoreset=False):\n # The wrapped stream (normally sys.stdout or sys.stderr)\n self.wrapped = wrapped\n\n # should we reset colors to defaults after every .write()\n self.autoreset = autoreset\n\n # create the proxy wrapping our output stream\n self.stream = StreamWrapper(wrapped, self)\n\n on_windows = os.name == \'nt\'\n # We test if the WinAPI works, because even if we are on Windows\n # we may be using a terminal that doesn\'t support the WinAPI\n # (e.g. Cygwin Terminal). In this case it\'s up to the terminal\n # to support the ANSI codes.\n conversion_supported = on_windows and winapi_test()\n\n # should we strip ANSI sequences from our output?\n if strip is None:\n strip = conversion_supported or (not self.stream.closed and not self.stream.isatty())\n self.strip = strip\n\n # should we convert ANSI sequences into win32 calls?\n if convert is None:\n convert = conversion_supported and not self.stream.closed and self.stream.isatty()\n self.convert = convert\n\n # dict of ansi codes to win32 functions and parameters\n self.win32_calls = self.get_win32_calls()\n\n # are we wrapping stderr?\n self.on_stderr = self.wrapped is sys.stderr\n\n def should_wrap(self):\n \'\'\'\n True if this class is actually needed. 
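(Equivalent sketch of the return below: ``self.convert or self.strip or self.autoreset``.)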
If false, then the output\n stream will not be affected, nor will win32 calls be issued, so\n wrapping stdout is not actually required. This will generally be\n False on non-Windows platforms, unless optional functionality like\n autoreset has been requested using kwargs to init()\n \'\'\'\n return self.convert or self.strip or self.autoreset\n\n def get_win32_calls(self):\n if self.convert and winterm:\n return {\n AnsiStyle.RESET_ALL: (winterm.reset_all, ),\n AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),\n AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),\n AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),\n AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),\n AnsiFore.RED: (winterm.fore, WinColor.RED),\n AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),\n AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),\n AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),\n AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),\n AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),\n AnsiFore.WHITE: (winterm.fore, WinColor.GREY),\n AnsiFore.RESET: (winterm.fore, ),\n AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),\n AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),\n AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),\n AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),\n AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),\n AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),\n AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),\n AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),\n AnsiBack.BLACK: (winterm.back, WinColor.BLACK),\n AnsiBack.RED: (winterm.back, WinColor.RED),\n AnsiBack.GREEN: (winterm.back, WinColor.GREEN),\n AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),\n AnsiBack.BLUE: (winterm.back, WinColor.BLUE),\n AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),\n AnsiBack.CYAN: (winterm.back, WinColor.CYAN),\n AnsiBack.WHITE: (winterm.back, WinColor.GREY),\n AnsiBack.RESET: (winterm.back, ),\n AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),\n AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),\n AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),\n AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),\n AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),\n AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),\n AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),\n AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),\n }\n return dict()\n\n def write(self, text):\n if self.strip or self.convert:\n self.write_and_convert(text)\n else:\n self.wrapped.write(text)\n self.wrapped.flush()\n if self.autoreset:\n self.reset_all()\n\n\n def reset_all(self):\n if self.convert:\n self.call_win32(\'m\', (0,))\n elif not self.strip and not self.stream.closed:\n self.wrapped.write(Style.RESET_ALL)\n\n\n def write_and_convert(self, text):\n \'\'\'\n Write the given text to our wrapped stream, stripping any ANSI\n sequences from the text, and optionally converting them into win32\n calls.\n \'\'\'\n cursor = 0\n text = self.convert_osc(text)\n for match in self.ANSI_CSI_RE.finditer(text):\n start, end = match.span()\n self.write_plain_text(text, cursor, start)\n self.convert_ansi(*match.groups())\n cursor = end\n self.write_plain_text(text, cursor, len(text))\n\n\n def write_plain_text(self, text, start, end):\n if start < end:\n self.wrapped.write(text[start:end])\n self.wrapped.flush()\n\n\n def convert_ansi(self, paramstring, command):\n if self.convert:\n 
params = self.extract_params(command, paramstring)\n self.call_win32(command, params)\n\n\n def extract_params(self, command, paramstring):\n if command in \'Hf\':\n params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(\';\'))\n while len(params) < 2:\n # defaults:\n params = params + (1,)\n else:\n params = tuple(int(p) for p in paramstring.split(\';\') if len(p) != 0)\n if len(params) == 0:\n # defaults:\n if command in \'JKm\':\n params = (0,)\n elif command in \'ABCD\':\n params = (1,)\n\n return params\n\n\n def call_win32(self, command, params):\n if command == \'m\':\n for param in params:\n if param in self.win32_calls:\n func_args = self.win32_calls[param]\n func = func_args[0]\n args = func_args[1:]\n kwargs = dict(on_stderr=self.on_stderr)\n func(*args, **kwargs)\n elif command in \'J\':\n winterm.erase_screen(params[0], on_stderr=self.on_stderr)\n elif command in \'K\':\n winterm.erase_line(params[0], on_stderr=self.on_stderr)\n elif command in \'Hf\': # cursor position - absolute\n winterm.set_cursor_position(params, on_stderr=self.on_stderr)\n elif command in \'ABCD\': # cursor position - relative\n n = params[0]\n # A - up, B - down, C - forward, D - back\n x, y = {\'A\': (0, -n), \'B\': (0, n), \'C\': (n, 0), \'D\': (-n, 0)}[command]\n winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)\n\n\n def convert_osc(self, text):\n for match in self.ANSI_OSC_RE.finditer(text):\n start, end = match.span()\n text = text[:start] + text[end:]\n paramstring, command = match.groups()\n if command == BEL:\n if paramstring.count(";") == 1:\n params = paramstring.split(";")\n # 0 - change title and icon (we will only change title)\n # 1 - change icon (we don\'t support this)\n # 2 - change title\n if params[0] in \'02\':\n winterm.set_title(params[1])\n return text\n') + __stickytape_write_module('colorama/ansi.py', b"# Copyright Jonathan Hartley 2013. 
BSD 3-Clause license, see LICENSE file.\n'''\nThis module generates ANSI character codes to printing colors to terminals.\nSee: http://en.wikipedia.org/wiki/ANSI_escape_code\n'''\n\nCSI = '\\033['\nOSC = '\\033]'\nBEL = '\\a'\n\n\ndef code_to_chars(code):\n return CSI + str(code) + 'm'\n\ndef set_title(title):\n return OSC + '2;' + title + BEL\n\ndef clear_screen(mode=2):\n return CSI + str(mode) + 'J'\n\ndef clear_line(mode=2):\n return CSI + str(mode) + 'K'\n\n\nclass AnsiCodes(object):\n def __init__(self):\n # the subclasses declare class attributes which are numbers.\n # Upon instantiation we define instance attributes, which are the same\n # as the class attributes but wrapped with the ANSI escape sequence\n for name in dir(self):\n if not name.startswith('_'):\n value = getattr(self, name)\n setattr(self, name, code_to_chars(value))\n\n\nclass AnsiCursor(object):\n def UP(self, n=1):\n return CSI + str(n) + 'A'\n def DOWN(self, n=1):\n return CSI + str(n) + 'B'\n def FORWARD(self, n=1):\n return CSI + str(n) + 'C'\n def BACK(self, n=1):\n return CSI + str(n) + 'D'\n def POS(self, x=1, y=1):\n return CSI + str(y) + ';' + str(x) + 'H'\n\n\nclass AnsiFore(AnsiCodes):\n BLACK = 30\n RED = 31\n GREEN = 32\n YELLOW = 33\n BLUE = 34\n MAGENTA = 35\n CYAN = 36\n WHITE = 37\n RESET = 39\n\n # These are fairly well supported, but not part of the standard.\n LIGHTBLACK_EX = 90\n LIGHTRED_EX = 91\n LIGHTGREEN_EX = 92\n LIGHTYELLOW_EX = 93\n LIGHTBLUE_EX = 94\n LIGHTMAGENTA_EX = 95\n LIGHTCYAN_EX = 96\n LIGHTWHITE_EX = 97\n\n\nclass AnsiBack(AnsiCodes):\n BLACK = 40\n RED = 41\n GREEN = 42\n YELLOW = 43\n BLUE = 44\n MAGENTA = 45\n CYAN = 46\n WHITE = 47\n RESET = 49\n\n # These are fairly well supported, but not part of the standard.\n LIGHTBLACK_EX = 100\n LIGHTRED_EX = 101\n LIGHTGREEN_EX = 102\n LIGHTYELLOW_EX = 103\n LIGHTBLUE_EX = 104\n LIGHTMAGENTA_EX = 105\n LIGHTCYAN_EX = 106\n LIGHTWHITE_EX = 107\n\n\nclass AnsiStyle(AnsiCodes):\n BRIGHT = 1\n DIM = 2\n NORMAL = 22\n RESET_ALL = 0\n\nFore = AnsiFore()\nBack = AnsiBack()\nStyle = AnsiStyle()\nCursor = AnsiCursor()\n") + __stickytape_write_module('colorama/winterm.py', b"# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.\nfrom . 
import win32\n\n\n# from wincon.h\nclass WinColor(object):\n BLACK = 0\n BLUE = 1\n GREEN = 2\n CYAN = 3\n RED = 4\n MAGENTA = 5\n YELLOW = 6\n GREY = 7\n\n# from wincon.h\nclass WinStyle(object):\n NORMAL = 0x00 # dim text, dim background\n BRIGHT = 0x08 # bright text, dim background\n BRIGHT_BACKGROUND = 0x80 # dim text, bright background\n\nclass WinTerm(object):\n\n def __init__(self):\n self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes\n self.set_attrs(self._default)\n self._default_fore = self._fore\n self._default_back = self._back\n self._default_style = self._style\n # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.\n # So that LIGHT_EX colors and BRIGHT style do not clobber each other,\n # we track them separately, since LIGHT_EX is overwritten by Fore/Back\n # and BRIGHT is overwritten by Style codes.\n self._light = 0\n\n def get_attrs(self):\n return self._fore + self._back * 16 + (self._style | self._light)\n\n def set_attrs(self, value):\n self._fore = value & 7\n self._back = (value >> 4) & 7\n self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)\n\n def reset_all(self, on_stderr=None):\n self.set_attrs(self._default)\n self.set_console(attrs=self._default)\n self._light = 0\n\n def fore(self, fore=None, light=False, on_stderr=False):\n if fore is None:\n fore = self._default_fore\n self._fore = fore\n # Emulate LIGHT_EX with BRIGHT Style\n if light:\n self._light |= WinStyle.BRIGHT\n else:\n self._light &= ~WinStyle.BRIGHT\n self.set_console(on_stderr=on_stderr)\n\n def back(self, back=None, light=False, on_stderr=False):\n if back is None:\n back = self._default_back\n self._back = back\n # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style\n if light:\n self._light |= WinStyle.BRIGHT_BACKGROUND\n else:\n self._light &= ~WinStyle.BRIGHT_BACKGROUND\n self.set_console(on_stderr=on_stderr)\n\n def style(self, style=None, on_stderr=False):\n if style is None:\n style = self._default_style\n self._style = style\n self.set_console(on_stderr=on_stderr)\n\n def set_console(self, attrs=None, on_stderr=False):\n if attrs is None:\n attrs = self.get_attrs()\n handle = win32.STDOUT\n if on_stderr:\n handle = win32.STDERR\n win32.SetConsoleTextAttribute(handle, attrs)\n\n def get_position(self, handle):\n position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition\n # Because Windows coordinates are 0-based,\n # and win32.SetConsoleCursorPosition expects 1-based.\n position.X += 1\n position.Y += 1\n return position\n\n def set_cursor_position(self, position=None, on_stderr=False):\n if position is None:\n # I'm not currently tracking the position, so there is no default.\n # position = self.get_position()\n return\n handle = win32.STDOUT\n if on_stderr:\n handle = win32.STDERR\n win32.SetConsoleCursorPosition(handle, position)\n\n def cursor_adjust(self, x, y, on_stderr=False):\n handle = win32.STDOUT\n if on_stderr:\n handle = win32.STDERR\n position = self.get_position(handle)\n adjusted_position = (position.Y + y, position.X + x)\n win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)\n\n def erase_screen(self, mode=0, on_stderr=False):\n # 0 should clear from the cursor to the end of the screen.\n # 1 should clear from the cursor to the beginning of the screen.\n # 2 should clear the entire screen, and move cursor to (1,1)\n handle = win32.STDOUT\n if on_stderr:\n handle = win32.STDERR\n csbi = win32.GetConsoleScreenBufferInfo(handle)\n # get the number of character cells in the current buffer\n 
cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y\n # get number of character cells before current cursor position\n cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X\n if mode == 0:\n from_coord = csbi.dwCursorPosition\n cells_to_erase = cells_in_screen - cells_before_cursor\n elif mode == 1:\n from_coord = win32.COORD(0, 0)\n cells_to_erase = cells_before_cursor\n elif mode == 2:\n from_coord = win32.COORD(0, 0)\n cells_to_erase = cells_in_screen\n else:\n # invalid mode\n return\n # fill the entire screen with blanks\n win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)\n # now set the buffer's attributes accordingly\n win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)\n if mode == 2:\n # put the cursor where needed\n win32.SetConsoleCursorPosition(handle, (1, 1))\n\n def erase_line(self, mode=0, on_stderr=False):\n # 0 should clear from the cursor to the end of the line.\n # 1 should clear from the cursor to the beginning of the line.\n # 2 should clear the entire line.\n handle = win32.STDOUT\n if on_stderr:\n handle = win32.STDERR\n csbi = win32.GetConsoleScreenBufferInfo(handle)\n if mode == 0:\n from_coord = csbi.dwCursorPosition\n cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X\n elif mode == 1:\n from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)\n cells_to_erase = csbi.dwCursorPosition.X\n elif mode == 2:\n from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)\n cells_to_erase = csbi.dwSize.X\n else:\n # invalid mode\n return\n # fill the entire screen with blanks\n win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)\n # now set the buffer's attributes accordingly\n win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)\n\n def set_title(self, title):\n win32.SetConsoleTitle(title)\n") + __stickytape_write_module('colorama/win32.py', b'# Copyright Jonathan Hartley 2013. 
BSD 3-Clause license, see LICENSE file.\n\n# from winbase.h\nSTDOUT = -11\nSTDERR = -12\n\ntry:\n import ctypes\n from ctypes import LibraryLoader\n windll = LibraryLoader(ctypes.WinDLL)\n from ctypes import wintypes\nexcept (AttributeError, ImportError):\n windll = None\n SetConsoleTextAttribute = lambda *_: None\n winapi_test = lambda *_: None\nelse:\n from ctypes import byref, Structure, c_char, POINTER\n\n COORD = wintypes._COORD\n\n class CONSOLE_SCREEN_BUFFER_INFO(Structure):\n """struct in wincon.h."""\n _fields_ = [\n ("dwSize", COORD),\n ("dwCursorPosition", COORD),\n ("wAttributes", wintypes.WORD),\n ("srWindow", wintypes.SMALL_RECT),\n ("dwMaximumWindowSize", COORD),\n ]\n def __str__(self):\n return \'(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)\' % (\n self.dwSize.Y, self.dwSize.X\n , self.dwCursorPosition.Y, self.dwCursorPosition.X\n , self.wAttributes\n , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right\n , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X\n )\n\n _GetStdHandle = windll.kernel32.GetStdHandle\n _GetStdHandle.argtypes = [\n wintypes.DWORD,\n ]\n _GetStdHandle.restype = wintypes.HANDLE\n\n _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo\n _GetConsoleScreenBufferInfo.argtypes = [\n wintypes.HANDLE,\n POINTER(CONSOLE_SCREEN_BUFFER_INFO),\n ]\n _GetConsoleScreenBufferInfo.restype = wintypes.BOOL\n\n _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute\n _SetConsoleTextAttribute.argtypes = [\n wintypes.HANDLE,\n wintypes.WORD,\n ]\n _SetConsoleTextAttribute.restype = wintypes.BOOL\n\n _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition\n _SetConsoleCursorPosition.argtypes = [\n wintypes.HANDLE,\n COORD,\n ]\n _SetConsoleCursorPosition.restype = wintypes.BOOL\n\n _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA\n _FillConsoleOutputCharacterA.argtypes = [\n wintypes.HANDLE,\n c_char,\n wintypes.DWORD,\n COORD,\n POINTER(wintypes.DWORD),\n ]\n _FillConsoleOutputCharacterA.restype = wintypes.BOOL\n\n _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute\n _FillConsoleOutputAttribute.argtypes = [\n wintypes.HANDLE,\n wintypes.WORD,\n wintypes.DWORD,\n COORD,\n POINTER(wintypes.DWORD),\n ]\n _FillConsoleOutputAttribute.restype = wintypes.BOOL\n\n _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW\n _SetConsoleTitleW.argtypes = [\n wintypes.LPCWSTR\n ]\n _SetConsoleTitleW.restype = wintypes.BOOL\n\n def _winapi_test(handle):\n csbi = CONSOLE_SCREEN_BUFFER_INFO()\n success = _GetConsoleScreenBufferInfo(\n handle, byref(csbi))\n return bool(success)\n\n def winapi_test():\n return any(_winapi_test(h) for h in\n (_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))\n\n def GetConsoleScreenBufferInfo(stream_id=STDOUT):\n handle = _GetStdHandle(stream_id)\n csbi = CONSOLE_SCREEN_BUFFER_INFO()\n success = _GetConsoleScreenBufferInfo(\n handle, byref(csbi))\n return csbi\n\n def SetConsoleTextAttribute(stream_id, attrs):\n handle = _GetStdHandle(stream_id)\n return _SetConsoleTextAttribute(handle, attrs)\n\n def SetConsoleCursorPosition(stream_id, position, adjust=True):\n position = COORD(*position)\n # If the position is out of range, do nothing.\n if position.Y <= 0 or position.X <= 0:\n return\n # Adjust for Windows\' SetConsoleCursorPosition:\n # 1. being 0-based, while ANSI is 1-based.\n # 2. 
expecting (x,y), while ANSI uses (y,x).\n adjusted_position = COORD(position.Y - 1, position.X - 1)\n if adjust:\n # Adjust for viewport\'s scroll position\n sr = GetConsoleScreenBufferInfo(STDOUT).srWindow\n adjusted_position.Y += sr.Top\n adjusted_position.X += sr.Left\n # Resume normal processing\n handle = _GetStdHandle(stream_id)\n return _SetConsoleCursorPosition(handle, adjusted_position)\n\n def FillConsoleOutputCharacter(stream_id, char, length, start):\n handle = _GetStdHandle(stream_id)\n char = c_char(char.encode())\n length = wintypes.DWORD(length)\n num_written = wintypes.DWORD(0)\n # Note that this is hard-coded for ANSI (vs wide) bytes.\n success = _FillConsoleOutputCharacterA(\n handle, char, length, start, byref(num_written))\n return num_written.value\n\n def FillConsoleOutputAttribute(stream_id, attr, length, start):\n \'\'\' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )\'\'\'\n handle = _GetStdHandle(stream_id)\n attribute = wintypes.WORD(attr)\n length = wintypes.DWORD(length)\n num_written = wintypes.DWORD(0)\n # Note that this is hard-coded for ANSI (vs wide) bytes.\n return _FillConsoleOutputAttribute(\n handle, attribute, length, start, byref(num_written))\n\n def SetConsoleTitle(title):\n return _SetConsoleTitleW(title)\n') + __stickytape_write_module('executing/__init__.py', b'"""\nGet information about what a frame is currently doing. Typical usage:\n\n import executing\n\n node = executing.Source.executing(frame).node\n # node will be an AST node or None\n"""\n\nfrom collections import namedtuple\n_VersionInfo = namedtuple(\'VersionInfo\', (\'major\', \'minor\', \'micro\'))\nfrom .executing import Source, Executing, only, NotOneValueFound, cache, future_flags\ntry:\n from .version import __version__\n if "dev" in __version__:\n raise ValueError\nexcept Exception:\n # version.py is auto-generated with the git tag when building\n __version__ = "???"\n __version_info__ = _VersionInfo(-1, -1, -1)\nelse:\n __version_info__ = _VersionInfo(*map(int, __version__.split(\'.\')))\n\n\n__all__ = ["Source"]\n') + __stickytape_write_module('executing/executing.py', b'import __future__\nimport ast\nimport dis\nimport functools\nimport inspect\nimport io\nimport linecache\nimport sys\nimport types\nfrom collections import defaultdict, namedtuple\nfrom itertools import islice\nfrom operator import attrgetter\nfrom threading import RLock\n\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n # noinspection PyUnresolvedReferences\n from functools import lru_cache\n # noinspection PyUnresolvedReferences\n from tokenize import detect_encoding\n from itertools import zip_longest\n # noinspection PyUnresolvedReferences,PyCompatibility\n from pathlib import Path\n\n cache = lru_cache(maxsize=None)\n text_type = str\nelse:\n from lib2to3.pgen2.tokenize import detect_encoding, cookie_re as encoding_pattern\n from itertools import izip_longest as zip_longest\n\n\n class Path(object):\n pass\n\n\n def cache(func):\n d = {}\n\n @functools.wraps(func)\n def wrapper(*args):\n if args in d:\n return d[args]\n result = d[args] = func(*args)\n return result\n\n return wrapper\n\n\n # noinspection PyUnresolvedReferences\n text_type = unicode\ntry:\n # noinspection PyUnresolvedReferences\n _get_instructions = dis.get_instructions\nexcept AttributeError:\n class Instruction(namedtuple(\'Instruction\', \'offset argval opname starts_line\')):\n lineno = None\n\n\n from dis import HAVE_ARGUMENT, EXTENDED_ARG, hasconst, opname, findlinestarts\n\n # Based on 
dis.disassemble from 2.7\n # Left as similar as possible for easy diff\n\n def _get_instructions(co):\n code = co.co_code\n linestarts = dict(findlinestarts(co))\n n = len(code)\n i = 0\n extended_arg = 0\n while i < n:\n offset = i\n c = code[i]\n op = ord(c)\n lineno = linestarts.get(i)\n argval = None\n i = i + 1\n if op >= HAVE_ARGUMENT:\n oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg\n extended_arg = 0\n i = i + 2\n if op == EXTENDED_ARG:\n extended_arg = oparg * 65536\n\n if op in hasconst:\n argval = co.co_consts[oparg]\n yield Instruction(offset, argval, opname[op], lineno)\n\n\ndef assert_(condition, message=""):\n """\n Like an assert statement, but unaffected by -O\n :param condition: value that is expected to be truthy\n :type message: Any\n """\n if not condition:\n raise AssertionError(str(message))\n\n\ndef get_instructions(co):\n lineno = None\n for inst in _get_instructions(co):\n lineno = inst.starts_line or lineno\n assert_(lineno)\n inst.lineno = lineno\n yield inst\n\n\nTESTING = 0\n\n\nclass NotOneValueFound(Exception):\n pass\n\n\ndef only(it):\n if hasattr(it, \'__len__\'):\n if len(it) != 1:\n raise NotOneValueFound(\'Expected one value, found %s\' % len(it))\n # noinspection PyTypeChecker\n return list(it)[0]\n\n lst = tuple(islice(it, 2))\n if len(lst) == 0:\n raise NotOneValueFound(\'Expected one value, found 0\')\n if len(lst) > 1:\n raise NotOneValueFound(\'Expected one value, found several\')\n return lst[0]\n\n\nclass Source(object):\n """\n The source code of a single file and associated metadata.\n\n The main method of interest is the classmethod `executing(frame)`.\n\n If you want an instance of this class, don\'t construct it.\n Ideally use the classmethod `for_frame(frame)`.\n If you don\'t have a frame, use `for_filename(filename [, module_globals])`.\n These methods cache instances by filename, so at most one instance exists per filename.\n\n Attributes:\n - filename\n - text\n - lines\n - tree: AST parsed from text, or None if text is not valid Python\n All nodes in the tree have an extra `parent` attribute\n\n Other methods of interest:\n - statements_at_line\n - asttokens\n - code_qualname\n """\n\n def __init__(self, filename, lines):\n """\n Don\'t call this constructor, see the class docstring.\n """\n\n self.filename = filename\n text = \'\'.join(lines)\n\n if not isinstance(text, text_type):\n encoding = self.detect_encoding(text)\n # noinspection PyUnresolvedReferences\n text = text.decode(encoding)\n lines = [line.decode(encoding) for line in lines]\n\n self.text = text\n self.lines = [line.rstrip(\'\\r\\n\') for line in lines]\n\n if PY3:\n ast_text = text\n else:\n # In python 2 it\'s a syntax error to parse unicode\n # with an encoding declaration, so we remove it but\n # leave empty lines in its place to keep line numbers the same\n ast_text = \'\'.join([\n \'\\n\' if i < 2 and encoding_pattern.match(line)\n else line\n for i, line in enumerate(lines)\n ])\n\n self._nodes_by_line = defaultdict(list)\n self.tree = None\n self._qualnames = {}\n\n try:\n self.tree = ast.parse(ast_text, filename=filename)\n except SyntaxError:\n pass\n else:\n for node in ast.walk(self.tree):\n for child in ast.iter_child_nodes(node):\n child.parent = node\n if hasattr(node, \'lineno\'):\n self._nodes_by_line[node.lineno].append(node)\n\n visitor = QualnameVisitor()\n visitor.visit(self.tree)\n self._qualnames = visitor.qualnames\n\n @classmethod\n def for_frame(cls, frame, use_cache=True):\n """\n Returns the `Source` object corresponding to 
the file the frame is executing in.\n """\n return cls.for_filename(frame.f_code.co_filename, frame.f_globals or {}, use_cache)\n\n @classmethod\n def for_filename(cls, filename, module_globals=None, use_cache=True):\n if isinstance(filename, Path):\n filename = str(filename)\n\n source_cache = cls._class_local(\'__source_cache\', {})\n if use_cache:\n try:\n return source_cache[filename]\n except KeyError:\n pass\n\n if not use_cache:\n linecache.checkcache(filename)\n\n lines = tuple(linecache.getlines(filename, module_globals))\n result = source_cache[filename] = cls._for_filename_and_lines(filename, lines)\n return result\n\n @classmethod\n def _for_filename_and_lines(cls, filename, lines):\n source_cache = cls._class_local(\'__source_cache_with_lines\', {})\n try:\n return source_cache[(filename, lines)]\n except KeyError:\n pass\n\n result = source_cache[(filename, lines)] = cls(filename, lines)\n return result\n\n @classmethod\n def lazycache(cls, frame):\n if hasattr(linecache, \'lazycache\'):\n linecache.lazycache(frame.f_code.co_filename, frame.f_globals)\n\n @classmethod\n def executing(cls, frame_or_tb):\n """\n Returns an `Executing` object representing the operation\n currently executing in the given frame or traceback object.\n """\n if isinstance(frame_or_tb, types.TracebackType):\n # https://docs.python.org/3/reference/datamodel.html#traceback-objects\n # "tb_lineno gives the line number where the exception occurred;\n # tb_lasti indicates the precise instruction.\n # The line number and last instruction in the traceback may differ\n # from the line number of its frame object\n # if the exception occurred in a try statement with no matching except clause\n # or with a finally clause."\n tb = frame_or_tb\n frame = tb.tb_frame\n lineno = tb.tb_lineno\n lasti = tb.tb_lasti\n else:\n frame = frame_or_tb\n lineno = frame.f_lineno\n lasti = frame.f_lasti\n\n code = frame.f_code\n key = (code, id(code), lasti)\n executing_cache = cls._class_local(\'__executing_cache\', {})\n\n try:\n args = executing_cache[key]\n except KeyError:\n def find(source, retry_cache):\n node = stmts = None\n tree = source.tree\n if tree:\n try:\n stmts = source.statements_at_line(lineno)\n if stmts:\n if code.co_filename.startswith(\'<ipython-input-\'):\n tree = _extract_ipython_statement(stmts, tree)\n node = NodeFinder(frame, stmts, tree, lasti).result\n except Exception as e:\n # These exceptions can be caused by the source code having changed\n # so the cached Source doesn\'t match the running code\n # (e.g. 
when using IPython %autoreload)\n # Try again with a fresh Source object\n if retry_cache and isinstance(e, (NotOneValueFound, AssertionError)):\n return find(\n source=cls.for_frame(frame, use_cache=False),\n retry_cache=False,\n )\n if TESTING:\n raise\n\n if node:\n new_stmts = {statement_containing_node(node)}\n assert_(new_stmts <= stmts)\n stmts = new_stmts\n\n return source, node, stmts\n\n args = find(source=cls.for_frame(frame), retry_cache=True)\n executing_cache[key] = args\n\n return Executing(frame, *args)\n\n @classmethod\n def _class_local(cls, name, default):\n """\n Returns an attribute directly associated with this class\n (as opposed to subclasses), setting default if necessary\n """\n # classes have a mappingproxy preventing us from using setdefault\n result = cls.__dict__.get(name, default)\n setattr(cls, name, result)\n return result\n\n @cache\n def statements_at_line(self, lineno):\n """\n Returns the statement nodes overlapping the given line.\n\n Returns at most one statement unless semicolons are present.\n\n If the `text` attribute is not valid python, meaning\n `tree` is None, returns an empty set.\n\n Otherwise, `Source.for_frame(frame).statements_at_line(frame.f_lineno)`\n should return at least one statement.\n """\n\n return {\n statement_containing_node(node)\n for node in\n self._nodes_by_line[lineno]\n }\n\n @cache\n def asttokens(self):\n """\n Returns an ASTTokens object for getting the source of specific AST nodes.\n\n See http://asttokens.readthedocs.io/en/latest/api-index.html\n """\n from asttokens import ASTTokens # must be installed separately\n return ASTTokens(\n self.text,\n tree=self.tree,\n filename=self.filename,\n )\n\n @staticmethod\n def decode_source(source):\n if isinstance(source, bytes):\n encoding = Source.detect_encoding(source)\n source = source.decode(encoding)\n return source\n\n @staticmethod\n def detect_encoding(source):\n return detect_encoding(io.BytesIO(source).readline)[0]\n\n def code_qualname(self, code):\n """\n Imitates the __qualname__ attribute of functions for code objects.\n Given:\n\n - A function `func`\n - A frame `frame` for an execution of `func`, meaning:\n `frame.f_code is func.__code__`\n\n `Source.for_frame(frame).code_qualname(frame.f_code)`\n will be equal to `func.__qualname__`*. 
Works for Python 2 as well,\n where of course no `__qualname__` attribute exists.\n\n Falls back to `code.co_name` if there is no appropriate qualname.\n\n Based on https://github.com/wbolster/qualname\n\n (* unless `func` is a lambda\n nested inside another lambda on the same line, in which case\n the outer lambda\'s qualname will be returned for the codes\n of both lambdas)\n """\n assert_(code.co_filename == self.filename)\n return self._qualnames.get((code.co_name, code.co_firstlineno), code.co_name)\n\n\nclass Executing(object):\n """\n Information about the operation a frame is currently executing.\n\n Generally you will just want `node`, which is the AST node being executed,\n or None if it\'s unknown.\n """\n\n def __init__(self, frame, source, node, stmts):\n self.frame = frame\n self.source = source\n self.node = node\n self.statements = stmts\n\n def code_qualname(self):\n return self.source.code_qualname(self.frame.f_code)\n\n def text(self):\n return self.source.asttokens().get_text(self.node)\n\n def text_range(self):\n return self.source.asttokens().get_text_range(self.node)\n\n\nclass QualnameVisitor(ast.NodeVisitor):\n def __init__(self):\n super(QualnameVisitor, self).__init__()\n self.stack = []\n self.qualnames = {}\n\n def add_qualname(self, node, name=None):\n name = name or node.name\n self.stack.append(name)\n if getattr(node, \'decorator_list\', ()):\n lineno = node.decorator_list[0].lineno\n else:\n lineno = node.lineno\n self.qualnames.setdefault((name, lineno), ".".join(self.stack))\n\n def visit_FunctionDef(self, node, name=None):\n self.add_qualname(node, name)\n self.stack.append(\'<locals>\')\n if isinstance(node, ast.Lambda):\n children = [node.body]\n else:\n children = node.body\n for child in children:\n self.visit(child)\n self.stack.pop()\n self.stack.pop()\n\n # Find lambdas in the function definition outside the body,\n # e.g. 
decorators or default arguments\n # Based on iter_child_nodes\n for field, child in ast.iter_fields(node):\n if field == \'body\':\n continue\n if isinstance(child, ast.AST):\n self.visit(child)\n elif isinstance(child, list):\n for grandchild in child:\n if isinstance(grandchild, ast.AST):\n self.visit(grandchild)\n\n visit_AsyncFunctionDef = visit_FunctionDef\n\n def visit_Lambda(self, node):\n # noinspection PyTypeChecker\n self.visit_FunctionDef(node, \'<lambda>\')\n\n def visit_ClassDef(self, node):\n self.add_qualname(node)\n self.generic_visit(node)\n self.stack.pop()\n\n\nfuture_flags = sum(\n getattr(__future__, fname).compiler_flag\n for fname in __future__.all_feature_names\n)\n\n\ndef compile_similar_to(source, matching_code):\n return compile(\n source,\n matching_code.co_filename,\n \'exec\',\n flags=future_flags & matching_code.co_flags,\n dont_inherit=True,\n )\n\n\nsentinel = \'io8urthglkjdghvljusketgIYRFYUVGHFRTBGVHKGF78678957647698\'\n\n\nclass NodeFinder(object):\n def __init__(self, frame, stmts, tree, lasti):\n self.frame = frame\n self.tree = tree\n self.code = code = frame.f_code\n self.is_pytest = any(\n \'pytest\' in name.lower()\n for group in [code.co_names, code.co_varnames]\n for name in group\n )\n\n if self.is_pytest:\n self.ignore_linenos = frozenset(assert_linenos(tree))\n else:\n self.ignore_linenos = frozenset()\n\n instruction = self.get_actual_current_instruction(lasti)\n op_name = instruction.opname\n self.lasti = instruction.offset\n\n if op_name.startswith(\'CALL_\'):\n typ = ast.Call\n elif op_name.startswith((\'BINARY_SUBSCR\', \'SLICE+\')):\n typ = ast.Subscript\n elif op_name.startswith(\'BINARY_\'):\n typ = ast.BinOp\n elif op_name.startswith(\'UNARY_\'):\n typ = ast.UnaryOp\n elif op_name in (\'LOAD_ATTR\', \'LOAD_METHOD\', \'LOOKUP_METHOD\'):\n typ = ast.Attribute\n elif op_name in (\'COMPARE_OP\', \'IS_OP\', \'CONTAINS_OP\'):\n typ = ast.Compare\n else:\n raise RuntimeError(op_name)\n\n with lock:\n exprs = {\n node\n for stmt in stmts\n for node in ast.walk(stmt)\n if isinstance(node, typ)\n if not (hasattr(node, "ctx") and not isinstance(node.ctx, ast.Load))\n }\n\n self.result = only(list(self.matching_nodes(exprs)))\n\n def clean_instructions(self, code):\n return [\n inst\n for inst in get_instructions(code)\n if inst.opname != \'EXTENDED_ARG\'\n if inst.lineno not in self.ignore_linenos\n ]\n\n def get_original_clean_instructions(self):\n result = self.clean_instructions(self.code)\n\n # pypy sometimes (when is not clear)\n # inserts JUMP_IF_NOT_DEBUG instructions in bytecode\n # If they\'re not present in our compiled instructions,\n # ignore them in the original bytecode\n if not any(\n inst.opname == "JUMP_IF_NOT_DEBUG"\n for inst in self.compile_instructions()\n ):\n result = [\n inst for inst in result\n if inst.opname != "JUMP_IF_NOT_DEBUG"\n ]\n\n return result\n\n def matching_nodes(self, exprs):\n original_instructions = self.get_original_clean_instructions()\n original_index = only(\n i\n for i, inst in enumerate(original_instructions)\n if inst.offset == self.lasti\n )\n for i, expr in enumerate(exprs):\n setter = get_setter(expr)\n # noinspection PyArgumentList\n replacement = ast.BinOp(\n left=expr,\n op=ast.Pow(),\n right=ast.Str(s=sentinel),\n )\n ast.fix_missing_locations(replacement)\n setter(replacement)\n try:\n instructions = self.compile_instructions()\n finally:\n setter(expr)\n indices = [\n i\n for i, instruction in enumerate(instructions)\n if instruction.argval == sentinel\n ]\n\n # There can be several indices when 
the bytecode is duplicated,\n # as happens in a finally block in 3.9+\n # First we remove the opcodes caused by our modifications\n for index_num, sentinel_index in enumerate(indices):\n # Adjustment for removing sentinel instructions below\n # in past iterations\n sentinel_index -= index_num * 2\n\n assert_(instructions.pop(sentinel_index).opname == \'LOAD_CONST\')\n assert_(instructions.pop(sentinel_index).opname == \'BINARY_POWER\')\n\n # Then we see if any of the instruction indices match\n for index_num, sentinel_index in enumerate(indices):\n sentinel_index -= index_num * 2\n new_index = sentinel_index - 1\n\n if new_index != original_index:\n continue\n\n original_inst = original_instructions[original_index]\n new_inst = instructions[new_index]\n\n # In Python 3.9+, changing \'not x in y\' to \'not sentinel_transformation(x in y)\'\n # changes a CONTAINS_OP(invert=1) to CONTAINS_OP(invert=0),,UNARY_NOT\n if (\n original_inst.opname == new_inst.opname in (\'CONTAINS_OP\', \'IS_OP\')\n and original_inst.arg != new_inst.arg\n and (\n original_instructions[original_index + 1].opname\n != instructions[new_index + 1].opname == \'UNARY_NOT\'\n )):\n # Remove the difference for the upcoming assert\n instructions.pop(new_index + 1)\n\n # Check that the modified instructions don\'t have anything unexpected\n for inst1, inst2 in zip_longest(original_instructions, instructions):\n assert_(\n inst1.opname == inst2.opname or\n all(\n \'JUMP_IF_\' in inst.opname\n for inst in [inst1, inst2]\n ) or\n all(\n inst.opname in (\'JUMP_FORWARD\', \'JUMP_ABSOLUTE\')\n for inst in [inst1, inst2]\n )\n or (\n inst1.opname == \'PRINT_EXPR\' and\n inst2.opname == \'POP_TOP\'\n )\n or (\n inst1.opname in (\'LOAD_METHOD\', \'LOOKUP_METHOD\') and\n inst2.opname == \'LOAD_ATTR\'\n )\n or (\n inst1.opname == \'CALL_METHOD\' and\n inst2.opname == \'CALL_FUNCTION\'\n ),\n (inst1, inst2, ast.dump(expr), expr.lineno, self.code.co_filename)\n )\n\n yield expr\n\n def compile_instructions(self):\n module_code = compile_similar_to(self.tree, self.code)\n code = only(self.find_codes(module_code))\n return self.clean_instructions(code)\n\n def find_codes(self, root_code):\n checks = [\n attrgetter(\'co_firstlineno\'),\n attrgetter(\'co_name\'),\n attrgetter(\'co_freevars\'),\n attrgetter(\'co_cellvars\'),\n ]\n if not self.is_pytest:\n checks += [\n attrgetter(\'co_names\'),\n attrgetter(\'co_varnames\'),\n ]\n\n def matches(c):\n return all(\n f(c) == f(self.code)\n for f in checks\n )\n\n code_options = []\n if matches(root_code):\n code_options.append(root_code)\n\n def finder(code):\n for const in code.co_consts:\n if not inspect.iscode(const):\n continue\n\n if matches(const):\n code_options.append(const)\n finder(const)\n\n finder(root_code)\n return code_options\n\n def get_actual_current_instruction(self, lasti):\n """\n Get the instruction corresponding to the current\n frame offset, skipping EXTENDED_ARG instructions\n """\n # Don\'t use get_original_clean_instructions\n # because we need the actual instructions including\n # EXTENDED_ARG\n instructions = list(get_instructions(self.code))\n index = only(\n i\n for i, inst in enumerate(instructions)\n if inst.offset == lasti\n )\n\n while True:\n instruction = instructions[index]\n if instruction.opname != "EXTENDED_ARG":\n return instruction\n index += 1\n\n\ndef get_setter(node):\n parent = node.parent\n for name, field in ast.iter_fields(parent):\n if field is node:\n return lambda new_node: setattr(parent, name, new_node)\n elif isinstance(field, list):\n for 
i, item in enumerate(field):\n if item is node:\n def setter(new_node):\n field[i] = new_node\n\n return setter\n\n\nlock = RLock()\n\n\n@cache\ndef statement_containing_node(node):\n while not isinstance(node, ast.stmt):\n node = node.parent\n return node\n\n\ndef assert_linenos(tree):\n for node in ast.walk(tree):\n if (\n hasattr(node, \'parent\') and\n hasattr(node, \'lineno\') and\n isinstance(statement_containing_node(node), ast.Assert)\n ):\n yield node.lineno\n\n\ndef _extract_ipython_statement(stmts, tree):\n # IPython separates each statement in a cell to be executed separately\n # So NodeFinder should only compile one statement at a time or it\n # will find a code mismatch.\n stmt = list(stmts)[0]\n while not isinstance(stmt.parent, ast.Module):\n stmt = stmt.parent\n # use `ast.parse` instead of `ast.Module` for better portability\n # python3.8 changes the signature of `ast.Module`\n # Inspired by https://github.com/pallets/werkzeug/pull/1552/files\n tree = ast.parse("")\n tree.body = [stmt]\n ast.copy_location(tree, stmt)\n return tree\n') + __stickytape_write_module('executing/version.py', b"__version__ = '0.5.4'") + __stickytape_write_module('pygments/__init__.py', b'# -*- coding: utf-8 -*-\n"""\n Pygments\n ~~~~~~~~\n\n Pygments is a syntax highlighting package written in Python.\n\n It is a generic syntax highlighter for general use in all kinds of software\n such as forum systems, wikis or other applications that need to prettify\n source code. Highlights are:\n\n * a wide range of common languages and markup formats is supported\n * special attention is paid to details, increasing quality by a fair amount\n * support for new languages and formats are added easily\n * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image\n formats that PIL supports, and ANSI sequences\n * it is usable as a command-line tool and as a library\n * ... and it highlights even Brainfuck!\n\n The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.\n\n .. 
_Pygments master branch:\n https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\nimport sys\n\nfrom pygments.util import StringIO, BytesIO\n\n__version__ = \'2.5.2\'\n__docformat__ = \'restructuredtext\'\n\n__all__ = [\'lex\', \'format\', \'highlight\']\n\n\ndef lex(code, lexer):\n """\n Lex ``code`` with ``lexer`` and return an iterable of tokens.\n """\n try:\n return lexer.get_tokens(code)\n except TypeError as err:\n if (isinstance(err.args[0], str) and\n (\'unbound method get_tokens\' in err.args[0] or\n \'missing 1 required positional argument\' in err.args[0])):\n raise TypeError(\'lex() argument must be a lexer instance, \'\n \'not a class\')\n raise\n\n\ndef format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin\n """\n Format a tokenlist ``tokens`` with the formatter ``formatter``.\n\n If ``outfile`` is given and a valid file object (an object\n with a ``write`` method), the result will be written to it, otherwise\n it is returned as a string.\n """\n try:\n if not outfile:\n realoutfile = getattr(formatter, \'encoding\', None) and BytesIO() or StringIO()\n formatter.format(tokens, realoutfile)\n return realoutfile.getvalue()\n else:\n formatter.format(tokens, outfile)\n except TypeError as err:\n if (isinstance(err.args[0], str) and\n (\'unbound method format\' in err.args[0] or\n \'missing 1 required positional argument\' in err.args[0])):\n raise TypeError(\'format() argument must be a formatter instance, \'\n \'not a class\')\n raise\n\n\ndef highlight(code, lexer, formatter, outfile=None):\n """\n Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.\n\n If ``outfile`` is given and a valid file object (an object\n with a ``write`` method), the result will be written to it, otherwise\n it is returned as a string.\n """\n return format(lex(code, lexer), formatter, outfile)\n\n\nif __name__ == \'__main__\': # pragma: no cover\n from pygments.cmdline import main\n sys.exit(main(sys.argv))\n') + __stickytape_write_module('pygments/util.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.util\n ~~~~~~~~~~~~~\n\n Utility functions.\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nimport re\nimport sys\n\n\nsplit_path_re = re.compile(r\'[/\\\\ ]\')\ndoctype_lookup_re = re.compile(r\'\'\'\n (<\\?.*?\\?>)?\\s*\n <!DOCTYPE\\s+(\n [a-zA-Z_][a-zA-Z0-9]*\\s+\n [a-zA-Z_][a-zA-Z0-9]*\\s+\n "[^"]*")\n [^>]*>\n\'\'\', re.DOTALL | re.MULTILINE | re.VERBOSE)\ntag_re = re.compile(r\'<(.+?)(\\s.*?)?>.*?</.+?>\',\n re.UNICODE | re.IGNORECASE | re.DOTALL | re.MULTILINE)\nxml_decl_re = re.compile(r\'\\s*<\\?xml[^>]*\\?>\', re.I)\n\n\nclass ClassNotFound(ValueError):\n """Raised if one of the lookup functions didn\'t find a matching class."""\n\n\nclass OptionError(Exception):\n pass\n\n\ndef get_choice_opt(options, optname, allowed, default=None, normcase=False):\n string = options.get(optname, default)\n if normcase:\n string = string.lower()\n if string not in allowed:\n raise OptionError(\'Value for option %s must be one of %s\' %\n (optname, \', \'.join(map(str, allowed))))\n return string\n\n\ndef get_bool_opt(options, optname, default=None):\n string = options.get(optname, default)\n if isinstance(string, bool):\n return string\n elif isinstance(string, int):\n return bool(string)\n elif not isinstance(string, string_types):\n raise OptionError(\'Invalid type %r for option %s; use \'\n \'1/0, yes/no, true/false, on/off\' % 
(\n string, optname))\n elif string.lower() in (\'1\', \'yes\', \'true\', \'on\'):\n return True\n elif string.lower() in (\'0\', \'no\', \'false\', \'off\'):\n return False\n else:\n raise OptionError(\'Invalid value %r for option %s; use \'\n \'1/0, yes/no, true/false, on/off\' % (\n string, optname))\n\n\ndef get_int_opt(options, optname, default=None):\n string = options.get(optname, default)\n try:\n return int(string)\n except TypeError:\n raise OptionError(\'Invalid type %r for option %s; you \'\n \'must give an integer value\' % (\n string, optname))\n except ValueError:\n raise OptionError(\'Invalid value %r for option %s; you \'\n \'must give an integer value\' % (\n string, optname))\n\n\ndef get_list_opt(options, optname, default=None):\n val = options.get(optname, default)\n if isinstance(val, string_types):\n return val.split()\n elif isinstance(val, (list, tuple)):\n return list(val)\n else:\n raise OptionError(\'Invalid type %r for option %s; you \'\n \'must give a list value\' % (\n val, optname))\n\n\ndef docstring_headline(obj):\n if not obj.__doc__:\n return \'\'\n res = []\n for line in obj.__doc__.strip().splitlines():\n if line.strip():\n res.append(" " + line.strip())\n else:\n break\n return \'\'.join(res).lstrip()\n\n\ndef make_analysator(f):\n """Return a static text analyser function that returns float values."""\n def text_analyse(text):\n try:\n rv = f(text)\n except Exception:\n return 0.0\n if not rv:\n return 0.0\n try:\n return min(1.0, max(0.0, float(rv)))\n except (ValueError, TypeError):\n return 0.0\n text_analyse.__doc__ = f.__doc__\n return staticmethod(text_analyse)\n\n\ndef shebang_matches(text, regex):\n r"""Check if the given regular expression matches the last part of the\n shebang if one exists.\n\n >>> from pygments.util import shebang_matches\n >>> shebang_matches(\'#!/usr/bin/env python\', r\'python(2\\.\\d)?\')\n True\n >>> shebang_matches(\'#!/usr/bin/python2.4\', r\'python(2\\.\\d)?\')\n True\n >>> shebang_matches(\'#!/usr/bin/python-ruby\', r\'python(2\\.\\d)?\')\n False\n >>> shebang_matches(\'#!/usr/bin/python/ruby\', r\'python(2\\.\\d)?\')\n False\n >>> shebang_matches(\'#!/usr/bin/startsomethingwith python\',\n ... 
r\'python(2\\.\\d)?\')\n True\n\n It also checks for common windows executable file extensions::\n\n >>> shebang_matches(\'#!C:\\\\Python2.4\\\\Python.exe\', r\'python(2\\.\\d)?\')\n True\n\n Parameters (``\'-f\'`` or ``\'--foo\'`` are ignored so ``\'perl\'`` does\n the same as ``\'perl -e\'``)\n\n Note that this method automatically searches the whole string (eg:\n the regular expression is wrapped in ``\'^$\'``)\n """\n index = text.find(\'\\n\')\n if index >= 0:\n first_line = text[:index].lower()\n else:\n first_line = text.lower()\n if first_line.startswith(\'#!\'):\n try:\n found = [x for x in split_path_re.split(first_line[2:].strip())\n if x and not x.startswith(\'-\')][-1]\n except IndexError:\n return False\n regex = re.compile(r\'^%s(\\.(exe|cmd|bat|bin))?$\' % regex, re.IGNORECASE)\n if regex.search(found) is not None:\n return True\n return False\n\n\ndef doctype_matches(text, regex):\n """Check if the doctype matches a regular expression (if present).\n\n Note that this method only checks the first part of a DOCTYPE.\n eg: \'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"\'\n """\n m = doctype_lookup_re.match(text)\n if m is None:\n return False\n doctype = m.group(2)\n return re.compile(regex, re.I).match(doctype.strip()) is not None\n\n\ndef html_doctype_matches(text):\n """Check if the file looks like it has a html doctype."""\n return doctype_matches(text, r\'html\')\n\n\n_looks_like_xml_cache = {}\n\n\ndef looks_like_xml(text):\n """Check if a doctype exists or if we have some tags."""\n if xml_decl_re.match(text):\n return True\n key = hash(text)\n try:\n return _looks_like_xml_cache[key]\n except KeyError:\n m = doctype_lookup_re.match(text)\n if m is not None:\n return True\n rv = tag_re.search(text[:1000]) is not None\n _looks_like_xml_cache[key] = rv\n return rv\n\n\n# Python narrow build compatibility\n\ndef _surrogatepair(c):\n # Given a unicode character code\n # with length greater than 16 bits,\n # return the two 16 bit surrogate pair.\n # From example D28 of:\n # http://www.unicode.org/book/ch03.pdf\n return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))\n\n\ndef unirange(a, b):\n """Returns a regular expression string to match the given non-BMP range."""\n if b < a:\n raise ValueError("Bad character range")\n if a < 0x10000 or b < 0x10000:\n raise ValueError("unirange is only defined for non-BMP ranges")\n\n if sys.maxunicode > 0xffff:\n # wide build\n return u\'[%s-%s]\' % (unichr(a), unichr(b))\n else:\n # narrow build stores surrogates, and the \'re\' module handles them\n # (incorrectly) as characters. Since there is still ordering among\n # these characters, expand the range to one that it understands. 
Some\n # background in http://bugs.python.org/issue3665 and\n # http://bugs.python.org/issue12749\n #\n # Additionally, the lower constants are using unichr rather than\n # literals because jython [which uses the wide path] can\'t load this\n # file if they are literals.\n ah, al = _surrogatepair(a)\n bh, bl = _surrogatepair(b)\n if ah == bh:\n return u\'(?:%s[%s-%s])\' % (unichr(ah), unichr(al), unichr(bl))\n else:\n buf = []\n buf.append(u\'%s[%s-%s]\' %\n (unichr(ah), unichr(al),\n ah == bh and unichr(bl) or unichr(0xdfff)))\n if bh - ah > 1:\n buf.append(u\'[%s-%s][%s-%s]\' %\n (unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))\n if ah != bh:\n buf.append(u\'%s[%s-%s]\' %\n (unichr(bh), unichr(0xdc00), unichr(bl)))\n\n return u\'(?:\' + u\'|\'.join(buf) + u\')\'\n\n\ndef format_lines(var_name, seq, raw=False, indent_level=0):\n """Formats a sequence of strings for output."""\n lines = []\n base_indent = \' \' * indent_level * 4\n inner_indent = \' \' * (indent_level + 1) * 4\n lines.append(base_indent + var_name + \' = (\')\n if raw:\n # These should be preformatted reprs of, say, tuples.\n for i in seq:\n lines.append(inner_indent + i + \',\')\n else:\n for i in seq:\n # Force use of single quotes\n r = repr(i + \'"\')\n lines.append(inner_indent + r[:-2] + r[-1] + \',\')\n lines.append(base_indent + \')\')\n return \'\\n\'.join(lines)\n\n\ndef duplicates_removed(it, already_seen=()):\n """\n Returns a list with duplicates removed from the iterable `it`.\n\n Order is preserved.\n """\n lst = []\n seen = set()\n for i in it:\n if i in seen or i in already_seen:\n continue\n lst.append(i)\n seen.add(i)\n return lst\n\n\nclass Future(object):\n """Generic class to defer some work.\n\n Handled specially in RegexLexerMeta, to support regex string construction at\n first use.\n """\n def get(self):\n raise NotImplementedError\n\n\ndef guess_decode(text):\n """Decode *text* with guessed encoding.\n\n First try UTF-8; this should fail for non-UTF-8 encodings.\n Then try the preferred locale encoding.\n Fall back to latin-1, which always works.\n """\n try:\n text = text.decode(\'utf-8\')\n return text, \'utf-8\'\n except UnicodeDecodeError:\n try:\n import locale\n prefencoding = locale.getpreferredencoding()\n text = text.decode()\n return text, prefencoding\n except (UnicodeDecodeError, LookupError):\n text = text.decode(\'latin1\')\n return text, \'latin1\'\n\n\ndef guess_decode_from_terminal(text, term):\n """Decode *text* coming from terminal *term*.\n\n First try the terminal encoding, if given.\n Then try UTF-8. 
Then try the preferred locale encoding.\n Fall back to latin-1, which always works.\n """\n if getattr(term, \'encoding\', None):\n try:\n text = text.decode(term.encoding)\n except UnicodeDecodeError:\n pass\n else:\n return text, term.encoding\n return guess_decode(text)\n\n\ndef terminal_encoding(term):\n """Return our best guess of encoding for the given *term*."""\n if getattr(term, \'encoding\', None):\n return term.encoding\n import locale\n return locale.getpreferredencoding()\n\n\n# Python 2/3 compatibility\n\nif sys.version_info < (3, 0):\n unichr = unichr\n xrange = xrange\n string_types = (str, unicode)\n text_type = unicode\n u_prefix = \'u\'\n iteritems = dict.iteritems\n itervalues = dict.itervalues\n import StringIO\n import cStringIO\n # unfortunately, io.StringIO in Python 2 doesn\'t accept str at all\n StringIO = StringIO.StringIO\n BytesIO = cStringIO.StringIO\nelse:\n unichr = chr\n xrange = range\n string_types = (str,)\n text_type = str\n u_prefix = \'\'\n iteritems = dict.items\n itervalues = dict.values\n from io import StringIO, BytesIO, TextIOWrapper\n\n class UnclosingTextIOWrapper(TextIOWrapper):\n # Don\'t close underlying buffer on destruction.\n def close(self):\n self.flush()\n\n\ndef add_metaclass(metaclass):\n """Class decorator for creating a class with a metaclass."""\n def wrapper(cls):\n orig_vars = cls.__dict__.copy()\n orig_vars.pop(\'__dict__\', None)\n orig_vars.pop(\'__weakref__\', None)\n for slots_var in orig_vars.get(\'__slots__\', ()):\n orig_vars.pop(slots_var)\n return metaclass(cls.__name__, cls.__bases__, orig_vars)\n return wrapper\n') + __stickytape_write_module('pygments/cmdline.py', b'# -*- coding: utf-8 -*-\n"""\n pygments.cmdline\n ~~~~~~~~~~~~~~~~\n\n Command line interface.\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n"""\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport getopt\nfrom textwrap import dedent\n\nfrom pygments import __version__, highlight\nfrom pygments.util import ClassNotFound, OptionError, docstring_headline, \\\n guess_decode, guess_decode_from_terminal, terminal_encoding\nfrom pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \\\n load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename\nfrom pygments.lexers.special import TextLexer\nfrom pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter\nfrom pygments.formatters import get_all_formatters, get_formatter_by_name, \\\n load_formatter_from_file, get_formatter_for_filename, find_formatter_class\nfrom pygments.formatters.terminal import TerminalFormatter\nfrom pygments.formatters.terminal256 import Terminal256Formatter\nfrom pygments.filters import get_all_filters, find_filter_class\nfrom pygments.styles import get_all_styles, get_style_by_name\n\n\nUSAGE = """\\\nUsage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]\n [-O <options>] [-P <option=value>] [-s] [-v] [-x] [-o <outfile>] [<infile>]\n\n %s -S

\' : \'\\U0001d4ab\',\n \'\\\\\' : \'\\U0001d4ac\',\n \'\\\\\' : \'\\U0000211b\',\n \'\\\\\' : \'\\U0001d4ae\',\n \'\\\\\' : \'\\U0001d4af\',\n \'\\\\\' : \'\\U0001d4b0\',\n \'\\\\\' : \'\\U0001d4b1\',\n \'\\\\\' : \'\\U0001d4b2\',\n \'\\\\\' : \'\\U0001d4b3\',\n \'\\\\\' : \'\\U0001d4b4\',\n \'\\\\\' : \'\\U0001d4b5\',\n \'\\\\\' : \'\\U0001d5ba\',\n \'\\\\\' : \'\\U0001d5bb\',\n \'\\\\\' : \'\\U0001d5bc\',\n \'\\\\\' : \'\\U0001d5bd\',\n \'\\\\\' : \'\\U0001d5be\',\n \'\\\\\' : \'\\U0001d5bf\',\n \'\\\\\' : \'\\U0001d5c0\',\n \'\\\\\' : \'\\U0001d5c1\',\n \'\\\\\' : \'\\U0001d5c2\',\n \'\\\\\' : \'\\U0001d5c3\',\n \'\\\\\' : \'\\U0001d5c4\',\n \'\\\\\' : \'\\U0001d5c5\',\n \'\\\\\' : \'\\U0001d5c6\',\n \'\\\\\' : \'\\U0001d5c7\',\n \'\\\\\' : \'\\U0001d5c8\',\n \'\\\\