Remove some unused Python packages

Ian Neal, 3 months ago
parent commit c81517218d

+0 -45838
mozilla-release/patches/1602773-2no3-75a1.patch

@@ -1,45838 +0,0 @@
-# HG changeset patch
-# User Alessio Placitelli <alessio.placitelli@gmail.com>
-# Date 1583163245 0
-# Node ID de7b6ed631c807e831743b64bd33fcbfc6811f4f
-# Parent  2389395c5ec44dab6ba64c198ce2f1ac44b10289
-Bug 1602773 - Vendor glean_parser and its dependencies. r=ahal CLOSED TREE
-
-Differential Revision: https://phabricator.services.mozilla.com/D64313
-
-diff --git a/build/virtualenv_packages.txt b/build/virtualenv_packages.txt
---- a/build/virtualenv_packages.txt
-+++ b/build/virtualenv_packages.txt
-@@ -1,38 +1,49 @@
- mozilla.pth:python/mach
- mozilla.pth:python/mozboot
- mozilla.pth:python/mozbuild
- mozilla.pth:python/mozlint
- mozilla.pth:python/mozperftest
- mozilla.pth:python/mozrelease
- mozilla.pth:python/mozterm
- mozilla.pth:python/mozversioncontrol
-+mozilla.pth:third_party/python/appdirs
- mozilla.pth:third_party/python/atomicwrites
- mozilla.pth:third_party/python/attrs/src
- python2:mozilla.pth:third_party/python/backports
- mozilla.pth:third_party/python/biplist
- mozilla.pth:third_party/python/blessings
- mozilla.pth:third_party/python/Click
- mozilla.pth:third_party/python/compare-locales
- mozilla.pth:third_party/python/configobj
- mozilla.pth:third_party/python/cookies
- mozilla.pth:third_party/python/cram
-+mozilla.pth:third_party/python/diskcache
- mozilla.pth:third_party/python/distro
- mozilla.pth:third_party/python/dlmanager
- mozilla.pth:third_party/python/ecdsa/src
- python2:mozilla.pth:third_party/python/enum34
- mozilla.pth:third_party/python/fluent.migrate
- mozilla.pth:third_party/python/fluent.syntax
- mozilla.pth:third_party/python/funcsigs
- python2:mozilla.pth:third_party/python/futures
-+python3:mozilla.pth:third_party/python/glean_parser
-+mozilla.pth:third_party/python/importlib_metadata
-+mozilla.pth:third_party/python/iso8601
-+mozilla.pth:third_party/python/Jinja2
-+mozilla.pth:third_party/python/jsonschema
-+mozilla.pth:third_party/python/MarkupSafe/src
- mozilla.pth:third_party/python/more-itertools
- mozilla.pth:third_party/python/packaging
- mozilla.pth:third_party/python/pathlib2
-+mozilla.pth:third_party/python/pathspec
-+mozilla.pth:third_party/python/pep487
- mozilla.pth:third_party/python/gyp/pylib
-+mozilla.pth:third_party/python/pyrsistent
- mozilla.pth:third_party/python/python-hglib
- mozilla.pth:third_party/python/pluggy
- mozilla.pth:third_party/python/jsmin
- !windows:optional:setup.py:third_party/python/psutil:build_ext:--inplace
- !windows:mozilla.pth:third_party/python/psutil
- windows:mozilla.pth:third_party/python/psutil-cp27-none-win_amd64
- mozilla.pth:third_party/python/pylru
- mozilla.pth:third_party/python/pyparsing
-@@ -46,16 +57,18 @@ mozilla.pth:third_party/python/slugid
- mozilla.pth:third_party/python/py
- mozilla.pth:third_party/python/pytest/src
- mozilla.pth:third_party/python/pytoml
- mozilla.pth:third_party/python/redo
- mozilla.pth:third_party/python/responses
- mozilla.pth:third_party/python/six
- mozilla.pth:third_party/python/voluptuous
- mozilla.pth:third_party/python/json-e
-+mozilla.pth:third_party/python/yamllint
-+mozilla.pth:third_party/python/zipp
- mozilla.pth:build
- mozilla.pth:build/pymake
- mozilla.pth:config
- mozilla.pth:config/mozunit
- mozilla.pth:dom/bindings
- mozilla.pth:dom/bindings/parser
- mozilla.pth:layout/tools/reftest
- mozilla.pth:third_party/python/ply
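
The manifest above is mach's virtualenv package list: each line is an action:path entry (most commonly mozilla.pth:, which adds an in-tree path to sys.path), optionally gated by prefixes such as python2:/python3: for interpreter version or windows:/!windows: for platform, which is why glean_parser is registered for Python 3 only. A minimal sketch of how such a prefix gate could be evaluated; entry_applies is a hypothetical helper for illustration, not mach's actual parser, and it ignores the optional:setup.py: form:

    import os
    import sys

    def entry_applies(line):
        # Hypothetical helper: strip filter prefixes and return the
        # remaining "action:path", or None if a prefix rules it out here.
        want_py = "python%d" % sys.version_info[0]
        is_win = os.name == "nt"
        while True:
            prefix, _, rest = line.partition(":")
            if prefix in ("python2", "python3"):
                if prefix != want_py:
                    return None
            elif prefix == "windows":
                if not is_win:
                    return None
            elif prefix == "!windows":
                if is_win:
                    return None
            else:
                return line  # no filter prefixes left
            line = rest

    # On a Python 3 interpreter this yields the pth entry; on Python 2, None.
    print(entry_applies("python3:mozilla.pth:third_party/python/glean_parser"))
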
-diff --git a/third_party/python/Jinja2/jinja2/__init__.py b/third_party/python/Jinja2/jinja2/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/__init__.py
-@@ -0,0 +1,44 @@
-+# -*- coding: utf-8 -*-
-+"""Jinja is a template engine written in pure Python. It provides a
-+non-XML syntax that supports inline expressions and an optional
-+sandboxed environment.
-+"""
-+from markupsafe import escape
-+from markupsafe import Markup
-+
-+from .bccache import BytecodeCache
-+from .bccache import FileSystemBytecodeCache
-+from .bccache import MemcachedBytecodeCache
-+from .environment import Environment
-+from .environment import Template
-+from .exceptions import TemplateAssertionError
-+from .exceptions import TemplateError
-+from .exceptions import TemplateNotFound
-+from .exceptions import TemplateRuntimeError
-+from .exceptions import TemplatesNotFound
-+from .exceptions import TemplateSyntaxError
-+from .exceptions import UndefinedError
-+from .filters import contextfilter
-+from .filters import environmentfilter
-+from .filters import evalcontextfilter
-+from .loaders import BaseLoader
-+from .loaders import ChoiceLoader
-+from .loaders import DictLoader
-+from .loaders import FileSystemLoader
-+from .loaders import FunctionLoader
-+from .loaders import ModuleLoader
-+from .loaders import PackageLoader
-+from .loaders import PrefixLoader
-+from .runtime import ChainableUndefined
-+from .runtime import DebugUndefined
-+from .runtime import make_logging_undefined
-+from .runtime import StrictUndefined
-+from .runtime import Undefined
-+from .utils import clear_caches
-+from .utils import contextfunction
-+from .utils import environmentfunction
-+from .utils import evalcontextfunction
-+from .utils import is_undefined
-+from .utils import select_autoescape
-+
-+__version__ = "2.11.1"
-diff --git a/third_party/python/Jinja2/jinja2/_compat.py b/third_party/python/Jinja2/jinja2/_compat.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/_compat.py
-@@ -0,0 +1,132 @@
-+# -*- coding: utf-8 -*-
-+# flake8: noqa
-+import marshal
-+import sys
-+
-+PY2 = sys.version_info[0] == 2
-+PYPY = hasattr(sys, "pypy_translation_info")
-+_identity = lambda x: x
-+
-+if not PY2:
-+    unichr = chr
-+    range_type = range
-+    text_type = str
-+    string_types = (str,)
-+    integer_types = (int,)
-+
-+    iterkeys = lambda d: iter(d.keys())
-+    itervalues = lambda d: iter(d.values())
-+    iteritems = lambda d: iter(d.items())
-+
-+    import pickle
-+    from io import BytesIO, StringIO
-+
-+    NativeStringIO = StringIO
-+
-+    def reraise(tp, value, tb=None):
-+        if value.__traceback__ is not tb:
-+            raise value.with_traceback(tb)
-+        raise value
-+
-+    ifilter = filter
-+    imap = map
-+    izip = zip
-+    intern = sys.intern
-+
-+    implements_iterator = _identity
-+    implements_to_string = _identity
-+    encode_filename = _identity
-+
-+    marshal_dump = marshal.dump
-+    marshal_load = marshal.load
-+
-+else:
-+    unichr = unichr
-+    text_type = unicode
-+    range_type = xrange
-+    string_types = (str, unicode)
-+    integer_types = (int, long)
-+
-+    iterkeys = lambda d: d.iterkeys()
-+    itervalues = lambda d: d.itervalues()
-+    iteritems = lambda d: d.iteritems()
-+
-+    import cPickle as pickle
-+    from cStringIO import StringIO as BytesIO, StringIO
-+
-+    NativeStringIO = BytesIO
-+
-+    exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
-+
-+    from itertools import imap, izip, ifilter
-+
-+    intern = intern
-+
-+    def implements_iterator(cls):
-+        cls.next = cls.__next__
-+        del cls.__next__
-+        return cls
-+
-+    def implements_to_string(cls):
-+        cls.__unicode__ = cls.__str__
-+        cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
-+        return cls
-+
-+    def encode_filename(filename):
-+        if isinstance(filename, unicode):
-+            return filename.encode("utf-8")
-+        return filename
-+
-+    def marshal_dump(code, f):
-+        if isinstance(f, file):
-+            marshal.dump(code, f)
-+        else:
-+            f.write(marshal.dumps(code))
-+
-+    def marshal_load(f):
-+        if isinstance(f, file):
-+            return marshal.load(f)
-+        return marshal.loads(f.read())
-+
-+
-+def with_metaclass(meta, *bases):
-+    """Create a base class with a metaclass."""
-+    # This requires a bit of explanation: the basic idea is to make a
-+    # dummy metaclass for one level of class instantiation that replaces
-+    # itself with the actual metaclass.
-+    class metaclass(type):
-+        def __new__(cls, name, this_bases, d):
-+            return meta(name, bases, d)
-+
-+    return type.__new__(metaclass, "temporary_class", (), {})
-+
-+
-+try:
-+    from urllib.parse import quote_from_bytes as url_quote
-+except ImportError:
-+    from urllib import quote as url_quote
-+
-+
-+try:
-+    from collections import abc
-+except ImportError:
-+    import collections as abc
-+
-+
-+try:
-+    from os import fspath
-+except ImportError:
-+    try:
-+        from pathlib import PurePath
-+    except ImportError:
-+        PurePath = None
-+
-+    def fspath(path):
-+        if hasattr(path, "__fspath__"):
-+            return path.__fspath__()
-+
-+        # Python 3.5 doesn't have __fspath__ yet, use str.
-+        if PurePath is not None and isinstance(path, PurePath):
-+            return str(path)
-+
-+        return path
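
_compat.py gathers the Python 2/3 differences in one place so the rest of the package can import shims such as text_type and iteritems instead of branching on sys.version_info. A minimal sketch of how the shims are meant to be used:

    from jinja2._compat import iteritems, text_type

    def stringify_values(d):
        # iteritems/text_type resolve to d.iteritems()/unicode on Python 2
        # and iter(d.items())/str on Python 3.
        return dict((k, text_type(v)) for k, v in iteritems(d))

    print(stringify_values({"answer": 42}))  # {'answer': '42'}
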
-diff --git a/third_party/python/Jinja2/jinja2/_identifier.py b/third_party/python/Jinja2/jinja2/_identifier.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/_identifier.py
-@@ -0,0 +1,6 @@
-+import re
-+
-+# generated by scripts/generate_identifier_pattern.py
-+pattern = re.compile(
-+    r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+"  # noqa: B950
-+)
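
The generated pattern extends \w with combining marks and other code points that are valid inside Python identifiers, which is what lets templates use non-ASCII variable names. A quick demonstration under Python 3 (jinja2._identifier is an internal module, so this import is for illustration only):

    from jinja2._identifier import pattern

    # "se\u0301ance" contains a combining acute accent (category Mn), which
    # plain \w does not match but this pattern deliberately includes.
    for name in ("items", "se\u0301ance"):
        print(name, pattern.fullmatch(name) is not None)  # True for both
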
-diff --git a/third_party/python/Jinja2/jinja2/asyncfilters.py b/third_party/python/Jinja2/jinja2/asyncfilters.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/asyncfilters.py
-@@ -0,0 +1,159 @@
-+from functools import wraps
-+
-+from . import filters
-+from .asyncsupport import auto_aiter
-+from .asyncsupport import auto_await
-+
-+
-+async def auto_to_seq(value):
-+    seq = []
-+    if hasattr(value, "__aiter__"):
-+        async for item in value:
-+            seq.append(item)
-+    else:
-+        for item in value:
-+            seq.append(item)
-+    return seq
-+
-+
-+async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
-+    seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
-+    if seq:
-+        async for item in auto_aiter(seq):
-+            if func(item):
-+                yield item
-+
-+
-+def dualfilter(normal_filter, async_filter):
-+    wrap_evalctx = False
-+    if getattr(normal_filter, "environmentfilter", False):
-+
-+        def is_async(args):
-+            return args[0].is_async
-+
-+        wrap_evalctx = False
-+    else:
-+        if not getattr(normal_filter, "evalcontextfilter", False) and not getattr(
-+            normal_filter, "contextfilter", False
-+        ):
-+            wrap_evalctx = True
-+
-+        def is_async(args):
-+            return args[0].environment.is_async
-+
-+    @wraps(normal_filter)
-+    def wrapper(*args, **kwargs):
-+        b = is_async(args)
-+        if wrap_evalctx:
-+            args = args[1:]
-+        if b:
-+            return async_filter(*args, **kwargs)
-+        return normal_filter(*args, **kwargs)
-+
-+    if wrap_evalctx:
-+        wrapper.evalcontextfilter = True
-+
-+    wrapper.asyncfiltervariant = True
-+
-+    return wrapper
-+
-+
-+def asyncfiltervariant(original):
-+    def decorator(f):
-+        return dualfilter(original, f)
-+
-+    return decorator
-+
-+
-+@asyncfiltervariant(filters.do_first)
-+async def do_first(environment, seq):
-+    try:
-+        return await auto_aiter(seq).__anext__()
-+    except StopAsyncIteration:
-+        return environment.undefined("No first item, sequence was empty.")
-+
-+
-+@asyncfiltervariant(filters.do_groupby)
-+async def do_groupby(environment, value, attribute):
-+    expr = filters.make_attrgetter(environment, attribute)
-+    return [
-+        filters._GroupTuple(key, await auto_to_seq(values))
-+        for key, values in filters.groupby(
-+            sorted(await auto_to_seq(value), key=expr), expr
-+        )
-+    ]
-+
-+
-+@asyncfiltervariant(filters.do_join)
-+async def do_join(eval_ctx, value, d=u"", attribute=None):
-+    return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
-+
-+
-+@asyncfiltervariant(filters.do_list)
-+async def do_list(value):
-+    return await auto_to_seq(value)
-+
-+
-+@asyncfiltervariant(filters.do_reject)
-+async def do_reject(*args, **kwargs):
-+    return async_select_or_reject(args, kwargs, lambda x: not x, False)
-+
-+
-+@asyncfiltervariant(filters.do_rejectattr)
-+async def do_rejectattr(*args, **kwargs):
-+    return async_select_or_reject(args, kwargs, lambda x: not x, True)
-+
-+
-+@asyncfiltervariant(filters.do_select)
-+async def do_select(*args, **kwargs):
-+    return async_select_or_reject(args, kwargs, lambda x: x, False)
-+
-+
-+@asyncfiltervariant(filters.do_selectattr)
-+async def do_selectattr(*args, **kwargs):
-+    return async_select_or_reject(args, kwargs, lambda x: x, True)
-+
-+
-+@asyncfiltervariant(filters.do_map)
-+async def do_map(*args, **kwargs):
-+    seq, func = filters.prepare_map(args, kwargs)
-+    if seq:
-+        async for item in auto_aiter(seq):
-+            yield await auto_await(func(item))
-+
-+
-+@asyncfiltervariant(filters.do_sum)
-+async def do_sum(environment, iterable, attribute=None, start=0):
-+    rv = start
-+    if attribute is not None:
-+        func = filters.make_attrgetter(environment, attribute)
-+    else:
-+
-+        def func(x):
-+            return x
-+
-+    async for item in auto_aiter(iterable):
-+        rv += func(item)
-+    return rv
-+
-+
-+@asyncfiltervariant(filters.do_slice)
-+async def do_slice(value, slices, fill_with=None):
-+    return filters.do_slice(await auto_to_seq(value), slices, fill_with)
-+
-+
-+ASYNC_FILTERS = {
-+    "first": do_first,
-+    "groupby": do_groupby,
-+    "join": do_join,
-+    "list": do_list,
-+    # we intentionally do not support do_last because that would be
-+    # ridiculous
-+    "reject": do_reject,
-+    "rejectattr": do_rejectattr,
-+    "map": do_map,
-+    "select": do_select,
-+    "selectattr": do_selectattr,
-+    "sum": do_sum,
-+    "slice": do_slice,
-+}
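
These variants are what make the built-in filter names work transparently on async iterables: dualfilter checks is_async at call time and dispatches to the coroutine implementation. For example, piping an async generator through the list filter in an async-enabled environment:

    import asyncio
    from jinja2 import Environment

    async def numbers():
        for i in range(3):
            yield i

    async def main():
        env = Environment(enable_async=True)
        template = env.from_string("{{ nums | list }}")
        # The async do_list collects the generator via auto_to_seq.
        print(await template.render_async(nums=numbers()))  # -> [0, 1, 2]

    asyncio.run(main())
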
-diff --git a/third_party/python/Jinja2/jinja2/asyncsupport.py b/third_party/python/Jinja2/jinja2/asyncsupport.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/asyncsupport.py
-@@ -0,0 +1,264 @@
-+# -*- coding: utf-8 -*-
-+"""The code for async support. Importing this patches Jinja on supported
-+Python versions.
-+"""
-+import asyncio
-+import inspect
-+from functools import update_wrapper
-+
-+from markupsafe import Markup
-+
-+from .environment import TemplateModule
-+from .runtime import LoopContext
-+from .utils import concat
-+from .utils import internalcode
-+from .utils import missing
-+
-+
-+async def concat_async(async_gen):
-+    rv = []
-+
-+    async def collect():
-+        async for event in async_gen:
-+            rv.append(event)
-+
-+    await collect()
-+    return concat(rv)
-+
-+
-+async def generate_async(self, *args, **kwargs):
-+    vars = dict(*args, **kwargs)
-+    try:
-+        async for event in self.root_render_func(self.new_context(vars)):
-+            yield event
-+    except Exception:
-+        yield self.environment.handle_exception()
-+
-+
-+def wrap_generate_func(original_generate):
-+    def _convert_generator(self, loop, args, kwargs):
-+        async_gen = self.generate_async(*args, **kwargs)
-+        try:
-+            while 1:
-+                yield loop.run_until_complete(async_gen.__anext__())
-+        except StopAsyncIteration:
-+            pass
-+
-+    def generate(self, *args, **kwargs):
-+        if not self.environment.is_async:
-+            return original_generate(self, *args, **kwargs)
-+        return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
-+
-+    return update_wrapper(generate, original_generate)
-+
-+
-+async def render_async(self, *args, **kwargs):
-+    if not self.environment.is_async:
-+        raise RuntimeError("The environment was not created with async mode enabled.")
-+
-+    vars = dict(*args, **kwargs)
-+    ctx = self.new_context(vars)
-+
-+    try:
-+        return await concat_async(self.root_render_func(ctx))
-+    except Exception:
-+        return self.environment.handle_exception()
-+
-+
-+def wrap_render_func(original_render):
-+    def render(self, *args, **kwargs):
-+        if not self.environment.is_async:
-+            return original_render(self, *args, **kwargs)
-+        loop = asyncio.get_event_loop()
-+        return loop.run_until_complete(self.render_async(*args, **kwargs))
-+
-+    return update_wrapper(render, original_render)
-+
-+
-+def wrap_block_reference_call(original_call):
-+    @internalcode
-+    async def async_call(self):
-+        rv = await concat_async(self._stack[self._depth](self._context))
-+        if self._context.eval_ctx.autoescape:
-+            rv = Markup(rv)
-+        return rv
-+
-+    @internalcode
-+    def __call__(self):
-+        if not self._context.environment.is_async:
-+            return original_call(self)
-+        return async_call(self)
-+
-+    return update_wrapper(__call__, original_call)
-+
-+
-+def wrap_macro_invoke(original_invoke):
-+    @internalcode
-+    async def async_invoke(self, arguments, autoescape):
-+        rv = await self._func(*arguments)
-+        if autoescape:
-+            rv = Markup(rv)
-+        return rv
-+
-+    @internalcode
-+    def _invoke(self, arguments, autoescape):
-+        if not self._environment.is_async:
-+            return original_invoke(self, arguments, autoescape)
-+        return async_invoke(self, arguments, autoescape)
-+
-+    return update_wrapper(_invoke, original_invoke)
-+
-+
-+@internalcode
-+async def get_default_module_async(self):
-+    if self._module is not None:
-+        return self._module
-+    self._module = rv = await self.make_module_async()
-+    return rv
-+
-+
-+def wrap_default_module(original_default_module):
-+    @internalcode
-+    def _get_default_module(self):
-+        if self.environment.is_async:
-+            raise RuntimeError("Template module attribute is unavailable in async mode")
-+        return original_default_module(self)
-+
-+    return _get_default_module
-+
-+
-+async def make_module_async(self, vars=None, shared=False, locals=None):
-+    context = self.new_context(vars, shared, locals)
-+    body_stream = []
-+    async for item in self.root_render_func(context):
-+        body_stream.append(item)
-+    return TemplateModule(self, context, body_stream)
-+
-+
-+def patch_template():
-+    from . import Template
-+
-+    Template.generate = wrap_generate_func(Template.generate)
-+    Template.generate_async = update_wrapper(generate_async, Template.generate_async)
-+    Template.render_async = update_wrapper(render_async, Template.render_async)
-+    Template.render = wrap_render_func(Template.render)
-+    Template._get_default_module = wrap_default_module(Template._get_default_module)
-+    Template._get_default_module_async = get_default_module_async
-+    Template.make_module_async = update_wrapper(
-+        make_module_async, Template.make_module_async
-+    )
-+
-+
-+def patch_runtime():
-+    from .runtime import BlockReference, Macro
-+
-+    BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
-+    Macro._invoke = wrap_macro_invoke(Macro._invoke)
-+
-+
-+def patch_filters():
-+    from .filters import FILTERS
-+    from .asyncfilters import ASYNC_FILTERS
-+
-+    FILTERS.update(ASYNC_FILTERS)
-+
-+
-+def patch_all():
-+    patch_template()
-+    patch_runtime()
-+    patch_filters()
-+
-+
-+async def auto_await(value):
-+    if inspect.isawaitable(value):
-+        return await value
-+    return value
-+
-+
-+async def auto_aiter(iterable):
-+    if hasattr(iterable, "__aiter__"):
-+        async for item in iterable:
-+            yield item
-+        return
-+    for item in iterable:
-+        yield item
-+
-+
-+class AsyncLoopContext(LoopContext):
-+    _to_iterator = staticmethod(auto_aiter)
-+
-+    @property
-+    async def length(self):
-+        if self._length is not None:
-+            return self._length
-+
-+        try:
-+            self._length = len(self._iterable)
-+        except TypeError:
-+            iterable = [x async for x in self._iterator]
-+            self._iterator = self._to_iterator(iterable)
-+            self._length = len(iterable) + self.index + (self._after is not missing)
-+
-+        return self._length
-+
-+    @property
-+    async def revindex0(self):
-+        return await self.length - self.index
-+
-+    @property
-+    async def revindex(self):
-+        return await self.length - self.index0
-+
-+    async def _peek_next(self):
-+        if self._after is not missing:
-+            return self._after
-+
-+        try:
-+            self._after = await self._iterator.__anext__()
-+        except StopAsyncIteration:
-+            self._after = missing
-+
-+        return self._after
-+
-+    @property
-+    async def last(self):
-+        return await self._peek_next() is missing
-+
-+    @property
-+    async def nextitem(self):
-+        rv = await self._peek_next()
-+
-+        if rv is missing:
-+            return self._undefined("there is no next item")
-+
-+        return rv
-+
-+    def __aiter__(self):
-+        return self
-+
-+    async def __anext__(self):
-+        if self._after is not missing:
-+            rv = self._after
-+            self._after = missing
-+        else:
-+            rv = await self._iterator.__anext__()
-+
-+        self.index0 += 1
-+        self._before = self._current
-+        self._current = rv
-+        return rv, self
-+
-+
-+async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
-+    import warnings
-+
-+    warnings.warn(
-+        "This template must be recompiled with at least Jinja 2.11, or"
-+        " it will fail in 3.0.",
-+        DeprecationWarning,
-+        stacklevel=2,
-+    )
-+    return AsyncLoopContext(iterable, undefined, recurse, depth0)
-+
-+
-+patch_all()
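
Note the patch_all() call at import time: Jinja only imports this module when an environment is created with enable_async=True, at which point Template.render and friends are monkey-patched so the synchronous entry points keep working by driving the async code path on an event loop. A small sketch, for the Python versions this vendoring targets:

    from jinja2 import Environment

    # Creating an async-enabled environment imports asyncsupport, whose
    # patch_all() replaces Template.render/generate with async-aware wrappers.
    env = Environment(enable_async=True)
    template = env.from_string("{% for x in xs %}{{ x }} {% endfor %}")

    # The patched render() sees is_async and runs render_async() to
    # completion on the current event loop.
    print(template.render(xs=[1, 2, 3]))  # -> "1 2 3 "
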
-diff --git a/third_party/python/Jinja2/jinja2/bccache.py b/third_party/python/Jinja2/jinja2/bccache.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/bccache.py
-@@ -0,0 +1,350 @@
-+# -*- coding: utf-8 -*-
-+"""The optional bytecode cache system. This is useful if you have very
-+complex template situations and the compilation of all those templates
-+slows down your application too much.
-+
-+Situations where this is useful are often forking web applications that
-+are initialized on the first request.
-+"""
-+import errno
-+import fnmatch
-+import os
-+import stat
-+import sys
-+import tempfile
-+from hashlib import sha1
-+from os import listdir
-+from os import path
-+
-+from ._compat import BytesIO
-+from ._compat import marshal_dump
-+from ._compat import marshal_load
-+from ._compat import pickle
-+from ._compat import text_type
-+from .utils import open_if_exists
-+
-+bc_version = 4
-+# Magic bytes to identify Jinja bytecode cache files. Contains the
-+# Python major and minor version to avoid loading incompatible bytecode
-+# if a project upgrades its Python version.
-+bc_magic = (
-+    b"j2"
-+    + pickle.dumps(bc_version, 2)
-+    + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2)
-+)
-+
-+
-+class Bucket(object):
-+    """Buckets are used to store the bytecode for one template.  It's created
-+    and initialized by the bytecode cache and passed to the loading functions.
-+
-+    The buckets get an internal checksum from the cache assigned and use this
-+    to automatically reject outdated cache material.  Individual bytecode
-+    cache subclasses don't have to care about cache invalidation.
-+    """
-+
-+    def __init__(self, environment, key, checksum):
-+        self.environment = environment
-+        self.key = key
-+        self.checksum = checksum
-+        self.reset()
-+
-+    def reset(self):
-+        """Resets the bucket (unloads the bytecode)."""
-+        self.code = None
-+
-+    def load_bytecode(self, f):
-+        """Loads bytecode from a file or file like object."""
-+        # make sure the magic header is correct
-+        magic = f.read(len(bc_magic))
-+        if magic != bc_magic:
-+            self.reset()
-+            return
-+        # the source code of the file changed, we need to reload
-+        checksum = pickle.load(f)
-+        if self.checksum != checksum:
-+            self.reset()
-+            return
-+        # if marshal_load fails then we need to reload
-+        try:
-+            self.code = marshal_load(f)
-+        except (EOFError, ValueError, TypeError):
-+            self.reset()
-+            return
-+
-+    def write_bytecode(self, f):
-+        """Dump the bytecode into the file or file like object passed."""
-+        if self.code is None:
-+            raise TypeError("can't write empty bucket")
-+        f.write(bc_magic)
-+        pickle.dump(self.checksum, f, 2)
-+        marshal_dump(self.code, f)
-+
-+    def bytecode_from_string(self, string):
-+        """Load bytecode from a string."""
-+        self.load_bytecode(BytesIO(string))
-+
-+    def bytecode_to_string(self):
-+        """Return the bytecode as string."""
-+        out = BytesIO()
-+        self.write_bytecode(out)
-+        return out.getvalue()
-+
-+
-+class BytecodeCache(object):
-+    """To implement your own bytecode cache you have to subclass this class
-+    and override :meth:`load_bytecode` and :meth:`dump_bytecode`.  Both of
-+    these methods are passed a :class:`~jinja2.bccache.Bucket`.
-+
-+    A very basic bytecode cache that saves the bytecode on the file system::
-+
-+        from os import path
-+
-+        class MyCache(BytecodeCache):
-+
-+            def __init__(self, directory):
-+                self.directory = directory
-+
-+            def load_bytecode(self, bucket):
-+                filename = path.join(self.directory, bucket.key)
-+                if path.exists(filename):
-+                    with open(filename, 'rb') as f:
-+                        bucket.load_bytecode(f)
-+
-+            def dump_bytecode(self, bucket):
-+                filename = path.join(self.directory, bucket.key)
-+                with open(filename, 'wb') as f:
-+                    bucket.write_bytecode(f)
-+
-+    A more advanced version of a filesystem based bytecode cache is part of
-+    Jinja.
-+    """
-+
-+    def load_bytecode(self, bucket):
-+        """Subclasses have to override this method to load bytecode into a
-+        bucket.  If it is not able to find code in the cache for the
-+        bucket, it must not do anything.
-+        """
-+        raise NotImplementedError()
-+
-+    def dump_bytecode(self, bucket):
-+        """Subclasses have to override this method to write the bytecode
-+        from a bucket back to the cache.  If it is unable to do so it must
-+        not fail silently but raise an exception.
-+        """
-+        raise NotImplementedError()
-+
-+    def clear(self):
-+        """Clears the cache.  This method is not used by Jinja but should be
-+        implemented to allow applications to clear the bytecode cache used
-+        by a particular environment.
-+        """
-+
-+    def get_cache_key(self, name, filename=None):
-+        """Returns the unique hash key for this template name."""
-+        hash = sha1(name.encode("utf-8"))
-+        if filename is not None:
-+            filename = "|" + filename
-+            if isinstance(filename, text_type):
-+                filename = filename.encode("utf-8")
-+            hash.update(filename)
-+        return hash.hexdigest()
-+
-+    def get_source_checksum(self, source):
-+        """Returns a checksum for the source."""
-+        return sha1(source.encode("utf-8")).hexdigest()
-+
-+    def get_bucket(self, environment, name, filename, source):
-+        """Return a cache bucket for the given template.  All arguments are
-+        mandatory but filename may be `None`.
-+        """
-+        key = self.get_cache_key(name, filename)
-+        checksum = self.get_source_checksum(source)
-+        bucket = Bucket(environment, key, checksum)
-+        self.load_bytecode(bucket)
-+        return bucket
-+
-+    def set_bucket(self, bucket):
-+        """Put the bucket into the cache."""
-+        self.dump_bytecode(bucket)
-+
-+
-+class FileSystemBytecodeCache(BytecodeCache):
-+    """A bytecode cache that stores bytecode on the filesystem.  It accepts
-+    two arguments: The directory where the cache items are stored and a
-+    pattern string that is used to build the filename.
-+
-+    If no directory is specified a default cache directory is selected.  On
-+    Windows the user's temp directory is used, on UNIX systems a directory
-+    is created for the user in the system temp directory.
-+
-+    The pattern can be used to have multiple separate caches operate on the
-+    same directory.  The default pattern is ``'__jinja2_%s.cache'``.  ``%s``
-+    is replaced with the cache key.
-+
-+    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
-+
-+    This bytecode cache supports clearing of the cache using the clear method.
-+    """
-+
-+    def __init__(self, directory=None, pattern="__jinja2_%s.cache"):
-+        if directory is None:
-+            directory = self._get_default_cache_dir()
-+        self.directory = directory
-+        self.pattern = pattern
-+
-+    def _get_default_cache_dir(self):
-+        def _unsafe_dir():
-+            raise RuntimeError(
-+                "Cannot determine safe temp directory.  You "
-+                "need to explicitly provide one."
-+            )
-+
-+        tmpdir = tempfile.gettempdir()
-+
-+        # On Windows the temporary directory is already user-specific
-+        # unless explicitly forced otherwise.  We can just use that.
-+        if os.name == "nt":
-+            return tmpdir
-+        if not hasattr(os, "getuid"):
-+            _unsafe_dir()
-+
-+        dirname = "_jinja2-cache-%d" % os.getuid()
-+        actual_dir = os.path.join(tmpdir, dirname)
-+
-+        try:
-+            os.mkdir(actual_dir, stat.S_IRWXU)
-+        except OSError as e:
-+            if e.errno != errno.EEXIST:
-+                raise
-+        try:
-+            os.chmod(actual_dir, stat.S_IRWXU)
-+            actual_dir_stat = os.lstat(actual_dir)
-+            if (
-+                actual_dir_stat.st_uid != os.getuid()
-+                or not stat.S_ISDIR(actual_dir_stat.st_mode)
-+                or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
-+            ):
-+                _unsafe_dir()
-+        except OSError as e:
-+            if e.errno != errno.EEXIST:
-+                raise
-+
-+        actual_dir_stat = os.lstat(actual_dir)
-+        if (
-+            actual_dir_stat.st_uid != os.getuid()
-+            or not stat.S_ISDIR(actual_dir_stat.st_mode)
-+            or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
-+        ):
-+            _unsafe_dir()
-+
-+        return actual_dir
-+
-+    def _get_cache_filename(self, bucket):
-+        return path.join(self.directory, self.pattern % bucket.key)
-+
-+    def load_bytecode(self, bucket):
-+        f = open_if_exists(self._get_cache_filename(bucket), "rb")
-+        if f is not None:
-+            try:
-+                bucket.load_bytecode(f)
-+            finally:
-+                f.close()
-+
-+    def dump_bytecode(self, bucket):
-+        f = open(self._get_cache_filename(bucket), "wb")
-+        try:
-+            bucket.write_bytecode(f)
-+        finally:
-+            f.close()
-+
-+    def clear(self):
-+        # imported lazily here because google app-engine doesn't support
-+        # write access on the file system and the function does not exist
-+        # normally.
-+        from os import remove
-+
-+        files = fnmatch.filter(listdir(self.directory), self.pattern % "*")
-+        for filename in files:
-+            try:
-+                remove(path.join(self.directory, filename))
-+            except OSError:
-+                pass
-+
-+
-+class MemcachedBytecodeCache(BytecodeCache):
-+    """This class implements a bytecode cache that uses a memcache cache for
-+    storing the information.  It does not enforce a specific memcache library
-+    (tummy's memcache or cmemcache) but will accept any class that provides
-+    the minimal interface required.
-+
-+    Libraries compatible with this class:
-+
-+    -   `cachelib <https://github.com/pallets/cachelib>`_
-+    -   `python-memcached <https://pypi.org/project/python-memcached/>`_
-+
-+    (Unfortunately the django cache interface is not compatible because it
-+    does not support storing binary data, only unicode.  You can however pass
-+    the underlying cache client to the bytecode cache which is available
-+    as `django.core.cache.cache._client`.)
-+
-+    The minimal interface for the client passed to the constructor is this:
-+
-+    .. class:: MinimalClientInterface
-+
-+        .. method:: set(key, value[, timeout])
-+
-+            Stores the bytecode in the cache.  `value` is a string and
-+            `timeout` the timeout of the key.  If timeout is not provided
-+            a default timeout or no timeout should be assumed; if it's
-+            provided it's an integer with the number of seconds the cache
-+            item should exist.
-+
-+        .. method:: get(key)
-+
-+            Returns the value for the cache key.  If the item does not
-+            exist in the cache the return value must be `None`.
-+
-+    The other arguments to the constructor are the prefix for all keys that
-+    is added before the actual cache key and the timeout for the bytecode in
-+    the cache system.  We recommend a high (or no) timeout.
-+
-+    This bytecode cache does not support clearing of used items in the cache.
-+    The clear method is a no-operation function.
-+
-+    .. versionadded:: 2.7
-+       Added support for ignoring memcache errors through the
-+       `ignore_memcache_errors` parameter.
-+    """
-+
-+    def __init__(
-+        self,
-+        client,
-+        prefix="jinja2/bytecode/",
-+        timeout=None,
-+        ignore_memcache_errors=True,
-+    ):
-+        self.client = client
-+        self.prefix = prefix
-+        self.timeout = timeout
-+        self.ignore_memcache_errors = ignore_memcache_errors
-+
-+    def load_bytecode(self, bucket):
-+        try:
-+            code = self.client.get(self.prefix + bucket.key)
-+        except Exception:
-+            if not self.ignore_memcache_errors:
-+                raise
-+            code = None
-+        if code is not None:
-+            bucket.bytecode_from_string(code)
-+
-+    def dump_bytecode(self, bucket):
-+        args = (self.prefix + bucket.key, bucket.bytecode_to_string())
-+        if self.timeout is not None:
-+            args += (self.timeout,)
-+        try:
-+            self.client.set(*args)
-+        except Exception:
-+            if not self.ignore_memcache_errors:
-+                raise
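
A bytecode cache is plugged in through the bytecode_cache argument of Environment and only takes effect for templates loaded through a loader, where the compiled code is stored keyed by name and source checksum. For example, with the filesystem cache defined above:

    from jinja2 import DictLoader, Environment, FileSystemBytecodeCache

    env = Environment(
        loader=DictLoader({"hello.txt": "Hello {{ name }}!"}),
        # Compiled bytecode lands in the cache directory, keyed by template
        # name plus a checksum of the source, and is reused across processes.
        bytecode_cache=FileSystemBytecodeCache("/tmp/jinja_cache"),
    )
    print(env.get_template("hello.txt").render(name="cache"))
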
-diff --git a/third_party/python/Jinja2/jinja2/compiler.py b/third_party/python/Jinja2/jinja2/compiler.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/compiler.py
-@@ -0,0 +1,1843 @@
-+# -*- coding: utf-8 -*-
-+"""Compiles nodes from the parser into Python code."""
-+from collections import namedtuple
-+from functools import update_wrapper
-+from itertools import chain
-+from keyword import iskeyword as is_python_keyword
-+
-+from markupsafe import escape
-+from markupsafe import Markup
-+
-+from . import nodes
-+from ._compat import imap
-+from ._compat import iteritems
-+from ._compat import izip
-+from ._compat import NativeStringIO
-+from ._compat import range_type
-+from ._compat import string_types
-+from ._compat import text_type
-+from .exceptions import TemplateAssertionError
-+from .idtracking import Symbols
-+from .idtracking import VAR_LOAD_ALIAS
-+from .idtracking import VAR_LOAD_PARAMETER
-+from .idtracking import VAR_LOAD_RESOLVE
-+from .idtracking import VAR_LOAD_UNDEFINED
-+from .nodes import EvalContext
-+from .optimizer import Optimizer
-+from .utils import concat
-+from .visitor import NodeVisitor
-+
-+operators = {
-+    "eq": "==",
-+    "ne": "!=",
-+    "gt": ">",
-+    "gteq": ">=",
-+    "lt": "<",
-+    "lteq": "<=",
-+    "in": "in",
-+    "notin": "not in",
-+}
-+
-+# what method to iterate over items do we want to use for dict iteration
-+# in generated code?  on 2.x let's go with iteritems, on 3.x with items
-+if hasattr(dict, "iteritems"):
-+    dict_item_iter = "iteritems"
-+else:
-+    dict_item_iter = "items"
-+
-+code_features = ["division"]
-+
-+# does this python version support generator stops? (PEP 0479)
-+try:
-+    exec("from __future__ import generator_stop")
-+    code_features.append("generator_stop")
-+except SyntaxError:
-+    pass
-+
-+# does this python version support yield from?
-+try:
-+    exec("def f(): yield from x()")
-+except SyntaxError:
-+    supports_yield_from = False
-+else:
-+    supports_yield_from = True
-+
-+
-+def optimizeconst(f):
-+    def new_func(self, node, frame, **kwargs):
-+        # Only optimize if the frame is not volatile
-+        if self.optimized and not frame.eval_ctx.volatile:
-+            new_node = self.optimizer.visit(node, frame.eval_ctx)
-+            if new_node != node:
-+                return self.visit(new_node, frame)
-+        return f(self, node, frame, **kwargs)
-+
-+    return update_wrapper(new_func, f)
-+
-+
-+def generate(
-+    node, environment, name, filename, stream=None, defer_init=False, optimized=True
-+):
-+    """Generate the python source for a node tree."""
-+    if not isinstance(node, nodes.Template):
-+        raise TypeError("Can't compile non template nodes")
-+    generator = environment.code_generator_class(
-+        environment, name, filename, stream, defer_init, optimized
-+    )
-+    generator.visit(node)
-+    if stream is None:
-+        return generator.stream.getvalue()
-+
-+
-+def has_safe_repr(value):
-+    """Does the node have a safe representation?"""
-+    if value is None or value is NotImplemented or value is Ellipsis:
-+        return True
-+    if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
-+        return True
-+    if type(value) in (tuple, list, set, frozenset):
-+        for item in value:
-+            if not has_safe_repr(item):
-+                return False
-+        return True
-+    elif type(value) is dict:
-+        for key, value in iteritems(value):
-+            if not has_safe_repr(key):
-+                return False
-+            if not has_safe_repr(value):
-+                return False
-+        return True
-+    return False
-+
-+
-+def find_undeclared(nodes, names):
-+    """Check if the names passed are accessed undeclared.  The return value
-+    is a set of all the undeclared names from the sequence of names found.
-+    """
-+    visitor = UndeclaredNameVisitor(names)
-+    try:
-+        for node in nodes:
-+            visitor.visit(node)
-+    except VisitorExit:
-+        pass
-+    return visitor.undeclared
-+
-+
-+class MacroRef(object):
-+    def __init__(self, node):
-+        self.node = node
-+        self.accesses_caller = False
-+        self.accesses_kwargs = False
-+        self.accesses_varargs = False
-+
-+
-+class Frame(object):
-+    """Holds compile time information for us."""
-+
-+    def __init__(self, eval_ctx, parent=None, level=None):
-+        self.eval_ctx = eval_ctx
-+        self.symbols = Symbols(parent and parent.symbols or None, level=level)
-+
-+        # a toplevel frame is the root + soft frames such as if conditions.
-+        self.toplevel = False
-+
-+        # the root frame is basically just the outermost frame, so no if
-+        # conditions.  This information is used to optimize inheritance
-+        # situations.
-+        self.rootlevel = False
-+
-+        # in some dynamic inheritance situations the compiler needs to add
-+        # write tests around output statements.
-+        self.require_output_check = parent and parent.require_output_check
-+
-+        # inside some tags we are using a buffer rather than yield statements.
-+        # this for example affects {% filter %} or {% macro %}.  If a frame
-+        # is buffered this variable points to the name of the list used as
-+        # buffer.
-+        self.buffer = None
-+
-+        # the name of the block we're in, otherwise None.
-+        self.block = parent and parent.block or None
-+
-+        # the parent of this frame
-+        self.parent = parent
-+
-+        if parent is not None:
-+            self.buffer = parent.buffer
-+
-+    def copy(self):
-+        """Create a copy of the current one."""
-+        rv = object.__new__(self.__class__)
-+        rv.__dict__.update(self.__dict__)
-+        rv.symbols = self.symbols.copy()
-+        return rv
-+
-+    def inner(self, isolated=False):
-+        """Return an inner frame."""
-+        if isolated:
-+            return Frame(self.eval_ctx, level=self.symbols.level + 1)
-+        return Frame(self.eval_ctx, self)
-+
-+    def soft(self):
-+        """Return a soft frame.  A soft frame may not be modified as
-+        standalone thing as it shares the resources with the frame it
-+        was created of, but it's not a rootlevel frame any longer.
-+
-+        This is only used to implement if-statements.
-+        """
-+        rv = self.copy()
-+        rv.rootlevel = False
-+        return rv
-+
-+    __copy__ = copy
-+
-+
-+class VisitorExit(RuntimeError):
-+    """Exception used by the `UndeclaredNameVisitor` to signal a stop."""
-+
-+
-+class DependencyFinderVisitor(NodeVisitor):
-+    """A visitor that collects filter and test calls."""
-+
-+    def __init__(self):
-+        self.filters = set()
-+        self.tests = set()
-+
-+    def visit_Filter(self, node):
-+        self.generic_visit(node)
-+        self.filters.add(node.name)
-+
-+    def visit_Test(self, node):
-+        self.generic_visit(node)
-+        self.tests.add(node.name)
-+
-+    def visit_Block(self, node):
-+        """Stop visiting at blocks."""
-+
-+
-+class UndeclaredNameVisitor(NodeVisitor):
-+    """A visitor that checks if a name is accessed without being
-+    declared.  This is different from the frame visitor as it will
-+    not stop at closure frames.
-+    """
-+
-+    def __init__(self, names):
-+        self.names = set(names)
-+        self.undeclared = set()
-+
-+    def visit_Name(self, node):
-+        if node.ctx == "load" and node.name in self.names:
-+            self.undeclared.add(node.name)
-+            if self.undeclared == self.names:
-+                raise VisitorExit()
-+        else:
-+            self.names.discard(node.name)
-+
-+    def visit_Block(self, node):
-+        """Stop visiting a blocks."""
-+
-+
-+class CompilerExit(Exception):
-+    """Raised if the compiler encountered a situation where it just
-+    doesn't make sense to further process the code.  Any block that
-+    raises such an exception is not further processed.
-+    """
-+
-+
-+class CodeGenerator(NodeVisitor):
-+    def __init__(
-+        self, environment, name, filename, stream=None, defer_init=False, optimized=True
-+    ):
-+        if stream is None:
-+            stream = NativeStringIO()
-+        self.environment = environment
-+        self.name = name
-+        self.filename = filename
-+        self.stream = stream
-+        self.created_block_context = False
-+        self.defer_init = defer_init
-+        self.optimized = optimized
-+        if optimized:
-+            self.optimizer = Optimizer(environment)
-+
-+        # aliases for imports
-+        self.import_aliases = {}
-+
-+        # a registry for all blocks.  Because blocks are moved out
-+        # into the global python scope they are registered here
-+        self.blocks = {}
-+
-+        # the number of extends statements so far
-+        self.extends_so_far = 0
-+
-+        # some templates have a rootlevel extends.  In this case we
-+        # can safely assume that we're a child template and do some
-+        # more optimizations.
-+        self.has_known_extends = False
-+
-+        # the current line number
-+        self.code_lineno = 1
-+
-+        # registry of all filters and tests (global, not block local)
-+        self.tests = {}
-+        self.filters = {}
-+
-+        # the debug information
-+        self.debug_info = []
-+        self._write_debug_info = None
-+
-+        # the number of new lines before the next write()
-+        self._new_lines = 0
-+
-+        # the line number of the last written statement
-+        self._last_line = 0
-+
-+        # true if nothing was written so far.
-+        self._first_write = True
-+
-+        # used by the `temporary_identifier` method to get new
-+        # unique, temporary identifier
-+        self._last_identifier = 0
-+
-+        # the current indentation
-+        self._indentation = 0
-+
-+        # Tracks toplevel assignments
-+        self._assign_stack = []
-+
-+        # Tracks parameter definition blocks
-+        self._param_def_block = []
-+
-+        # Tracks the current context.
-+        self._context_reference_stack = ["context"]
-+
-+    # -- Various compilation helpers
-+
-+    def fail(self, msg, lineno):
-+        """Fail with a :exc:`TemplateAssertionError`."""
-+        raise TemplateAssertionError(msg, lineno, self.name, self.filename)
-+
-+    def temporary_identifier(self):
-+        """Get a new unique identifier."""
-+        self._last_identifier += 1
-+        return "t_%d" % self._last_identifier
-+
-+    def buffer(self, frame):
-+        """Enable buffering for the frame from that point onwards."""
-+        frame.buffer = self.temporary_identifier()
-+        self.writeline("%s = []" % frame.buffer)
-+
-+    def return_buffer_contents(self, frame, force_unescaped=False):
-+        """Return the buffer contents of the frame."""
-+        if not force_unescaped:
-+            if frame.eval_ctx.volatile:
-+                self.writeline("if context.eval_ctx.autoescape:")
-+                self.indent()
-+                self.writeline("return Markup(concat(%s))" % frame.buffer)
-+                self.outdent()
-+                self.writeline("else:")
-+                self.indent()
-+                self.writeline("return concat(%s)" % frame.buffer)
-+                self.outdent()
-+                return
-+            elif frame.eval_ctx.autoescape:
-+                self.writeline("return Markup(concat(%s))" % frame.buffer)
-+                return
-+        self.writeline("return concat(%s)" % frame.buffer)
-+
-+    def indent(self):
-+        """Indent by one."""
-+        self._indentation += 1
-+
-+    def outdent(self, step=1):
-+        """Outdent by step."""
-+        self._indentation -= step
-+
-+    def start_write(self, frame, node=None):
-+        """Yield or write into the frame buffer."""
-+        if frame.buffer is None:
-+            self.writeline("yield ", node)
-+        else:
-+            self.writeline("%s.append(" % frame.buffer, node)
-+
-+    def end_write(self, frame):
-+        """End the writing process started by `start_write`."""
-+        if frame.buffer is not None:
-+            self.write(")")
-+
-+    def simple_write(self, s, frame, node=None):
-+        """Simple shortcut for start_write + write + end_write."""
-+        self.start_write(frame, node)
-+        self.write(s)
-+        self.end_write(frame)
-+
-+    def blockvisit(self, nodes, frame):
-+        """Visit a list of nodes as block in a frame.  If the current frame
-+        is no buffer a dummy ``if 0: yield None`` is written automatically.
-+        """
-+        try:
-+            self.writeline("pass")
-+            for node in nodes:
-+                self.visit(node, frame)
-+        except CompilerExit:
-+            pass
-+
-+    def write(self, x):
-+        """Write a string into the output stream."""
-+        if self._new_lines:
-+            if not self._first_write:
-+                self.stream.write("\n" * self._new_lines)
-+                self.code_lineno += self._new_lines
-+                if self._write_debug_info is not None:
-+                    self.debug_info.append((self._write_debug_info, self.code_lineno))
-+                    self._write_debug_info = None
-+            self._first_write = False
-+            self.stream.write("    " * self._indentation)
-+            self._new_lines = 0
-+        self.stream.write(x)
-+
-+    def writeline(self, x, node=None, extra=0):
-+        """Combination of newline and write."""
-+        self.newline(node, extra)
-+        self.write(x)
-+
-+    def newline(self, node=None, extra=0):
-+        """Add one or more newlines before the next write."""
-+        self._new_lines = max(self._new_lines, 1 + extra)
-+        if node is not None and node.lineno != self._last_line:
-+            self._write_debug_info = node.lineno
-+            self._last_line = node.lineno
-+
-+    def signature(self, node, frame, extra_kwargs=None):
-+        """Writes a function call to the stream for the current node.
-+        A leading comma is added automatically.  The extra keyword
-+        arguments may not include python keywords otherwise a syntax
-+        error could occur.  The extra keyword arguments should be given
-+        as python dict.
-+        """
-+        # if any of the given keyword arguments is a python keyword
-+        # we have to make sure that no invalid call is created.
-+        kwarg_workaround = False
-+        for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
-+            if is_python_keyword(kwarg):
-+                kwarg_workaround = True
-+                break
-+
-+        for arg in node.args:
-+            self.write(", ")
-+            self.visit(arg, frame)
-+
-+        if not kwarg_workaround:
-+            for kwarg in node.kwargs:
-+                self.write(", ")
-+                self.visit(kwarg, frame)
-+            if extra_kwargs is not None:
-+                for key, value in iteritems(extra_kwargs):
-+                    self.write(", %s=%s" % (key, value))
-+        if node.dyn_args:
-+            self.write(", *")
-+            self.visit(node.dyn_args, frame)
-+
-+        if kwarg_workaround:
-+            if node.dyn_kwargs is not None:
-+                self.write(", **dict({")
-+            else:
-+                self.write(", **{")
-+            for kwarg in node.kwargs:
-+                self.write("%r: " % kwarg.key)
-+                self.visit(kwarg.value, frame)
-+                self.write(", ")
-+            if extra_kwargs is not None:
-+                for key, value in iteritems(extra_kwargs):
-+                    self.write("%r: %s, " % (key, value))
-+            if node.dyn_kwargs is not None:
-+                self.write("}, **")
-+                self.visit(node.dyn_kwargs, frame)
-+                self.write(")")
-+            else:
-+                self.write("}")
-+
-+        elif node.dyn_kwargs is not None:
-+            self.write(", **")
-+            self.visit(node.dyn_kwargs, frame)
-+
-+    def pull_dependencies(self, nodes):
-+        """Pull all the dependencies."""
-+        visitor = DependencyFinderVisitor()
-+        for node in nodes:
-+            visitor.visit(node)
-+        for dependency in "filters", "tests":
-+            mapping = getattr(self, dependency)
-+            for name in getattr(visitor, dependency):
-+                if name not in mapping:
-+                    mapping[name] = self.temporary_identifier()
-+                self.writeline(
-+                    "%s = environment.%s[%r]" % (mapping[name], dependency, name)
-+                )
-+
-+    def enter_frame(self, frame):
-+        undefs = []
-+        for target, (action, param) in iteritems(frame.symbols.loads):
-+            if action == VAR_LOAD_PARAMETER:
-+                pass
-+            elif action == VAR_LOAD_RESOLVE:
-+                self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param))
-+            elif action == VAR_LOAD_ALIAS:
-+                self.writeline("%s = %s" % (target, param))
-+            elif action == VAR_LOAD_UNDEFINED:
-+                undefs.append(target)
-+            else:
-+                raise NotImplementedError("unknown load instruction")
-+        if undefs:
-+            self.writeline("%s = missing" % " = ".join(undefs))
-+
-+    def leave_frame(self, frame, with_python_scope=False):
-+        if not with_python_scope:
-+            undefs = []
-+            for target, _ in iteritems(frame.symbols.loads):
-+                undefs.append(target)
-+            if undefs:
-+                self.writeline("%s = missing" % " = ".join(undefs))
-+
-+    def func(self, name):
-+        if self.environment.is_async:
-+            return "async def %s" % name
-+        return "def %s" % name
-+
-+    def macro_body(self, node, frame):
-+        """Dump the function def of a macro or call block."""
-+        frame = frame.inner()
-+        frame.symbols.analyze_node(node)
-+        macro_ref = MacroRef(node)
-+
-+        explicit_caller = None
-+        skip_special_params = set()
-+        args = []
-+        for idx, arg in enumerate(node.args):
-+            if arg.name == "caller":
-+                explicit_caller = idx
-+            if arg.name in ("kwargs", "varargs"):
-+                skip_special_params.add(arg.name)
-+            args.append(frame.symbols.ref(arg.name))
-+
-+        undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs"))
-+
-+        if "caller" in undeclared:
-+            # In older Jinja versions there was a bug that allowed caller
-+            # to retain the special behavior even if it was mentioned in
-+            # the argument list.  However thankfully this was only really
-+            # working if it was the last argument.  So we are explicitly
-+            # checking this now and error out if it is anywhere else in
-+            # the argument list.
-+            if explicit_caller is not None:
-+                try:
-+                    node.defaults[explicit_caller - len(node.args)]
-+                except IndexError:
-+                    self.fail(
-+                        "When defining macros or call blocks the "
-+                        'special "caller" argument must be omitted '
-+                        "or be given a default.",
-+                        node.lineno,
-+                    )
-+            else:
-+                args.append(frame.symbols.declare_parameter("caller"))
-+            macro_ref.accesses_caller = True
-+        if "kwargs" in undeclared and "kwargs" not in skip_special_params:
-+            args.append(frame.symbols.declare_parameter("kwargs"))
-+            macro_ref.accesses_kwargs = True
-+        if "varargs" in undeclared and "varargs" not in skip_special_params:
-+            args.append(frame.symbols.declare_parameter("varargs"))
-+            macro_ref.accesses_varargs = True
-+
-+        # macros are delayed, they never require output checks
-+        frame.require_output_check = False
-+        frame.symbols.analyze_node(node)
-+        self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node)
-+        self.indent()
-+
-+        self.buffer(frame)
-+        self.enter_frame(frame)
-+
-+        self.push_parameter_definitions(frame)
-+        for idx, arg in enumerate(node.args):
-+            ref = frame.symbols.ref(arg.name)
-+            self.writeline("if %s is missing:" % ref)
-+            self.indent()
-+            try:
-+                default = node.defaults[idx - len(node.args)]
-+            except IndexError:
-+                self.writeline(
-+                    "%s = undefined(%r, name=%r)"
-+                    % (ref, "parameter %r was not provided" % arg.name, arg.name)
-+                )
-+            else:
-+                self.writeline("%s = " % ref)
-+                self.visit(default, frame)
-+            self.mark_parameter_stored(ref)
-+            self.outdent()
-+        self.pop_parameter_definitions()
-+
-+        self.blockvisit(node.body, frame)
-+        self.return_buffer_contents(frame, force_unescaped=True)
-+        self.leave_frame(frame, with_python_scope=True)
-+        self.outdent()
-+
-+        return frame, macro_ref
-+
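The `caller` bookkeeping above is what powers `{% call %}` blocks: when `find_undeclared` reports `caller` in the macro body, it is appended as an implicit parameter and `accesses_caller` is flagged on the macro reference. A minimal sketch against the public Jinja2 API:

    from jinja2 import Environment

    env = Environment()
    tmpl = env.from_string(
        "{% macro frame() %}<div>{{ caller() }}</div>{% endmacro %}"
        "{% call frame() %}hi{% endcall %}"
    )
    print(tmpl.render())  # <div>hi</div>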
-+    def macro_def(self, macro_ref, frame):
-+        """Dump the macro definition for the def created by macro_body."""
-+        arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args)
-+        name = getattr(macro_ref.node, "name", None)
-+        if len(macro_ref.node.args) == 1:
-+            arg_tuple += ","
-+        self.write(
-+            "Macro(environment, macro, %r, (%s), %r, %r, %r, "
-+            "context.eval_ctx.autoescape)"
-+            % (
-+                name,
-+                arg_tuple,
-+                macro_ref.accesses_kwargs,
-+                macro_ref.accesses_varargs,
-+                macro_ref.accesses_caller,
-+            )
-+        )
-+
-+    def position(self, node):
-+        """Return a human readable position for the node."""
-+        rv = "line %d" % node.lineno
-+        if self.name is not None:
-+            rv += " in " + repr(self.name)
-+        return rv
-+
-+    def dump_local_context(self, frame):
-+        return "{%s}" % ", ".join(
-+            "%r: %s" % (name, target)
-+            for name, target in iteritems(frame.symbols.dump_stores())
-+        )
-+
-+    def write_commons(self):
-+        """Writes a common preamble that is used by root and block functions.
-+        Primarily this sets up common local helpers and enforces a generator
-+        through a dead branch.
-+        """
-+        self.writeline("resolve = context.resolve_or_missing")
-+        self.writeline("undefined = environment.undefined")
-+        # always use the standard Undefined class for the implicit else of
-+        # conditional expressions
-+        self.writeline("cond_expr_undefined = Undefined")
-+        self.writeline("if 0: yield None")
-+
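The `if 0: yield None` line written by `write_commons` looks odd but is load-bearing: the mere presence of a `yield` makes CPython compile the function as a generator, even though the branch never executes. The same trick in isolation:

    # the dead branch is never taken, but "yield" still turns the
    # function into a generator at compile time
    def empty_root():
        if 0:
            yield None

    print(list(empty_root()))  # [] -- a generator that yields nothing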
-+    def push_parameter_definitions(self, frame):
-+        """Pushes all parameter targets from the given frame into a local
-+        stack that permits tracking of yet to be assigned parameters.  In
-+        particular this enables the optimization from `visit_Name` to skip
-+        undefined expressions for parameters in macros as macros can reference
-+        otherwise unbound parameters.
-+        """
-+        self._param_def_block.append(frame.symbols.dump_param_targets())
-+
-+    def pop_parameter_definitions(self):
-+        """Pops the current parameter definitions set."""
-+        self._param_def_block.pop()
-+
-+    def mark_parameter_stored(self, target):
-+        """Marks a parameter in the current parameter definitions as stored.
-+        This will skip the enforced undefined checks.
-+        """
-+        if self._param_def_block:
-+            self._param_def_block[-1].discard(target)
-+
-+    def push_context_reference(self, target):
-+        self._context_reference_stack.append(target)
-+
-+    def pop_context_reference(self):
-+        self._context_reference_stack.pop()
-+
-+    def get_context_ref(self):
-+        return self._context_reference_stack[-1]
-+
-+    def get_resolve_func(self):
-+        target = self._context_reference_stack[-1]
-+        if target == "context":
-+            return "resolve"
-+        return "%s.resolve" % target
-+
-+    def derive_context(self, frame):
-+        return "%s.derived(%s)" % (
-+            self.get_context_ref(),
-+            self.dump_local_context(frame),
-+        )
-+
-+    def parameter_is_undeclared(self, target):
-+        """Checks if a given target is an undeclared parameter."""
-+        if not self._param_def_block:
-+            return False
-+        return target in self._param_def_block[-1]
-+
-+    def push_assign_tracking(self):
-+        """Pushes a new layer for assignment tracking."""
-+        self._assign_stack.append(set())
-+
-+    def pop_assign_tracking(self, frame):
-+        """Pops the topmost level for assignment tracking and updates the
-+        context variables if necessary.
-+        """
-+        vars = self._assign_stack.pop()
-+        if not frame.toplevel or not vars:
-+            return
-+        public_names = [x for x in vars if x[:1] != "_"]
-+        if len(vars) == 1:
-+            name = next(iter(vars))
-+            ref = frame.symbols.ref(name)
-+            self.writeline("context.vars[%r] = %s" % (name, ref))
-+        else:
-+            self.writeline("context.vars.update({")
-+            for idx, name in enumerate(vars):
-+                if idx:
-+                    self.write(", ")
-+                ref = frame.symbols.ref(name)
-+                self.write("%r: %s" % (name, ref))
-+            self.write("})")
-+        if public_names:
-+            if len(public_names) == 1:
-+                self.writeline("context.exported_vars.add(%r)" % public_names[0])
-+            else:
-+                self.writeline(
-+                    "context.exported_vars.update((%s))"
-+                    % ", ".join(imap(repr, public_names))
-+                )
-+
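This exported-variable bookkeeping is what makes top-level `{% set %}` assignments visible as attributes of the template module (names starting with an underscore stay private). For example:

    from jinja2 import Environment

    mod = Environment().from_string("{% set greeting = 'hi' %}").module
    print(mod.greeting)  # hi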
-+    # -- Statement Visitors
-+
-+    def visit_Template(self, node, frame=None):
-+        assert frame is None, "no root frame allowed"
-+        eval_ctx = EvalContext(self.environment, self.name)
-+
-+        from .runtime import exported
-+
-+        self.writeline("from __future__ import %s" % ", ".join(code_features))
-+        self.writeline("from jinja2.runtime import " + ", ".join(exported))
-+
-+        if self.environment.is_async:
-+            self.writeline(
-+                "from jinja2.asyncsupport import auto_await, "
-+                "auto_aiter, AsyncLoopContext"
-+            )
-+
-+        # if we want a deferred initialization we cannot move the
-+        # environment into a local name
-+        envenv = not self.defer_init and ", environment=environment" or ""
-+
-+        # do we have an extends tag at all?  If not, we can save some
-+        # overhead by just not processing any inheritance code.
-+        have_extends = node.find(nodes.Extends) is not None
-+
-+        # find all blocks
-+        for block in node.find_all(nodes.Block):
-+            if block.name in self.blocks:
-+                self.fail("block %r defined twice" % block.name, block.lineno)
-+            self.blocks[block.name] = block
-+
-+        # find all imports and import them
-+        for import_ in node.find_all(nodes.ImportedName):
-+            if import_.importname not in self.import_aliases:
-+                imp = import_.importname
-+                self.import_aliases[imp] = alias = self.temporary_identifier()
-+                if "." in imp:
-+                    module, obj = imp.rsplit(".", 1)
-+                    self.writeline("from %s import %s as %s" % (module, obj, alias))
-+                else:
-+                    self.writeline("import %s as %s" % (imp, alias))
-+
-+        # add the load name
-+        self.writeline("name = %r" % self.name)
-+
-+        # generate the root render function.
-+        self.writeline(
-+            "%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1
-+        )
-+        self.indent()
-+        self.write_commons()
-+
-+        # process the root
-+        frame = Frame(eval_ctx)
-+        if "self" in find_undeclared(node.body, ("self",)):
-+            ref = frame.symbols.declare_parameter("self")
-+            self.writeline("%s = TemplateReference(context)" % ref)
-+        frame.symbols.analyze_node(node)
-+        frame.toplevel = frame.rootlevel = True
-+        frame.require_output_check = have_extends and not self.has_known_extends
-+        if have_extends:
-+            self.writeline("parent_template = None")
-+        self.enter_frame(frame)
-+        self.pull_dependencies(node.body)
-+        self.blockvisit(node.body, frame)
-+        self.leave_frame(frame, with_python_scope=True)
-+        self.outdent()
-+
-+        # make sure that the parent root is called.
-+        if have_extends:
-+            if not self.has_known_extends:
-+                self.indent()
-+                self.writeline("if parent_template is not None:")
-+            self.indent()
-+            if supports_yield_from and not self.environment.is_async:
-+                self.writeline("yield from parent_template.root_render_func(context)")
-+            else:
-+                self.writeline(
-+                    "%sfor event in parent_template."
-+                    "root_render_func(context):"
-+                    % (self.environment.is_async and "async " or "")
-+                )
-+                self.indent()
-+                self.writeline("yield event")
-+                self.outdent()
-+            self.outdent(1 + (not self.has_known_extends))
-+
-+        # at this point we now have the blocks collected and can visit them too.
-+        for name, block in iteritems(self.blocks):
-+            self.writeline(
-+                "%s(context, missing=missing%s):"
-+                % (self.func("block_" + name), envenv),
-+                block,
-+                1,
-+            )
-+            self.indent()
-+            self.write_commons()
-+            # It's important that we do not make this frame a child of the
-+            # toplevel template.  This would cause a variety of
-+            # interesting issues with identifier tracking.
-+            block_frame = Frame(eval_ctx)
-+            undeclared = find_undeclared(block.body, ("self", "super"))
-+            if "self" in undeclared:
-+                ref = block_frame.symbols.declare_parameter("self")
-+                self.writeline("%s = TemplateReference(context)" % ref)
-+            if "super" in undeclared:
-+                ref = block_frame.symbols.declare_parameter("super")
-+                self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name))
-+            block_frame.symbols.analyze_node(block)
-+            block_frame.block = name
-+            self.enter_frame(block_frame)
-+            self.pull_dependencies(block.body)
-+            self.blockvisit(block.body, block_frame)
-+            self.leave_frame(block_frame, with_python_scope=True)
-+            self.outdent()
-+
-+        self.writeline(
-+            "blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks),
-+            extra=1,
-+        )
-+
-+        # add a function that returns the debug info
-+        self.writeline(
-+            "debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info)
-+        )
-+
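Everything `visit_Template` assembles here -- the `root` render function, one `block_*` function per block, the `blocks` dict, and `debug_info` -- can be inspected directly, since `Environment.compile` accepts `raw=True` and returns the generated module source instead of a code object:

    from jinja2 import Environment

    env = Environment()
    # raw=True yields the generated Python source rather than a code object
    print(env.compile("Hello {{ name }}!", raw=True))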
-+    def visit_Block(self, node, frame):
-+        """Call a block and register it for the template."""
-+        level = 0
-+        if frame.toplevel:
-+            # if we know that we are a child template, there is no need to
-+            # check if we are one
-+            if self.has_known_extends:
-+                return
-+            if self.extends_so_far > 0:
-+                self.writeline("if parent_template is None:")
-+                self.indent()
-+                level += 1
-+
-+        if node.scoped:
-+            context = self.derive_context(frame)
-+        else:
-+            context = self.get_context_ref()
-+
-+        if (
-+            supports_yield_from
-+            and not self.environment.is_async
-+            and frame.buffer is None
-+        ):
-+            self.writeline(
-+                "yield from context.blocks[%r][0](%s)" % (node.name, context), node
-+            )
-+        else:
-+            loop = self.environment.is_async and "async for" or "for"
-+            self.writeline(
-+                "%s event in context.blocks[%r][0](%s):" % (loop, node.name, context),
-+                node,
-+            )
-+            self.indent()
-+            self.simple_write("event", frame)
-+            self.outdent()
-+
-+        self.outdent(level)
-+
-+    def visit_Extends(self, node, frame):
-+        """Calls the extender."""
-+        if not frame.toplevel:
-+            self.fail("cannot use extend from a non top-level scope", node.lineno)
-+
-+        # if the number of extends statements so far is zero, we don't
-+        # have to add a check that something extended the template
-+        # before this one.
-+        if self.extends_so_far > 0:
-+
-+            # if we have a known extends we just add a template runtime
-+            # error into the generated code.  We could catch that at compile
-+            # time too, but we prefer not to confuse users by throwing the
-+            # same error at different times just "because we can".
-+            if not self.has_known_extends:
-+                self.writeline("if parent_template is not None:")
-+                self.indent()
-+            self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times")
-+
-+            # if we have a known extends already we don't need that code here
-+            # as we know that the template execution will end here.
-+            if self.has_known_extends:
-+                raise CompilerExit()
-+            else:
-+                self.outdent()
-+
-+        self.writeline("parent_template = environment.get_template(", node)
-+        self.visit(node.template, frame)
-+        self.write(", %r)" % self.name)
-+        self.writeline(
-+            "for name, parent_block in parent_template.blocks.%s():" % dict_item_iter
-+        )
-+        self.indent()
-+        self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
-+        self.outdent()
-+
-+        # if this extends statement was in the root level we can take
-+        # advantage of that information and simplify the generated code
-+        # in the top level from this point onwards
-+        if frame.rootlevel:
-+            self.has_known_extends = True
-+
-+        # and now we have one more
-+        self.extends_so_far += 1
-+
-+    def visit_Include(self, node, frame):
-+        """Handles includes."""
-+        if node.ignore_missing:
-+            self.writeline("try:")
-+            self.indent()
-+
-+        func_name = "get_or_select_template"
-+        if isinstance(node.template, nodes.Const):
-+            if isinstance(node.template.value, string_types):
-+                func_name = "get_template"
-+            elif isinstance(node.template.value, (tuple, list)):
-+                func_name = "select_template"
-+        elif isinstance(node.template, (nodes.Tuple, nodes.List)):
-+            func_name = "select_template"
-+
-+        self.writeline("template = environment.%s(" % func_name, node)
-+        self.visit(node.template, frame)
-+        self.write(", %r)" % self.name)
-+        if node.ignore_missing:
-+            self.outdent()
-+            self.writeline("except TemplateNotFound:")
-+            self.indent()
-+            self.writeline("pass")
-+            self.outdent()
-+            self.writeline("else:")
-+            self.indent()
-+
-+        skip_event_yield = False
-+        if node.with_context:
-+            loop = self.environment.is_async and "async for" or "for"
-+            self.writeline(
-+                "%s event in template.root_render_func("
-+                "template.new_context(context.get_all(), True, "
-+                "%s)):" % (loop, self.dump_local_context(frame))
-+            )
-+        elif self.environment.is_async:
-+            self.writeline(
-+                "for event in (await "
-+                "template._get_default_module_async())"
-+                "._body_stream:"
-+            )
-+        else:
-+            if supports_yield_from:
-+                self.writeline("yield from template._get_default_module()._body_stream")
-+                skip_event_yield = True
-+            else:
-+                self.writeline(
-+                    "for event in template._get_default_module()._body_stream:"
-+                )
-+
-+        if not skip_event_yield:
-+            self.indent()
-+            self.simple_write("event", frame)
-+            self.outdent()
-+
-+        if node.ignore_missing:
-+            self.outdent()
-+
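The `func_name` dispatch above means the argument type of `{% include %}` picks the lookup function: a constant string compiles to `get_template`, a list or tuple to `select_template`, and anything dynamic to `get_or_select_template`. A sketch (the `DictLoader` setup is purely illustrative):

    from jinja2 import DictLoader, Environment

    env = Environment(loader=DictLoader({"a.html": "A"}))
    # the list literal compiles to environment.select_template(...)
    print(env.from_string("{% include ['missing.html', 'a.html'] %}").render())  # A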
-+    def visit_Import(self, node, frame):
-+        """Visit regular imports."""
-+        self.writeline("%s = " % frame.symbols.ref(node.target), node)
-+        if frame.toplevel:
-+            self.write("context.vars[%r] = " % node.target)
-+        if self.environment.is_async:
-+            self.write("await ")
-+        self.write("environment.get_template(")
-+        self.visit(node.template, frame)
-+        self.write(", %r)." % self.name)
-+        if node.with_context:
-+            self.write(
-+                "make_module%s(context.get_all(), True, %s)"
-+                % (
-+                    self.environment.is_async and "_async" or "",
-+                    self.dump_local_context(frame),
-+                )
-+            )
-+        elif self.environment.is_async:
-+            self.write("_get_default_module_async()")
-+        else:
-+            self.write("_get_default_module()")
-+        if frame.toplevel and not node.target.startswith("_"):
-+            self.writeline("context.exported_vars.discard(%r)" % node.target)
-+
-+    def visit_FromImport(self, node, frame):
-+        """Visit named imports."""
-+        self.newline(node)
-+        self.write(
-+            "included_template = %senvironment.get_template("
-+            % (self.environment.is_async and "await " or "")
-+        )
-+        self.visit(node.template, frame)
-+        self.write(", %r)." % self.name)
-+        if node.with_context:
-+            self.write(
-+                "make_module%s(context.get_all(), True, %s)"
-+                % (
-+                    self.environment.is_async and "_async" or "",
-+                    self.dump_local_context(frame),
-+                )
-+            )
-+        elif self.environment.is_async:
-+            self.write("_get_default_module_async()")
-+        else:
-+            self.write("_get_default_module()")
-+
-+        var_names = []
-+        discarded_names = []
-+        for name in node.names:
-+            if isinstance(name, tuple):
-+                name, alias = name
-+            else:
-+                alias = name
-+            self.writeline(
-+                "%s = getattr(included_template, "
-+                "%r, missing)" % (frame.symbols.ref(alias), name)
-+            )
-+            self.writeline("if %s is missing:" % frame.symbols.ref(alias))
-+            self.indent()
-+            self.writeline(
-+                "%s = undefined(%r %% "
-+                "included_template.__name__, "
-+                "name=%r)"
-+                % (
-+                    frame.symbols.ref(alias),
-+                    "the template %%r (imported on %s) does "
-+                    "not export the requested name %s"
-+                    % (self.position(node), repr(name)),
-+                    name,
-+                )
-+            )
-+            self.outdent()
-+            if frame.toplevel:
-+                var_names.append(alias)
-+                if not alias.startswith("_"):
-+                    discarded_names.append(alias)
-+
-+        if var_names:
-+            if len(var_names) == 1:
-+                name = var_names[0]
-+                self.writeline(
-+                    "context.vars[%r] = %s" % (name, frame.symbols.ref(name))
-+                )
-+            else:
-+                self.writeline(
-+                    "context.vars.update({%s})"
-+                    % ", ".join(
-+                        "%r: %s" % (name, frame.symbols.ref(name)) for name in var_names
-+                    )
-+                )
-+        if discarded_names:
-+            if len(discarded_names) == 1:
-+                self.writeline("context.exported_vars.discard(%r)" % discarded_names[0])
-+            else:
-+                self.writeline(
-+                    "context.exported_vars.difference_"
-+                    "update((%s))" % ", ".join(imap(repr, discarded_names))
-+                )
-+
-+    def visit_For(self, node, frame):
-+        loop_frame = frame.inner()
-+        test_frame = frame.inner()
-+        else_frame = frame.inner()
-+
-+        # try to figure out if we have an extended loop.  An extended loop
-+        # is necessary if the loop is in recursive mode or if the special loop
-+        # variable is accessed in the body.
-+        extended_loop = node.recursive or "loop" in find_undeclared(
-+            node.iter_child_nodes(only=("body",)), ("loop",)
-+        )
-+
-+        loop_ref = None
-+        if extended_loop:
-+            loop_ref = loop_frame.symbols.declare_parameter("loop")
-+
-+        loop_frame.symbols.analyze_node(node, for_branch="body")
-+        if node.else_:
-+            else_frame.symbols.analyze_node(node, for_branch="else")
-+
-+        if node.test:
-+            loop_filter_func = self.temporary_identifier()
-+            test_frame.symbols.analyze_node(node, for_branch="test")
-+            self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test)
-+            self.indent()
-+            self.enter_frame(test_frame)
-+            self.writeline(self.environment.is_async and "async for " or "for ")
-+            self.visit(node.target, loop_frame)
-+            self.write(" in ")
-+            self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter")
-+            self.write(":")
-+            self.indent()
-+            self.writeline("if ", node.test)
-+            self.visit(node.test, test_frame)
-+            self.write(":")
-+            self.indent()
-+            self.writeline("yield ")
-+            self.visit(node.target, loop_frame)
-+            self.outdent(3)
-+            self.leave_frame(test_frame, with_python_scope=True)
-+
-+        # if we don't have a recursive loop we have to find the shadowed
-+        # variables at that point.  Because loops can be nested but the loop
-+        # variable is a special one we have to enforce aliasing for it.
-+        if node.recursive:
-+            self.writeline(
-+                "%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node
-+            )
-+            self.indent()
-+            self.buffer(loop_frame)
-+
-+            # Use the same buffer for the else frame
-+            else_frame.buffer = loop_frame.buffer
-+
-+        # make sure the loop variable is a special one and raise a template
-+        # assertion error if a loop tries to write to loop
-+        if extended_loop:
-+            self.writeline("%s = missing" % loop_ref)
-+
-+        for name in node.find_all(nodes.Name):
-+            if name.ctx == "store" and name.name == "loop":
-+                self.fail(
-+                    "Can't assign to special loop variable in for-loop target",
-+                    name.lineno,
-+                )
-+
-+        if node.else_:
-+            iteration_indicator = self.temporary_identifier()
-+            self.writeline("%s = 1" % iteration_indicator)
-+
-+        self.writeline(self.environment.is_async and "async for " or "for ", node)
-+        self.visit(node.target, loop_frame)
-+        if extended_loop:
-+            if self.environment.is_async:
-+                self.write(", %s in AsyncLoopContext(" % loop_ref)
-+            else:
-+                self.write(", %s in LoopContext(" % loop_ref)
-+        else:
-+            self.write(" in ")
-+
-+        if node.test:
-+            self.write("%s(" % loop_filter_func)
-+        if node.recursive:
-+            self.write("reciter")
-+        else:
-+            if self.environment.is_async and not extended_loop:
-+                self.write("auto_aiter(")
-+            self.visit(node.iter, frame)
-+            if self.environment.is_async and not extended_loop:
-+                self.write(")")
-+        if node.test:
-+            self.write(")")
-+
-+        if node.recursive:
-+            self.write(", undefined, loop_render_func, depth):")
-+        else:
-+            self.write(extended_loop and ", undefined):" or ":")
-+
-+        self.indent()
-+        self.enter_frame(loop_frame)
-+
-+        self.blockvisit(node.body, loop_frame)
-+        if node.else_:
-+            self.writeline("%s = 0" % iteration_indicator)
-+        self.outdent()
-+        self.leave_frame(
-+            loop_frame, with_python_scope=node.recursive and not node.else_
-+        )
-+
-+        if node.else_:
-+            self.writeline("if %s:" % iteration_indicator)
-+            self.indent()
-+            self.enter_frame(else_frame)
-+            self.blockvisit(node.else_, else_frame)
-+            self.leave_frame(else_frame)
-+            self.outdent()
-+
-+        # if the node was recursive we have to return the buffer contents
-+        # and start the iteration code
-+        if node.recursive:
-+            self.return_buffer_contents(loop_frame)
-+            self.outdent()
-+            self.start_write(frame, node)
-+            if self.environment.is_async:
-+                self.write("await ")
-+            self.write("loop(")
-+            if self.environment.is_async:
-+                self.write("auto_aiter(")
-+            self.visit(node.iter, frame)
-+            if self.environment.is_async:
-+                self.write(")")
-+            self.write(", loop)")
-+            self.end_write(frame)
-+
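Note that `extended_loop` only becomes true when the body actually uses the special `loop` variable (or the loop is recursive), so plain loops compile down to a bare `for` with no `LoopContext` overhead. A render that does flip it on:

    from jinja2 import Environment

    # "loop.index" in the body is what makes find_undeclared report "loop"
    out = Environment().from_string(
        "{% for x in items %}{{ loop.index }}:{{ x }} {% endfor %}"
    ).render(items="ab")
    print(out)  # 1:a 2:b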
-+    def visit_If(self, node, frame):
-+        if_frame = frame.soft()
-+        self.writeline("if ", node)
-+        self.visit(node.test, if_frame)
-+        self.write(":")
-+        self.indent()
-+        self.blockvisit(node.body, if_frame)
-+        self.outdent()
-+        for elif_ in node.elif_:
-+            self.writeline("elif ", elif_)
-+            self.visit(elif_.test, if_frame)
-+            self.write(":")
-+            self.indent()
-+            self.blockvisit(elif_.body, if_frame)
-+            self.outdent()
-+        if node.else_:
-+            self.writeline("else:")
-+            self.indent()
-+            self.blockvisit(node.else_, if_frame)
-+            self.outdent()
-+
-+    def visit_Macro(self, node, frame):
-+        macro_frame, macro_ref = self.macro_body(node, frame)
-+        self.newline()
-+        if frame.toplevel:
-+            if not node.name.startswith("_"):
-+                self.write("context.exported_vars.add(%r)" % node.name)
-+            self.writeline("context.vars[%r] = " % node.name)
-+        self.write("%s = " % frame.symbols.ref(node.name))
-+        self.macro_def(macro_ref, macro_frame)
-+
-+    def visit_CallBlock(self, node, frame):
-+        call_frame, macro_ref = self.macro_body(node, frame)
-+        self.writeline("caller = ")
-+        self.macro_def(macro_ref, call_frame)
-+        self.start_write(frame, node)
-+        self.visit_Call(node.call, frame, forward_caller=True)
-+        self.end_write(frame)
-+
-+    def visit_FilterBlock(self, node, frame):
-+        filter_frame = frame.inner()
-+        filter_frame.symbols.analyze_node(node)
-+        self.enter_frame(filter_frame)
-+        self.buffer(filter_frame)
-+        self.blockvisit(node.body, filter_frame)
-+        self.start_write(frame, node)
-+        self.visit_Filter(node.filter, filter_frame)
-+        self.end_write(frame)
-+        self.leave_frame(filter_frame)
-+
-+    def visit_With(self, node, frame):
-+        with_frame = frame.inner()
-+        with_frame.symbols.analyze_node(node)
-+        self.enter_frame(with_frame)
-+        for target, expr in izip(node.targets, node.values):
-+            self.newline()
-+            self.visit(target, with_frame)
-+            self.write(" = ")
-+            self.visit(expr, frame)
-+        self.blockvisit(node.body, with_frame)
-+        self.leave_frame(with_frame)
-+
-+    def visit_ExprStmt(self, node, frame):
-+        self.newline(node)
-+        self.visit(node.node, frame)
-+
-+    _FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src"))
-+    #: The default finalize function if the environment isn't configured
-+    #: with one. Or if the environment has one, this is called on that
-+    #: function's output for constants.
-+    _default_finalize = text_type
-+    _finalize = None
-+
-+    def _make_finalize(self):
-+        """Build the finalize function to be used on constants and at
-+        runtime. Cached so it's only created once for all output nodes.
-+
-+        Returns a ``namedtuple`` with the following attributes:
-+
-+        ``const``
-+            A function to finalize constant data at compile time.
-+
-+        ``src``
-+            Source code to output around nodes to be evaluated at
-+            runtime.
-+        """
-+        if self._finalize is not None:
-+            return self._finalize
-+
-+        finalize = default = self._default_finalize
-+        src = None
-+
-+        if self.environment.finalize:
-+            src = "environment.finalize("
-+            env_finalize = self.environment.finalize
-+
-+            def finalize(value):
-+                return default(env_finalize(value))
-+
-+            if getattr(env_finalize, "contextfunction", False):
-+                src += "context, "
-+                finalize = None  # noqa: F811
-+            elif getattr(env_finalize, "evalcontextfunction", False):
-+                src += "context.eval_ctx, "
-+                finalize = None
-+            elif getattr(env_finalize, "environmentfunction", False):
-+                src += "environment, "
-+
-+                def finalize(value):
-+                    return default(env_finalize(self.environment, value))
-+
-+        self._finalize = self._FinalizeInfo(finalize, src)
-+        return self._finalize
-+
-+    def _output_const_repr(self, group):
-+        """Given a group of constant values converted from ``Output``
-+        child nodes, produce a string to write to the template module
-+        source.
-+        """
-+        return repr(concat(group))
-+
-+    def _output_child_to_const(self, node, frame, finalize):
-+        """Try to optimize a child of an ``Output`` node by trying to
-+        convert it to constant, finalized data at compile time.
-+
-+        If :exc:`Impossible` is raised, the node is not constant and
-+        will be evaluated at runtime. Any other exception will also be
-+        evaluated at runtime for easier debugging.
-+        """
-+        const = node.as_const(frame.eval_ctx)
-+
-+        if frame.eval_ctx.autoescape:
-+            const = escape(const)
-+
-+        # Template data doesn't go through finalize.
-+        if isinstance(node, nodes.TemplateData):
-+            return text_type(const)
-+
-+        return finalize.const(const)
-+
-+    def _output_child_pre(self, node, frame, finalize):
-+        """Output extra source code before visiting a child of an
-+        ``Output`` node.
-+        """
-+        if frame.eval_ctx.volatile:
-+            self.write("(escape if context.eval_ctx.autoescape else to_string)(")
-+        elif frame.eval_ctx.autoescape:
-+            self.write("escape(")
-+        else:
-+            self.write("to_string(")
-+
-+        if finalize.src is not None:
-+            self.write(finalize.src)
-+
-+    def _output_child_post(self, node, frame, finalize):
-+        """Output extra source code after visiting a child of an
-+        ``Output`` node.
-+        """
-+        self.write(")")
-+
-+        if finalize.src is not None:
-+            self.write(")")
-+
-+    def visit_Output(self, node, frame):
-+        # If an extends is active, don't render outside a block.
-+        if frame.require_output_check:
-+            # A top-level extends is known to exist at compile time.
-+            if self.has_known_extends:
-+                return
-+
-+            self.writeline("if parent_template is None:")
-+            self.indent()
-+
-+        finalize = self._make_finalize()
-+        body = []
-+
-+        # Evaluate constants at compile time if possible. Each item in
-+        # body will be either a list of static data or a node to be
-+        # evaluated at runtime.
-+        for child in node.nodes:
-+            try:
-+                if not (
-+                    # If the finalize function requires runtime context,
-+                    # constants can't be evaluated at compile time.
-+                    finalize.const
-+                    # Unless it's basic template data that won't be
-+                    # finalized anyway.
-+                    or isinstance(child, nodes.TemplateData)
-+                ):
-+                    raise nodes.Impossible()
-+
-+                const = self._output_child_to_const(child, frame, finalize)
-+            except (nodes.Impossible, Exception):
-+                # The node was not constant and needs to be evaluated at
-+                # runtime. Or another error was raised, which is easier
-+                # to debug at runtime.
-+                body.append(child)
-+                continue
-+
-+            if body and isinstance(body[-1], list):
-+                body[-1].append(const)
-+            else:
-+                body.append([const])
-+
-+        if frame.buffer is not None:
-+            if len(body) == 1:
-+                self.writeline("%s.append(" % frame.buffer)
-+            else:
-+                self.writeline("%s.extend((" % frame.buffer)
-+
-+            self.indent()
-+
-+        for item in body:
-+            if isinstance(item, list):
-+                # A group of constant data to join and output.
-+                val = self._output_const_repr(item)
-+
-+                if frame.buffer is None:
-+                    self.writeline("yield " + val)
-+                else:
-+                    self.writeline(val + ",")
-+            else:
-+                if frame.buffer is None:
-+                    self.writeline("yield ", item)
-+                else:
-+                    self.newline(item)
-+
-+                # A node to be evaluated at runtime.
-+                self._output_child_pre(item, frame, finalize)
-+                self.visit(item, frame)
-+                self._output_child_post(item, frame, finalize)
-+
-+                if frame.buffer is not None:
-+                    self.write(",")
-+
-+        if frame.buffer is not None:
-+            self.outdent()
-+            self.writeline(")" if len(body) == 1 else "))")
-+
-+        if frame.require_output_check:
-+            self.outdent()
-+
-+    def visit_Assign(self, node, frame):
-+        self.push_assign_tracking()
-+        self.newline(node)
-+        self.visit(node.target, frame)
-+        self.write(" = ")
-+        self.visit(node.node, frame)
-+        self.pop_assign_tracking(frame)
-+
-+    def visit_AssignBlock(self, node, frame):
-+        self.push_assign_tracking()
-+        block_frame = frame.inner()
-+        # This is a special case.  Since a set block always captures we
-+        # will disable output checks.  This way one can use set blocks
-+        # toplevel even in extended templates.
-+        block_frame.require_output_check = False
-+        block_frame.symbols.analyze_node(node)
-+        self.enter_frame(block_frame)
-+        self.buffer(block_frame)
-+        self.blockvisit(node.body, block_frame)
-+        self.newline(node)
-+        self.visit(node.target, frame)
-+        self.write(" = (Markup if context.eval_ctx.autoescape else identity)(")
-+        if node.filter is not None:
-+            self.visit_Filter(node.filter, block_frame)
-+        else:
-+            self.write("concat(%s)" % block_frame.buffer)
-+        self.write(")")
-+        self.pop_assign_tracking(frame)
-+        self.leave_frame(block_frame)
-+
-+    # -- Expression Visitors
-+
-+    def visit_Name(self, node, frame):
-+        if node.ctx == "store" and frame.toplevel:
-+            if self._assign_stack:
-+                self._assign_stack[-1].add(node.name)
-+        ref = frame.symbols.ref(node.name)
-+
-+        # If we are looking up a variable we might have to deal with the
-+        # case where it's undefined.  We can skip that case if the load
-+        # instruction indicates a parameter which are always defined.
-+        if node.ctx == "load":
-+            load = frame.symbols.find_load(ref)
-+            if not (
-+                load is not None
-+                and load[0] == VAR_LOAD_PARAMETER
-+                and not self.parameter_is_undeclared(ref)
-+            ):
-+                self.write(
-+                    "(undefined(name=%r) if %s is missing else %s)"
-+                    % (node.name, ref, ref)
-+                )
-+                return
-+
-+        self.write(ref)
-+
-+    def visit_NSRef(self, node, frame):
-+        # NSRefs can only be used to store values; since they use the normal
-+        # `foo.bar` notation they will be parsed as a normal attribute access
-+        # when used anywhere but in a `set` context
-+        ref = frame.symbols.ref(node.name)
-+        self.writeline("if not isinstance(%s, Namespace):" % ref)
-+        self.indent()
-+        self.writeline(
-+            "raise TemplateRuntimeError(%r)"
-+            % "cannot assign attribute on non-namespace object"
-+        )
-+        self.outdent()
-+        self.writeline("%s[%r]" % (ref, node.attr))
-+
-+    def visit_Const(self, node, frame):
-+        val = node.as_const(frame.eval_ctx)
-+        if isinstance(val, float):
-+            self.write(str(val))
-+        else:
-+            self.write(repr(val))
-+
-+    def visit_TemplateData(self, node, frame):
-+        try:
-+            self.write(repr(node.as_const(frame.eval_ctx)))
-+        except nodes.Impossible:
-+            self.write(
-+                "(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data
-+            )
-+
-+    def visit_Tuple(self, node, frame):
-+        self.write("(")
-+        idx = -1
-+        for idx, item in enumerate(node.items):
-+            if idx:
-+                self.write(", ")
-+            self.visit(item, frame)
-+        self.write(idx == 0 and ",)" or ")")
-+
-+    def visit_List(self, node, frame):
-+        self.write("[")
-+        for idx, item in enumerate(node.items):
-+            if idx:
-+                self.write(", ")
-+            self.visit(item, frame)
-+        self.write("]")
-+
-+    def visit_Dict(self, node, frame):
-+        self.write("{")
-+        for idx, item in enumerate(node.items):
-+            if idx:
-+                self.write(", ")
-+            self.visit(item.key, frame)
-+            self.write(": ")
-+            self.visit(item.value, frame)
-+        self.write("}")
-+
-+    def binop(operator, interceptable=True):  # noqa: B902
-+        @optimizeconst
-+        def visitor(self, node, frame):
-+            if (
-+                self.environment.sandboxed
-+                and operator in self.environment.intercepted_binops
-+            ):
-+                self.write("environment.call_binop(context, %r, " % operator)
-+                self.visit(node.left, frame)
-+                self.write(", ")
-+                self.visit(node.right, frame)
-+            else:
-+                self.write("(")
-+                self.visit(node.left, frame)
-+                self.write(" %s " % operator)
-+                self.visit(node.right, frame)
-+            self.write(")")
-+
-+        return visitor
-+
-+    def uaop(operator, interceptable=True):  # noqa: B902
-+        @optimizeconst
-+        def visitor(self, node, frame):
-+            if (
-+                self.environment.sandboxed
-+                and operator in self.environment.intercepted_unops
-+            ):
-+                self.write("environment.call_unop(context, %r, " % operator)
-+                self.visit(node.node, frame)
-+            else:
-+                self.write("(" + operator)
-+                self.visit(node.node, frame)
-+            self.write(")")
-+
-+        return visitor
-+
-+    visit_Add = binop("+")
-+    visit_Sub = binop("-")
-+    visit_Mul = binop("*")
-+    visit_Div = binop("/")
-+    visit_FloorDiv = binop("//")
-+    visit_Pow = binop("**")
-+    visit_Mod = binop("%")
-+    visit_And = binop("and", interceptable=False)
-+    visit_Or = binop("or", interceptable=False)
-+    visit_Pos = uaop("+")
-+    visit_Neg = uaop("-")
-+    visit_Not = uaop("not ", interceptable=False)
-+    del binop, uaop
-+
-+    @optimizeconst
-+    def visit_Concat(self, node, frame):
-+        if frame.eval_ctx.volatile:
-+            func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)"
-+        elif frame.eval_ctx.autoescape:
-+            func_name = "markup_join"
-+        else:
-+            func_name = "unicode_join"
-+        self.write("%s((" % func_name)
-+        for arg in node.nodes:
-+            self.visit(arg, frame)
-+            self.write(", ")
-+        self.write("))")
-+
-+    @optimizeconst
-+    def visit_Compare(self, node, frame):
-+        self.write("(")
-+        self.visit(node.expr, frame)
-+        for op in node.ops:
-+            self.visit(op, frame)
-+        self.write(")")
-+
-+    def visit_Operand(self, node, frame):
-+        self.write(" %s " % operators[node.op])
-+        self.visit(node.expr, frame)
-+
-+    @optimizeconst
-+    def visit_Getattr(self, node, frame):
-+        if self.environment.is_async:
-+            self.write("(await auto_await(")
-+
-+        self.write("environment.getattr(")
-+        self.visit(node.node, frame)
-+        self.write(", %r)" % node.attr)
-+
-+        if self.environment.is_async:
-+            self.write("))")
-+
-+    @optimizeconst
-+    def visit_Getitem(self, node, frame):
-+        # slices bypass the environment getitem method.
-+        if isinstance(node.arg, nodes.Slice):
-+            self.visit(node.node, frame)
-+            self.write("[")
-+            self.visit(node.arg, frame)
-+            self.write("]")
-+        else:
-+            if self.environment.is_async:
-+                self.write("(await auto_await(")
-+
-+            self.write("environment.getitem(")
-+            self.visit(node.node, frame)
-+            self.write(", ")
-+            self.visit(node.arg, frame)
-+            self.write(")")
-+
-+            if self.environment.is_async:
-+                self.write("))")
-+
-+    def visit_Slice(self, node, frame):
-+        if node.start is not None:
-+            self.visit(node.start, frame)
-+        self.write(":")
-+        if node.stop is not None:
-+            self.visit(node.stop, frame)
-+        if node.step is not None:
-+            self.write(":")
-+            self.visit(node.step, frame)
-+
-+    @optimizeconst
-+    def visit_Filter(self, node, frame):
-+        if self.environment.is_async:
-+            self.write("await auto_await(")
-+        self.write(self.filters[node.name] + "(")
-+        func = self.environment.filters.get(node.name)
-+        if func is None:
-+            self.fail("no filter named %r" % node.name, node.lineno)
-+        if getattr(func, "contextfilter", False):
-+            self.write("context, ")
-+        elif getattr(func, "evalcontextfilter", False):
-+            self.write("context.eval_ctx, ")
-+        elif getattr(func, "environmentfilter", False):
-+            self.write("environment, ")
-+
-+        # if the filter node is None we are inside a filter block
-+        # and want to write to the current buffer
-+        if node.node is not None:
-+            self.visit(node.node, frame)
-+        elif frame.eval_ctx.volatile:
-+            self.write(
-+                "(context.eval_ctx.autoescape and"
-+                " Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer)
-+            )
-+        elif frame.eval_ctx.autoescape:
-+            self.write("Markup(concat(%s))" % frame.buffer)
-+        else:
-+            self.write("concat(%s)" % frame.buffer)
-+        self.signature(node, frame)
-+        self.write(")")
-+        if self.environment.is_async:
-+            self.write(")")
-+
-+    @optimizeconst
-+    def visit_Test(self, node, frame):
-+        self.write(self.tests[node.name] + "(")
-+        if node.name not in self.environment.tests:
-+            self.fail("no test named %r" % node.name, node.lineno)
-+        self.visit(node.node, frame)
-+        self.signature(node, frame)
-+        self.write(")")
-+
-+    @optimizeconst
-+    def visit_CondExpr(self, node, frame):
-+        def write_expr2():
-+            if node.expr2 is not None:
-+                return self.visit(node.expr2, frame)
-+            self.write(
-+                "cond_expr_undefined(%r)"
-+                % (
-+                    "the inline if-"
-+                    "expression on %s evaluated to false and "
-+                    "no else section was defined." % self.position(node)
-+                )
-+            )
-+
-+        self.write("(")
-+        self.visit(node.expr1, frame)
-+        self.write(" if ")
-+        self.visit(node.test, frame)
-+        self.write(" else ")
-+        write_expr2()
-+        self.write(")")
-+
-+    @optimizeconst
-+    def visit_Call(self, node, frame, forward_caller=False):
-+        if self.environment.is_async:
-+            self.write("await auto_await(")
-+        if self.environment.sandboxed:
-+            self.write("environment.call(context, ")
-+        else:
-+            self.write("context.call(")
-+        self.visit(node.node, frame)
-+        extra_kwargs = forward_caller and {"caller": "caller"} or None
-+        self.signature(node, frame, extra_kwargs)
-+        self.write(")")
-+        if self.environment.is_async:
-+            self.write(")")
-+
-+    def visit_Keyword(self, node, frame):
-+        self.write(node.key + "=")
-+        self.visit(node.value, frame)
-+
-+    # -- Unused nodes for extensions
-+
-+    def visit_MarkSafe(self, node, frame):
-+        self.write("Markup(")
-+        self.visit(node.expr, frame)
-+        self.write(")")
-+
-+    def visit_MarkSafeIfAutoescape(self, node, frame):
-+        self.write("(context.eval_ctx.autoescape and Markup or identity)(")
-+        self.visit(node.expr, frame)
-+        self.write(")")
-+
-+    def visit_EnvironmentAttribute(self, node, frame):
-+        self.write("environment." + node.name)
-+
-+    def visit_ExtensionAttribute(self, node, frame):
-+        self.write("environment.extensions[%r].%s" % (node.identifier, node.name))
-+
-+    def visit_ImportedName(self, node, frame):
-+        self.write(self.import_aliases[node.importname])
-+
-+    def visit_InternalName(self, node, frame):
-+        self.write(node.name)
-+
-+    def visit_ContextReference(self, node, frame):
-+        self.write("context")
-+
-+    def visit_DerivedContextReference(self, node, frame):
-+        self.write(self.derive_context(frame))
-+
-+    def visit_Continue(self, node, frame):
-+        self.writeline("continue", node)
-+
-+    def visit_Break(self, node, frame):
-+        self.writeline("break", node)
-+
-+    def visit_Scope(self, node, frame):
-+        scope_frame = frame.inner()
-+        scope_frame.symbols.analyze_node(node)
-+        self.enter_frame(scope_frame)
-+        self.blockvisit(node.body, scope_frame)
-+        self.leave_frame(scope_frame)
-+
-+    def visit_OverlayScope(self, node, frame):
-+        ctx = self.temporary_identifier()
-+        self.writeline("%s = %s" % (ctx, self.derive_context(frame)))
-+        self.writeline("%s.vars = " % ctx)
-+        self.visit(node.context, frame)
-+        self.push_context_reference(ctx)
-+
-+        scope_frame = frame.inner(isolated=True)
-+        scope_frame.symbols.analyze_node(node)
-+        self.enter_frame(scope_frame)
-+        self.blockvisit(node.body, scope_frame)
-+        self.leave_frame(scope_frame)
-+        self.pop_context_reference()
-+
-+    def visit_EvalContextModifier(self, node, frame):
-+        for keyword in node.options:
-+            self.writeline("context.eval_ctx.%s = " % keyword.key)
-+            self.visit(keyword.value, frame)
-+            try:
-+                val = keyword.value.as_const(frame.eval_ctx)
-+            except nodes.Impossible:
-+                frame.eval_ctx.volatile = True
-+            else:
-+                setattr(frame.eval_ctx, keyword.key, val)
-+
-+    def visit_ScopedEvalContextModifier(self, node, frame):
-+        old_ctx_name = self.temporary_identifier()
-+        saved_ctx = frame.eval_ctx.save()
-+        self.writeline("%s = context.eval_ctx.save()" % old_ctx_name)
-+        self.visit_EvalContextModifier(node, frame)
-+        for child in node.body:
-+            self.visit(child, frame)
-+        frame.eval_ctx.revert(saved_ctx)
-+        self.writeline("context.eval_ctx.revert(%s)" % old_ctx_name)
-diff --git a/third_party/python/Jinja2/jinja2/constants.py b/third_party/python/Jinja2/jinja2/constants.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/constants.py
-@@ -0,0 +1,21 @@
-+# -*- coding: utf-8 -*-
-+#: list of lorem ipsum words used by the lipsum() helper function
-+LOREM_IPSUM_WORDS = u"""\
-+a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
-+auctor augue bibendum blandit class commodo condimentum congue consectetuer
-+consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
-+diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
-+elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
-+faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
-+hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
-+justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
-+luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
-+mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
-+nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
-+penatibus per pharetra phasellus placerat platea porta porttitor posuere
-+potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
-+ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
-+sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
-+tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
-+ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
-+viverra volutpat vulputate"""
-diff --git a/third_party/python/Jinja2/jinja2/debug.py b/third_party/python/Jinja2/jinja2/debug.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/debug.py
-@@ -0,0 +1,271 @@
-+import sys
-+from types import CodeType
-+
-+from . import TemplateSyntaxError
-+from ._compat import PYPY
-+from .utils import internal_code
-+from .utils import missing
-+
-+
-+def rewrite_traceback_stack(source=None):
-+    """Rewrite the current exception to replace any tracebacks from
-+    within compiled template code with tracebacks that look like they
-+    came from the template source.
-+
-+    This must be called within an ``except`` block.
-+
-+    :param source: For ``TemplateSyntaxError``, the original source if
-+        known.
-+    :return: A :meth:`sys.exc_info` tuple that can be re-raised.
-+    """
-+    exc_type, exc_value, tb = sys.exc_info()
-+
-+    if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
-+        exc_value.translated = True
-+        exc_value.source = source
-+
-+        try:
-+            # Remove the old traceback on Python 3, otherwise the frames
-+            # from the compiler still show up.
-+            exc_value.with_traceback(None)
-+        except AttributeError:
-+            pass
-+
-+        # Outside of runtime, so the frame isn't executing template
-+        # code, but it still needs to point at the template.
-+        tb = fake_traceback(
-+            exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
-+        )
-+    else:
-+        # Skip the frame for the render function.
-+        tb = tb.tb_next
-+
-+    stack = []
-+
-+    # Build the stack of traceback objects, replacing any in template
-+    # code with the source file and line information.
-+    while tb is not None:
-+        # Skip frames decorated with @internalcode. These are internal
-+        # calls that aren't useful in template debugging output.
-+        if tb.tb_frame.f_code in internal_code:
-+            tb = tb.tb_next
-+            continue
-+
-+        template = tb.tb_frame.f_globals.get("__jinja_template__")
-+
-+        if template is not None:
-+            lineno = template.get_corresponding_lineno(tb.tb_lineno)
-+            fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
-+            stack.append(fake_tb)
-+        else:
-+            stack.append(tb)
-+
-+        tb = tb.tb_next
-+
-+    tb_next = None
-+
-+    # Assign tb_next in reverse to avoid circular references.
-+    for tb in reversed(stack):
-+        tb_next = tb_set_next(tb, tb_next)
-+
-+    return exc_type, exc_value, tb_next
-+
-+
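`Template.render` already routes errors through this function via `Environment.handle_exception`, so calling it by hand is rarely needed; the sketch below only shows the raw mechanics, assuming the vendored module layout above:

    from jinja2 import Environment, StrictUndefined
    from jinja2.debug import rewrite_traceback_stack

    try:
        Environment(undefined=StrictUndefined).from_string("{{ missing }}").render()
    except Exception:
        # must run inside the except block; returns a re-raisable triple
        exc_type, exc_value, tb = rewrite_traceback_stack()
        raise exc_value.with_traceback(tb)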
-+def fake_traceback(exc_value, tb, filename, lineno):
-+    """Produce a new traceback object that looks like it came from the
-+    template source instead of the compiled code. The filename, line
-+    number, and location name will point to the template, and the local
-+    variables will be the current template context.
-+
-+    :param exc_value: The original exception to be re-raised to create
-+        the new traceback.
-+    :param tb: The original traceback to get the local variables and
-+        code info from.
-+    :param filename: The template filename.
-+    :param lineno: The line number in the template source.
-+    """
-+    if tb is not None:
-+        # Replace the real locals with the context that would be
-+        # available at that point in the template.
-+        locals = get_template_locals(tb.tb_frame.f_locals)
-+        locals.pop("__jinja_exception__", None)
-+    else:
-+        locals = {}
-+
-+    globals = {
-+        "__name__": filename,
-+        "__file__": filename,
-+        "__jinja_exception__": exc_value,
-+    }
-+    # Raise an exception at the correct line number.
-+    code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")
-+
-+    # Build a new code object that points to the template file and
-+    # replaces the location with a block name.
-+    try:
-+        location = "template"
-+
-+        if tb is not None:
-+            function = tb.tb_frame.f_code.co_name
-+
-+            if function == "root":
-+                location = "top-level template code"
-+            elif function.startswith("block_"):
-+                location = 'block "%s"' % function[6:]
-+
-+        # Collect arguments for the new code object. CodeType only
-+        # accepts positional arguments, and newer Python versions have
-+        # inserted additional arguments.
-+        code_args = []
-+
-+        for attr in (
-+            "argcount",
-+            "posonlyargcount",  # Python 3.8
-+            "kwonlyargcount",  # Python 3
-+            "nlocals",
-+            "stacksize",
-+            "flags",
-+            "code",  # codestring
-+            "consts",  # constants
-+            "names",
-+            "varnames",
-+            ("filename", filename),
-+            ("name", location),
-+            "firstlineno",
-+            "lnotab",
-+            "freevars",
-+            "cellvars",
-+        ):
-+            if isinstance(attr, tuple):
-+                # Replace with given value.
-+                code_args.append(attr[1])
-+                continue
-+
-+            try:
-+                # Copy original value if it exists.
-+                code_args.append(getattr(code, "co_" + attr))
-+            except AttributeError:
-+                # Some arguments were added later.
-+                continue
-+
-+        code = CodeType(*code_args)
-+    except Exception:
-+        # Some environments such as Google App Engine don't support
-+        # modifying code objects.
-+        pass
-+
-+    # Execute the new code, which is guaranteed to raise, and return
-+    # the new traceback without this frame.
-+    try:
-+        exec(code, globals, locals)
-+    except BaseException:
-+        return sys.exc_info()[2].tb_next
-+
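The newline padding on the `compile(...)` call above is what relocates the reported line: prepending `lineno - 1` blank lines puts the synthesized `raise` statement on the template's own line number. The trick in isolation:

    import traceback

    # two leading newlines place the raise on line 3 of the fake file
    code = compile("\n" * 2 + "raise RuntimeError('boom')", "demo.html", "exec")
    try:
        exec(code)
    except RuntimeError:
        traceback.print_exc()  # ... File "demo.html", line 3, in <module> ...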
-+
-+def get_template_locals(real_locals):
-+    """Based on the runtime locals, get the context that would be
-+    available at that point in the template.
-+    """
-+    # Start with the current template context.
-+    ctx = real_locals.get("context")
-+
-+    if ctx:
-+        data = ctx.get_all().copy()
-+    else:
-+        data = {}
-+
-+    # Might be in a derived context that only sets local variables
-+    # rather than pushing a context. Local variables follow the scheme
-+    # l_depth_name. Find the highest-depth local that has a value for
-+    # each name.
-+    local_overrides = {}
-+
-+    for name, value in real_locals.items():
-+        if not name.startswith("l_") or value is missing:
-+            # Not a template variable, or no longer relevant.
-+            continue
-+
-+        try:
-+            _, depth, name = name.split("_", 2)
-+            depth = int(depth)
-+        except ValueError:
-+            continue
-+
-+        cur_depth = local_overrides.get(name, (-1,))[0]
-+
-+        if cur_depth < depth:
-+            local_overrides[name] = (depth, value)
-+
-+    # Modify the context with any derived context.
-+    for name, (_, value) in local_overrides.items():
-+        if value is missing:
-+            data.pop(name, None)
-+        else:
-+            data[name] = value
-+
-+    return data
-+
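-+# A sketch of the ``l_depth_name`` scheme handled above (hypothetical
-+# values, with no ``context`` in the locals):
-+#
-+#     real_locals = {"l_0_item": missing, "l_1_item": 42, "unrelated": 1}
-+#     get_template_locals(real_locals)  # -> {"item": 42}
-+#
-+# The depth-1 binding wins over the shadowed depth-0 one, and names that
-+# do not follow the scheme are dropped.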
-+
-+if sys.version_info >= (3, 7):
-+    # tb_next is directly assignable as of Python 3.7
-+    def tb_set_next(tb, tb_next):
-+        tb.tb_next = tb_next
-+        return tb
-+
-+
-+elif PYPY:
-+    # PyPy might have special support, and won't work with ctypes.
-+    try:
-+        import tputil
-+    except ImportError:
-+        # Without tproxy support, use the original traceback.
-+        def tb_set_next(tb, tb_next):
-+            return tb
-+
-+    else:
-+        # With tproxy support, create a proxy around the traceback that
-+        # returns the new tb_next.
-+        def tb_set_next(tb, tb_next):
-+            def controller(op):
-+                if op.opname == "__getattribute__" and op.args[0] == "tb_next":
-+                    return tb_next
-+
-+                return op.delegate()
-+
-+            return tputil.make_proxy(controller, obj=tb)
-+
-+
-+else:
-+    # Use ctypes to assign tb_next at the C level since it's read-only
-+    # from Python.
-+    import ctypes
-+
-+    class _CTraceback(ctypes.Structure):
-+        _fields_ = [
-+            # Extra PyObject slots when compiled with Py_TRACE_REFS.
-+            (
-+                "PyObject_HEAD",
-+                ctypes.c_byte * (32 if hasattr(sys, "getobjects") else 16),
-+            ),
-+            # Only care about tb_next as an object, not a traceback.
-+            ("tb_next", ctypes.py_object),
-+        ]
-+
-+    def tb_set_next(tb, tb_next):
-+        c_tb = _CTraceback.from_address(id(tb))
-+
-+        # Clear out the old tb_next.
-+        if tb.tb_next is not None:
-+            c_tb_next = ctypes.py_object(tb.tb_next)
-+            c_tb.tb_next = ctypes.py_object()
-+            ctypes.pythonapi.Py_DecRef(c_tb_next)
-+
-+        # Assign the new tb_next.
-+        if tb_next is not None:
-+            c_tb_next = ctypes.py_object(tb_next)
-+            ctypes.pythonapi.Py_IncRef(c_tb_next)
-+            c_tb.tb_next = c_tb_next
-+
-+        return tb
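-+
-+# Whichever implementation was selected above, callers use it the same way
-+# (a sketch with hypothetical traceback objects):
-+#
-+#     tb = tb_set_next(tb, fake_tb)  # splice a rewritten frame into the chain
-+#     tb = tb_set_next(tb, None)     # or truncate the chain at this frame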
-diff --git a/third_party/python/Jinja2/jinja2/defaults.py b/third_party/python/Jinja2/jinja2/defaults.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/defaults.py
-@@ -0,0 +1,44 @@
-+# -*- coding: utf-8 -*-
-+from ._compat import range_type
-+from .filters import FILTERS as DEFAULT_FILTERS  # noqa: F401
-+from .tests import TESTS as DEFAULT_TESTS  # noqa: F401
-+from .utils import Cycler
-+from .utils import generate_lorem_ipsum
-+from .utils import Joiner
-+from .utils import Namespace
-+
-+# defaults for the parser / lexer
-+BLOCK_START_STRING = "{%"
-+BLOCK_END_STRING = "%}"
-+VARIABLE_START_STRING = "{{"
-+VARIABLE_END_STRING = "}}"
-+COMMENT_START_STRING = "{#"
-+COMMENT_END_STRING = "#}"
-+LINE_STATEMENT_PREFIX = None
-+LINE_COMMENT_PREFIX = None
-+TRIM_BLOCKS = False
-+LSTRIP_BLOCKS = False
-+NEWLINE_SEQUENCE = "\n"
-+KEEP_TRAILING_NEWLINE = False
-+
-+# default filters, tests and namespace
-+
-+DEFAULT_NAMESPACE = {
-+    "range": range_type,
-+    "dict": dict,
-+    "lipsum": generate_lorem_ipsum,
-+    "cycler": Cycler,
-+    "joiner": Joiner,
-+    "namespace": Namespace,
-+}
-+
-+# default policies
-+DEFAULT_POLICIES = {
-+    "compiler.ascii_str": True,
-+    "urlize.rel": "noopener",
-+    "urlize.target": None,
-+    "truncate.leeway": 5,
-+    "json.dumps_function": None,
-+    "json.dumps_kwargs": {"sort_keys": True},
-+    "ext.i18n.trimmed": False,
-+}
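-+
-+# Both mappings are copied per ``Environment`` instance, so one environment
-+# can be tuned without affecting others. A small sketch, assuming the
-+# ``jinja2`` package is importable:
-+#
-+#     from jinja2 import Environment
-+#     env = Environment()
-+#     env.policies["truncate.leeway"] = 0   # make the truncate filter exact
-+#     env.globals["version"] = "1.0"        # hypothetical extra global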
-diff --git a/third_party/python/Jinja2/jinja2/environment.py b/third_party/python/Jinja2/jinja2/environment.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/environment.py
-@@ -0,0 +1,1362 @@
-+# -*- coding: utf-8 -*-
-+"""Classes for managing templates and their runtime and compile time
-+options.
-+"""
-+import os
-+import sys
-+import weakref
-+from functools import partial
-+from functools import reduce
-+
-+from markupsafe import Markup
-+
-+from . import nodes
-+from ._compat import encode_filename
-+from ._compat import implements_iterator
-+from ._compat import implements_to_string
-+from ._compat import iteritems
-+from ._compat import PY2
-+from ._compat import PYPY
-+from ._compat import reraise
-+from ._compat import string_types
-+from ._compat import text_type
-+from .compiler import CodeGenerator
-+from .compiler import generate
-+from .defaults import BLOCK_END_STRING
-+from .defaults import BLOCK_START_STRING
-+from .defaults import COMMENT_END_STRING
-+from .defaults import COMMENT_START_STRING
-+from .defaults import DEFAULT_FILTERS
-+from .defaults import DEFAULT_NAMESPACE
-+from .defaults import DEFAULT_POLICIES
-+from .defaults import DEFAULT_TESTS
-+from .defaults import KEEP_TRAILING_NEWLINE
-+from .defaults import LINE_COMMENT_PREFIX
-+from .defaults import LINE_STATEMENT_PREFIX
-+from .defaults import LSTRIP_BLOCKS
-+from .defaults import NEWLINE_SEQUENCE
-+from .defaults import TRIM_BLOCKS
-+from .defaults import VARIABLE_END_STRING
-+from .defaults import VARIABLE_START_STRING
-+from .exceptions import TemplateNotFound
-+from .exceptions import TemplateRuntimeError
-+from .exceptions import TemplatesNotFound
-+from .exceptions import TemplateSyntaxError
-+from .exceptions import UndefinedError
-+from .lexer import get_lexer
-+from .lexer import TokenStream
-+from .nodes import EvalContext
-+from .parser import Parser
-+from .runtime import Context
-+from .runtime import new_context
-+from .runtime import Undefined
-+from .utils import concat
-+from .utils import consume
-+from .utils import have_async_gen
-+from .utils import import_string
-+from .utils import internalcode
-+from .utils import LRUCache
-+from .utils import missing
-+
-+# for direct template usage we have up to ten living environments
-+_spontaneous_environments = LRUCache(10)
-+
-+
-+def get_spontaneous_environment(cls, *args):
-+    """Return a new spontaneous environment. A spontaneous environment
-+    is used for templates created directly rather than through an
-+    existing environment.
-+
-+    :param cls: Environment class to create.
-+    :param args: Positional arguments passed to environment.
-+    """
-+    key = (cls, args)
-+
-+    try:
-+        return _spontaneous_environments[key]
-+    except KeyError:
-+        _spontaneous_environments[key] = env = cls(*args)
-+        env.shared = True
-+        return env
-+
-+
-+def create_cache(size):
-+    """Return the cache class for the given size."""
-+    if size == 0:
-+        return None
-+    if size < 0:
-+        return {}
-+    return LRUCache(size)
-+
-+
-+def copy_cache(cache):
-+    """Create an empty copy of the given cache."""
-+    if cache is None:
-+        return None
-+    elif type(cache) is dict:
-+        return {}
-+    return LRUCache(cache.capacity)
-+
-+
-+def load_extensions(environment, extensions):
-+    """Load the extensions from the list and bind it to the environment.
-+    Returns a dict of instantiated environments.
-+    """
-+    result = {}
-+    for extension in extensions:
-+        if isinstance(extension, string_types):
-+            extension = import_string(extension)
-+        result[extension.identifier] = extension(environment)
-+    return result
-+
-+
-+def fail_for_missing_callable(string, name):
-+    msg = string % name
-+    if isinstance(name, Undefined):
-+        try:
-+            name._fail_with_undefined_error()
-+        except Exception as e:
-+            msg = "%s (%s; did you forget to quote the callable name?)" % (msg, e)
-+    raise TemplateRuntimeError(msg)
-+
-+
-+def _environment_sanity_check(environment):
-+    """Perform a sanity check on the environment."""
-+    assert issubclass(
-+        environment.undefined, Undefined
-+    ), "undefined must be a subclass of undefined because filters depend on it."
-+    assert (
-+        environment.block_start_string
-+        != environment.variable_start_string
-+        != environment.comment_start_string
-+    ), "block, variable and comment start strings must be different"
-+    assert environment.newline_sequence in (
-+        "\r",
-+        "\r\n",
-+        "\n",
-+    ), "newline_sequence set to unknown line ending string."
-+    return environment
-+
-+
-+class Environment(object):
-+    r"""The core component of Jinja is the `Environment`.  It contains
-+    important shared variables like configuration, filters, tests,
-+    globals and others.  Instances of this class may be modified if
-+    they are not shared and if no template was loaded so far.
-+    Modifications on environments after the first template was loaded
-+    will lead to surprising effects and undefined behavior.
-+
-+    Here are the possible initialization parameters:
-+
-+        `block_start_string`
-+            The string marking the beginning of a block.  Defaults to ``'{%'``.
-+
-+        `block_end_string`
-+            The string marking the end of a block.  Defaults to ``'%}'``.
-+
-+        `variable_start_string`
-+            The string marking the beginning of a print statement.
-+            Defaults to ``'{{'``.
-+
-+        `variable_end_string`
-+            The string marking the end of a print statement.  Defaults to
-+            ``'}}'``.
-+
-+        `comment_start_string`
-+            The string marking the beginning of a comment.  Defaults to ``'{#'``.
-+
-+        `comment_end_string`
-+            The string marking the end of a comment.  Defaults to ``'#}'``.
-+
-+        `line_statement_prefix`
-+            If given and a string, this will be used as prefix for line based
-+            statements.  See also :ref:`line-statements`.
-+
-+        `line_comment_prefix`
-+            If given and a string, this will be used as prefix for line based
-+            comments.  See also :ref:`line-statements`.
-+
-+            .. versionadded:: 2.2
-+
-+        `trim_blocks`
-+            If this is set to ``True`` the first newline after a block is
-+            removed (block, not variable tag!).  Defaults to `False`.
-+
-+        `lstrip_blocks`
-+            If this is set to ``True`` leading spaces and tabs are stripped
-+            from the start of a line to a block.  Defaults to `False`.
-+
-+        `newline_sequence`
-+            The sequence that starts a newline.  Must be one of ``'\r'``,
-+            ``'\n'`` or ``'\r\n'``.  The default is ``'\n'`` which is a
-+            useful default for Linux and OS X systems as well as web
-+            applications.
-+
-+        `keep_trailing_newline`
-+            Preserve the trailing newline when rendering templates.
-+            The default is ``False``, which causes a single newline,
-+            if present, to be stripped from the end of the template.
-+
-+            .. versionadded:: 2.7
-+
-+        `extensions`
-+            List of Jinja extensions to use.  This can either be import paths
-+            as strings or extension classes.  For more information have a
-+            look at :ref:`the extensions documentation <jinja-extensions>`.
-+
-+        `optimized`
-+            should the optimizer be enabled?  Default is ``True``.
-+
-+        `undefined`
-+            :class:`Undefined` or a subclass of it that is used to represent
-+            undefined values in the template.
-+
-+        `finalize`
-+            A callable that can be used to process the result of a variable
-+            expression before it is output.  For example one can convert
-+            ``None`` implicitly into an empty string here.
-+
-+        `autoescape`
-+            If set to ``True`` the XML/HTML autoescaping feature is enabled by
-+            default.  For more details about autoescaping see
-+            :class:`~markupsafe.Markup`.  As of Jinja 2.4 this can also
-+            be a callable that is passed the template name and has to
-+            return ``True`` or ``False`` depending on whether autoescape
-+            should be enabled by default.
-+
-+            .. versionchanged:: 2.4
-+               `autoescape` can now be a function
-+
-+        `loader`
-+            The template loader for this environment.
-+
-+        `cache_size`
-+            The size of the cache.  Per default this is ``400`` which means
-+            that if more than 400 templates are loaded the loader will clean
-+            out the least recently used template.  If the cache size is set to
-+            ``0`` templates are recompiled all the time, if the cache size is
-+            ``-1`` the cache will not be cleaned.
-+
-+            .. versionchanged:: 2.8
-+               The cache size was increased to 400 from a low 50.
-+
-+        `auto_reload`
-+            Some loaders load templates from locations where the template
-+            sources may change (i.e. file system or database).  If
-+            ``auto_reload`` is set to ``True`` (default) every time a template is
-+            requested the loader checks if the source changed and if yes, it
-+            will reload the template.  For higher performance it's possible to
-+            disable that.
-+
-+        `bytecode_cache`
-+            If set to a bytecode cache object, this object will provide a
-+            cache for the internal Jinja bytecode so that templates don't
-+            have to be parsed if they were not changed.
-+
-+            See :ref:`bytecode-cache` for more information.
-+
-+        `enable_async`
-+            If set to true this enables async template execution which allows
-+            you to take advantage of newer Python features.  This requires
-+            Python 3.6 or later.
-+    """
-+
-+    #: if this environment is sandboxed.  Modifying this variable won't make
-+    #: the environment sandboxed though.  For a real sandboxed environment
-+    #: have a look at jinja2.sandbox.  This flag alone controls the code
-+    #: generation by the compiler.
-+    sandboxed = False
-+
-+    #: True if the environment is just an overlay
-+    overlayed = False
-+
-+    #: the environment this environment is linked to if it is an overlay
-+    linked_to = None
-+
-+    #: shared environments have this set to `True`.  A shared environment
-+    #: must not be modified
-+    shared = False
-+
-+    #: the class that is used for code generation.  See
-+    #: :class:`~jinja2.compiler.CodeGenerator` for more information.
-+    code_generator_class = CodeGenerator
-+
-+    #: the context class that is used for templates.  See
-+    #: :class:`~jinja2.runtime.Context` for more information.
-+    context_class = Context
-+
-+    def __init__(
-+        self,
-+        block_start_string=BLOCK_START_STRING,
-+        block_end_string=BLOCK_END_STRING,
-+        variable_start_string=VARIABLE_START_STRING,
-+        variable_end_string=VARIABLE_END_STRING,
-+        comment_start_string=COMMENT_START_STRING,
-+        comment_end_string=COMMENT_END_STRING,
-+        line_statement_prefix=LINE_STATEMENT_PREFIX,
-+        line_comment_prefix=LINE_COMMENT_PREFIX,
-+        trim_blocks=TRIM_BLOCKS,
-+        lstrip_blocks=LSTRIP_BLOCKS,
-+        newline_sequence=NEWLINE_SEQUENCE,
-+        keep_trailing_newline=KEEP_TRAILING_NEWLINE,
-+        extensions=(),
-+        optimized=True,
-+        undefined=Undefined,
-+        finalize=None,
-+        autoescape=False,
-+        loader=None,
-+        cache_size=400,
-+        auto_reload=True,
-+        bytecode_cache=None,
-+        enable_async=False,
-+    ):
-+        # !!Important notice!!
-+        #   The constructor accepts quite a few arguments that should be
-+        #   passed by keyword rather than position.  However it's important to
-+        #   not change the order of arguments because it's used at least
-+        #   internally in those cases:
-+        #       -   spontaneous environments (i18n extension and Template)
-+        #       -   unittests
-+        #   If parameter changes are required only add parameters at the end
-+        #   and don't change the arguments (or the defaults!) of the arguments
-+        #   existing already.
-+
-+        # lexer / parser information
-+        self.block_start_string = block_start_string
-+        self.block_end_string = block_end_string
-+        self.variable_start_string = variable_start_string
-+        self.variable_end_string = variable_end_string
-+        self.comment_start_string = comment_start_string
-+        self.comment_end_string = comment_end_string
-+        self.line_statement_prefix = line_statement_prefix
-+        self.line_comment_prefix = line_comment_prefix
-+        self.trim_blocks = trim_blocks
-+        self.lstrip_blocks = lstrip_blocks
-+        self.newline_sequence = newline_sequence
-+        self.keep_trailing_newline = keep_trailing_newline
-+
-+        # runtime information
-+        self.undefined = undefined
-+        self.optimized = optimized
-+        self.finalize = finalize
-+        self.autoescape = autoescape
-+
-+        # defaults
-+        self.filters = DEFAULT_FILTERS.copy()
-+        self.tests = DEFAULT_TESTS.copy()
-+        self.globals = DEFAULT_NAMESPACE.copy()
-+
-+        # set the loader provided
-+        self.loader = loader
-+        self.cache = create_cache(cache_size)
-+        self.bytecode_cache = bytecode_cache
-+        self.auto_reload = auto_reload
-+
-+        # configurable policies
-+        self.policies = DEFAULT_POLICIES.copy()
-+
-+        # load extensions
-+        self.extensions = load_extensions(self, extensions)
-+
-+        self.enable_async = enable_async
-+        self.is_async = self.enable_async and have_async_gen
-+        if self.is_async:
-+            # runs patch_all() to enable async support
-+            from . import asyncsupport  # noqa: F401
-+
-+        _environment_sanity_check(self)
-+
-+    def add_extension(self, extension):
-+        """Adds an extension after the environment was created.
-+
-+        .. versionadded:: 2.5
-+        """
-+        self.extensions.update(load_extensions(self, [extension]))
-+
-+    def extend(self, **attributes):
-+        """Add the items to the instance of the environment if they do not exist
-+        yet.  This is used by :ref:`extensions <writing-extensions>` to register
-+        callbacks and configuration values without breaking inheritance.
-+        """
-+        for key, value in iteritems(attributes):
-+            if not hasattr(self, key):
-+                setattr(self, key, value)
-+
-+    def overlay(
-+        self,
-+        block_start_string=missing,
-+        block_end_string=missing,
-+        variable_start_string=missing,
-+        variable_end_string=missing,
-+        comment_start_string=missing,
-+        comment_end_string=missing,
-+        line_statement_prefix=missing,
-+        line_comment_prefix=missing,
-+        trim_blocks=missing,
-+        lstrip_blocks=missing,
-+        extensions=missing,
-+        optimized=missing,
-+        undefined=missing,
-+        finalize=missing,
-+        autoescape=missing,
-+        loader=missing,
-+        cache_size=missing,
-+        auto_reload=missing,
-+        bytecode_cache=missing,
-+    ):
-+        """Create a new overlay environment that shares all the data with the
-+        current environment except for cache and the overridden attributes.
-+        Extensions cannot be removed for an overlayed environment.  An overlayed
-+        environment automatically gets all the extensions of the environment it
-+        is linked to plus optional extra extensions.
-+
-+        Creating overlays should happen after the initial environment was set
-+        up completely.  Not all attributes are truly linked; some are just
-+        copied over, so modifications on the original environment may not
-+        be reflected in the overlay.
-+        """
-+        args = dict(locals())
-+        del args["self"], args["cache_size"], args["extensions"]
-+
-+        rv = object.__new__(self.__class__)
-+        rv.__dict__.update(self.__dict__)
-+        rv.overlayed = True
-+        rv.linked_to = self
-+
-+        for key, value in iteritems(args):
-+            if value is not missing:
-+                setattr(rv, key, value)
-+
-+        if cache_size is not missing:
-+            rv.cache = create_cache(cache_size)
-+        else:
-+            rv.cache = copy_cache(self.cache)
-+
-+        rv.extensions = {}
-+        for key, value in iteritems(self.extensions):
-+            rv.extensions[key] = value.bind(rv)
-+        if extensions is not missing:
-+            rv.extensions.update(load_extensions(rv, extensions))
-+
-+        return _environment_sanity_check(rv)
-+
-+    lexer = property(get_lexer, doc="The lexer for this environment.")
-+
-+    def iter_extensions(self):
-+        """Iterates over the extensions by priority."""
-+        return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
-+
-+    def getitem(self, obj, argument):
-+        """Get an item or attribute of an object but prefer the item."""
-+        try:
-+            return obj[argument]
-+        except (AttributeError, TypeError, LookupError):
-+            if isinstance(argument, string_types):
-+                try:
-+                    attr = str(argument)
-+                except Exception:
-+                    pass
-+                else:
-+                    try:
-+                        return getattr(obj, attr)
-+                    except AttributeError:
-+                        pass
-+            return self.undefined(obj=obj, name=argument)
-+
-+    def getattr(self, obj, attribute):
-+        """Get an item or attribute of an object but prefer the attribute.
-+        Unlike :meth:`getitem` the attribute *must* be a bytestring.
-+        """
-+        try:
-+            return getattr(obj, attribute)
-+        except AttributeError:
-+            pass
-+        try:
-+            return obj[attribute]
-+        except (TypeError, LookupError, AttributeError):
-+            return self.undefined(obj=obj, name=attribute)
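-+
-+    # A quick sketch of how the two lookups differ (hypothetical data):
-+    #
-+    #     env.getitem({"name": "x"}, "name")   # -> "x" (item preferred)
-+    #     env.getattr({"name": "x"}, "items")  # -> the dict.items method
-+    #                                          #    (attribute preferred)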
-+
-+    def call_filter(
-+        self, name, value, args=None, kwargs=None, context=None, eval_ctx=None
-+    ):
-+        """Invokes a filter on a value the same way the compiler does it.
-+
-+        Note that on Python 3 this might return a coroutine in case the
-+        filter is running from an environment in async mode and the filter
-+        supports async execution.  It's your responsibility to await this
-+        if needed.
-+
-+        .. versionadded:: 2.7
-+        """
-+        func = self.filters.get(name)
-+        if func is None:
-+            fail_for_missing_callable("no filter named %r", name)
-+        args = [value] + list(args or ())
-+        if getattr(func, "contextfilter", False):
-+            if context is None:
-+                raise TemplateRuntimeError(
-+                    "Attempted to invoke context filter without context"
-+                )
-+            args.insert(0, context)
-+        elif getattr(func, "evalcontextfilter", False):
-+            if eval_ctx is None:
-+                if context is not None:
-+                    eval_ctx = context.eval_ctx
-+                else:
-+                    eval_ctx = EvalContext(self)
-+            args.insert(0, eval_ctx)
-+        elif getattr(func, "environmentfilter", False):
-+            args.insert(0, self)
-+        return func(*args, **(kwargs or {}))
-+
-+    def call_test(self, name, value, args=None, kwargs=None):
-+        """Invokes a test on a value the same way the compiler does it.
-+
-+        .. versionadded:: 2.7
-+        """
-+        func = self.tests.get(name)
-+        if func is None:
-+            fail_for_missing_callable("no test named %r", name)
-+        return func(value, *(args or ()), **(kwargs or {}))
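-+
-+    # Both helpers mirror what compiled templates do at runtime. With the
-+    # default filters and tests, for example:
-+    #
-+    #     env.call_filter("upper", "hello")  # -> "HELLO"
-+    #     env.call_test("defined", 42)       # -> True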
-+
-+    @internalcode
-+    def parse(self, source, name=None, filename=None):
-+        """Parse the sourcecode and return the abstract syntax tree.  This
-+        tree of nodes is used by the compiler to convert the template into
-+        executable source- or bytecode.  This is useful for debugging or to
-+        extract information from templates.
-+
-+        If you are :ref:`developing Jinja extensions <writing-extensions>`
-+        this gives you a good overview of the node tree generated.
-+        """
-+        try:
-+            return self._parse(source, name, filename)
-+        except TemplateSyntaxError:
-+            self.handle_exception(source=source)
-+
-+    def _parse(self, source, name, filename):
-+        """Internal parsing function used by `parse` and `compile`."""
-+        return Parser(self, source, name, encode_filename(filename)).parse()
-+
-+    def lex(self, source, name=None, filename=None):
-+        """Lex the given sourcecode and return a generator that yields
-+        tokens as tuples in the form ``(lineno, token_type, value)``.
-+        This can be useful for :ref:`extension development <writing-extensions>`
-+        and debugging templates.
-+
-+        This does not perform preprocessing.  If you want the preprocessing
-+        of the extensions to be applied you have to filter source through
-+        the :meth:`preprocess` method.
-+        """
-+        source = text_type(source)
-+        try:
-+            return self.lexer.tokeniter(source, name, filename)
-+        except TemplateSyntaxError:
-+            self.handle_exception(source=source)
-+
-+    def preprocess(self, source, name=None, filename=None):
-+        """Preprocesses the source with all extensions.  This is automatically
-+        called for all parsing and compiling methods but *not* for :meth:`lex`
-+        because there you usually only want the actual source tokenized.
-+        """
-+        return reduce(
-+            lambda s, e: e.preprocess(s, name, filename),
-+            self.iter_extensions(),
-+            text_type(source),
-+        )
-+
-+    def _tokenize(self, source, name, filename=None, state=None):
-+        """Called by the parser to do the preprocessing and filtering
-+        for all the extensions.  Returns a :class:`~jinja2.lexer.TokenStream`.
-+        """
-+        source = self.preprocess(source, name, filename)
-+        stream = self.lexer.tokenize(source, name, filename, state)
-+        for ext in self.iter_extensions():
-+            stream = ext.filter_stream(stream)
-+            if not isinstance(stream, TokenStream):
-+                stream = TokenStream(stream, name, filename)
-+        return stream
-+
-+    def _generate(self, source, name, filename, defer_init=False):
-+        """Internal hook that can be overridden to hook a different generate
-+        method in.
-+
-+        .. versionadded:: 2.5
-+        """
-+        return generate(
-+            source,
-+            self,
-+            name,
-+            filename,
-+            defer_init=defer_init,
-+            optimized=self.optimized,
-+        )
-+
-+    def _compile(self, source, filename):
-+        """Internal hook that can be overridden to hook a different compile
-+        method in.
-+
-+        .. versionadded:: 2.5
-+        """
-+        return compile(source, filename, "exec")
-+
-+    @internalcode
-+    def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
-+        """Compile a node or template source code.  The `name` parameter is
-+        the load name of the template after it was joined using
-+        :meth:`join_path` if necessary, not the filename on the file system.
-+        The `filename` parameter is the estimated filename of the template on
-+        the file system.  If the template came from a database or memory this
-+        can be omitted.
-+
-+        The return value of this method is a python code object.  If the `raw`
-+        parameter is `True` the return value will be a string with python
-+        code equivalent to the bytecode returned otherwise.  This method is
-+        mainly used internally.
-+
-+        `defer_init` is used internally to aid the module code generator.  This
-+        causes the generated code to be importable without the global
-+        environment variable being set.
-+
-+        .. versionadded:: 2.4
-+           `defer_init` parameter added.
-+        """
-+        source_hint = None
-+        try:
-+            if isinstance(source, string_types):
-+                source_hint = source
-+                source = self._parse(source, name, filename)
-+            source = self._generate(source, name, filename, defer_init=defer_init)
-+            if raw:
-+                return source
-+            if filename is None:
-+                filename = "<template>"
-+            else:
-+                filename = encode_filename(filename)
-+            return self._compile(source, filename)
-+        except TemplateSyntaxError:
-+            self.handle_exception(source=source_hint)
-+
-+    def compile_expression(self, source, undefined_to_none=True):
-+        """A handy helper method that returns a callable that accepts keyword
-+        arguments that appear as variables in the expression.  If called it
-+        returns the result of the expression.
-+
-+        This is useful if applications want to use the same rules as Jinja
-+        in template "configuration files" or similar situations.
-+
-+        Example usage:
-+
-+        >>> env = Environment()
-+        >>> expr = env.compile_expression('foo == 42')
-+        >>> expr(foo=23)
-+        False
-+        >>> expr(foo=42)
-+        True
-+
-+        Per default the return value is converted to `None` if the
-+        expression returns an undefined value.  This can be changed
-+        by setting `undefined_to_none` to `False`.
-+
-+        >>> env.compile_expression('var')() is None
-+        True
-+        >>> env.compile_expression('var', undefined_to_none=False)()
-+        Undefined
-+
-+        .. versionadded:: 2.1
-+        """
-+        parser = Parser(self, source, state="variable")
-+        try:
-+            expr = parser.parse_expression()
-+            if not parser.stream.eos:
-+                raise TemplateSyntaxError(
-+                    "chunk after expression", parser.stream.current.lineno, None, None
-+                )
-+            expr.set_environment(self)
-+        except TemplateSyntaxError:
-+            if sys.exc_info() is not None:
-+                self.handle_exception(source=source)
-+
-+        body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)]
-+        template = self.from_string(nodes.Template(body, lineno=1))
-+        return TemplateExpression(template, undefined_to_none)
-+
-+    def compile_templates(
-+        self,
-+        target,
-+        extensions=None,
-+        filter_func=None,
-+        zip="deflated",
-+        log_function=None,
-+        ignore_errors=True,
-+        py_compile=False,
-+    ):
-+        """Finds all the templates the loader can find, compiles them
-+        and stores them in `target`.  If `zip` is `None`, instead of in a
-+        zipfile, the templates will be stored in a directory.
-+        By default a deflate zip algorithm is used. To switch to
-+        the stored algorithm, `zip` can be set to ``'stored'``.
-+
-+        `extensions` and `filter_func` are passed to :meth:`list_templates`.
-+        Each template returned will be compiled to the target folder or
-+        zipfile.
-+
-+        By default template compilation errors are ignored.  In case a
-+        log function is provided, errors are logged.  If you want template
-+        syntax errors to abort the compilation you can set `ignore_errors`
-+        to `False` and you will get an exception on syntax errors.
-+
-+        If `py_compile` is set to `True` .pyc files will be written to the
-+        target instead of standard .py files.  This flag does nothing on
-+        PyPy and Python 3, where .pyc files are not picked up automatically
-+        and don't provide much benefit.
-+
-+        .. versionadded:: 2.4
-+        """
-+        from .loaders import ModuleLoader
-+
-+        if log_function is None:
-+
-+            def log_function(x):
-+                pass
-+
-+        if py_compile:
-+            if not PY2 or PYPY:
-+                import warnings
-+
-+                warnings.warn(
-+                    "'py_compile=True' has no effect on PyPy or Python"
-+                    " 3 and will be removed in version 3.0",
-+                    DeprecationWarning,
-+                    stacklevel=2,
-+                )
-+                py_compile = False
-+            else:
-+                import imp
-+                import marshal
-+
-+                py_header = imp.get_magic() + u"\xff\xff\xff\xff".encode("iso-8859-15")
-+
-+                # Python 3.3 added a source filesize to the header
-+                if sys.version_info >= (3, 3):
-+                    py_header += u"\x00\x00\x00\x00".encode("iso-8859-15")
-+
-+        def write_file(filename, data):
-+            if zip:
-+                info = ZipInfo(filename)
-+                info.external_attr = 0o755 << 16
-+                zip_file.writestr(info, data)
-+            else:
-+                if isinstance(data, text_type):
-+                    data = data.encode("utf8")
-+
-+                with open(os.path.join(target, filename), "wb") as f:
-+                    f.write(data)
-+
-+        if zip is not None:
-+            from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
-+
-+            zip_file = ZipFile(
-+                target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]
-+            )
-+            log_function('Compiling into Zip archive "%s"' % target)
-+        else:
-+            if not os.path.isdir(target):
-+                os.makedirs(target)
-+            log_function('Compiling into folder "%s"' % target)
-+
-+        try:
-+            for name in self.list_templates(extensions, filter_func):
-+                source, filename, _ = self.loader.get_source(self, name)
-+                try:
-+                    code = self.compile(source, name, filename, True, True)
-+                except TemplateSyntaxError as e:
-+                    if not ignore_errors:
-+                        raise
-+                    log_function('Could not compile "%s": %s' % (name, e))
-+                    continue
-+
-+                filename = ModuleLoader.get_module_filename(name)
-+
-+                if py_compile:
-+                    c = self._compile(code, encode_filename(filename))
-+                    write_file(filename + "c", py_header + marshal.dumps(c))
-+                    log_function('Byte-compiled "%s" as %s' % (name, filename + "c"))
-+                else:
-+                    write_file(filename, code)
-+                    log_function('Compiled "%s" as %s' % (name, filename))
-+        finally:
-+            if zip:
-+                zip_file.close()
-+
-+        log_function("Finished compiling templates")
-+
-+    def list_templates(self, extensions=None, filter_func=None):
-+        """Returns a list of templates for this environment.  This requires
-+        that the loader supports the
-+        :meth:`~BaseLoader.list_templates` method.
-+
-+        If there are other files in the template folder besides the
-+        actual templates, the returned list can be filtered.  There are two
-+        ways: either `extensions` is set to a list of file extensions for
-+        templates, or a `filter_func` can be provided which is a callable that
-+        is passed a template name and should return `True` if it should end up
-+        in the result list.
-+
-+        If the loader does not support that, a :exc:`TypeError` is raised.
-+
-+        .. versionadded:: 2.4
-+        """
-+        names = self.loader.list_templates()
-+
-+        if extensions is not None:
-+            if filter_func is not None:
-+                raise TypeError(
-+                    "either extensions or filter_func can be passed, but not both"
-+                )
-+
-+            def filter_func(x):
-+                return "." in x and x.rsplit(".", 1)[1] in extensions
-+
-+        if filter_func is not None:
-+            names = [name for name in names if filter_func(name)]
-+
-+        return names
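-+
-+    # For example, with a configured loader (a sketch):
-+    #
-+    #     env.list_templates(extensions=("html", "txt"))
-+    #     env.list_templates(filter_func=lambda name: name.startswith("emails/"))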
-+
-+    def handle_exception(self, source=None):
-+        """Exception handling helper.  This is used internally to either raise
-+        rewritten exceptions or return a rendered traceback for the template.
-+        """
-+        from .debug import rewrite_traceback_stack
-+
-+        reraise(*rewrite_traceback_stack(source=source))
-+
-+    def join_path(self, template, parent):
-+        """Join a template with the parent.  By default all the lookups are
-+        relative to the loader root so this method returns the `template`
-+        parameter unchanged, but if the paths should be relative to the
-+        parent template, this function can be used to calculate the real
-+        template name.
-+
-+        Subclasses may override this method and implement template path
-+        joining here.
-+        """
-+        return template
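-+
-+    # A sketch of parent-relative lookups via a subclass (hypothetical
-+    # ``RelEnvironment``):
-+    #
-+    #     import posixpath
-+    #
-+    #     class RelEnvironment(Environment):
-+    #         def join_path(self, template, parent):
-+    #             return posixpath.join(posixpath.dirname(parent), template)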
-+
-+    @internalcode
-+    def _load_template(self, name, globals):
-+        if self.loader is None:
-+            raise TypeError("no loader for this environment specified")
-+        cache_key = (weakref.ref(self.loader), name)
-+        if self.cache is not None:
-+            template = self.cache.get(cache_key)
-+            if template is not None and (
-+                not self.auto_reload or template.is_up_to_date
-+            ):
-+                return template
-+        template = self.loader.load(self, name, globals)
-+        if self.cache is not None:
-+            self.cache[cache_key] = template
-+        return template
-+
-+    @internalcode
-+    def get_template(self, name, parent=None, globals=None):
-+        """Load a template from the loader.  If a loader is configured this
-+        method asks the loader for the template and returns a :class:`Template`.
-+        If the `parent` parameter is not `None`, :meth:`join_path` is called
-+        to get the real template name before loading.
-+
-+        The `globals` parameter can be used to provide template wide globals.
-+        These variables are available in the context at render time.
-+
-+        If the template does not exist a :exc:`TemplateNotFound` exception is
-+        raised.
-+
-+        .. versionchanged:: 2.4
-+           If `name` is a :class:`Template` object it is returned from the
-+           function unchanged.
-+        """
-+        if isinstance(name, Template):
-+            return name
-+        if parent is not None:
-+            name = self.join_path(name, parent)
-+        return self._load_template(name, self.make_globals(globals))
-+
-+    @internalcode
-+    def select_template(self, names, parent=None, globals=None):
-+        """Works like :meth:`get_template` but tries a number of templates
-+        before it fails.  If it cannot find any of the templates, it will
-+        raise a :exc:`TemplatesNotFound` exception.
-+
-+        .. versionchanged:: 2.11
-+            If names is :class:`Undefined`, an :exc:`UndefinedError` is
-+            raised instead. If no templates were found and names
-+            contains :class:`Undefined`, the message is more helpful.
-+
-+        .. versionchanged:: 2.4
-+           If `names` contains a :class:`Template` object it is returned
-+           from the function unchanged.
-+
-+        .. versionadded:: 2.3
-+        """
-+        if isinstance(names, Undefined):
-+            names._fail_with_undefined_error()
-+
-+        if not names:
-+            raise TemplatesNotFound(
-+                message=u"Tried to select from an empty list " u"of templates."
-+            )
-+        globals = self.make_globals(globals)
-+        for name in names:
-+            if isinstance(name, Template):
-+                return name
-+            if parent is not None:
-+                name = self.join_path(name, parent)
-+            try:
-+                return self._load_template(name, globals)
-+            except (TemplateNotFound, UndefinedError):
-+                pass
-+        raise TemplatesNotFound(names)
-+
-+    @internalcode
-+    def get_or_select_template(self, template_name_or_list, parent=None, globals=None):
-+        """Does a typecheck and dispatches to :meth:`select_template`
-+        if an iterable of template names is given, otherwise to
-+        :meth:`get_template`.
-+
-+        .. versionadded:: 2.3
-+        """
-+        if isinstance(template_name_or_list, (string_types, Undefined)):
-+            return self.get_template(template_name_or_list, parent, globals)
-+        elif isinstance(template_name_or_list, Template):
-+            return template_name_or_list
-+        return self.select_template(template_name_or_list, parent, globals)
-+
-+    def from_string(self, source, globals=None, template_class=None):
-+        """Load a template from a string.  This parses the source given and
-+        returns a :class:`Template` object.
-+        """
-+        globals = self.make_globals(globals)
-+        cls = template_class or self.template_class
-+        return cls.from_code(self, self.compile(source), globals, None)
-+
-+    def make_globals(self, d):
-+        """Return a dict for the globals."""
-+        if not d:
-+            return self.globals
-+        return dict(self.globals, **d)
-+
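-+# Putting the pieces together, typical use of ``Environment`` looks like
-+# this (a sketch, assuming a ``templates/`` directory on disk):
-+#
-+#     from jinja2 import Environment, FileSystemLoader
-+#     env = Environment(loader=FileSystemLoader("templates"), autoescape=True)
-+#     env.get_template("index.html").render(user="jdoe")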
-+
-+class Template(object):
-+    """The central template object.  This class represents a compiled template
-+    and is used to evaluate it.
-+
-+    Normally the template object is generated from an :class:`Environment` but
-+    it also has a constructor that makes it possible to create a template
-+    instance directly using the constructor.  It takes the same arguments as
-+    the environment constructor but it's not possible to specify a loader.
-+
-+    Every template object has a few methods and members that are guaranteed
-+    to exist.  However it's important that a template object should be
-+    considered immutable.  Modifications on the object are not supported.
-+
-+    Template objects created from the constructor rather than an environment
-+    do have an `environment` attribute that points to a temporary environment
-+    that is probably shared with other templates created with the constructor
-+    and compatible settings.
-+
-+    >>> template = Template('Hello {{ name }}!')
-+    >>> template.render(name='John Doe') == u'Hello John Doe!'
-+    True
-+    >>> stream = template.stream(name='John Doe')
-+    >>> next(stream) == u'Hello John Doe!'
-+    True
-+    >>> next(stream)
-+    Traceback (most recent call last):
-+        ...
-+    StopIteration
-+    """
-+
-+    #: Type of environment to create when creating a template directly
-+    #: rather than through an existing environment.
-+    environment_class = Environment
-+
-+    def __new__(
-+        cls,
-+        source,
-+        block_start_string=BLOCK_START_STRING,
-+        block_end_string=BLOCK_END_STRING,
-+        variable_start_string=VARIABLE_START_STRING,
-+        variable_end_string=VARIABLE_END_STRING,
-+        comment_start_string=COMMENT_START_STRING,
-+        comment_end_string=COMMENT_END_STRING,
-+        line_statement_prefix=LINE_STATEMENT_PREFIX,
-+        line_comment_prefix=LINE_COMMENT_PREFIX,
-+        trim_blocks=TRIM_BLOCKS,
-+        lstrip_blocks=LSTRIP_BLOCKS,
-+        newline_sequence=NEWLINE_SEQUENCE,
-+        keep_trailing_newline=KEEP_TRAILING_NEWLINE,
-+        extensions=(),
-+        optimized=True,
-+        undefined=Undefined,
-+        finalize=None,
-+        autoescape=False,
-+        enable_async=False,
-+    ):
-+        env = get_spontaneous_environment(
-+            cls.environment_class,
-+            block_start_string,
-+            block_end_string,
-+            variable_start_string,
-+            variable_end_string,
-+            comment_start_string,
-+            comment_end_string,
-+            line_statement_prefix,
-+            line_comment_prefix,
-+            trim_blocks,
-+            lstrip_blocks,
-+            newline_sequence,
-+            keep_trailing_newline,
-+            frozenset(extensions),
-+            optimized,
-+            undefined,
-+            finalize,
-+            autoescape,
-+            None,
-+            0,
-+            False,
-+            None,
-+            enable_async,
-+        )
-+        return env.from_string(source, template_class=cls)
-+
-+    @classmethod
-+    def from_code(cls, environment, code, globals, uptodate=None):
-+        """Creates a template object from compiled code and the globals.  This
-+        is used by the loaders and environment to create a template object.
-+        """
-+        namespace = {"environment": environment, "__file__": code.co_filename}
-+        exec(code, namespace)
-+        rv = cls._from_namespace(environment, namespace, globals)
-+        rv._uptodate = uptodate
-+        return rv
-+
-+    @classmethod
-+    def from_module_dict(cls, environment, module_dict, globals):
-+        """Creates a template object from a module.  This is used by the
-+        module loader to create a template object.
-+
-+        .. versionadded:: 2.4
-+        """
-+        return cls._from_namespace(environment, module_dict, globals)
-+
-+    @classmethod
-+    def _from_namespace(cls, environment, namespace, globals):
-+        t = object.__new__(cls)
-+        t.environment = environment
-+        t.globals = globals
-+        t.name = namespace["name"]
-+        t.filename = namespace["__file__"]
-+        t.blocks = namespace["blocks"]
-+
-+        # render function and module
-+        t.root_render_func = namespace["root"]
-+        t._module = None
-+
-+        # debug and loader helpers
-+        t._debug_info = namespace["debug_info"]
-+        t._uptodate = None
-+
-+        # store the reference
-+        namespace["environment"] = environment
-+        namespace["__jinja_template__"] = t
-+
-+        return t
-+
-+    def render(self, *args, **kwargs):
-+        """This method accepts the same arguments as the `dict` constructor:
-+        A dict, a dict subclass or some keyword arguments.  If no arguments
-+        are given the context will be empty.  These two calls do the same::
-+
-+            template.render(knights='that say nih')
-+            template.render({'knights': 'that say nih'})
-+
-+        This will return the rendered template as a unicode string.
-+        """
-+        vars = dict(*args, **kwargs)
-+        try:
-+            return concat(self.root_render_func(self.new_context(vars)))
-+        except Exception:
-+            self.environment.handle_exception()
-+
-+    def render_async(self, *args, **kwargs):
-+        """This works similar to :meth:`render` but returns a coroutine
-+        that when awaited returns the entire rendered template string.  This
-+        requires the async feature to be enabled.
-+
-+        Example usage::
-+
-+            await template.render_async(knights='that say nih; asynchronously')
-+        """
-+        # see asyncsupport for the actual implementation
-+        raise NotImplementedError(
-+            "This feature is not available for this version of Python"
-+        )
-+
-+    def stream(self, *args, **kwargs):
-+        """Works exactly like :meth:`generate` but returns a
-+        :class:`TemplateStream`.
-+        """
-+        return TemplateStream(self.generate(*args, **kwargs))
-+
-+    def generate(self, *args, **kwargs):
-+        """For very large templates it can be useful to not render the whole
-+        template at once but evaluate each statement after another and yield
-+        piece for piece.  This method basically does exactly that and returns
-+        a generator that yields one item after another as unicode strings.
-+
-+        It accepts the same arguments as :meth:`render`.
-+        """
-+        vars = dict(*args, **kwargs)
-+        try:
-+            for event in self.root_render_func(self.new_context(vars)):
-+                yield event
-+        except Exception:
-+            yield self.environment.handle_exception()
-+
-+    def generate_async(self, *args, **kwargs):
-+        """An async version of :meth:`generate`.  Works very similarly but
-+        returns an async iterator instead.
-+        """
-+        # see asyncsupport for the actual implementation
-+        raise NotImplementedError(
-+            "This feature is not available for this version of Python"
-+        )
-+
-+    def new_context(self, vars=None, shared=False, locals=None):
-+        """Create a new :class:`Context` for this template.  The vars
-+        provided will be passed to the template.  Per default the globals
-+        are added to the context.  If shared is set to `True` the data
-+        is passed as is to the context without adding the globals.
-+
-+        `locals` can be a dict of local variables for internal usage.
-+        """
-+        return new_context(
-+            self.environment, self.name, self.blocks, vars, shared, self.globals, locals
-+        )
-+
-+    def make_module(self, vars=None, shared=False, locals=None):
-+        """This method works like the :attr:`module` attribute when called
-+        without arguments but it will evaluate the template on every call
-+        rather than caching it.  It's also possible to provide
-+        a dict which is then used as context.  The arguments are the same
-+        as for the :meth:`new_context` method.
-+        """
-+        return TemplateModule(self, self.new_context(vars, shared, locals))
-+
-+    def make_module_async(self, vars=None, shared=False, locals=None):
-+        """As template module creation can invoke template code for
-+        asynchronous executions this method must be used instead of the
-+        normal :meth:`make_module` one.  Likewise the module attribute
-+        becomes unavailable in async mode.
-+        """
-+        # see asyncsupport for the actual implementation
-+        raise NotImplementedError(
-+            "This feature is not available for this version of Python"
-+        )
-+
-+    @internalcode
-+    def _get_default_module(self):
-+        if self._module is not None:
-+            return self._module
-+        self._module = rv = self.make_module()
-+        return rv
-+
-+    @property
-+    def module(self):
-+        """The template as module.  This is used for imports in the
-+        template runtime but is also useful if one wants to access
-+        exported template variables from the Python layer:
-+
-+        >>> t = Template('{% macro foo() %}42{% endmacro %}23')
-+        >>> str(t.module)
-+        '23'
-+        >>> t.module.foo() == u'42'
-+        True
-+
-+        This attribute is not available if async mode is enabled.
-+        """
-+        return self._get_default_module()
-+
-+    def get_corresponding_lineno(self, lineno):
-+        """Return the source line number of a line number in the
-+        generated bytecode as they are not in sync.
-+        """
-+        for template_line, code_line in reversed(self.debug_info):
-+            if code_line <= lineno:
-+                return template_line
-+        return 1
-+
-+    @property
-+    def is_up_to_date(self):
-+        """If this variable is `False` there is a newer version available."""
-+        if self._uptodate is None:
-+            return True
-+        return self._uptodate()
-+
-+    @property
-+    def debug_info(self):
-+        """The debug info mapping."""
-+        if self._debug_info:
-+            return [tuple(map(int, x.split("="))) for x in self._debug_info.split("&")]
-+        return []
-+
-+    def __repr__(self):
-+        if self.name is None:
-+            name = "memory:%x" % id(self)
-+        else:
-+            name = repr(self.name)
-+        return "<%s %s>" % (self.__class__.__name__, name)
-+
-+
-+@implements_to_string
-+class TemplateModule(object):
-+    """Represents an imported template.  All the exported names of the
-+    template are available as attributes on this object.  Additionally
-+    converting it into a unicode or byte string renders the contents.
-+    """
-+
-+    def __init__(self, template, context, body_stream=None):
-+        if body_stream is None:
-+            if context.environment.is_async:
-+                raise RuntimeError(
-+                    "Async mode requires a body stream "
-+                    "to be passed to a template module.  Use "
-+                    "the async methods of the API you are "
-+                    "using."
-+                )
-+            body_stream = list(template.root_render_func(context))
-+        self._body_stream = body_stream
-+        self.__dict__.update(context.get_exported())
-+        self.__name__ = template.name
-+
-+    def __html__(self):
-+        return Markup(concat(self._body_stream))
-+
-+    def __str__(self):
-+        return concat(self._body_stream)
-+
-+    def __repr__(self):
-+        if self.__name__ is None:
-+            name = "memory:%x" % id(self)
-+        else:
-+            name = repr(self.__name__)
-+        return "<%s %s>" % (self.__class__.__name__, name)
-+
-+
-+class TemplateExpression(object):
-+    """The :meth:`jinja2.Environment.compile_expression` method returns an
-+    instance of this object.  It encapsulates the expression-like access
-+    to the template with an expression it wraps.
-+    """
-+
-+    def __init__(self, template, undefined_to_none):
-+        self._template = template
-+        self._undefined_to_none = undefined_to_none
-+
-+    def __call__(self, *args, **kwargs):
-+        context = self._template.new_context(dict(*args, **kwargs))
-+        consume(self._template.root_render_func(context))
-+        rv = context.vars["result"]
-+        if self._undefined_to_none and isinstance(rv, Undefined):
-+            rv = None
-+        return rv
-+
-+
-+@implements_iterator
-+class TemplateStream(object):
-+    """A template stream works pretty much like an ordinary python generator
-+    but it can buffer multiple items to reduce the number of total iterations.
-+    Per default the output is unbuffered which means that for every unbuffered
-+    instruction in the template one unicode string is yielded.
-+
-+    If buffering is enabled with a buffer size of 5, five items are combined
-+    into a new unicode string.  This is mainly useful if you are streaming
-+    big templates to a client via WSGI which flushes after each iteration.
-+    """
-+
-+    def __init__(self, gen):
-+        self._gen = gen
-+        self.disable_buffering()
-+
-+    def dump(self, fp, encoding=None, errors="strict"):
-+        """Dump the complete stream into a file or file-like object.
-+        Per default unicode strings are written, if you want to encode
-+        before writing specify an `encoding`.
-+
-+        Example usage::
-+
-+            Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
-+        """
-+        close = False
-+        if isinstance(fp, string_types):
-+            if encoding is None:
-+                encoding = "utf-8"
-+            fp = open(fp, "wb")
-+            close = True
-+        try:
-+            if encoding is not None:
-+                iterable = (x.encode(encoding, errors) for x in self)
-+            else:
-+                iterable = self
-+            if hasattr(fp, "writelines"):
-+                fp.writelines(iterable)
-+            else:
-+                for item in iterable:
-+                    fp.write(item)
-+        finally:
-+            if close:
-+                fp.close()
-+
-+    def disable_buffering(self):
-+        """Disable the output buffering."""
-+        self._next = partial(next, self._gen)
-+        self.buffered = False
-+
-+    def _buffered_generator(self, size):
-+        buf = []
-+        c_size = 0
-+        push = buf.append
-+
-+        while 1:
-+            try:
-+                while c_size < size:
-+                    c = next(self._gen)
-+                    push(c)
-+                    if c:
-+                        c_size += 1
-+            except StopIteration:
-+                if not c_size:
-+                    return
-+            yield concat(buf)
-+            del buf[:]
-+            c_size = 0
-+
-+    def enable_buffering(self, size=5):
-+        """Enable buffering.  Buffer `size` items before yielding them."""
-+        if size <= 1:
-+            raise ValueError("buffer size too small")
-+
-+        self.buffered = True
-+        self._next = partial(next, self._buffered_generator(size))
-+
-+    def __iter__(self):
-+        return self
-+
-+    def __next__(self):
-+        return self._next()
-+
-+
-+# hook in default template class.  if anyone reads this comment: ignore that
-+# it's possible to use custom templates ;-)
-+Environment.template_class = Template
-diff --git a/third_party/python/Jinja2/jinja2/exceptions.py b/third_party/python/Jinja2/jinja2/exceptions.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/exceptions.py
-@@ -0,0 +1,177 @@
-+# -*- coding: utf-8 -*-
-+from ._compat import imap
-+from ._compat import implements_to_string
-+from ._compat import PY2
-+from ._compat import text_type
-+
-+
-+class TemplateError(Exception):
-+    """Baseclass for all template errors."""
-+
-+    if PY2:
-+
-+        def __init__(self, message=None):
-+            if message is not None:
-+                message = text_type(message).encode("utf-8")
-+            Exception.__init__(self, message)
-+
-+        @property
-+        def message(self):
-+            if self.args:
-+                message = self.args[0]
-+                if message is not None:
-+                    return message.decode("utf-8", "replace")
-+
-+        def __unicode__(self):
-+            return self.message or u""
-+
-+    else:
-+
-+        def __init__(self, message=None):
-+            Exception.__init__(self, message)
-+
-+        @property
-+        def message(self):
-+            if self.args:
-+                message = self.args[0]
-+                if message is not None:
-+                    return message
-+
-+
-+@implements_to_string
-+class TemplateNotFound(IOError, LookupError, TemplateError):
-+    """Raised if a template does not exist.
-+
-+    .. versionchanged:: 2.11
-+        If the given name is :class:`Undefined` and no message was
-+        provided, an :exc:`UndefinedError` is raised.
-+    """
-+
-+    # looks weird, but removes the warning descriptor that just
-+    # bogusly warns us about message being deprecated
-+    message = None
-+
-+    def __init__(self, name, message=None):
-+        IOError.__init__(self, name)
-+
-+        if message is None:
-+            from .runtime import Undefined
-+
-+            if isinstance(name, Undefined):
-+                name._fail_with_undefined_error()
-+
-+            message = name
-+
-+        self.message = message
-+        self.name = name
-+        self.templates = [name]
-+
-+    def __str__(self):
-+        return self.message
-+
-+
-+class TemplatesNotFound(TemplateNotFound):
-+    """Like :class:`TemplateNotFound` but raised if multiple templates
-+    are selected.  This is a subclass of the :class:`TemplateNotFound`
-+    exception, so just catching the base exception will catch both.
-+
-+    .. versionchanged:: 2.11
-+        If a name in the list of names is :class:`Undefined`, a message
-+        about it being undefined is shown rather than the empty string.
-+
-+    .. versionadded:: 2.2
-+    """
-+
-+    def __init__(self, names=(), message=None):
-+        if message is None:
-+            from .runtime import Undefined
-+
-+            parts = []
-+
-+            for name in names:
-+                if isinstance(name, Undefined):
-+                    parts.append(name._undefined_message)
-+                else:
-+                    parts.append(name)
-+
-+            message = u"none of the templates given were found: " + u", ".join(
-+                imap(text_type, parts)
-+            )
-+        TemplateNotFound.__init__(self, names and names[-1] or None, message)
-+        self.templates = list(names)
-+
-+
-+@implements_to_string
-+class TemplateSyntaxError(TemplateError):
-+    """Raised to tell the user that there is a problem with the template."""
-+
-+    def __init__(self, message, lineno, name=None, filename=None):
-+        TemplateError.__init__(self, message)
-+        self.lineno = lineno
-+        self.name = name
-+        self.filename = filename
-+        self.source = None
-+
-+        # this is set to True if the debug.translate_syntax_error
-+        # function translated the syntax error into a new traceback
-+        self.translated = False
-+
-+    def __str__(self):
-+        # for translated errors we only return the message
-+        if self.translated:
-+            return self.message
-+
-+        # otherwise attach some stuff
-+        location = "line %d" % self.lineno
-+        name = self.filename or self.name
-+        if name:
-+            location = 'File "%s", %s' % (name, location)
-+        lines = [self.message, "  " + location]
-+
-+        # if the source is set, add the line to the output
-+        if self.source is not None:
-+            try:
-+                line = self.source.splitlines()[self.lineno - 1]
-+            except IndexError:
-+                line = None
-+            if line:
-+                lines.append("    " + line.strip())
-+
-+        return u"\n".join(lines)
-+
-+    def __reduce__(self):
-+        # https://bugs.python.org/issue1692335 Exceptions that take
-+        # multiple required arguments have problems with pickling.
-+        # Without this, raises TypeError: __init__() missing 1 required
-+        # positional argument: 'lineno'
-+        return self.__class__, (self.message, self.lineno, self.name, self.filename)
-+
-+
-+class TemplateAssertionError(TemplateSyntaxError):
-+    """Like a template syntax error, but covers cases where something in the
-+    template caused an error at compile time that wasn't necessarily caused
-+    by a syntax error.  However it's a direct subclass of
-+    :exc:`TemplateSyntaxError` and has the same attributes.
-+    """
-+
-+
-+class TemplateRuntimeError(TemplateError):
-+    """A generic runtime error in the template engine.  Under some situations
-+    Jinja may raise this exception.
-+    """
-+
-+
-+class UndefinedError(TemplateRuntimeError):
-+    """Raised if a template tries to operate on :class:`Undefined`."""
-+
-+
-+class SecurityError(TemplateRuntimeError):
-+    """Raised if a template tries to do something insecure if the
-+    sandbox is enabled.
-+    """
-+
-+
-+class FilterArgumentError(TemplateRuntimeError):
-+    """This error is raised if a filter was called with inappropriate
-+    arguments
-+    """
-diff --git a/third_party/python/Jinja2/jinja2/ext.py b/third_party/python/Jinja2/jinja2/ext.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/ext.py
-@@ -0,0 +1,704 @@
-+# -*- coding: utf-8 -*-
-+"""Extension API for adding custom tags and behavior."""
-+import pprint
-+import re
-+from sys import version_info
-+
-+from markupsafe import Markup
-+
-+from . import nodes
-+from ._compat import iteritems
-+from ._compat import string_types
-+from ._compat import with_metaclass
-+from .defaults import BLOCK_END_STRING
-+from .defaults import BLOCK_START_STRING
-+from .defaults import COMMENT_END_STRING
-+from .defaults import COMMENT_START_STRING
-+from .defaults import KEEP_TRAILING_NEWLINE
-+from .defaults import LINE_COMMENT_PREFIX
-+from .defaults import LINE_STATEMENT_PREFIX
-+from .defaults import LSTRIP_BLOCKS
-+from .defaults import NEWLINE_SEQUENCE
-+from .defaults import TRIM_BLOCKS
-+from .defaults import VARIABLE_END_STRING
-+from .defaults import VARIABLE_START_STRING
-+from .environment import Environment
-+from .exceptions import TemplateAssertionError
-+from .exceptions import TemplateSyntaxError
-+from .nodes import ContextReference
-+from .runtime import concat
-+from .utils import contextfunction
-+from .utils import import_string
-+
-+# The only really useful gettext functions for a Jinja template.  Note
-+# that ugettext must be assigned to gettext, as Jinja doesn't support
-+# non-unicode strings.
-+GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext")
-+
-+_ws_re = re.compile(r"\s*\n\s*")
-+
-+
-+class ExtensionRegistry(type):
-+    """Gives the extension an unique identifier."""
-+
-+    def __new__(mcs, name, bases, d):
-+        rv = type.__new__(mcs, name, bases, d)
-+        rv.identifier = rv.__module__ + "." + rv.__name__
-+        return rv
-+
-+
-+class Extension(with_metaclass(ExtensionRegistry, object)):
-+    """Extensions can be used to add extra functionality to the Jinja template
-+    system at the parser level.  Custom extensions are bound to an environment
-+    but may not store environment specific data on `self`.  The reason for
-+    this is that an extension can be bound to another environment (for
-+    overlays) by creating a copy and reassigning the `environment` attribute.
-+
-+    As extensions are created by the environment they cannot accept any
-+    arguments for configuration.  One may want to work around that by using
-+    a factory function, but that is not possible as extensions are identified
-+    by their import name.  The correct way to configure the extension is
-+    storing the configuration values on the environment.  Because the
-+    environment then ends up acting as central configuration storage,
-+    attribute names may clash, which is why extensions have to ensure that
-+    the names they choose for configuration are not too generic.  ``prefix``,
-+    for example, is a terrible name; ``fragment_cache_prefix``, on the other
-+    hand, is a good name, as it includes the name of the extension (fragment
-+    cache).
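-+
-+    A minimal illustrative sketch of the shape of a custom extension (the
-+    ``cache`` tag and ``_cache_support`` method are hypothetical examples,
-+    not part of this module)::
-+
-+        class FragmentCacheExtension(Extension):
-+            tags = {"cache"}
-+
-+            def parse(self, parser):
-+                lineno = next(parser.stream).lineno
-+                key = parser.parse_expression()
-+                body = parser.parse_statements(
-+                    ["name:endcache"], drop_needle=True
-+                )
-+                return nodes.CallBlock(
-+                    self.call_method("_cache_support", [key]), [], [], body
-+                ).set_lineno(lineno)
-+
-+            def _cache_support(self, key, caller):
-+                # a real extension would consult a cache keyed by ``key``;
-+                # this sketch just renders the block body
-+                return caller()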
-+    """
-+
-+    #: if this extension parses, this is the set of tags it's listening to.
-+    tags = set()
-+
-+    #: the priority of that extension.  This is especially useful for
-+    #: extensions that preprocess values.  A lower value means higher
-+    #: priority.
-+    #:
-+    #: .. versionadded:: 2.4
-+    priority = 100
-+
-+    def __init__(self, environment):
-+        self.environment = environment
-+
-+    def bind(self, environment):
-+        """Create a copy of this extension bound to another environment."""
-+        rv = object.__new__(self.__class__)
-+        rv.__dict__.update(self.__dict__)
-+        rv.environment = environment
-+        return rv
-+
-+    def preprocess(self, source, name, filename=None):
-+        """This method is called before the actual lexing and can be used to
-+        preprocess the source.  The `filename` is optional.  The return value
-+        must be the preprocessed source.
-+        """
-+        return source
-+
-+    def filter_stream(self, stream):
-+        """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
-+        to filter tokens returned.  This method has to return an iterable of
-+        :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
-+        :class:`~jinja2.lexer.TokenStream`.
-+        """
-+        return stream
-+
-+    def parse(self, parser):
-+        """If any of the :attr:`tags` matched this method is called with the
-+        parser as first argument.  The token the parser stream is pointing at
-+        is the name token that matched.  This method has to return one or a
-+        list of multiple nodes.
-+        """
-+        raise NotImplementedError()
-+
-+    def attr(self, name, lineno=None):
-+        """Return an attribute node for the current extension.  This is useful
-+        to pass constants on extensions to generated template code.
-+
-+        ::
-+
-+            self.attr('_my_attribute', lineno=lineno)
-+        """
-+        return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
-+
-+    def call_method(
-+        self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None
-+    ):
-+        """Call a method of the extension.  This is a shortcut for
-+        :meth:`attr` + :class:`jinja2.nodes.Call`.
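-+
-+        For example (an illustrative sketch; ``_my_method`` is a
-+        hypothetical method on the extension)::
-+
-+            self.call_method("_my_method", [nodes.Const(42)], lineno=lineno)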
-+        """
-+        if args is None:
-+            args = []
-+        if kwargs is None:
-+            kwargs = []
-+        return nodes.Call(
-+            self.attr(name, lineno=lineno),
-+            args,
-+            kwargs,
-+            dyn_args,
-+            dyn_kwargs,
-+            lineno=lineno,
-+        )
-+
-+
-+@contextfunction
-+def _gettext_alias(__context, *args, **kwargs):
-+    return __context.call(__context.resolve("gettext"), *args, **kwargs)
-+
-+
-+def _make_new_gettext(func):
-+    @contextfunction
-+    def gettext(__context, __string, **variables):
-+        rv = __context.call(func, __string)
-+        if __context.eval_ctx.autoescape:
-+            rv = Markup(rv)
-+        # Always treat as a format string, even if there are no
-+        # variables. This makes translation strings more consistent
-+        # and predictable. This requires escaping
-+        return rv % variables
-+
-+    return gettext
-+
-+
-+def _make_new_ngettext(func):
-+    @contextfunction
-+    def ngettext(__context, __singular, __plural, __num, **variables):
-+        variables.setdefault("num", __num)
-+        rv = __context.call(func, __singular, __plural, __num)
-+        if __context.eval_ctx.autoescape:
-+            rv = Markup(rv)
-+        # Always treat as a format string, see gettext comment above.
-+        return rv % variables
-+
-+    return ngettext
-+
-+
-+class InternationalizationExtension(Extension):
-+    """This extension adds gettext support to Jinja."""
-+
-+    tags = {"trans"}
-+
-+    # TODO: the i18n extension is currently reevaluating values in a few
-+    # situations.  Take this example:
-+    #   {% trans count=something() %}{{ count }} foo{% pluralize
-+    #     %}{{ count }} fooss{% endtrans %}
-+    # something is called twice here.  One time for the gettext value and
-+    # the other time for the n-parameter of the ngettext function.
-+
-+    def __init__(self, environment):
-+        Extension.__init__(self, environment)
-+        environment.globals["_"] = _gettext_alias
-+        environment.extend(
-+            install_gettext_translations=self._install,
-+            install_null_translations=self._install_null,
-+            install_gettext_callables=self._install_callables,
-+            uninstall_gettext_translations=self._uninstall,
-+            extract_translations=self._extract,
-+            newstyle_gettext=False,
-+        )
-+
-+    def _install(self, translations, newstyle=None):
-+        gettext = getattr(translations, "ugettext", None)
-+        if gettext is None:
-+            gettext = translations.gettext
-+        ngettext = getattr(translations, "ungettext", None)
-+        if ngettext is None:
-+            ngettext = translations.ngettext
-+        self._install_callables(gettext, ngettext, newstyle)
-+
-+    def _install_null(self, newstyle=None):
-+        self._install_callables(
-+            lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle
-+        )
-+
-+    def _install_callables(self, gettext, ngettext, newstyle=None):
-+        if newstyle is not None:
-+            self.environment.newstyle_gettext = newstyle
-+        if self.environment.newstyle_gettext:
-+            gettext = _make_new_gettext(gettext)
-+            ngettext = _make_new_ngettext(ngettext)
-+        self.environment.globals.update(gettext=gettext, ngettext=ngettext)
-+
-+    def _uninstall(self, translations):
-+        for key in "gettext", "ngettext":
-+            self.environment.globals.pop(key, None)
-+
-+    def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
-+        if isinstance(source, string_types):
-+            source = self.environment.parse(source)
-+        return extract_from_ast(source, gettext_functions)
-+
-+    def parse(self, parser):
-+        """Parse a translatable tag."""
-+        lineno = next(parser.stream).lineno
-+        num_called_num = False
-+
-+        # find all the variables referenced.  Additionally a variable can be
-+        # defined in the body of the trans block too, but this is checked at
-+        # a later state.
-+        plural_expr = None
-+        plural_expr_assignment = None
-+        variables = {}
-+        trimmed = None
-+        while parser.stream.current.type != "block_end":
-+            if variables:
-+                parser.stream.expect("comma")
-+
-+            # skip colon for python compatibility
-+            if parser.stream.skip_if("colon"):
-+                break
-+
-+            name = parser.stream.expect("name")
-+            if name.value in variables:
-+                parser.fail(
-+                    "translatable variable %r defined twice." % name.value,
-+                    name.lineno,
-+                    exc=TemplateAssertionError,
-+                )
-+
-+            # expressions
-+            if parser.stream.current.type == "assign":
-+                next(parser.stream)
-+                variables[name.value] = var = parser.parse_expression()
-+            elif trimmed is None and name.value in ("trimmed", "notrimmed"):
-+                trimmed = name.value == "trimmed"
-+                continue
-+            else:
-+                variables[name.value] = var = nodes.Name(name.value, "load")
-+
-+            if plural_expr is None:
-+                if isinstance(var, nodes.Call):
-+                    plural_expr = nodes.Name("_trans", "load")
-+                    variables[name.value] = plural_expr
-+                    plural_expr_assignment = nodes.Assign(
-+                        nodes.Name("_trans", "store"), var
-+                    )
-+                else:
-+                    plural_expr = var
-+                num_called_num = name.value == "num"
-+
-+        parser.stream.expect("block_end")
-+
-+        plural = None
-+        have_plural = False
-+        referenced = set()
-+
-+        # now parse until endtrans or pluralize
-+        singular_names, singular = self._parse_block(parser, True)
-+        if singular_names:
-+            referenced.update(singular_names)
-+            if plural_expr is None:
-+                plural_expr = nodes.Name(singular_names[0], "load")
-+                num_called_num = singular_names[0] == "num"
-+
-+        # if we have a pluralize block, we parse that too
-+        if parser.stream.current.test("name:pluralize"):
-+            have_plural = True
-+            next(parser.stream)
-+            if parser.stream.current.type != "block_end":
-+                name = parser.stream.expect("name")
-+                if name.value not in variables:
-+                    parser.fail(
-+                        "unknown variable %r for pluralization" % name.value,
-+                        name.lineno,
-+                        exc=TemplateAssertionError,
-+                    )
-+                plural_expr = variables[name.value]
-+                num_called_num = name.value == "num"
-+            parser.stream.expect("block_end")
-+            plural_names, plural = self._parse_block(parser, False)
-+            next(parser.stream)
-+            referenced.update(plural_names)
-+        else:
-+            next(parser.stream)
-+
-+        # register free names as simple name expressions
-+        for var in referenced:
-+            if var not in variables:
-+                variables[var] = nodes.Name(var, "load")
-+
-+        if not have_plural:
-+            plural_expr = None
-+        elif plural_expr is None:
-+            parser.fail("pluralize without variables", lineno)
-+
-+        if trimmed is None:
-+            trimmed = self.environment.policies["ext.i18n.trimmed"]
-+        if trimmed:
-+            singular = self._trim_whitespace(singular)
-+            if plural:
-+                plural = self._trim_whitespace(plural)
-+
-+        node = self._make_node(
-+            singular,
-+            plural,
-+            variables,
-+            plural_expr,
-+            bool(referenced),
-+            num_called_num and have_plural,
-+        )
-+        node.set_lineno(lineno)
-+        if plural_expr_assignment is not None:
-+            return [plural_expr_assignment, node]
-+        else:
-+            return node
-+
-+    def _trim_whitespace(self, string, _ws_re=_ws_re):
-+        return _ws_re.sub(" ", string.strip())
-+
-+    def _parse_block(self, parser, allow_pluralize):
-+        """Parse until the next block tag with a given name."""
-+        referenced = []
-+        buf = []
-+        while 1:
-+            if parser.stream.current.type == "data":
-+                buf.append(parser.stream.current.value.replace("%", "%%"))
-+                next(parser.stream)
-+            elif parser.stream.current.type == "variable_begin":
-+                next(parser.stream)
-+                name = parser.stream.expect("name").value
-+                referenced.append(name)
-+                buf.append("%%(%s)s" % name)
-+                parser.stream.expect("variable_end")
-+            elif parser.stream.current.type == "block_begin":
-+                next(parser.stream)
-+                if parser.stream.current.test("name:endtrans"):
-+                    break
-+                elif parser.stream.current.test("name:pluralize"):
-+                    if allow_pluralize:
-+                        break
-+                    parser.fail(
-+                        "a translatable section can have only one pluralize section"
-+                    )
-+                parser.fail(
-+                    "control structures in translatable sections are not allowed"
-+                )
-+            elif parser.stream.eos:
-+                parser.fail("unclosed translation block")
-+            else:
-+                raise RuntimeError("internal parser error")
-+
-+        return referenced, concat(buf)
-+
-+    def _make_node(
-+        self, singular, plural, variables, plural_expr, vars_referenced, num_called_num
-+    ):
-+        """Generates a useful node from the data provided."""
-+        # no variables referenced?  then there is no need to keep the
-+        # escaped percent signs; old-style gettext only does string
-+        # formatting when variables are present.
-+        if not vars_referenced and not self.environment.newstyle_gettext:
-+            singular = singular.replace("%%", "%")
-+            if plural:
-+                plural = plural.replace("%%", "%")
-+
-+        # singular only:
-+        if plural_expr is None:
-+            gettext = nodes.Name("gettext", "load")
-+            node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
-+
-+        # singular and plural
-+        else:
-+            ngettext = nodes.Name("ngettext", "load")
-+            node = nodes.Call(
-+                ngettext,
-+                [nodes.Const(singular), nodes.Const(plural), plural_expr],
-+                [],
-+                None,
-+                None,
-+            )
-+
-+        # in case newstyle gettext is used, the method is powerful
-+        # enough to handle the variable expansion and autoescape
-+        # handling itself
-+        if self.environment.newstyle_gettext:
-+            for key, value in iteritems(variables):
-+                # the function adds that later anyways in case num was
-+                # called num, so just skip it.
-+                if num_called_num and key == "num":
-+                    continue
-+                node.kwargs.append(nodes.Keyword(key, value))
-+
-+        # otherwise do that here
-+        else:
-+            # mark the return value as safe if we are in an
-+            # environment with autoescaping turned on
-+            node = nodes.MarkSafeIfAutoescape(node)
-+            if variables:
-+                node = nodes.Mod(
-+                    node,
-+                    nodes.Dict(
-+                        [
-+                            nodes.Pair(nodes.Const(key), value)
-+                            for key, value in variables.items()
-+                        ]
-+                    ),
-+                )
-+        return nodes.Output([node])
-+
-+
-+class ExprStmtExtension(Extension):
-+    """Adds a `do` tag to Jinja that works like the print statement just
-+    that it doesn't print the return value.
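-+
-+    .. sourcecode:: jinja
-+
-+        {% do navigation.append('a string') %}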
-+    """
-+
-+    tags = set(["do"])
-+
-+    def parse(self, parser):
-+        node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
-+        node.node = parser.parse_tuple()
-+        return node
-+
-+
-+class LoopControlExtension(Extension):
-+    """Adds break and continue to the template engine."""
-+
-+    tags = set(["break", "continue"])
-+
-+    def parse(self, parser):
-+        token = next(parser.stream)
-+        if token.value == "break":
-+            return nodes.Break(lineno=token.lineno)
-+        return nodes.Continue(lineno=token.lineno)
-+
-+
-+class WithExtension(Extension):
-+    pass
-+
-+
-+class AutoEscapeExtension(Extension):
-+    pass
-+
-+
-+class DebugExtension(Extension):
-+    """A ``{% debug %}`` tag that dumps the available variables,
-+    filters, and tests.
-+
-+    .. code-block:: html+jinja
-+
-+        <pre>{% debug %}</pre>
-+
-+    .. code-block:: text
-+
-+        {'context': {'cycler': <class 'jinja2.utils.Cycler'>,
-+                     ...,
-+                     'namespace': <class 'jinja2.utils.Namespace'>},
-+         'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
-+                     ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
-+         'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
-+                   ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
-+
-+    .. versionadded:: 2.11.0
-+    """
-+
-+    tags = {"debug"}
-+
-+    def parse(self, parser):
-+        lineno = parser.stream.expect("name:debug").lineno
-+        context = ContextReference()
-+        result = self.call_method("_render", [context], lineno=lineno)
-+        return nodes.Output([result], lineno=lineno)
-+
-+    def _render(self, context):
-+        result = {
-+            "context": context.get_all(),
-+            "filters": sorted(self.environment.filters.keys()),
-+            "tests": sorted(self.environment.tests.keys()),
-+        }
-+
-+        # Set the depth since the intent is to show the top few names.
-+        if version_info[:2] >= (3, 4):
-+            return pprint.pformat(result, depth=3, compact=True)
-+        else:
-+            return pprint.pformat(result, depth=3)
-+
-+
-+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
-+    """Extract localizable strings from the given template node.  Per
-+    default this function returns matches in babel style that means non string
-+    parameters as well as keyword arguments are returned as `None`.  This
-+    allows Babel to figure out what you really meant if you are using
-+    gettext functions that allow keyword arguments for placeholder expansion.
-+    If you don't want that behavior set the `babel_style` parameter to `False`
-+    which causes only strings to be returned and parameters are always stored
-+    in tuples.  As a consequence invalid gettext calls (calls without a single
-+    string parameter or string parameters after non-string parameters) are
-+    skipped.
-+
-+    This example explains the behavior:
-+
-+    >>> from jinja2 import Environment
-+    >>> env = Environment()
-+    >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
-+    >>> list(extract_from_ast(node))
-+    [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
-+    >>> list(extract_from_ast(node, babel_style=False))
-+    [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
-+
-+    For every string found this function yields a ``(lineno, function,
-+    message)`` tuple, where:
-+
-+    * ``lineno`` is the number of the line on which the string was found,
-+    * ``function`` is the name of the ``gettext`` function used (if the
-+      string was extracted from embedded Python code), and
-+    *  ``message`` is the string itself (a ``unicode`` object, or a tuple
-+       of ``unicode`` objects for functions with multiple string arguments).
-+
-+    This extraction function operates on the AST and is therefore unable
-+    to extract any comments.  For comment support you have to use the Babel
-+    extraction interface or extract comments yourself.
-+    """
-+    for node in node.find_all(nodes.Call):
-+        if (
-+            not isinstance(node.node, nodes.Name)
-+            or node.node.name not in gettext_functions
-+        ):
-+            continue
-+
-+        strings = []
-+        for arg in node.args:
-+            if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types):
-+                strings.append(arg.value)
-+            else:
-+                strings.append(None)
-+
-+        for _ in node.kwargs:
-+            strings.append(None)
-+        if node.dyn_args is not None:
-+            strings.append(None)
-+        if node.dyn_kwargs is not None:
-+            strings.append(None)
-+
-+        if not babel_style:
-+            strings = tuple(x for x in strings if x is not None)
-+            if not strings:
-+                continue
-+        else:
-+            if len(strings) == 1:
-+                strings = strings[0]
-+            else:
-+                strings = tuple(strings)
-+        yield node.lineno, node.node.name, strings
-+
-+
-+class _CommentFinder(object):
-+    """Helper class to find comments in a token stream.  Can only
-+    find comments for gettext calls forwards.  Once the comment
-+    from line 4 is found, a comment for line 1 will not return a
-+    usable value.
-+    """
-+
-+    def __init__(self, tokens, comment_tags):
-+        self.tokens = tokens
-+        self.comment_tags = comment_tags
-+        self.offset = 0
-+        self.last_lineno = 0
-+
-+    def find_backwards(self, offset):
-+        try:
-+            for _, token_type, token_value in reversed(
-+                self.tokens[self.offset : offset]
-+            ):
-+                if token_type in ("comment", "linecomment"):
-+                    try:
-+                        prefix, comment = token_value.split(None, 1)
-+                    except ValueError:
-+                        continue
-+                    if prefix in self.comment_tags:
-+                        return [comment.rstrip()]
-+            return []
-+        finally:
-+            self.offset = offset
-+
-+    def find_comments(self, lineno):
-+        if not self.comment_tags or self.last_lineno > lineno:
-+            return []
-+        for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
-+            if token_lineno > lineno:
-+                return self.find_backwards(self.offset + idx)
-+        return self.find_backwards(len(self.tokens))
-+
-+
-+def babel_extract(fileobj, keywords, comment_tags, options):
-+    """Babel extraction method for Jinja templates.
-+
-+    .. versionchanged:: 2.3
-+       Basic support for translation comments was added.  If `comment_tags`
-+       is now set to a list of keywords for extraction, the extractor will
-+       try to find the best preceding comment that begins with one of the
-+       keywords.  For best results, make sure to not have more than one
-+       gettext call in one line of code and the matching comment in the
-+       same line or the line before.
-+
-+    .. versionchanged:: 2.5.1
-+       The `newstyle_gettext` flag can be set to `True` to enable newstyle
-+       gettext calls.
-+
-+    .. versionchanged:: 2.7
-+       A `silent` option can now be provided.  If set to `False` template
-+       syntax errors are propagated instead of being ignored.
-+
-+    :param fileobj: the file-like object the messages should be extracted from
-+    :param keywords: a list of keywords (i.e. function names) that should be
-+                     recognized as translation functions
-+    :param comment_tags: a list of translator tags to search for and include
-+                         in the results.
-+    :param options: a dictionary of additional options (optional)
-+    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
-+             (comments will be empty currently)
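-+
-+    A typical Babel mapping file entry pointing at this extractor might
-+    look like this (an illustrative sketch; the glob pattern depends on
-+    your project layout)::
-+
-+        [jinja2: **/templates/**.html]
-+        encoding = utf-8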
-+    """
-+    extensions = set()
-+    for extension in options.get("extensions", "").split(","):
-+        extension = extension.strip()
-+        if not extension:
-+            continue
-+        extensions.add(import_string(extension))
-+    if InternationalizationExtension not in extensions:
-+        extensions.add(InternationalizationExtension)
-+
-+    def getbool(options, key, default=False):
-+        return options.get(key, str(default)).lower() in ("1", "on", "yes", "true")
-+
-+    silent = getbool(options, "silent", True)
-+    environment = Environment(
-+        options.get("block_start_string", BLOCK_START_STRING),
-+        options.get("block_end_string", BLOCK_END_STRING),
-+        options.get("variable_start_string", VARIABLE_START_STRING),
-+        options.get("variable_end_string", VARIABLE_END_STRING),
-+        options.get("comment_start_string", COMMENT_START_STRING),
-+        options.get("comment_end_string", COMMENT_END_STRING),
-+        options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX,
-+        options.get("line_comment_prefix") or LINE_COMMENT_PREFIX,
-+        getbool(options, "trim_blocks", TRIM_BLOCKS),
-+        getbool(options, "lstrip_blocks", LSTRIP_BLOCKS),
-+        NEWLINE_SEQUENCE,
-+        getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE),
-+        frozenset(extensions),
-+        cache_size=0,
-+        auto_reload=False,
-+    )
-+
-+    if getbool(options, "trimmed"):
-+        environment.policies["ext.i18n.trimmed"] = True
-+    if getbool(options, "newstyle_gettext"):
-+        environment.newstyle_gettext = True
-+
-+    source = fileobj.read().decode(options.get("encoding", "utf-8"))
-+    try:
-+        node = environment.parse(source)
-+        tokens = list(environment.lex(environment.preprocess(source)))
-+    except TemplateSyntaxError:
-+        if not silent:
-+            raise
-+        # skip templates with syntax errors
-+        return
-+
-+    finder = _CommentFinder(tokens, comment_tags)
-+    for lineno, func, message in extract_from_ast(node, keywords):
-+        yield lineno, func, message, finder.find_comments(lineno)
-+
-+
-+#: nicer import names
-+i18n = InternationalizationExtension
-+do = ExprStmtExtension
-+loopcontrols = LoopControlExtension
-+with_ = WithExtension
-+autoescape = AutoEscapeExtension
-+debug = DebugExtension
-diff --git a/third_party/python/Jinja2/jinja2/filters.py b/third_party/python/Jinja2/jinja2/filters.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/filters.py
-@@ -0,0 +1,1382 @@
-+# -*- coding: utf-8 -*-
-+"""Built-in template filters used with the ``|`` operator."""
-+import math
-+import random
-+import re
-+import warnings
-+from collections import namedtuple
-+from itertools import chain
-+from itertools import groupby
-+
-+from markupsafe import escape
-+from markupsafe import Markup
-+from markupsafe import soft_unicode
-+
-+from ._compat import abc
-+from ._compat import imap
-+from ._compat import iteritems
-+from ._compat import string_types
-+from ._compat import text_type
-+from .exceptions import FilterArgumentError
-+from .runtime import Undefined
-+from .utils import htmlsafe_json_dumps
-+from .utils import pformat
-+from .utils import unicode_urlencode
-+from .utils import urlize
-+
-+_word_re = re.compile(r"\w+", re.UNICODE)
-+_word_beginning_split_re = re.compile(r"([-\s\(\{\[\<]+)", re.UNICODE)
-+
-+
-+def contextfilter(f):
-+    """Decorator for marking context dependent filters. The current
-+    :class:`Context` will be passed as first argument.
-+    """
-+    f.contextfilter = True
-+    return f
-+
-+
-+def evalcontextfilter(f):
-+    """Decorator for marking eval-context dependent filters.  An eval
-+    context object is passed as first argument.  For more information
-+    about the eval context, see :ref:`eval-context`.
-+
-+    .. versionadded:: 2.4
-+    """
-+    f.evalcontextfilter = True
-+    return f
-+
-+
-+def environmentfilter(f):
-+    """Decorator for marking environment dependent filters.  The current
-+    :class:`Environment` is passed to the filter as first argument.
-+    """
-+    f.environmentfilter = True
-+    return f
-+
-+
-+def ignore_case(value):
-+    """For use as a postprocessor for :func:`make_attrgetter`. Converts strings
-+    to lowercase and returns other types as-is."""
-+    return value.lower() if isinstance(value, string_types) else value
-+
-+
-+def make_attrgetter(environment, attribute, postprocess=None, default=None):
-+    """Returns a callable that looks up the given attribute from a
-+    passed object with the rules of the environment.  Dots are allowed
-+    to access attributes of attributes.  Integer parts in paths are
-+    looked up as integers.
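-+
-+    For example (an illustrative sketch), ``make_attrgetter(env,
-+    "user.name")`` returns a callable ``f`` such that ``f(obj)`` is
-+    roughly ``env.getitem(env.getitem(obj, "user"), "name")``.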
-+    """
-+    attribute = _prepare_attribute_parts(attribute)
-+
-+    def attrgetter(item):
-+        for part in attribute:
-+            item = environment.getitem(item, part)
-+
-+            if default and isinstance(item, Undefined):
-+                item = default
-+
-+        if postprocess is not None:
-+            item = postprocess(item)
-+
-+        return item
-+
-+    return attrgetter
-+
-+
-+def make_multi_attrgetter(environment, attribute, postprocess=None):
-+    """Returns a callable that looks up the given comma separated
-+    attributes from a passed object with the rules of the environment.
-+    Dots are allowed to access attributes of each attribute.  Integer
-+    parts in paths are looked up as integers.
-+
-+    The value returned by the returned callable is a list of extracted
-+    attribute values.
-+
-+    Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc.
-+    """
-+    attribute_parts = (
-+        attribute.split(",") if isinstance(attribute, string_types) else [attribute]
-+    )
-+    attribute = [
-+        _prepare_attribute_parts(attribute_part) for attribute_part in attribute_parts
-+    ]
-+
-+    def attrgetter(item):
-+        items = [None] * len(attribute)
-+        for i, attribute_part in enumerate(attribute):
-+            item_i = item
-+            for part in attribute_part:
-+                item_i = environment.getitem(item_i, part)
-+
-+            if postprocess is not None:
-+                item_i = postprocess(item_i)
-+
-+            items[i] = item_i
-+        return items
-+
-+    return attrgetter
-+
-+
-+def _prepare_attribute_parts(attr):
-+    if attr is None:
-+        return []
-+    elif isinstance(attr, string_types):
-+        return [int(x) if x.isdigit() else x for x in attr.split(".")]
-+    else:
-+        return [attr]
-+
-+
-+def do_forceescape(value):
-+    """Enforce HTML escaping.  This will probably double escape variables."""
-+    if hasattr(value, "__html__"):
-+        value = value.__html__()
-+    return escape(text_type(value))
-+
-+
-+def do_urlencode(value):
-+    """Quote data for use in a URL path or query using UTF-8.
-+
-+    Basic wrapper around :func:`urllib.parse.quote` when given a
-+    string, or :func:`urllib.parse.urlencode` for a dict or iterable.
-+
-+    :param value: Data to quote. A string will be quoted directly. A
-+        dict or iterable of ``(key, value)`` pairs will be joined as a
-+        query string.
-+
-+    When given a string, "/" is not quoted. HTTP servers treat "/" and
-+    "%2F" equivalently in paths. If you need quoted slashes, use the
-+    ``|replace("/", "%2F")`` filter.
-+
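-+    For example, when quoting a path (note the slash is kept):
-+
-+    .. sourcecode:: jinja
-+
-+        {{ 'a b/c'|urlencode }}
-+            -> a%20b/c
-+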
-+    .. versionadded:: 2.7
-+    """
-+    if isinstance(value, string_types) or not isinstance(value, abc.Iterable):
-+        return unicode_urlencode(value)
-+
-+    if isinstance(value, dict):
-+        items = iteritems(value)
-+    else:
-+        items = iter(value)
-+
-+    return u"&".join(
-+        "%s=%s" % (unicode_urlencode(k, for_qs=True), unicode_urlencode(v, for_qs=True))
-+        for k, v in items
-+    )
-+
-+
-+@evalcontextfilter
-+def do_replace(eval_ctx, s, old, new, count=None):
-+    """Return a copy of the value with all occurrences of a substring
-+    replaced with a new one. The first argument is the substring
-+    that should be replaced, the second is the replacement string.
-+    If the optional third argument ``count`` is given, only the first
-+    ``count`` occurrences are replaced:
-+
-+    .. sourcecode:: jinja
-+
-+        {{ "Hello World"|replace("Hello", "Goodbye") }}
-+            -> Goodbye World
-+
-+        {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
-+            -> d'oh, d'oh, aaargh
-+    """
-+    if count is None:
-+        count = -1
-+    if not eval_ctx.autoescape:
-+        return text_type(s).replace(text_type(old), text_type(new), count)
-+    if (
-+        hasattr(old, "__html__")
-+        or hasattr(new, "__html__")
-+        and not hasattr(s, "__html__")
-+    ):
-+        s = escape(s)
-+    else:
-+        s = soft_unicode(s)
-+    return s.replace(soft_unicode(old), soft_unicode(new), count)
-+
-+
-+def do_upper(s):
-+    """Convert a value to uppercase."""
-+    return soft_unicode(s).upper()
-+
-+
-+def do_lower(s):
-+    """Convert a value to lowercase."""
-+    return soft_unicode(s).lower()
-+
-+
-+@evalcontextfilter
-+def do_xmlattr(_eval_ctx, d, autospace=True):
-+    """Create an SGML/XML attribute string based on the items in a dict.
-+    All values that are neither `none` nor `undefined` are automatically
-+    escaped:
-+
-+    .. sourcecode:: html+jinja
-+
-+        <ul{{ {'class': 'my_list', 'missing': none,
-+                'id': 'list-%d'|format(variable)}|xmlattr }}>
-+        ...
-+        </ul>
-+
-+    Results in something like this:
-+
-+    .. sourcecode:: html
-+
-+        <ul class="my_list" id="list-42">
-+        ...
-+        </ul>
-+
-+    As you can see, it automatically prepends a space to the result if the
-+    filter returned something, unless the second parameter is false.
-+    """
-+    rv = u" ".join(
-+        u'%s="%s"' % (escape(key), escape(value))
-+        for key, value in iteritems(d)
-+        if value is not None and not isinstance(value, Undefined)
-+    )
-+    if autospace and rv:
-+        rv = u" " + rv
-+    if _eval_ctx.autoescape:
-+        rv = Markup(rv)
-+    return rv
-+
-+
-+def do_capitalize(s):
-+    """Capitalize a value. The first character will be uppercase, all others
-+    lowercase.
-+    """
-+    return soft_unicode(s).capitalize()
-+
-+
-+def do_title(s):
-+    """Return a titlecased version of the value. I.e. words will start with
-+    uppercase letters, all remaining characters are lowercase.
-+    """
-+    return "".join(
-+        [
-+            item[0].upper() + item[1:].lower()
-+            for item in _word_beginning_split_re.split(soft_unicode(s))
-+            if item
-+        ]
-+    )
-+
-+
-+def do_dictsort(value, case_sensitive=False, by="key", reverse=False):
-+    """Sort a dict and yield (key, value) pairs. Because python dicts are
-+    unsorted you may want to use this function to order them by either
-+    key or value:
-+
-+    .. sourcecode:: jinja
-+
-+        {% for item in mydict|dictsort %}
-+            sort the dict by key, case insensitive
-+
-+        {% for item in mydict|dictsort(reverse=true) %}
-+            sort the dict by key, case insensitive, reverse order
-+
-+        {% for item in mydict|dictsort(true) %}
-+            sort the dict by key, case sensitive
-+
-+        {% for item in mydict|dictsort(false, 'value') %}
-+            sort the dict by value, case insensitive
-+    """
-+    if by == "key":
-+        pos = 0
-+    elif by == "value":
-+        pos = 1
-+    else:
-+        raise FilterArgumentError('You can only sort by either "key" or "value"')
-+
-+    def sort_func(item):
-+        value = item[pos]
-+
-+        if not case_sensitive:
-+            value = ignore_case(value)
-+
-+        return value
-+
-+    return sorted(value.items(), key=sort_func, reverse=reverse)
-+
-+
-+@environmentfilter
-+def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None):
-+    """Sort an iterable using Python's :func:`sorted`.
-+
-+    .. sourcecode:: jinja
-+
-+        {% for city in cities|sort %}
-+            ...
-+        {% endfor %}
-+
-+    :param reverse: Sort descending instead of ascending.
-+    :param case_sensitive: When sorting strings, sort upper and lower
-+        case separately.
-+    :param attribute: When sorting objects or dicts, an attribute or
-+        key to sort by. Can use dot notation like ``"address.city"``.
-+        Can be a list of attributes like ``"age,name"``.
-+
-+    The sort is stable; it does not change the relative order of
-+    elements that compare equal. This makes it possible to chain
-+    sorts on different attributes and orderings.
-+
-+    .. sourcecode:: jinja
-+
-+        {% for user in users|sort(attribute="name")
-+            |sort(reverse=true, attribute="age") %}
-+            ...
-+        {% endfor %}
-+
-+    As a shortcut to chaining when the direction is the same for all
-+    attributes, pass a comma-separated list of attributes.
-+
-+    .. sourcecode:: jinja
-+
-+        {% for user in users|sort(attribute="age,name") %}
-+            ...
-+        {% endfor %}
-+
-+    .. versionchanged:: 2.11.0
-+        The ``attribute`` parameter can be a comma separated list of
-+        attributes, e.g. ``"age,name"``.
-+
-+    .. versionchanged:: 2.6
-+       The ``attribute`` parameter was added.
-+    """
-+    key_func = make_multi_attrgetter(
-+        environment, attribute, postprocess=ignore_case if not case_sensitive else None
-+    )
-+    return sorted(value, key=key_func, reverse=reverse)
-+
-+
-+@environmentfilter
-+def do_unique(environment, value, case_sensitive=False, attribute=None):
-+    """Returns a list of unique items from the given iterable.
-+
-+    .. sourcecode:: jinja
-+
-+        {{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }}
-+            -> ['foo', 'bar', 'foobar']
-+
-+    The unique items are yielded in the same order as their first occurrence in
-+    the iterable passed to the filter.
-+
-+    :param case_sensitive: Treat upper and lower case strings as distinct.
-+    :param attribute: Filter objects with unique values for this attribute.
-+    """
-+    getter = make_attrgetter(
-+        environment, attribute, postprocess=ignore_case if not case_sensitive else None
-+    )
-+    seen = set()
-+
-+    for item in value:
-+        key = getter(item)
-+
-+        if key not in seen:
-+            seen.add(key)
-+            yield item
-+
-+
-+def _min_or_max(environment, value, func, case_sensitive, attribute):
-+    it = iter(value)
-+
-+    try:
-+        first = next(it)
-+    except StopIteration:
-+        return environment.undefined("No aggregated item, sequence was empty.")
-+
-+    key_func = make_attrgetter(
-+        environment, attribute, postprocess=ignore_case if not case_sensitive else None
-+    )
-+    return func(chain([first], it), key=key_func)
-+
-+
-+@environmentfilter
-+def do_min(environment, value, case_sensitive=False, attribute=None):
-+    """Return the smallest item from the sequence.
-+
-+    .. sourcecode:: jinja
-+
-+        {{ [1, 2, 3]|min }}
-+            -> 1
-+
-+    :param case_sensitive: Treat upper and lower case strings as distinct.
-+    :param attribute: Get the object with the min value of this attribute.
-+    """
-+    return _min_or_max(environment, value, min, case_sensitive, attribute)
-+
-+
-+@environmentfilter
-+def do_max(environment, value, case_sensitive=False, attribute=None):
-+    """Return the largest item from the sequence.
-+
-+    .. sourcecode:: jinja
-+
-+        {{ [1, 2, 3]|max }}
-+            -> 3
-+
-+    :param case_sensitive: Treat upper and lower case strings as distinct.
-+    :param attribute: Get the object with the max value of this attribute.
-+    """
-+    return _min_or_max(environment, value, max, case_sensitive, attribute)
-+
-+
-+def do_default(value, default_value=u"", boolean=False):
-+    """If the value is undefined it will return the passed default value,
-+    otherwise the value of the variable:
-+
-+    .. sourcecode:: jinja
-+
-+        {{ my_variable|default('my_variable is not defined') }}
-+
-+    This will output the value of ``my_variable`` if the variable was
-+    defined, otherwise ``'my_variable is not defined'``. If you want
-+    to use default with variables that evaluate to false you have to
-+    set the second parameter to `true`:
-+
-+    .. sourcecode:: jinja
-+
-+        {{ ''|default('the string was empty', true) }}
-+
-+    .. versionchanged:: 2.11
-+       It's now possible to configure the :class:`~jinja2.Environment` with
-+       :class:`~jinja2.ChainableUndefined` to make the `default` filter work
-+       on nested elements and attributes that may contain undefined values
-+       in the chain without getting an :exc:`~jinja2.UndefinedError`.
-+    """
-+    if isinstance(value, Undefined) or (boolean and not value):
-+        return default_value
-+    return value
-+
-+
-+@evalcontextfilter
-+def do_join(eval_ctx, value, d=u"", attribute=None):
-+    """Return a string which is the concatenation of the strings in the
-+    sequence. By default the separator between elements is an empty
-+    string; you can define it with the optional parameter:
-+
-+    .. sourcecode:: jinja
-+
-+        {{ [1, 2, 3]|join('|') }}
-+            -> 1|2|3
-+
-+        {{ [1, 2, 3]|join }}
-+            -> 123
-+
-+    It is also possible to join certain attributes of an object:
-+
-+    .. sourcecode:: jinja
-+
-+        {{ users|join(', ', attribute='username') }}
-+
-+    .. versionadded:: 2.6
-+       The `attribute` parameter was added.
-+    """
-+    if attribute is not None:
-+        value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
-+
-+    # no automatic escaping?  joining is a lot easier then
-+    if not eval_ctx.autoescape:
-+        return text_type(d).join(imap(text_type, value))
-+
-+    # if the delimiter doesn't have an html representation we check
-+    # if any of the items has.  If yes we do a coercion to Markup
-+    if not hasattr(d, "__html__"):
-+        value = list(value)
-+        do_escape = False
-+        for idx, item in enumerate(value):
-+            if hasattr(item, "__html__"):
-+                do_escape = True
-+            else:
-+                value[idx] = text_type(item)
-+        if do_escape:
-+            d = escape(d)
-+        else:
-+            d = text_type(d)
-+        return d.join(value)
-+
-+    # no html involved, do normal joining
-+    return soft_unicode(d).join(imap(soft_unicode, value))
-+
-+
-+def do_center(value, width=80):
-+    """Centers the value in a field of a given width."""
-+    return text_type(value).center(width)
-+
-+
-+@environmentfilter
-+def do_first(environment, seq):
-+    """Return the first item of a sequence."""
-+    try:
-+        return next(iter(seq))
-+    except StopIteration:
-+        return environment.undefined("No first item, sequence was empty.")
-+
-+
-+@environmentfilter
-+def do_last(environment, seq):
-+    """
-+    Return the last item of a sequence.
-+
-+    Note: Does not work with generators. You may want to explicitly
-+    convert it to a list:
-+
-+    .. sourcecode:: jinja
-+
-+        {{ data | selectattr('name', '==', 'Jinja') | list | last }}
-+    """
-+    try:
-+        return next(iter(reversed(seq)))
-+    except StopIteration:
-+        return environment.undefined("No last item, sequence was empty.")
-+
-+
-+@contextfilter
-+def do_random(context, seq):
-+    """Return a random item from the sequence."""
-+    try:
-+        return random.choice(seq)
-+    except IndexError:
-+        return context.environment.undefined("No random item, sequence was empty.")
-+
-+
-+def do_filesizeformat(value, binary=False):
-+    """Format the value like a 'human-readable' file size (i.e. 13 kB,
-+    4.1 MB, 102 Bytes, etc).  By default decimal prefixes are used (Mega,
-+    Giga, etc.); if the second parameter is set to `True`, the binary
-+    prefixes are used (Mebi, Gibi).
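-+
-+    .. sourcecode:: jinja
-+
-+        {{ 13000|filesizeformat }}
-+            -> 13.0 kB
-+
-+        {{ 13000|filesizeformat(true) }}
-+            -> 12.7 KiB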
-+    """
-+    bytes = float(value)
-+    base = binary and 1024 or 1000
-+    prefixes = [
-+        (binary and "KiB" or "kB"),
-+        (binary and "MiB" or "MB"),
-+        (binary and "GiB" or "GB"),
-+        (binary and "TiB" or "TB"),
-+        (binary and "PiB" or "PB"),
-+        (binary and "EiB" or "EB"),
-+        (binary and "ZiB" or "ZB"),
-+        (binary and "YiB" or "YB"),
-+    ]
-+    if bytes == 1:
-+        return "1 Byte"
-+    elif bytes < base:
-+        return "%d Bytes" % bytes
-+    else:
-+        for i, prefix in enumerate(prefixes):
-+            unit = base ** (i + 2)
-+            if bytes < unit:
-+                return "%.1f %s" % ((base * bytes / unit), prefix)
-+        return "%.1f %s" % ((base * bytes / unit), prefix)
-+
-+
-+def do_pprint(value, verbose=False):
-+    """Pretty print a variable. Useful for debugging.
-+
-+    From Jinja 1.2 onwards you can pass it a parameter.  If this parameter
-+    is truthy, the output will be more verbose (this requires `pretty`).
-+    """
-+    return pformat(value, verbose=verbose)
-+
-+
-+@evalcontextfilter
-+def do_urlize(
-+    eval_ctx, value, trim_url_limit=None, nofollow=False, target=None, rel=None
-+):
-+    """Converts URLs in plain text into clickable links.
-+
-+    If you pass the filter an additional integer, it will shorten the URLs
-+    to that number of characters. A third argument also exists that makes
-+    the URLs "nofollow":
-+
-+    .. sourcecode:: jinja
-+
-+        {{ mytext|urlize(40, true) }}
-+            links are shortened to 40 chars and defined with rel="nofollow"
-+
-+    If *target* is specified, the ``target`` attribute will be added to the
-+    ``<a>`` tag:
-+
-+    .. sourcecode:: jinja
-+
-+       {{ mytext|urlize(40, target='_blank') }}
-+
-+    .. versionchanged:: 2.8+
-+       The *target* parameter was added.
-+    """
-+    policies = eval_ctx.environment.policies
-+    rel = set((rel or "").split() or [])
-+    if nofollow:
-+        rel.add("nofollow")
-+    rel.update((policies["urlize.rel"] or "").split())
-+    if target is None:
-+        target = policies["urlize.target"]
-+    rel = " ".join(sorted(rel)) or None
-+    rv = urlize(value, trim_url_limit, rel=rel, target=target)
-+    if eval_ctx.autoescape:
-+        rv = Markup(rv)
-+    return rv
-+
-+
-+def do_indent(s, width=4, first=False, blank=False, indentfirst=None):
-+    """Return a copy of the string with each line indented by 4 spaces. The
-+    first line and blank lines are not indented by default.
-+
-+    :param width: Number of spaces to indent by.
-+    :param first: Don't skip indenting the first line.
-+    :param blank: Don't skip indenting empty lines.
-+
-+    .. versionchanged:: 2.10
-+        Blank lines are not indented by default.
-+
-+        Rename the ``indentfirst`` argument to ``first``.
-+    """
-+    if indentfirst is not None:
-+        warnings.warn(
-+            "The 'indentfirst' argument is renamed to 'first' and will"
-+            " be removed in version 3.0.",
-+            DeprecationWarning,
-+            stacklevel=2,
-+        )
-+        first = indentfirst
-+
-+    indention = u" " * width
-+    newline = u"\n"
-+
-+    if isinstance(s, Markup):
-+        indention = Markup(indention)
-+        newline = Markup(newline)
-+
-+    s += newline  # this quirk is necessary for splitlines method
-+
-+    if blank:
-+        rv = (newline + indention).join(s.splitlines())
-+    else:
-+        lines = s.splitlines()
-+        rv = lines.pop(0)
-+
-+        if lines:
-+            rv += newline + newline.join(
-+                indention + line if line else line for line in lines
-+            )
-+
-+    if first:
-+        rv = indention + rv
-+
-+    return rv
-+
-+
-+@environmentfilter
-+def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None):
-+    """Return a truncated copy of the string. The length is specified
-+    with the first parameter, which defaults to ``255``. If the second
-+    parameter is ``true``, the filter will cut the text at length. Otherwise
-+    it will discard the last word. If the text was in fact
-+    truncated, it will append an ellipsis sign (``"..."``). If you want a
-+    different ellipsis sign than ``"..."`` you can specify it using the
-+    third parameter. Strings that only exceed the length by the tolerance
-+    margin given in the fourth parameter will not be truncated.
-+
-+    .. sourcecode:: jinja
-+
-+        {{ "foo bar baz qux"|truncate(9) }}
-+            -> "foo..."
-+        {{ "foo bar baz qux"|truncate(9, True) }}
-+            -> "foo ba..."
-+        {{ "foo bar baz qux"|truncate(11) }}
-+            -> "foo bar baz qux"
-+        {{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
-+            -> "foo bar..."
-+
-+    The default leeway on newer Jinja versions is 5 and was 0 before but
-+    can be reconfigured globally.
-+    """
-+    if leeway is None:
-+        leeway = env.policies["truncate.leeway"]
-+    assert length >= len(end), "expected length >= %s, got %s" % (len(end), length)
-+    assert leeway >= 0, "expected leeway >= 0, got %s" % leeway
-+    if len(s) <= length + leeway:
-+        return s
-+    if killwords:
-+        return s[: length - len(end)] + end
-+    result = s[: length - len(end)].rsplit(" ", 1)[0]
-+    return result + end
-+
-+
-+@environmentfilter
-+def do_wordwrap(
-+    environment,
-+    s,
-+    width=79,
-+    break_long_words=True,
-+    wrapstring=None,
-+    break_on_hyphens=True,
-+):
-+    """Wrap a string to the given width. Existing newlines are treated
-+    as paragraphs to be wrapped separately.
-+
-+    :param s: Original text to wrap.
-+    :param width: Maximum length of wrapped lines.
-+    :param break_long_words: If a word is longer than ``width``, break
-+        it across lines.
-+    :param break_on_hyphens: If a word contains hyphens, it may be split
-+        across lines.
-+    :param wrapstring: String to join each wrapped line. Defaults to
-+        :attr:`Environment.newline_sequence`.
-+
-+    .. versionchanged:: 2.11
-+        Existing newlines are treated as paragraphs wrapped separately.
-+
-+    .. versionchanged:: 2.11
-+        Added the ``break_on_hyphens`` parameter.
-+
-+    .. versionchanged:: 2.7
-+        Added the ``wrapstring`` parameter.
-+    """
-+
-+    import textwrap
-+
-+    if not wrapstring:
-+        wrapstring = environment.newline_sequence
-+
-+    # textwrap.wrap doesn't consider existing newlines when wrapping.
-+    # If the string has a newline before width, wrap will still insert
-+    # a newline at width, resulting in a short line. Instead, split and
-+    # wrap each paragraph individually.
-+    return wrapstring.join(
-+        [
-+            wrapstring.join(
-+                textwrap.wrap(
-+                    line,
-+                    width=width,
-+                    expand_tabs=False,
-+                    replace_whitespace=False,
-+                    break_long_words=break_long_words,
-+                    break_on_hyphens=break_on_hyphens,
-+                )
-+            )
-+            for line in s.splitlines()
-+        ]
-+    )
-+
-+
-+def do_wordcount(s):
-+    """Count the words in that string."""
-+    return len(_word_re.findall(s))
-+
-+
-+def do_int(value, default=0, base=10):
-+    """Convert the value into an integer. If the
-+    conversion doesn't work it will return ``0``. You can
-+    override this default using the first parameter. You
-+    can also override the default base (10) in the second
-+    parameter, which handles input with prefixes such as
-+    0b, 0o and 0x for bases 2, 8 and 16 respectively.
-+    The base is ignored for decimal numbers and non-string values.
-+    """
-+    try:
-+        if isinstance(value, string_types):
-+            return int(value, base)
-+        return int(value)
-+    except (TypeError, ValueError):
-+        # this quirk is necessary so that "42.23"|int gives 42.
-+        try:
-+            return int(float(value))
-+        except (TypeError, ValueError):
-+            return default
-+
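-+# A few worked cases for the filter above, derivable from the code:
-+#   do_int("42.23")                 -> 42  (falls back to int(float(value)))
-+#   do_int("0x4d32", base=16)       -> 19762
-+#   do_int("no number", default=-1) -> -1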
-+
-+def do_float(value, default=0.0):
-+    """Convert the value into a floating point number. If the
-+    conversion doesn't work it will return ``0.0``. You can
-+    override this default using the first parameter.
-+    """
-+    try:
-+        return float(value)
-+    except (TypeError, ValueError):
-+        return default
-+
-+
-+def do_format(value, *args, **kwargs):
-+    """Apply the given values to a `printf-style`_ format string, like
-+    ``string % values``.
-+
-+    .. sourcecode:: jinja
-+
-+        {{ "%s, %s!"|format(greeting, name) }}
-+        Hello, World!
-+
-+    In most cases it should be more convenient and efficient to use the
-+    ``%`` operator or :meth:`str.format`.
-+
-+    .. code-block:: text
-+
-+        {{ "%s, %s!" % (greeting, name) }}
-+        {{ "{}, {}!".format(greeting, name) }}
-+
-+    .. _printf-style: https://docs.python.org/library/stdtypes.html
-+        #printf-style-string-formatting
-+    """
-+    if args and kwargs:
-+        raise FilterArgumentError(
-+            "can't handle positional and keyword arguments at the same time"
-+        )
-+    return soft_unicode(value) % (kwargs or args)
-+
-+
-+def do_trim(value, chars=None):
-+    """Strip leading and trailing characters, by default whitespace."""
-+    return soft_unicode(value).strip(chars)
-+
-+
-+def do_striptags(value):
-+    """Strip SGML/XML tags and replace adjacent whitespace by one space."""
-+    if hasattr(value, "__html__"):
-+        value = value.__html__()
-+    return Markup(text_type(value)).striptags()
-+
-+
-+def do_slice(value, slices, fill_with=None):
-+    """Slice an iterator and return a list of lists containing
-+    those items. Useful if you want to create a div containing
-+    three ul tags that represent columns:
-+
-+    .. sourcecode:: html+jinja
-+
-+        <div class="columnwrapper">
-+          {%- for column in items|slice(3) %}
-+            <ul class="column-{{ loop.index }}">
-+            {%- for item in column %}
-+              <li>{{ item }}</li>
-+            {%- endfor %}
-+            </ul>
-+          {%- endfor %}
-+        </div>
-+
-+    If you pass it a second argument it's used to fill missing
-+    values on the last iteration.
-+    """
-+    seq = list(value)
-+    length = len(seq)
-+    items_per_slice = length // slices
-+    slices_with_extra = length % slices
-+    offset = 0
-+    for slice_number in range(slices):
-+        start = offset + slice_number * items_per_slice
-+        if slice_number < slices_with_extra:
-+            offset += 1
-+        end = offset + (slice_number + 1) * items_per_slice
-+        tmp = seq[start:end]
-+        if fill_with is not None and slice_number >= slices_with_extra:
-+            tmp.append(fill_with)
-+        yield tmp
-+
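-+# Worked example: seven items into three slices. The first
-+# (length % slices) slices each receive one extra item:
-+#   list(do_slice(range(7), 3))              -> [[0, 1, 2], [3, 4], [5, 6]]
-+#   list(do_slice(range(7), 3, fill_with=0)) -> [[0, 1, 2], [3, 4, 0], [5, 6, 0]]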
-+
-+def do_batch(value, linecount, fill_with=None):
-+    """
-+    A filter that batches items. It works pretty much like `slice`,
-+    just the other way round. It returns a list of lists with the
-+    given number of items. If you provide a second parameter this
-+    is used to fill up missing items. See this example:
-+
-+    .. sourcecode:: html+jinja
-+
-+        <table>
-+        {%- for row in items|batch(3, '&nbsp;') %}
-+          <tr>
-+          {%- for column in row %}
-+            <td>{{ column }}</td>
-+          {%- endfor %}
-+          </tr>
-+        {%- endfor %}
-+        </table>
-+    """
-+    tmp = []
-+    for item in value:
-+        if len(tmp) == linecount:
-+            yield tmp
-+            tmp = []
-+        tmp.append(item)
-+    if tmp:
-+        if fill_with is not None and len(tmp) < linecount:
-+            tmp += [fill_with] * (linecount - len(tmp))
-+        yield tmp
-+
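-+# Worked example, to contrast with `slice` above: `batch` fixes the row
-+# length instead of the number of rows:
-+#   list(do_batch(range(7), 3, "x")) -> [[0, 1, 2], [3, 4, 5], [6, "x", "x"]]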
-+
-+def do_round(value, precision=0, method="common"):
-+    """Round the number to a given precision. The first
-+    parameter specifies the precision (default is ``0``), the
-+    second the rounding method:
-+
-+    - ``'common'`` rounds either up or down
-+    - ``'ceil'`` always rounds up
-+    - ``'floor'`` always rounds down
-+
-+    If you don't specify a method ``'common'`` is used.
-+
-+    .. sourcecode:: jinja
-+
-+        {{ 42.55|round }}
-+            -> 43.0
-+        {{ 42.55|round(1, 'floor') }}
-+            -> 42.5
-+
-+    Note that even if rounded to 0 precision, a float is returned.  If
-+    you need a real integer, pipe it through `int`:
-+
-+    .. sourcecode:: jinja
-+
-+        {{ 42.55|round|int }}
-+            -> 43
-+    """
-+    if method not in {"common", "ceil", "floor"}:
-+        raise FilterArgumentError("method must be common, ceil or floor")
-+    if method == "common":
-+        return round(value, precision)
-+    func = getattr(math, method)
-+    return func(value * (10 ** precision)) / (10 ** precision)
-+
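-+# How the non-"common" branch above scales by precision, e.g.:
-+#   do_round(42.55, 1, "floor") -> math.floor(42.55 * 10) / 10 -> 42.5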
-+
-+# Use a regular tuple repr here.  This is what we did in the past and we
-+# really want to hide this custom type as much as possible.  In particular
-+# we do not want to accidentally expose an auto generated repr in case
-+# people start to print this out in comments or something similar for
-+# debugging.
-+_GroupTuple = namedtuple("_GroupTuple", ["grouper", "list"])
-+_GroupTuple.__repr__ = tuple.__repr__
-+_GroupTuple.__str__ = tuple.__str__
-+
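-+# Because of the overrides above, the named tuple prints like a plain one:
-+#   repr(_GroupTuple("Berlin", [1, 2]))    -> "('Berlin', [1, 2])"
-+#   _GroupTuple("Berlin", [1, 2]).grouper  -> "Berlin"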
-+
-+@environmentfilter
-+def do_groupby(environment, value, attribute):
-+    """Group a sequence of objects by an attribute using Python's
-+    :func:`itertools.groupby`. The attribute can use dot notation for
-+    nested access, like ``"address.city"``. Unlike Python's ``groupby``,
-+    the values are sorted first so only one group is returned for each
-+    unique value.
-+
-+    For example, a list of ``User`` objects with a ``city`` attribute
-+    can be rendered in groups. In this example, ``grouper`` refers to
-+    the ``city`` value of the group.
-+
-+    .. sourcecode:: html+jinja
-+
-+        <ul>{% for city, items in users|groupby("city") %}
-+          <li>{{ city }}
-+            <ul>{% for user in items %}
-+              <li>{{ user.name }}
-+            {% endfor %}</ul>
-+          </li>
-+        {% endfor %}</ul>
-+
-+    ``groupby`` yields namedtuples of ``(grouper, list)``, which
-+    can be used instead of the tuple unpacking above. ``grouper`` is the
-+    value of the attribute, and ``list`` is the items with that value.
-+
-+    .. sourcecode:: html+jinja
-+
-+        <ul>{% for group in users|groupby("city") %}
-+          <li>{{ group.grouper }}: {{ group.list|join(", ") }}
-+        {% endfor %}</ul>
-+
-+    .. versionchanged:: 2.6
-+        The attribute supports dot notation for nested access.
-+    """
-+    expr = make_attrgetter(environment, attribute)
-+    return [
-+        _GroupTuple(key, list(values))
-+        for key, values in groupby(sorted(value, key=expr), expr)
-+    ]
-+
-+
-+@environmentfilter
-+def do_sum(environment, iterable, attribute=None, start=0):
-+    """Returns the sum of a sequence of numbers plus the value of parameter
-+    'start' (which defaults to 0).  When the sequence is empty it returns
-+    start.
-+
-+    It is also possible to sum up only certain attributes:
-+
-+    .. sourcecode:: jinja
-+
-+        Total: {{ items|sum(attribute='price') }}
-+
-+    .. versionchanged:: 2.6
-+       The `attribute` parameter was added to allow summing up over
-+       attributes.  Also the `start` parameter was moved to the right.
-+    """
-+    if attribute is not None:
-+        iterable = imap(make_attrgetter(environment, attribute), iterable)
-+    return sum(iterable, start)
-+
-+
-+def do_list(value):
-+    """Convert the value into a list.  If it was a string the returned list
-+    will be a list of characters.
-+    """
-+    return list(value)
-+
-+
-+def do_mark_safe(value):
-+    """Mark the value as safe which means that in an environment with automatic
-+    escaping enabled this variable will not be escaped.
-+    """
-+    return Markup(value)
-+
-+
-+def do_mark_unsafe(value):
-+    """Mark a value as unsafe.  This is the reverse operation for :func:`safe`."""
-+    return text_type(value)
-+
-+
-+def do_reverse(value):
-+    """Reverse the object or return an iterator that iterates over it the other
-+    way round.
-+    """
-+    if isinstance(value, string_types):
-+        return value[::-1]
-+    try:
-+        return reversed(value)
-+    except TypeError:
-+        try:
-+            rv = list(value)
-+            rv.reverse()
-+            return rv
-+        except TypeError:
-+            raise FilterArgumentError("argument must be iterable")
-+
-+
-+@environmentfilter
-+def do_attr(environment, obj, name):
-+    """Get an attribute of an object.  ``foo|attr("bar")`` works like
-+    ``foo.bar`` just that always an attribute is returned and items are not
-+    looked up.
-+
-+    See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
-+    """
-+    try:
-+        name = str(name)
-+    except UnicodeError:
-+        pass
-+    else:
-+        try:
-+            value = getattr(obj, name)
-+        except AttributeError:
-+            pass
-+        else:
-+            if environment.sandboxed and not environment.is_safe_attribute(
-+                obj, name, value
-+            ):
-+                return environment.unsafe_undefined(obj, name)
-+            return value
-+    return environment.undefined(obj=obj, name=name)
-+
-+
-+@contextfilter
-+def do_map(*args, **kwargs):
-+    """Applies a filter on a sequence of objects or looks up an attribute.
-+    This is useful when dealing with lists of objects but you are really
-+    only interested in a certain value of it.
-+
-+    The basic usage is mapping on an attribute.  Imagine you have a list
-+    of users but you are only interested in a list of usernames:
-+
-+    .. sourcecode:: jinja
-+
-+        Users on this page: {{ users|map(attribute='username')|join(', ') }}
-+
-+    You can specify a ``default`` value to use if an object in the list
-+    does not have the given attribute.
-+
-+    .. sourcecode:: jinja
-+
-+        {{ users|map(attribute="username", default="Anonymous")|join(", ") }}
-+
-+    Alternatively you can let it invoke a filter by passing the name of the
-+    filter and the arguments afterwards.  A good example would be applying a
-+    text conversion filter on a sequence:
-+
-+    .. sourcecode:: jinja
-+
-+        Users on this page: {{ titles|map('lower')|join(', ') }}
-+
-+    Similar to a generator comprehension such as:
-+
-+    .. code-block:: python
-+
-+        (u.username for u in users)
-+        (u.username or "Anonymous" for u in users)
-+        (do_lower(x) for x in titles)
-+
-+    .. versionchanged:: 2.11.0
-+        Added the ``default`` parameter.
-+
-+    .. versionadded:: 2.7
-+    """
-+    seq, func = prepare_map(args, kwargs)
-+    if seq:
-+        for item in seq:
-+            yield func(item)
-+
-+
-+@contextfilter
-+def do_select(*args, **kwargs):
-+    """Filters a sequence of objects by applying a test to each object,
-+    and only selecting the objects with the test succeeding.
-+
-+    If no test is specified, each object will be evaluated as a boolean.
-+
-+    Example usage:
-+
-+    .. sourcecode:: jinja
-+
-+        {{ numbers|select("odd") }}
-+        {{ numbers|select("odd") }}
-+        {{ numbers|select("divisibleby", 3) }}
-+        {{ numbers|select("lessthan", 42) }}
-+        {{ strings|select("equalto", "mystring") }}
-+
-+    Similar to a generator comprehension such as:
-+
-+    .. code-block:: python
-+
-+        (n for n in numbers if test_odd(n))
-+        (n for n in numbers if test_divisibleby(n, 3))
-+
-+    .. versionadded:: 2.7
-+    """
-+    return select_or_reject(args, kwargs, lambda x: x, False)
-+
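-+# Note: like `map`, `select` is lazy and yields a generator, so pipe the
-+# result through `list` when you want to display it:
-+#   {{ [1, 2, 3, 4, 5]|select("odd")|list }}
-+#   -> [1, 3, 5]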
-+
-+@contextfilter
-+def do_reject(*args, **kwargs):
-+    """Filters a sequence of objects by applying a test to each object,
-+    and rejecting the objects with the test succeeding.
-+
-+    If no test is specified, each object will be evaluated as a boolean.
-+
-+    Example usage:
-+
-+    .. sourcecode:: jinja
-+
-+        {{ numbers|reject("odd") }}
-+
-+    Similar to a generator comprehension such as:
-+
-+    .. code-block:: python
-+
-+        (n for n in numbers if not test_odd(n))
-+
-+    .. versionadded:: 2.7
-+    """
-+    return select_or_reject(args, kwargs, lambda x: not x, False)
-+
-+
-+@contextfilter
-+def do_selectattr(*args, **kwargs):
-+    """Filters a sequence of objects by applying a test to the specified
-+    attribute of each object, and only selecting the objects with the
-+    test succeeding.
-+
-+    If no test is specified, the attribute's value will be evaluated as
-+    a boolean.
-+
-+    Example usage:
-+
-+    .. sourcecode:: jinja
-+
-+        {{ users|selectattr("is_active") }}
-+        {{ users|selectattr("email", "none") }}
-+
-+    Similar to a generator comprehension such as:
-+
-+    .. code-block:: python
-+
-+        (user for user in users if user.is_active)
-+        (user for user in users if test_none(user.email))
-+
-+    .. versionadded:: 2.7
-+    """
-+    return select_or_reject(args, kwargs, lambda x: x, True)
-+
-+
-+@contextfilter
-+def do_rejectattr(*args, **kwargs):
-+    """Filters a sequence of objects by applying a test to the specified
-+    attribute of each object, and rejecting the objects with the test
-+    succeeding.
-+
-+    If no test is specified, the attribute's value will be evaluated as
-+    a boolean.
-+
-+    .. sourcecode:: jinja
-+
-+        {{ users|rejectattr("is_active") }}
-+        {{ users|rejectattr("email", "none") }}
-+
-+    Similar to a generator comprehension such as:
-+
-+    .. code-block:: python
-+
-+        (user for user in users if not user.is_active)
-+        (user for user in users if not test_none(user.email))
-+
-+    .. versionadded:: 2.7
-+    """
-+    return select_or_reject(args, kwargs, lambda x: not x, True)
-+
-+
-+@evalcontextfilter
-+def do_tojson(eval_ctx, value, indent=None):
-+    """Dumps a structure to JSON so that it's safe to use in ``<script>``
-+    tags.  It accepts the same arguments and returns a JSON string.  Note that
-+    this is available in templates through the ``|tojson`` filter which will
-+    also mark the result as safe.  Due to how this function escapes certain
-+    characters this is safe even if used outside of ``<script>`` tags.
-+
-+    The following characters are escaped in strings:
-+
-+    -   ``<``
-+    -   ``>``
-+    -   ``&``
-+    -   ``'``
-+
-+    This makes it safe to embed such strings in any place in HTML with the
-+    notable exception of double quoted attributes.  In that case single
-+    quote your attributes or HTML escape it in addition.
-+
-+    The indent parameter can be used to enable pretty printing.  Set it to
-+    the number of spaces that the structures should be indented with.
-+
-+    Note that this filter is for use in HTML contexts only.
-+
-+    .. versionadded:: 2.9
-+    """
-+    policies = eval_ctx.environment.policies
-+    dumper = policies["json.dumps_function"]
-+    options = policies["json.dumps_kwargs"]
-+    if indent is not None:
-+        options = dict(options)
-+        options["indent"] = indent
-+    return htmlsafe_json_dumps(value, dumper=dumper, **options)
-+
-+
-+def prepare_map(args, kwargs):
-+    context = args[0]
-+    seq = args[1]
-+    default = None
-+
-+    if len(args) == 2 and "attribute" in kwargs:
-+        attribute = kwargs.pop("attribute")
-+        default = kwargs.pop("default", None)
-+        if kwargs:
-+            raise FilterArgumentError(
-+                "Unexpected keyword argument %r" % next(iter(kwargs))
-+            )
-+        func = make_attrgetter(context.environment, attribute, default=default)
-+    else:
-+        try:
-+            name = args[2]
-+            args = args[3:]
-+        except LookupError:
-+            raise FilterArgumentError("map requires a filter argument")
-+
-+        def func(item):
-+            return context.environment.call_filter(
-+                name, item, args, kwargs, context=context
-+            )
-+
-+    return seq, func
-+
-+
-+def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
-+    context = args[0]
-+    seq = args[1]
-+    if lookup_attr:
-+        try:
-+            attr = args[2]
-+        except LookupError:
-+            raise FilterArgumentError("Missing parameter for attribute name")
-+        transfunc = make_attrgetter(context.environment, attr)
-+        off = 1
-+    else:
-+        off = 0
-+
-+        def transfunc(x):
-+            return x
-+
-+    try:
-+        name = args[2 + off]
-+        args = args[3 + off :]
-+
-+        def func(item):
-+            return context.environment.call_test(name, item, args, kwargs)
-+
-+    except LookupError:
-+        func = bool
-+
-+    return seq, lambda item: modfunc(func(transfunc(item)))
-+
-+
-+def select_or_reject(args, kwargs, modfunc, lookup_attr):
-+    seq, func = prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
-+    if seq:
-+        for item in seq:
-+            if func(item):
-+                yield item
-+
-+
-+FILTERS = {
-+    "abs": abs,
-+    "attr": do_attr,
-+    "batch": do_batch,
-+    "capitalize": do_capitalize,
-+    "center": do_center,
-+    "count": len,
-+    "d": do_default,
-+    "default": do_default,
-+    "dictsort": do_dictsort,
-+    "e": escape,
-+    "escape": escape,
-+    "filesizeformat": do_filesizeformat,
-+    "first": do_first,
-+    "float": do_float,
-+    "forceescape": do_forceescape,
-+    "format": do_format,
-+    "groupby": do_groupby,
-+    "indent": do_indent,
-+    "int": do_int,
-+    "join": do_join,
-+    "last": do_last,
-+    "length": len,
-+    "list": do_list,
-+    "lower": do_lower,
-+    "map": do_map,
-+    "min": do_min,
-+    "max": do_max,
-+    "pprint": do_pprint,
-+    "random": do_random,
-+    "reject": do_reject,
-+    "rejectattr": do_rejectattr,
-+    "replace": do_replace,
-+    "reverse": do_reverse,
-+    "round": do_round,
-+    "safe": do_mark_safe,
-+    "select": do_select,
-+    "selectattr": do_selectattr,
-+    "slice": do_slice,
-+    "sort": do_sort,
-+    "string": soft_unicode,
-+    "striptags": do_striptags,
-+    "sum": do_sum,
-+    "title": do_title,
-+    "trim": do_trim,
-+    "truncate": do_truncate,
-+    "unique": do_unique,
-+    "upper": do_upper,
-+    "urlencode": do_urlencode,
-+    "urlize": do_urlize,
-+    "wordcount": do_wordcount,
-+    "wordwrap": do_wordwrap,
-+    "xmlattr": do_xmlattr,
-+    "tojson": do_tojson,
-+}
-diff --git a/third_party/python/Jinja2/jinja2/idtracking.py b/third_party/python/Jinja2/jinja2/idtracking.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/idtracking.py
-@@ -0,0 +1,290 @@
-+from ._compat import iteritems
-+from .visitor import NodeVisitor
-+
-+VAR_LOAD_PARAMETER = "param"
-+VAR_LOAD_RESOLVE = "resolve"
-+VAR_LOAD_ALIAS = "alias"
-+VAR_LOAD_UNDEFINED = "undefined"
-+
-+
-+def find_symbols(nodes, parent_symbols=None):
-+    sym = Symbols(parent=parent_symbols)
-+    visitor = FrameSymbolVisitor(sym)
-+    for node in nodes:
-+        visitor.visit(node)
-+    return sym
-+
-+
-+def symbols_for_node(node, parent_symbols=None):
-+    sym = Symbols(parent=parent_symbols)
-+    sym.analyze_node(node)
-+    return sym
-+
-+
-+class Symbols(object):
-+    def __init__(self, parent=None, level=None):
-+        if level is None:
-+            if parent is None:
-+                level = 0
-+            else:
-+                level = parent.level + 1
-+        self.level = level
-+        self.parent = parent
-+        self.refs = {}
-+        self.loads = {}
-+        self.stores = set()
-+
-+    def analyze_node(self, node, **kwargs):
-+        visitor = RootVisitor(self)
-+        visitor.visit(node, **kwargs)
-+
-+    def _define_ref(self, name, load=None):
-+        ident = "l_%d_%s" % (self.level, name)
-+        self.refs[name] = ident
-+        if load is not None:
-+            self.loads[ident] = load
-+        return ident
-+
-+    def find_load(self, target):
-+        if target in self.loads:
-+            return self.loads[target]
-+        if self.parent is not None:
-+            return self.parent.find_load(target)
-+
-+    def find_ref(self, name):
-+        if name in self.refs:
-+            return self.refs[name]
-+        if self.parent is not None:
-+            return self.parent.find_ref(name)
-+
-+    def ref(self, name):
-+        rv = self.find_ref(name)
-+        if rv is None:
-+            raise AssertionError(
-+                "Tried to resolve a name to a reference that "
-+                "was unknown to the frame (%r)" % name
-+            )
-+        return rv
-+
-+    def copy(self):
-+        rv = object.__new__(self.__class__)
-+        rv.__dict__.update(self.__dict__)
-+        rv.refs = self.refs.copy()
-+        rv.loads = self.loads.copy()
-+        rv.stores = self.stores.copy()
-+        return rv
-+
-+    def store(self, name):
-+        self.stores.add(name)
-+
-+        # If we have not seen the name referenced yet, we need to figure
-+        # out what to set it to.
-+        if name not in self.refs:
-+            # If there is a parent scope we check if the name has a
-+            # reference there.  If it does it means we might have to alias
-+            # to a variable there.
-+            if self.parent is not None:
-+                outer_ref = self.parent.find_ref(name)
-+                if outer_ref is not None:
-+                    self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
-+                    return
-+
-+            # Otherwise we can just set it to undefined.
-+            self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
-+
-+    def declare_parameter(self, name):
-+        self.stores.add(name)
-+        return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
-+
-+    def load(self, name):
-+        target = self.find_ref(name)
-+        if target is None:
-+            self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
-+
-+    def branch_update(self, branch_symbols):
-+        stores = {}
-+        for branch in branch_symbols:
-+            for target in branch.stores:
-+                if target in self.stores:
-+                    continue
-+                stores[target] = stores.get(target, 0) + 1
-+
-+        for sym in branch_symbols:
-+            self.refs.update(sym.refs)
-+            self.loads.update(sym.loads)
-+            self.stores.update(sym.stores)
-+
-+        for name, branch_count in iteritems(stores):
-+            if branch_count == len(branch_symbols):
-+                continue
-+            target = self.find_ref(name)
-+            assert target is not None, "should not happen"
-+
-+            if self.parent is not None:
-+                outer_target = self.parent.find_ref(name)
-+                if outer_target is not None:
-+                    self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
-+                    continue
-+            self.loads[target] = (VAR_LOAD_RESOLVE, name)
-+
-+    def dump_stores(self):
-+        rv = {}
-+        node = self
-+        while node is not None:
-+            for name in node.stores:
-+                if name not in rv:
-+                    rv[name] = self.find_ref(name)
-+            node = node.parent
-+        return rv
-+
-+    def dump_param_targets(self):
-+        rv = set()
-+        node = self
-+        while node is not None:
-+            # Walk each scope's own loads; inspecting only the innermost
-+            # scope here would miss parameters declared in outer frames.
-+            for target, (instr, _) in iteritems(node.loads):
-+                if instr == VAR_LOAD_PARAMETER:
-+                    rv.add(target)
-+            node = node.parent
-+        return rv
-+
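-+# Small sketch of the naming scheme used above: identifiers carry their
-+# scope level, so shadowing across frames stays unambiguous:
-+#   sym = Symbols()
-+#   sym.declare_parameter("item")        -> "l_0_item"
-+#   Symbols(parent=sym).find_ref("item") -> "l_0_item"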
-+
-+class RootVisitor(NodeVisitor):
-+    def __init__(self, symbols):
-+        self.sym_visitor = FrameSymbolVisitor(symbols)
-+
-+    def _simple_visit(self, node, **kwargs):
-+        for child in node.iter_child_nodes():
-+            self.sym_visitor.visit(child)
-+
-+    visit_Template = (
-+        visit_Block
-+    ) = (
-+        visit_Macro
-+    ) = (
-+        visit_FilterBlock
-+    ) = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit
-+
-+    def visit_AssignBlock(self, node, **kwargs):
-+        for child in node.body:
-+            self.sym_visitor.visit(child)
-+
-+    def visit_CallBlock(self, node, **kwargs):
-+        for child in node.iter_child_nodes(exclude=("call",)):
-+            self.sym_visitor.visit(child)
-+
-+    def visit_OverlayScope(self, node, **kwargs):
-+        for child in node.body:
-+            self.sym_visitor.visit(child)
-+
-+    def visit_For(self, node, for_branch="body", **kwargs):
-+        if for_branch == "body":
-+            self.sym_visitor.visit(node.target, store_as_param=True)
-+            branch = node.body
-+        elif for_branch == "else":
-+            branch = node.else_
-+        elif for_branch == "test":
-+            self.sym_visitor.visit(node.target, store_as_param=True)
-+            if node.test is not None:
-+                self.sym_visitor.visit(node.test)
-+            return
-+        else:
-+            raise RuntimeError("Unknown for branch")
-+        for item in branch or ():
-+            self.sym_visitor.visit(item)
-+
-+    def visit_With(self, node, **kwargs):
-+        for target in node.targets:
-+            self.sym_visitor.visit(target)
-+        for child in node.body:
-+            self.sym_visitor.visit(child)
-+
-+    def generic_visit(self, node, *args, **kwargs):
-+        raise NotImplementedError(
-+            "Cannot find symbols for %r" % node.__class__.__name__
-+        )
-+
-+
-+class FrameSymbolVisitor(NodeVisitor):
-+    """A visitor for `Frame.inspect`."""
-+
-+    def __init__(self, symbols):
-+        self.symbols = symbols
-+
-+    def visit_Name(self, node, store_as_param=False, **kwargs):
-+        """All assignments to names go through this function."""
-+        if store_as_param or node.ctx == "param":
-+            self.symbols.declare_parameter(node.name)
-+        elif node.ctx == "store":
-+            self.symbols.store(node.name)
-+        elif node.ctx == "load":
-+            self.symbols.load(node.name)
-+
-+    def visit_NSRef(self, node, **kwargs):
-+        self.symbols.load(node.name)
-+
-+    def visit_If(self, node, **kwargs):
-+        self.visit(node.test, **kwargs)
-+
-+        original_symbols = self.symbols
-+
-+        def inner_visit(nodes):
-+            self.symbols = rv = original_symbols.copy()
-+            for subnode in nodes:
-+                self.visit(subnode, **kwargs)
-+            self.symbols = original_symbols
-+            return rv
-+
-+        body_symbols = inner_visit(node.body)
-+        elif_symbols = inner_visit(node.elif_)
-+        else_symbols = inner_visit(node.else_ or ())
-+
-+        self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
-+
-+    def visit_Macro(self, node, **kwargs):
-+        self.symbols.store(node.name)
-+
-+    def visit_Import(self, node, **kwargs):
-+        self.generic_visit(node, **kwargs)
-+        self.symbols.store(node.target)
-+
-+    def visit_FromImport(self, node, **kwargs):
-+        self.generic_visit(node, **kwargs)
-+        for name in node.names:
-+            if isinstance(name, tuple):
-+                self.symbols.store(name[1])
-+            else:
-+                self.symbols.store(name)
-+
-+    def visit_Assign(self, node, **kwargs):
-+        """Visit assignments in the correct order."""
-+        self.visit(node.node, **kwargs)
-+        self.visit(node.target, **kwargs)
-+
-+    def visit_For(self, node, **kwargs):
-+        """Visiting stops at for blocks.  However the block sequence
-+        is visited as part of the outer scope.
-+        """
-+        self.visit(node.iter, **kwargs)
-+
-+    def visit_CallBlock(self, node, **kwargs):
-+        self.visit(node.call, **kwargs)
-+
-+    def visit_FilterBlock(self, node, **kwargs):
-+        self.visit(node.filter, **kwargs)
-+
-+    def visit_With(self, node, **kwargs):
-+        for target in node.values:
-+            self.visit(target)
-+
-+    def visit_AssignBlock(self, node, **kwargs):
-+        """Stop visiting at block assigns."""
-+        self.visit(node.target, **kwargs)
-+
-+    def visit_Scope(self, node, **kwargs):
-+        """Stop visiting at scopes."""
-+
-+    def visit_Block(self, node, **kwargs):
-+        """Stop visiting at blocks."""
-+
-+    def visit_OverlayScope(self, node, **kwargs):
-+        """Do not visit into overlay scopes."""
-diff --git a/third_party/python/Jinja2/jinja2/lexer.py b/third_party/python/Jinja2/jinja2/lexer.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/lexer.py
-@@ -0,0 +1,841 @@
-+# -*- coding: utf-8 -*-
-+"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
-+is used to do some preprocessing. It filters out invalid operators like
-+the bitshift operators we don't allow in templates. It separates
-+template code and python code in expressions.
-+"""
-+import re
-+from ast import literal_eval
-+from collections import deque
-+from operator import itemgetter
-+
-+from ._compat import implements_iterator
-+from ._compat import intern
-+from ._compat import iteritems
-+from ._compat import text_type
-+from .exceptions import TemplateSyntaxError
-+from .utils import LRUCache
-+
-+# cache for the lexers. Exists in order to be able to have multiple
-+# environments with the same lexer
-+_lexer_cache = LRUCache(50)
-+
-+# static regular expressions
-+whitespace_re = re.compile(r"\s+", re.U)
-+newline_re = re.compile(r"(\r\n|\r|\n)")
-+string_re = re.compile(
-+    r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
-+)
-+integer_re = re.compile(r"(\d+_)*\d+")
-+float_re = re.compile(
-+    r"""
-+    (?<!\.)  # doesn't start with a .
-+    (\d+_)*\d+  # digits, possibly _ separated
-+    (
-+        (\.(\d+_)*\d+)?  # optional fractional part
-+        e[+\-]?(\d+_)*\d+  # exponent part
-+    |
-+        \.(\d+_)*\d+  # required fractional part
-+    )
-+    """,
-+    re.IGNORECASE | re.VERBOSE,
-+)
-+
-+try:
-+    # check if this Python supports Unicode identifiers
-+    compile("föö", "<unknown>", "eval")
-+except SyntaxError:
-+    # Python 2, no Unicode support, use ASCII identifiers
-+    name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
-+    check_ident = False
-+else:
-+    # Unicode support, import generated re pattern and set flag to use
-+    # str.isidentifier to validate during lexing.
-+    from ._identifier import pattern as name_re
-+
-+    check_ident = True
-+
-+# intern the tokens and keep references to them
-+TOKEN_ADD = intern("add")
-+TOKEN_ASSIGN = intern("assign")
-+TOKEN_COLON = intern("colon")
-+TOKEN_COMMA = intern("comma")
-+TOKEN_DIV = intern("div")
-+TOKEN_DOT = intern("dot")
-+TOKEN_EQ = intern("eq")
-+TOKEN_FLOORDIV = intern("floordiv")
-+TOKEN_GT = intern("gt")
-+TOKEN_GTEQ = intern("gteq")
-+TOKEN_LBRACE = intern("lbrace")
-+TOKEN_LBRACKET = intern("lbracket")
-+TOKEN_LPAREN = intern("lparen")
-+TOKEN_LT = intern("lt")
-+TOKEN_LTEQ = intern("lteq")
-+TOKEN_MOD = intern("mod")
-+TOKEN_MUL = intern("mul")
-+TOKEN_NE = intern("ne")
-+TOKEN_PIPE = intern("pipe")
-+TOKEN_POW = intern("pow")
-+TOKEN_RBRACE = intern("rbrace")
-+TOKEN_RBRACKET = intern("rbracket")
-+TOKEN_RPAREN = intern("rparen")
-+TOKEN_SEMICOLON = intern("semicolon")
-+TOKEN_SUB = intern("sub")
-+TOKEN_TILDE = intern("tilde")
-+TOKEN_WHITESPACE = intern("whitespace")
-+TOKEN_FLOAT = intern("float")
-+TOKEN_INTEGER = intern("integer")
-+TOKEN_NAME = intern("name")
-+TOKEN_STRING = intern("string")
-+TOKEN_OPERATOR = intern("operator")
-+TOKEN_BLOCK_BEGIN = intern("block_begin")
-+TOKEN_BLOCK_END = intern("block_end")
-+TOKEN_VARIABLE_BEGIN = intern("variable_begin")
-+TOKEN_VARIABLE_END = intern("variable_end")
-+TOKEN_RAW_BEGIN = intern("raw_begin")
-+TOKEN_RAW_END = intern("raw_end")
-+TOKEN_COMMENT_BEGIN = intern("comment_begin")
-+TOKEN_COMMENT_END = intern("comment_end")
-+TOKEN_COMMENT = intern("comment")
-+TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin")
-+TOKEN_LINESTATEMENT_END = intern("linestatement_end")
-+TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin")
-+TOKEN_LINECOMMENT_END = intern("linecomment_end")
-+TOKEN_LINECOMMENT = intern("linecomment")
-+TOKEN_DATA = intern("data")
-+TOKEN_INITIAL = intern("initial")
-+TOKEN_EOF = intern("eof")
-+
-+# bind operators to token types
-+operators = {
-+    "+": TOKEN_ADD,
-+    "-": TOKEN_SUB,
-+    "/": TOKEN_DIV,
-+    "//": TOKEN_FLOORDIV,
-+    "*": TOKEN_MUL,
-+    "%": TOKEN_MOD,
-+    "**": TOKEN_POW,
-+    "~": TOKEN_TILDE,
-+    "[": TOKEN_LBRACKET,
-+    "]": TOKEN_RBRACKET,
-+    "(": TOKEN_LPAREN,
-+    ")": TOKEN_RPAREN,
-+    "{": TOKEN_LBRACE,
-+    "}": TOKEN_RBRACE,
-+    "==": TOKEN_EQ,
-+    "!=": TOKEN_NE,
-+    ">": TOKEN_GT,
-+    ">=": TOKEN_GTEQ,
-+    "<": TOKEN_LT,
-+    "<=": TOKEN_LTEQ,
-+    "=": TOKEN_ASSIGN,
-+    ".": TOKEN_DOT,
-+    ":": TOKEN_COLON,
-+    "|": TOKEN_PIPE,
-+    ",": TOKEN_COMMA,
-+    ";": TOKEN_SEMICOLON,
-+}
-+
-+reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
-+assert len(operators) == len(reverse_operators), "operators dropped"
-+operator_re = re.compile(
-+    "(%s)" % "|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))
-+)
-+
-+ignored_tokens = frozenset(
-+    [
-+        TOKEN_COMMENT_BEGIN,
-+        TOKEN_COMMENT,
-+        TOKEN_COMMENT_END,
-+        TOKEN_WHITESPACE,
-+        TOKEN_LINECOMMENT_BEGIN,
-+        TOKEN_LINECOMMENT_END,
-+        TOKEN_LINECOMMENT,
-+    ]
-+)
-+ignore_if_empty = frozenset(
-+    [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
-+)
-+
-+
-+def _describe_token_type(token_type):
-+    if token_type in reverse_operators:
-+        return reverse_operators[token_type]
-+    return {
-+        TOKEN_COMMENT_BEGIN: "begin of comment",
-+        TOKEN_COMMENT_END: "end of comment",
-+        TOKEN_COMMENT: "comment",
-+        TOKEN_LINECOMMENT: "comment",
-+        TOKEN_BLOCK_BEGIN: "begin of statement block",
-+        TOKEN_BLOCK_END: "end of statement block",
-+        TOKEN_VARIABLE_BEGIN: "begin of print statement",
-+        TOKEN_VARIABLE_END: "end of print statement",
-+        TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
-+        TOKEN_LINESTATEMENT_END: "end of line statement",
-+        TOKEN_DATA: "template data / text",
-+        TOKEN_EOF: "end of template",
-+    }.get(token_type, token_type)
-+
-+
-+def describe_token(token):
-+    """Returns a description of the token."""
-+    if token.type == TOKEN_NAME:
-+        return token.value
-+    return _describe_token_type(token.type)
-+
-+
-+def describe_token_expr(expr):
-+    """Like `describe_token` but for token expressions."""
-+    if ":" in expr:
-+        type, value = expr.split(":", 1)
-+        if type == TOKEN_NAME:
-+            return value
-+    else:
-+        type = expr
-+    return _describe_token_type(type)
-+
-+
-+def count_newlines(value):
-+    """Count the number of newline characters in the string.  This is
-+    useful for extensions that filter a stream.
-+    """
-+    return len(newline_re.findall(value))
-+
-+
-+def compile_rules(environment):
-+    """Compiles all the rules from the environment into a list of rules."""
-+    e = re.escape
-+    rules = [
-+        (
-+            len(environment.comment_start_string),
-+            TOKEN_COMMENT_BEGIN,
-+            e(environment.comment_start_string),
-+        ),
-+        (
-+            len(environment.block_start_string),
-+            TOKEN_BLOCK_BEGIN,
-+            e(environment.block_start_string),
-+        ),
-+        (
-+            len(environment.variable_start_string),
-+            TOKEN_VARIABLE_BEGIN,
-+            e(environment.variable_start_string),
-+        ),
-+    ]
-+
-+    if environment.line_statement_prefix is not None:
-+        rules.append(
-+            (
-+                len(environment.line_statement_prefix),
-+                TOKEN_LINESTATEMENT_BEGIN,
-+                r"^[ \t\v]*" + e(environment.line_statement_prefix),
-+            )
-+        )
-+    if environment.line_comment_prefix is not None:
-+        rules.append(
-+            (
-+                len(environment.line_comment_prefix),
-+                TOKEN_LINECOMMENT_BEGIN,
-+                r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
-+            )
-+        )
-+
-+    return [x[1:] for x in sorted(rules, reverse=True)]
-+
-+
-+class Failure(object):
-+    """Class that raises a `TemplateSyntaxError` if called.
-+    Used by the `Lexer` to specify known errors.
-+    """
-+
-+    def __init__(self, message, cls=TemplateSyntaxError):
-+        self.message = message
-+        self.error_class = cls
-+
-+    def __call__(self, lineno, filename):
-+        raise self.error_class(self.message, lineno, filename)
-+
-+
-+class Token(tuple):
-+    """Token class."""
-+
-+    __slots__ = ()
-+    lineno, type, value = (property(itemgetter(x)) for x in range(3))
-+
-+    def __new__(cls, lineno, type, value):
-+        return tuple.__new__(cls, (lineno, intern(str(type)), value))
-+
-+    def __str__(self):
-+        if self.type in reverse_operators:
-+            return reverse_operators[self.type]
-+        elif self.type == "name":
-+            return self.value
-+        return self.type
-+
-+    def test(self, expr):
-+        """Test a token against a token expression.  This can either be a
-+        token type or ``'token_type:token_value'``.  This can only test
-+        against string values and types.
-+        """
-+        # here we do a regular string equality check as test_any is usually
-+        # passed an iterable of not interned strings.
-+        if self.type == expr:
-+            return True
-+        elif ":" in expr:
-+            return expr.split(":", 1) == [self.type, self.value]
-+        return False
-+
-+    def test_any(self, *iterable):
-+        """Test against multiple token expressions."""
-+        for expr in iterable:
-+            if self.test(expr):
-+                return True
-+        return False
-+
-+    def __repr__(self):
-+        return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value)
-+
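-+# Example of the token-expression test described above:
-+#   Token(1, TOKEN_NAME, "endfor").test("name:endfor") -> True
-+#   Token(1, TOKEN_NAME, "endfor").test("block_end")   -> False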
-+
-+@implements_iterator
-+class TokenStreamIterator(object):
-+    """The iterator for tokenstreams.  Iterate over the stream
-+    until the eof token is reached.
-+    """
-+
-+    def __init__(self, stream):
-+        self.stream = stream
-+
-+    def __iter__(self):
-+        return self
-+
-+    def __next__(self):
-+        token = self.stream.current
-+        if token.type is TOKEN_EOF:
-+            self.stream.close()
-+            raise StopIteration()
-+        next(self.stream)
-+        return token
-+
-+
-+@implements_iterator
-+class TokenStream(object):
-+    """A token stream is an iterable that yields :class:`Token`\\s.  The
-+    parser however does not iterate over it but calls :meth:`next` to go
-+    one token ahead.  The current active token is stored as :attr:`current`.
-+    """
-+
-+    def __init__(self, generator, name, filename):
-+        self._iter = iter(generator)
-+        self._pushed = deque()
-+        self.name = name
-+        self.filename = filename
-+        self.closed = False
-+        self.current = Token(1, TOKEN_INITIAL, "")
-+        next(self)
-+
-+    def __iter__(self):
-+        return TokenStreamIterator(self)
-+
-+    def __bool__(self):
-+        return bool(self._pushed) or self.current.type is not TOKEN_EOF
-+
-+    __nonzero__ = __bool__  # py2
-+
-+    @property
-+    def eos(self):
-+        """Are we at the end of the stream?"""
-+        return not self
-+
-+    def push(self, token):
-+        """Push a token back to the stream."""
-+        self._pushed.append(token)
-+
-+    def look(self):
-+        """Look at the next token."""
-+        old_token = next(self)
-+        result = self.current
-+        self.push(result)
-+        self.current = old_token
-+        return result
-+
-+    def skip(self, n=1):
-+        """Got n tokens ahead."""
-+        for _ in range(n):
-+            next(self)
-+
-+    def next_if(self, expr):
-+        """Perform the token test and return the token if it matched.
-+        Otherwise the return value is `None`.
-+        """
-+        if self.current.test(expr):
-+            return next(self)
-+
-+    def skip_if(self, expr):
-+        """Like :meth:`next_if` but only returns `True` or `False`."""
-+        return self.next_if(expr) is not None
-+
-+    def __next__(self):
-+        """Go one token ahead and return the old one.
-+
-+        Use the built-in :func:`next` instead of calling this directly.
-+        """
-+        rv = self.current
-+        if self._pushed:
-+            self.current = self._pushed.popleft()
-+        elif self.current.type is not TOKEN_EOF:
-+            try:
-+                self.current = next(self._iter)
-+            except StopIteration:
-+                self.close()
-+        return rv
-+
-+    def close(self):
-+        """Close the stream."""
-+        self.current = Token(self.current.lineno, TOKEN_EOF, "")
-+        self._iter = None
-+        self.closed = True
-+
-+    def expect(self, expr):
-+        """Expect a given token type and return it.  This accepts the same
-+        argument as :meth:`jinja2.lexer.Token.test`.
-+        """
-+        if not self.current.test(expr):
-+            expr = describe_token_expr(expr)
-+            if self.current.type is TOKEN_EOF:
-+                raise TemplateSyntaxError(
-+                    "unexpected end of template, expected %r." % expr,
-+                    self.current.lineno,
-+                    self.name,
-+                    self.filename,
-+                )
-+            raise TemplateSyntaxError(
-+                "expected token %r, got %r" % (expr, describe_token(self.current)),
-+                self.current.lineno,
-+                self.name,
-+                self.filename,
-+            )
-+        try:
-+            return self.current
-+        finally:
-+            next(self)
-+
-+
-+def get_lexer(environment):
-+    """Return a lexer which is probably cached."""
-+    key = (
-+        environment.block_start_string,
-+        environment.block_end_string,
-+        environment.variable_start_string,
-+        environment.variable_end_string,
-+        environment.comment_start_string,
-+        environment.comment_end_string,
-+        environment.line_statement_prefix,
-+        environment.line_comment_prefix,
-+        environment.trim_blocks,
-+        environment.lstrip_blocks,
-+        environment.newline_sequence,
-+        environment.keep_trailing_newline,
-+    )
-+    lexer = _lexer_cache.get(key)
-+    if lexer is None:
-+        lexer = Lexer(environment)
-+        _lexer_cache[key] = lexer
-+    return lexer
-+
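-+# Consequence of the cache above: two environments configured with the
-+# same delimiters, prefixes and newline options share one Lexer instance
-+# (assuming the LRU cache has not evicted the entry):
-+#   get_lexer(env_a) is get_lexer(env_b) -> True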
-+
-+class OptionalLStrip(tuple):
-+    """A special tuple for marking a point in the state that can have
-+    lstrip applied.
-+    """
-+
-+    __slots__ = ()
-+
-+    # Even though it looks like a no-op, creating instances fails
-+    # without this.
-+    def __new__(cls, *members, **kwargs):
-+        return super(OptionalLStrip, cls).__new__(cls, members)
-+
-+
-+class Lexer(object):
-+    """Class that implements a lexer for a given environment. Automatically
-+    created by the environment class, usually you don't have to do that.
-+
-+    Note that the lexer is not automatically bound to an environment.
-+    Multiple environments can share the same lexer.
-+    """
-+
-+    def __init__(self, environment):
-+        # shortcuts
-+        e = re.escape
-+
-+        def c(x):
-+            return re.compile(x, re.M | re.S)
-+
-+        # lexing rules for tags
-+        tag_rules = [
-+            (whitespace_re, TOKEN_WHITESPACE, None),
-+            (float_re, TOKEN_FLOAT, None),
-+            (integer_re, TOKEN_INTEGER, None),
-+            (name_re, TOKEN_NAME, None),
-+            (string_re, TOKEN_STRING, None),
-+            (operator_re, TOKEN_OPERATOR, None),
-+        ]
-+
-+        # assemble the root lexing rule. because "|" is ungreedy
-+        # we have to sort by length so that the lexer continues working
-+        # as expected when we have parsing rules like <% for block and
-+        # <%= for variables. (if someone wants asp like syntax)
-+        # variables are just part of the rules if variable processing
-+        # is required.
-+        root_tag_rules = compile_rules(environment)
-+
-+        # block suffix if trimming is enabled
-+        block_suffix_re = environment.trim_blocks and "\\n?" or ""
-+
-+        # If lstrip is enabled, it should not be applied if there is any
-+        # non-whitespace between the newline and block.
-+        self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None
-+
-+        self.newline_sequence = environment.newline_sequence
-+        self.keep_trailing_newline = environment.keep_trailing_newline
-+
-+        # global lexing rules
-+        self.rules = {
-+            "root": [
-+                # directives
-+                (
-+                    c(
-+                        "(.*?)(?:%s)"
-+                        % "|".join(
-+                            [
-+                                r"(?P<raw_begin>%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))"
-+                                % (
-+                                    e(environment.block_start_string),
-+                                    e(environment.block_end_string),
-+                                    e(environment.block_end_string),
-+                                )
-+                            ]
-+                            + [
-+                                r"(?P<%s>%s(\-|\+|))" % (n, r)
-+                                for n, r in root_tag_rules
-+                            ]
-+                        )
-+                    ),
-+                    OptionalLStrip(TOKEN_DATA, "#bygroup"),
-+                    "#bygroup",
-+                ),
-+                # data
-+                (c(".+"), TOKEN_DATA, None),
-+            ],
-+            # comments
-+            TOKEN_COMMENT_BEGIN: [
-+                (
-+                    c(
-+                        r"(.*?)((?:\-%s\s*|%s)%s)"
-+                        % (
-+                            e(environment.comment_end_string),
-+                            e(environment.comment_end_string),
-+                            block_suffix_re,
-+                        )
-+                    ),
-+                    (TOKEN_COMMENT, TOKEN_COMMENT_END),
-+                    "#pop",
-+                ),
-+                (c("(.)"), (Failure("Missing end of comment tag"),), None),
-+            ],
-+            # blocks
-+            TOKEN_BLOCK_BEGIN: [
-+                (
-+                    c(
-+                        r"(?:\-%s\s*|%s)%s"
-+                        % (
-+                            e(environment.block_end_string),
-+                            e(environment.block_end_string),
-+                            block_suffix_re,
-+                        )
-+                    ),
-+                    TOKEN_BLOCK_END,
-+                    "#pop",
-+                ),
-+            ]
-+            + tag_rules,
-+            # variables
-+            TOKEN_VARIABLE_BEGIN: [
-+                (
-+                    c(
-+                        r"\-%s\s*|%s"
-+                        % (
-+                            e(environment.variable_end_string),
-+                            e(environment.variable_end_string),
-+                        )
-+                    ),
-+                    TOKEN_VARIABLE_END,
-+                    "#pop",
-+                )
-+            ]
-+            + tag_rules,
-+            # raw block
-+            TOKEN_RAW_BEGIN: [
-+                (
-+                    c(
-+                        r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))"
-+                        % (
-+                            e(environment.block_start_string),
-+                            e(environment.block_end_string),
-+                            e(environment.block_end_string),
-+                            block_suffix_re,
-+                        )
-+                    ),
-+                    OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),
-+                    "#pop",
-+                ),
-+                (c("(.)"), (Failure("Missing end of raw directive"),), None),
-+            ],
-+            # line statements
-+            TOKEN_LINESTATEMENT_BEGIN: [
-+                (c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
-+            ]
-+            + tag_rules,
-+            # line comments
-+            TOKEN_LINECOMMENT_BEGIN: [
-+                (
-+                    c(r"(.*?)()(?=\n|$)"),
-+                    (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
-+                    "#pop",
-+                )
-+            ],
-+        }
-+
-+    def _normalize_newlines(self, value):
-+        """Called for strings and template data to normalize it to unicode."""
-+        return newline_re.sub(self.newline_sequence, value)
-+
-+    def tokenize(self, source, name=None, filename=None, state=None):
-+        """Calls tokeniter + tokenize and wraps it in a token stream."""
-+        stream = self.tokeniter(source, name, filename, state)
-+        return TokenStream(self.wrap(stream, name, filename), name, filename)
-+
-+    def wrap(self, stream, name=None, filename=None):
-+        """This is called with the stream as returned by `tokenize` and wraps
-+        every token in a :class:`Token` and converts the value.
-+        """
-+        for lineno, token, value in stream:
-+            if token in ignored_tokens:
-+                continue
-+            elif token == TOKEN_LINESTATEMENT_BEGIN:
-+                token = TOKEN_BLOCK_BEGIN
-+            elif token == TOKEN_LINESTATEMENT_END:
-+                token = TOKEN_BLOCK_END
-+            # we are not interested in those tokens in the parser
-+            elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
-+                continue
-+            elif token == TOKEN_DATA:
-+                value = self._normalize_newlines(value)
-+            elif token == "keyword":
-+                token = value
-+            elif token == TOKEN_NAME:
-+                value = str(value)
-+                if check_ident and not value.isidentifier():
-+                    raise TemplateSyntaxError(
-+                        "Invalid character in identifier", lineno, name, filename
-+                    )
-+            elif token == TOKEN_STRING:
-+                # try to unescape string
-+                try:
-+                    value = (
-+                        self._normalize_newlines(value[1:-1])
-+                        .encode("ascii", "backslashreplace")
-+                        .decode("unicode-escape")
-+                    )
-+                except Exception as e:
-+                    msg = str(e).split(":")[-1].strip()
-+                    raise TemplateSyntaxError(msg, lineno, name, filename)
-+            elif token == TOKEN_INTEGER:
-+                value = int(value.replace("_", ""))
-+            elif token == TOKEN_FLOAT:
-+                # remove all "_" first to support more Python versions
-+                value = literal_eval(value.replace("_", ""))
-+            elif token == TOKEN_OPERATOR:
-+                token = operators[value]
-+            yield Token(lineno, token, value)
-+
-+    def tokeniter(self, source, name, filename=None, state=None):
-+        """This method tokenizes the text and returns the tokens in a
-+        generator.  Use this method if you just want to tokenize a template.
-+        """
-+        source = text_type(source)
-+        lines = source.splitlines()
-+        if self.keep_trailing_newline and source:
-+            for newline in ("\r\n", "\r", "\n"):
-+                if source.endswith(newline):
-+                    lines.append("")
-+                    break
-+        source = "\n".join(lines)
-+        pos = 0
-+        lineno = 1
-+        stack = ["root"]
-+        if state is not None and state != "root":
-+            assert state in ("variable", "block"), "invalid state"
-+            stack.append(state + "_begin")
-+        statetokens = self.rules[stack[-1]]
-+        source_length = len(source)
-+        balancing_stack = []
-+        lstrip_unless_re = self.lstrip_unless_re
-+
-+        while 1:
-+            # tokenizer loop
-+            for regex, tokens, new_state in statetokens:
-+                m = regex.match(source, pos)
-+                # if no match we try again with the next rule
-+                if m is None:
-+                    continue
-+
-+                # we only match blocks and variables if braces / parentheses
-+                # are balanced. continue parsing with the lower rule which
-+                # is the operator rule. do this only if the end tags look
-+                # like operators
-+                if balancing_stack and tokens in (
-+                    TOKEN_VARIABLE_END,
-+                    TOKEN_BLOCK_END,
-+                    TOKEN_LINESTATEMENT_END,
-+                ):
-+                    continue
-+
-+                # tuples support more options
-+                if isinstance(tokens, tuple):
-+                    groups = m.groups()
-+
-+                    if isinstance(tokens, OptionalLStrip):
-+                        # Rule supports lstrip. Match will look like
-+                        # text, block type, whitespace control, type, control, ...
-+                        text = groups[0]
-+
-+                        # Skipping the text and first type, every other group is the
-+                        # whitespace control for each type. One of the groups will be
-+                        # -, +, or empty string instead of None.
-+                        strip_sign = next(g for g in groups[2::2] if g is not None)
-+
-+                        if strip_sign == "-":
-+                            # Strip all whitespace between the text and the tag.
-+                            groups = (text.rstrip(),) + groups[1:]
-+                        elif (
-+                            # Not marked for preserving whitespace.
-+                            strip_sign != "+"
-+                            # lstrip is enabled.
-+                            and lstrip_unless_re is not None
-+                            # Not a variable expression.
-+                            and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
-+                        ):
-+                            # The start of text between the last newline and the tag.
-+                            l_pos = text.rfind("\n") + 1
-+
-+                            # If there's only whitespace between the newline and the
-+                            # tag, strip it.
-+                            if not lstrip_unless_re.search(text, l_pos):
-+                                groups = (text[:l_pos],) + groups[1:]
-+
-+                    for idx, token in enumerate(tokens):
-+                        # failure group
-+                        if token.__class__ is Failure:
-+                            raise token(lineno, filename)
-+                        # bygroup is a bit more complex, in that case we
-+                        # yield for the current token the first named
-+                        # group that matched
-+                        elif token == "#bygroup":
-+                            for key, value in iteritems(m.groupdict()):
-+                                if value is not None:
-+                                    yield lineno, key, value
-+                                    lineno += value.count("\n")
-+                                    break
-+                            else:
-+                                raise RuntimeError(
-+                                    "%r wanted to resolve "
-+                                    "the token dynamically"
-+                                    " but no group matched" % regex
-+                                )
-+                        # normal group
-+                        else:
-+                            data = groups[idx]
-+                            if data or token not in ignore_if_empty:
-+                                yield lineno, token, data
-+                            lineno += data.count("\n")
-+
-+                # plain string tokens are just yielded as-is.
-+                else:
-+                    data = m.group()
-+                    # update brace/parentheses balance
-+                    if tokens == TOKEN_OPERATOR:
-+                        if data == "{":
-+                            balancing_stack.append("}")
-+                        elif data == "(":
-+                            balancing_stack.append(")")
-+                        elif data == "[":
-+                            balancing_stack.append("]")
-+                        elif data in ("}", ")", "]"):
-+                            if not balancing_stack:
-+                                raise TemplateSyntaxError(
-+                                    "unexpected '%s'" % data, lineno, name, filename
-+                                )
-+                            expected_op = balancing_stack.pop()
-+                            if expected_op != data:
-+                                raise TemplateSyntaxError(
-+                                    "unexpected '%s', "
-+                                    "expected '%s'" % (data, expected_op),
-+                                    lineno,
-+                                    name,
-+                                    filename,
-+                                )
-+                    # yield items
-+                    if data or tokens not in ignore_if_empty:
-+                        yield lineno, tokens, data
-+                    lineno += data.count("\n")
-+
-+                # fetch new position into new variable so that we can check
-+                # if there is an internal parsing error which would result
-+                # in an infinite loop
-+                pos2 = m.end()
-+
-+                # handle state changes
-+                if new_state is not None:
-+                    # remove the uppermost state
-+                    if new_state == "#pop":
-+                        stack.pop()
-+                    # resolve the new state by group checking
-+                    elif new_state == "#bygroup":
-+                        for key, value in iteritems(m.groupdict()):
-+                            if value is not None:
-+                                stack.append(key)
-+                                break
-+                        else:
-+                            raise RuntimeError(
-+                                "%r wanted to resolve the "
-+                                "new state dynamically but"
-+                                " no group matched" % regex
-+                            )
-+                    # direct state name given
-+                    else:
-+                        stack.append(new_state)
-+                    statetokens = self.rules[stack[-1]]
-+                # we are still at the same position and there was no stack
-+                # change.  this means a loop without a break condition;
-+                # avoid that and raise an error
-+                elif pos2 == pos:
-+                    raise RuntimeError(
-+                        "%r yielded empty string without stack change" % regex
-+                    )
-+                # advance to the new position and start again
-+                pos = pos2
-+                break
-+            # if the loop terminated without a break we haven't found a single
-+            # match; either we are at the end of the file or we have a problem
-+            else:
-+                # end of text
-+                if pos >= source_length:
-+                    return
-+                # something went wrong
-+                raise TemplateSyntaxError(
-+                    "unexpected char %r at %d" % (source[pos], pos),
-+                    lineno,
-+                    name,
-+                    filename,
-+                )
-diff --git a/third_party/python/Jinja2/jinja2/loaders.py b/third_party/python/Jinja2/jinja2/loaders.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/loaders.py
-@@ -0,0 +1,572 @@
-+# -*- coding: utf-8 -*-
-+"""API and implementations for loading templates from different data
-+sources.
-+"""
-+import os
-+import pkgutil
-+import sys
-+import weakref
-+from hashlib import sha1
-+from importlib import import_module
-+from os import path
-+from types import ModuleType
-+
-+from ._compat import abc
-+from ._compat import fspath
-+from ._compat import iteritems
-+from ._compat import string_types
-+from .exceptions import TemplateNotFound
-+from .utils import internalcode
-+from .utils import open_if_exists
-+
-+
-+def split_template_path(template):
-+    """Split a path into segments and perform a sanity check.  If it detects
-+    '..' in the path it will raise a `TemplateNotFound` error.
-+    """
-+    pieces = []
-+    for piece in template.split("/"):
-+        if (
-+            path.sep in piece
-+            or (path.altsep and path.altsep in piece)
-+            or piece == path.pardir
-+        ):
-+            raise TemplateNotFound(template)
-+        elif piece and piece != ".":
-+            pieces.append(piece)
-+    return pieces
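-+
-+
-+# Illustrative examples (not part of the library):
-+#   split_template_path("users/index.html") -> ["users", "index.html"]
-+#   split_template_path("users/../secret")  -> raises TemplateNotFound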
-+
-+
-+class BaseLoader(object):
-+    """Baseclass for all loaders.  Subclass this and override `get_source` to
-+    implement a custom loading mechanism.  The environment provides a
-+    `get_template` method that calls the loader's `load` method to get the
-+    :class:`Template` object.
-+
-+    A very basic example for a loader that looks up templates on the file
-+    system could look like this::
-+
-+        from jinja2 import BaseLoader, TemplateNotFound
-+        from os.path import join, exists, getmtime
-+
-+        class MyLoader(BaseLoader):
-+
-+            def __init__(self, path):
-+                self.path = path
-+
-+            def get_source(self, environment, template):
-+                path = join(self.path, template)
-+                if not exists(path):
-+                    raise TemplateNotFound(template)
-+                mtime = getmtime(path)
-+                with open(path, 'rb') as f:
-+                    source = f.read().decode('utf-8')
-+                return source, path, lambda: mtime == getmtime(path)
-+    """
-+
-+    #: if set to `False` it indicates that the loader cannot provide access
-+    #: to the source of templates.
-+    #:
-+    #: .. versionadded:: 2.4
-+    has_source_access = True
-+
-+    def get_source(self, environment, template):
-+        """Get the template source, filename and reload helper for a template.
-+        It's passed the environment and template name and has to return a
-+        tuple in the form ``(source, filename, uptodate)`` or raise a
-+        `TemplateNotFound` error if it can't locate the template.
-+
-+        The source part of the returned tuple must be the source of the
-+        template as a unicode string or an ASCII bytestring.  The filename should
-+        be the name of the file on the filesystem if it was loaded from there,
-+        otherwise `None`.  The filename is used by Python for the tracebacks
-+        if no loader extension is used.
-+
-+        The last item in the tuple is the `uptodate` function.  If auto
-+        reloading is enabled it's always called to check if the template
-+        changed.  No arguments are passed so the function must store the
-+        old state somewhere (for example in a closure).  If it returns `False`
-+        the template will be reloaded.
-+        """
-+        if not self.has_source_access:
-+            raise RuntimeError(
-+                "%s cannot provide access to the source" % self.__class__.__name__
-+            )
-+        raise TemplateNotFound(template)
-+
-+    def list_templates(self):
-+        """Iterates over all templates.  If the loader does not support that
-+        it should raise a :exc:`TypeError` which is the default behavior.
-+        """
-+        raise TypeError("this loader cannot iterate over all templates")
-+
-+    @internalcode
-+    def load(self, environment, name, globals=None):
-+        """Loads a template.  This method looks up the template in the cache
-+        or loads one by calling :meth:`get_source`.  Subclasses should not
-+        override this method as loaders working on collections of other
-+        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
-+        will not call this method but `get_source` directly.
-+        """
-+        code = None
-+        if globals is None:
-+            globals = {}
-+
-+        # first we try to get the source for this template together
-+        # with the filename and the uptodate function.
-+        source, filename, uptodate = self.get_source(environment, name)
-+
-+        # try to load the code from the bytecode cache if there is a
-+        # bytecode cache configured.
-+        bcc = environment.bytecode_cache
-+        if bcc is not None:
-+            bucket = bcc.get_bucket(environment, name, filename, source)
-+            code = bucket.code
-+
-+        # if we don't have code so far (not cached, no longer up to
-+        # date, etc.) we compile the template
-+        if code is None:
-+            code = environment.compile(source, name, filename)
-+
-+        # if the bytecode cache is available and the bucket doesn't
-+        # have code so far, we give the bucket the new code and put
-+        # it back to the bytecode cache.
-+        if bcc is not None and bucket.code is None:
-+            bucket.code = code
-+            bcc.set_bucket(bucket)
-+
-+        return environment.template_class.from_code(
-+            environment, code, globals, uptodate
-+        )
-+
-+
-+class FileSystemLoader(BaseLoader):
-+    """Loads templates from the file system.  This loader can find templates
-+    in folders on the file system and is the preferred way to load them.
-+
-+    The loader takes the path to the templates as a string, or, if multiple
-+    locations are wanted, a list of them, which is then searched in the
-+    given order::
-+
-+    >>> loader = FileSystemLoader('/path/to/templates')
-+    >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
-+
-+    By default the template encoding is ``'utf-8'``, which can be changed
-+    by setting the `encoding` parameter to something else.
-+
-+    To follow symbolic links, set the *followlinks* parameter to ``True``::
-+
-+    >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
-+
-+    .. versionchanged:: 2.8
-+       The ``followlinks`` parameter was added.
-+    """
-+
-+    def __init__(self, searchpath, encoding="utf-8", followlinks=False):
-+        if not isinstance(searchpath, abc.Iterable) or isinstance(
-+            searchpath, string_types
-+        ):
-+            searchpath = [searchpath]
-+
-+        # In Python 3.5, os.path.join doesn't support Path. This can be
-+        # simplified to list(searchpath) when Python 3.5 is dropped.
-+        self.searchpath = [fspath(p) for p in searchpath]
-+
-+        self.encoding = encoding
-+        self.followlinks = followlinks
-+
-+    def get_source(self, environment, template):
-+        pieces = split_template_path(template)
-+        for searchpath in self.searchpath:
-+            filename = path.join(searchpath, *pieces)
-+            f = open_if_exists(filename)
-+            if f is None:
-+                continue
-+            try:
-+                contents = f.read().decode(self.encoding)
-+            finally:
-+                f.close()
-+
-+            mtime = path.getmtime(filename)
-+
-+            def uptodate():
-+                try:
-+                    return path.getmtime(filename) == mtime
-+                except OSError:
-+                    return False
-+
-+            return contents, filename, uptodate
-+        raise TemplateNotFound(template)
-+
-+    def list_templates(self):
-+        found = set()
-+        for searchpath in self.searchpath:
-+            walk_dir = os.walk(searchpath, followlinks=self.followlinks)
-+            for dirpath, _, filenames in walk_dir:
-+                for filename in filenames:
-+                    template = (
-+                        os.path.join(dirpath, filename)[len(searchpath) :]
-+                        .strip(os.path.sep)
-+                        .replace(os.path.sep, "/")
-+                    )
-+                    if template[:2] == "./":
-+                        template = template[2:]
-+                    if template not in found:
-+                        found.add(template)
-+        return sorted(found)
-+
-+
-+class PackageLoader(BaseLoader):
-+    """Load templates from a directory in a Python package.
-+
-+    :param package_name: Import name of the package that contains the
-+        template directory.
-+    :param package_path: Directory within the imported package that
-+        contains the templates.
-+    :param encoding: Encoding of template files.
-+
-+    The following example looks up templates in the ``pages`` directory
-+    within the ``project.ui`` package.
-+
-+    .. code-block:: python
-+
-+        loader = PackageLoader("project.ui", "pages")
-+
-+    Only packages installed as directories (standard pip behavior) or
-+    zip/egg files (less common) are supported. The Python API for
-+    introspecting data in packages is too limited to support other
-+    installation methods the way this loader requires.
-+
-+    There is limited support for :pep:`420` namespace packages. The
-+    template directory is assumed to only be in one namespace
-+    contributor. Zip files contributing to a namespace are not
-+    supported.
-+
-+    .. versionchanged:: 2.11.0
-+        No longer uses ``setuptools`` as a dependency.
-+
-+    .. versionchanged:: 2.11.0
-+        Limited PEP 420 namespace package support.
-+    """
-+
-+    def __init__(self, package_name, package_path="templates", encoding="utf-8"):
-+        if package_path == os.path.curdir:
-+            package_path = ""
-+        elif package_path[:2] == os.path.curdir + os.path.sep:
-+            package_path = package_path[2:]
-+
-+        package_path = os.path.normpath(package_path).rstrip(os.path.sep)
-+        self.package_path = package_path
-+        self.package_name = package_name
-+        self.encoding = encoding
-+
-+        # Make sure the package exists. This also makes namespace
-+        # packages work, otherwise get_loader returns None.
-+        import_module(package_name)
-+        self._loader = loader = pkgutil.get_loader(package_name)
-+
-+        # Zip loader's archive attribute points at the zip.
-+        self._archive = getattr(loader, "archive", None)
-+        self._template_root = None
-+
-+        if hasattr(loader, "get_filename"):
-+            # A standard directory package, or a zip package.
-+            self._template_root = os.path.join(
-+                os.path.dirname(loader.get_filename(package_name)), package_path
-+            )
-+        elif hasattr(loader, "_path"):
-+            # A namespace package, limited support. Find the first
-+            # contributor with the template directory.
-+            for root in loader._path:
-+                root = os.path.join(root, package_path)
-+
-+                if os.path.isdir(root):
-+                    self._template_root = root
-+                    break
-+
-+        if self._template_root is None:
-+            raise ValueError(
-+                "The %r package was not installed in a way that"
-+                " PackageLoader understands." % package_name
-+            )
-+
-+    def get_source(self, environment, template):
-+        p = os.path.join(self._template_root, *split_template_path(template))
-+
-+        if self._archive is None:
-+            # Package is a directory.
-+            if not os.path.isfile(p):
-+                raise TemplateNotFound(template)
-+
-+            with open(p, "rb") as f:
-+                source = f.read()
-+
-+            mtime = os.path.getmtime(p)
-+
-+            def up_to_date():
-+                return os.path.isfile(p) and os.path.getmtime(p) == mtime
-+
-+        else:
-+            # Package is a zip file.
-+            try:
-+                source = self._loader.get_data(p)
-+            except OSError:
-+                raise TemplateNotFound(template)
-+
-+            # Could use the zip's mtime for all template mtimes, but
-+            # would need to safely reload the module if it's out of
-+            # date, so just report it as always current.
-+            up_to_date = None
-+
-+        return source.decode(self.encoding), p, up_to_date
-+
-+    def list_templates(self):
-+        results = []
-+
-+        if self._archive is None:
-+            # Package is a directory.
-+            offset = len(self._template_root)
-+
-+            for dirpath, _, filenames in os.walk(self._template_root):
-+                dirpath = dirpath[offset:].lstrip(os.path.sep)
-+                results.extend(
-+                    os.path.join(dirpath, name).replace(os.path.sep, "/")
-+                    for name in filenames
-+                )
-+        else:
-+            if not hasattr(self._loader, "_files"):
-+                raise TypeError(
-+                    "This zip import does not have the required"
-+                    " metadata to list templates."
-+                )
-+
-+            # Package is a zip file.
-+            prefix = (
-+                self._template_root[len(self._archive) :].lstrip(os.path.sep)
-+                + os.path.sep
-+            )
-+            offset = len(prefix)
-+
-+            for name in self._loader._files.keys():
-+                # Find names under the templates directory that aren't directories.
-+                if name.startswith(prefix) and name[-1] != os.path.sep:
-+                    results.append(name[offset:].replace(os.path.sep, "/"))
-+
-+        results.sort()
-+        return results
-+
-+
-+class DictLoader(BaseLoader):
-+    """Loads a template from a python dict.  It's passed a dict of unicode
-+    strings bound to template names.  This loader is useful for unittesting:
-+
-+    >>> loader = DictLoader({'index.html': 'source here'})
-+
-+    Because auto reloading is rarely useful, it is disabled by default.
-+    """
-+
-+    def __init__(self, mapping):
-+        self.mapping = mapping
-+
-+    def get_source(self, environment, template):
-+        if template in self.mapping:
-+            source = self.mapping[template]
-+            return source, None, lambda: source == self.mapping.get(template)
-+        raise TemplateNotFound(template)
-+
-+    def list_templates(self):
-+        return sorted(self.mapping)
-+
-+
-+class FunctionLoader(BaseLoader):
-+    """A loader that is passed a function which does the loading.  The
-+    function receives the name of the template and has to return either
-+    a unicode string with the template source, a tuple in the form ``(source,
-+    filename, uptodatefunc)`` or `None` if the template does not exist.
-+
-+    >>> def load_template(name):
-+    ...     if name == 'index.html':
-+    ...         return '...'
-+    ...
-+    >>> loader = FunctionLoader(load_template)
-+
-+    The `uptodatefunc` is a function that is called if autoreload is enabled
-+    and has to return `True` if the template is still up to date.  For more
-+    details have a look at :meth:`BaseLoader.get_source` which has the same
-+    return value.
-+    """
-+
-+    def __init__(self, load_func):
-+        self.load_func = load_func
-+
-+    def get_source(self, environment, template):
-+        rv = self.load_func(template)
-+        if rv is None:
-+            raise TemplateNotFound(template)
-+        elif isinstance(rv, string_types):
-+            return rv, None, None
-+        return rv
-+
-+
-+class PrefixLoader(BaseLoader):
-+    """A loader that is passed a dict of loaders where each loader is bound
-+    to a prefix.  The prefix is delimited from the template by a slash per
-+    default, which can be changed by setting the `delimiter` argument to
-+    something else::
-+
-+        loader = PrefixLoader({
-+            'app1':     PackageLoader('mypackage.app1'),
-+            'app2':     PackageLoader('mypackage.app2')
-+        })
-+
-+    Loading ``'app1/index.html'`` returns the file from the app1 package,
-+    and loading ``'app2/index.html'`` the file from the second.
-+    """
-+
-+    def __init__(self, mapping, delimiter="/"):
-+        self.mapping = mapping
-+        self.delimiter = delimiter
-+
-+    def get_loader(self, template):
-+        try:
-+            prefix, name = template.split(self.delimiter, 1)
-+            loader = self.mapping[prefix]
-+        except (ValueError, KeyError):
-+            raise TemplateNotFound(template)
-+        return loader, name
-+
-+    def get_source(self, environment, template):
-+        loader, name = self.get_loader(template)
-+        try:
-+            return loader.get_source(environment, name)
-+        except TemplateNotFound:
-+            # re-raise the exception with the correct filename here.
-+            # (the one that includes the prefix)
-+            raise TemplateNotFound(template)
-+
-+    @internalcode
-+    def load(self, environment, name, globals=None):
-+        loader, local_name = self.get_loader(name)
-+        try:
-+            return loader.load(environment, local_name, globals)
-+        except TemplateNotFound:
-+            # re-raise the exception with the correct filename here.
-+            # (the one that includes the prefix)
-+            raise TemplateNotFound(name)
-+
-+    def list_templates(self):
-+        result = []
-+        for prefix, loader in iteritems(self.mapping):
-+            for template in loader.list_templates():
-+                result.append(prefix + self.delimiter + template)
-+        return result
-+
-+
-+class ChoiceLoader(BaseLoader):
-+    """This loader works like the `PrefixLoader` just that no prefix is
-+    specified.  If a template could not be found by one loader the next one
-+    is tried.
-+
-+    >>> loader = ChoiceLoader([
-+    ...     FileSystemLoader('/path/to/user/templates'),
-+    ...     FileSystemLoader('/path/to/system/templates')
-+    ... ])
-+
-+    This is useful if you want to allow users to override builtin templates
-+    from a different location.
-+    """
-+
-+    def __init__(self, loaders):
-+        self.loaders = loaders
-+
-+    def get_source(self, environment, template):
-+        for loader in self.loaders:
-+            try:
-+                return loader.get_source(environment, template)
-+            except TemplateNotFound:
-+                pass
-+        raise TemplateNotFound(template)
-+
-+    @internalcode
-+    def load(self, environment, name, globals=None):
-+        for loader in self.loaders:
-+            try:
-+                return loader.load(environment, name, globals)
-+            except TemplateNotFound:
-+                pass
-+        raise TemplateNotFound(name)
-+
-+    def list_templates(self):
-+        found = set()
-+        for loader in self.loaders:
-+            found.update(loader.list_templates())
-+        return sorted(found)
-+
-+
-+class _TemplateModule(ModuleType):
-+    """Like a normal module but with support for weak references"""
-+
-+
-+class ModuleLoader(BaseLoader):
-+    """This loader loads templates from precompiled templates.
-+
-+    Example usage:
-+
-+    >>> loader = ChoiceLoader([
-+    ...     ModuleLoader('/path/to/compiled/templates'),
-+    ...     FileSystemLoader('/path/to/templates')
-+    ... ])
-+
-+    Templates can be precompiled with :meth:`Environment.compile_templates`.
-+    """
-+
-+    has_source_access = False
-+
-+    def __init__(self, path):
-+        package_name = "_jinja2_module_templates_%x" % id(self)
-+
-+        # create a fake module that looks for the templates in the
-+        # path given.
-+        mod = _TemplateModule(package_name)
-+
-+        if not isinstance(path, abc.Iterable) or isinstance(path, string_types):
-+            path = [path]
-+
-+        mod.__path__ = [fspath(p) for p in path]
-+
-+        sys.modules[package_name] = weakref.proxy(
-+            mod, lambda x: sys.modules.pop(package_name, None)
-+        )
-+
-+        # the only strong reference, the sys.modules entry is weak
-+        # so that the garbage collector can remove it once the
-+        # loader that created it goes out of business.
-+        self.module = mod
-+        self.package_name = package_name
-+
-+    @staticmethod
-+    def get_template_key(name):
-+        return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()
-+
-+    @staticmethod
-+    def get_module_filename(name):
-+        return ModuleLoader.get_template_key(name) + ".py"
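-+
-+    # Illustrative note (not part of the library): a template name such as
-+    # "index.html" maps to "tmpl_" + sha1(b"index.html").hexdigest() + ".py",
-+    # a stable, filesystem-safe file name for the precompiled template.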
-+
-+    @internalcode
-+    def load(self, environment, name, globals=None):
-+        key = self.get_template_key(name)
-+        module = "%s.%s" % (self.package_name, key)
-+        mod = getattr(self.module, module, None)
-+        if mod is None:
-+            try:
-+                mod = __import__(module, None, None, ["root"])
-+            except ImportError:
-+                raise TemplateNotFound(name)
-+
-+            # remove the entry from sys.modules, we only want the attribute
-+            # on the module object we have stored on the loader.
-+            sys.modules.pop(module, None)
-+
-+        return environment.template_class.from_module_dict(
-+            environment, mod.__dict__, globals
-+        )
-diff --git a/third_party/python/Jinja2/jinja2/meta.py b/third_party/python/Jinja2/jinja2/meta.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/meta.py
-@@ -0,0 +1,101 @@
-+# -*- coding: utf-8 -*-
-+"""Functions that expose information about templates that might be
-+interesting for introspection.
-+"""
-+from . import nodes
-+from ._compat import iteritems
-+from ._compat import string_types
-+from .compiler import CodeGenerator
-+
-+
-+class TrackingCodeGenerator(CodeGenerator):
-+    """We abuse the code generator for introspection."""
-+
-+    def __init__(self, environment):
-+        CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>")
-+        self.undeclared_identifiers = set()
-+
-+    def write(self, x):
-+        """Don't write."""
-+
-+    def enter_frame(self, frame):
-+        """Remember all undeclared identifiers."""
-+        CodeGenerator.enter_frame(self, frame)
-+        for _, (action, param) in iteritems(frame.symbols.loads):
-+            if action == "resolve" and param not in self.environment.globals:
-+                self.undeclared_identifiers.add(param)
-+
-+
-+def find_undeclared_variables(ast):
-+    """Returns a set of all variables in the AST that will be looked up from
-+    the context at runtime.  Because at compile time it's not known which
-+    variables will be used depending on the path the execution takes at
-+    runtime, all variables are returned.
-+
-+    >>> from jinja2 import Environment, meta
-+    >>> env = Environment()
-+    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
-+    >>> meta.find_undeclared_variables(ast) == set(['bar'])
-+    True
-+
-+    .. admonition:: Implementation
-+
-+       Internally the code generator is used for finding undeclared variables.
-+       This is good to know because the code generator might raise a
-+       :exc:`TemplateAssertionError` during compilation and as a matter of
-+       fact this function can currently raise that exception as well.
-+    """
-+    codegen = TrackingCodeGenerator(ast.environment)
-+    codegen.visit(ast)
-+    return codegen.undeclared_identifiers
-+
-+
-+def find_referenced_templates(ast):
-+    """Finds all the referenced templates from the AST.  This will return an
-+    iterator over all the hardcoded template extensions, inclusions and
-+    imports.  If dynamic inheritance or inclusion is used, `None` will be
-+    yielded.
-+
-+    >>> from jinja2 import Environment, meta
-+    >>> env = Environment()
-+    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
-+    >>> list(meta.find_referenced_templates(ast))
-+    ['layout.html', None]
-+
-+    This function is useful for dependency tracking.  For example if you want
-+    to rebuild parts of the website after a layout template has changed.
-+    """
-+    for node in ast.find_all(
-+        (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
-+    ):
-+        if not isinstance(node.template, nodes.Const):
-+            # a tuple with some non-const items in there
-+            if isinstance(node.template, (nodes.Tuple, nodes.List)):
-+                for template_name in node.template.items:
-+                    # something const, only yield the strings and ignore
-+                    # non-string consts that really just make no sense
-+                    if isinstance(template_name, nodes.Const):
-+                        if isinstance(template_name.value, string_types):
-+                            yield template_name.value
-+                    # something dynamic in there
-+                    else:
-+                        yield None
-+            # something dynamic we don't know about here
-+            else:
-+                yield None
-+            continue
-+        # constant is a basestring, direct template name
-+        if isinstance(node.template.value, string_types):
-+            yield node.template.value
-+        # a tuple or list (the latter *should* not happen) made of consts,
-+        # yield the consts that are strings.  We could warn here for
-+        # non-string values
-+        elif isinstance(node, nodes.Include) and isinstance(
-+            node.template.value, (tuple, list)
-+        ):
-+            for template_name in node.template.value:
-+                if isinstance(template_name, string_types):
-+                    yield template_name
-+        # something else we don't care about, we could warn here
-+        else:
-+            yield None
-diff --git a/third_party/python/Jinja2/jinja2/nativetypes.py b/third_party/python/Jinja2/jinja2/nativetypes.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/nativetypes.py
-@@ -0,0 +1,111 @@
-+import types
-+from ast import literal_eval
-+from itertools import chain
-+from itertools import islice
-+
-+from . import nodes
-+from ._compat import text_type
-+from .compiler import CodeGenerator
-+from .compiler import has_safe_repr
-+from .environment import Environment
-+from .environment import Template
-+
-+
-+def native_concat(nodes, preserve_quotes=True):
-+    """Return a native Python type from the list of compiled nodes. If
-+    the result is a single node, its value is returned. Otherwise, the
-+    nodes are concatenated as strings. If the result can be parsed with
-+    :func:`ast.literal_eval`, the parsed value is returned. Otherwise,
-+    the string is returned.
-+
-+    :param nodes: Iterable of nodes to concatenate.
-+    :param preserve_quotes: Whether to re-wrap literal strings with
-+        quotes, to preserve quotes around expressions for later parsing.
-+        Should be ``False`` in :meth:`NativeEnvironment.render`.
-+    """
-+    head = list(islice(nodes, 2))
-+
-+    if not head:
-+        return None
-+
-+    if len(head) == 1:
-+        raw = head[0]
-+    else:
-+        if isinstance(nodes, types.GeneratorType):
-+            nodes = chain(head, nodes)
-+        raw = u"".join([text_type(v) for v in nodes])
-+
-+    try:
-+        literal = literal_eval(raw)
-+    except (ValueError, SyntaxError, MemoryError):
-+        return raw
-+
-+    # If literal_eval returned a string, re-wrap with the original
-+    # quote character to avoid dropping quotes between expression nodes.
-+    # Without this, "'{{ a }}', '{{ b }}'" results in "a, b", but should
-+    # be ('a', 'b').
-+    if preserve_quotes and isinstance(literal, str):
-+        return "{quote}{}{quote}".format(literal, quote=raw[0])
-+
-+    return literal
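-+
-+
-+# Illustrative behavior sketch (not part of the library):
-+#   native_concat(iter(["42"]))            -> 42         (int via literal_eval)
-+#   native_concat(iter(["[1, ", "2]"]))    -> [1, 2]     (list)
-+#   native_concat(iter(["just ", "text"])) -> "just text" (not a literal)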
-+
-+
-+class NativeCodeGenerator(CodeGenerator):
-+    """A code generator which renders Python types by not adding
-+    ``to_string()`` around output nodes, and using :func:`native_concat`
-+    to convert complex strings back to Python types if possible.
-+    """
-+
-+    @staticmethod
-+    def _default_finalize(value):
-+        return value
-+
-+    def _output_const_repr(self, group):
-+        return repr(native_concat(group))
-+
-+    def _output_child_to_const(self, node, frame, finalize):
-+        const = node.as_const(frame.eval_ctx)
-+
-+        if not has_safe_repr(const):
-+            raise nodes.Impossible()
-+
-+        if isinstance(node, nodes.TemplateData):
-+            return const
-+
-+        return finalize.const(const)
-+
-+    def _output_child_pre(self, node, frame, finalize):
-+        if finalize.src is not None:
-+            self.write(finalize.src)
-+
-+    def _output_child_post(self, node, frame, finalize):
-+        if finalize.src is not None:
-+            self.write(")")
-+
-+
-+class NativeEnvironment(Environment):
-+    """An environment that renders templates to native Python types."""
-+
-+    code_generator_class = NativeCodeGenerator
-+
-+
-+class NativeTemplate(Template):
-+    environment_class = NativeEnvironment
-+
-+    def render(self, *args, **kwargs):
-+        """Render the template to produce a native Python type. If the
-+        result is a single node, its value is returned. Otherwise, the
-+        nodes are concatenated as strings. If the result can be parsed
-+        with :func:`ast.literal_eval`, the parsed value is returned.
-+        Otherwise, the string is returned.
-+        """
-+        vars = dict(*args, **kwargs)
-+        try:
-+            return native_concat(
-+                self.root_render_func(self.new_context(vars)), preserve_quotes=False
-+            )
-+        except Exception:
-+            return self.environment.handle_exception()
-+
-+
-+NativeEnvironment.template_class = NativeTemplate
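-+
-+
-+# Illustrative usage sketch (not part of the library):
-+#
-+#     env = NativeEnvironment()
-+#     result = env.from_string("{{ x + y }}").render(x=4, y=2)
-+#     assert result == 6 and isinstance(result, int)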
-diff --git a/third_party/python/Jinja2/jinja2/nodes.py b/third_party/python/Jinja2/jinja2/nodes.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/nodes.py
-@@ -0,0 +1,1088 @@
-+# -*- coding: utf-8 -*-
-+"""AST nodes generated by the parser for the compiler. Also provides
-+some node tree helper functions used by the parser and compiler in order
-+to normalize nodes.
-+"""
-+import operator
-+from collections import deque
-+
-+from markupsafe import Markup
-+
-+from ._compat import izip
-+from ._compat import PY2
-+from ._compat import text_type
-+from ._compat import with_metaclass
-+
-+_binop_to_func = {
-+    "*": operator.mul,
-+    "/": operator.truediv,
-+    "//": operator.floordiv,
-+    "**": operator.pow,
-+    "%": operator.mod,
-+    "+": operator.add,
-+    "-": operator.sub,
-+}
-+
-+_uaop_to_func = {"not": operator.not_, "+": operator.pos, "-": operator.neg}
-+
-+_cmpop_to_func = {
-+    "eq": operator.eq,
-+    "ne": operator.ne,
-+    "gt": operator.gt,
-+    "gteq": operator.ge,
-+    "lt": operator.lt,
-+    "lteq": operator.le,
-+    "in": lambda a, b: a in b,
-+    "notin": lambda a, b: a not in b,
-+}
-+
-+
-+class Impossible(Exception):
-+    """Raised if the node could not perform a requested action."""
-+
-+
-+class NodeType(type):
-+    """A metaclass for nodes that handles the field and attribute
-+    inheritance.  fields and attributes from the parent class are
-+    automatically forwarded to the child."""
-+
-+    def __new__(mcs, name, bases, d):
-+        for attr in "fields", "attributes":
-+            storage = []
-+            storage.extend(getattr(bases[0], attr, ()))
-+            storage.extend(d.get(attr, ()))
-+            assert len(bases) == 1, "multiple inheritance not allowed"
-+            assert len(storage) == len(set(storage)), "layout conflict"
-+            d[attr] = tuple(storage)
-+        d.setdefault("abstract", False)
-+        return type.__new__(mcs, name, bases, d)
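-+
-+    # Illustrative sketch (not part of the library): with this metaclass,
-+    #     class A(Node): fields = ("x",)
-+    #     class B(A):    fields = ("y",)
-+    # results in B.fields == ("x", "y"), so positional constructor
-+    # arguments line up with the combined field list.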
-+
-+
-+class EvalContext(object):
-+    """Holds evaluation time information.  Custom attributes can be attached
-+    to it in extensions.
-+    """
-+
-+    def __init__(self, environment, template_name=None):
-+        self.environment = environment
-+        if callable(environment.autoescape):
-+            self.autoescape = environment.autoescape(template_name)
-+        else:
-+            self.autoescape = environment.autoescape
-+        self.volatile = False
-+
-+    def save(self):
-+        return self.__dict__.copy()
-+
-+    def revert(self, old):
-+        self.__dict__.clear()
-+        self.__dict__.update(old)
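-+
-+    # Illustrative sketch (not part of the library): extensions can tweak
-+    # the context temporarily and restore it afterwards:
-+    #     old = eval_ctx.save()
-+    #     eval_ctx.autoescape = True
-+    #     ...  # evaluate something with autoescaping forced on
-+    #     eval_ctx.revert(old)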
-+
-+
-+def get_eval_context(node, ctx):
-+    if ctx is None:
-+        if node.environment is None:
-+            raise RuntimeError(
-+                "if no eval context is passed, the "
-+                "node must have an attached "
-+                "environment."
-+            )
-+        return EvalContext(node.environment)
-+    return ctx
-+
-+
-+class Node(with_metaclass(NodeType, object)):
-+    """Baseclass for all Jinja nodes.  There are a number of nodes available
-+    of different types.  There are four major types:
-+
-+    -   :class:`Stmt`: statements
-+    -   :class:`Expr`: expressions
-+    -   :class:`Helper`: helper nodes
-+    -   :class:`Template`: the outermost wrapper node
-+
-+    All nodes have fields and attributes.  Fields may be other nodes, lists,
-+    or arbitrary values.  Fields are passed to the constructor as regular
-+    positional arguments, attributes as keyword arguments.  Each node has
-+    two attributes: `lineno` (the line number of the node) and `environment`.
-+    The `environment` attribute is set at the end of the parsing process for
-+    all nodes automatically.
-+    """
-+
-+    fields = ()
-+    attributes = ("lineno", "environment")
-+    abstract = True
-+
-+    def __init__(self, *fields, **attributes):
-+        if self.abstract:
-+            raise TypeError("abstract nodes are not instantiable")
-+        if fields:
-+            if len(fields) != len(self.fields):
-+                if not self.fields:
-+                    raise TypeError("%r takes 0 arguments" % self.__class__.__name__)
-+                raise TypeError(
-+                    "%r takes 0 or %d argument%s"
-+                    % (
-+                        self.__class__.__name__,
-+                        len(self.fields),
-+                        len(self.fields) != 1 and "s" or "",
-+                    )
-+                )
-+            for name, arg in izip(self.fields, fields):
-+                setattr(self, name, arg)
-+        for attr in self.attributes:
-+            setattr(self, attr, attributes.pop(attr, None))
-+        if attributes:
-+            raise TypeError("unknown attribute %r" % next(iter(attributes)))
-+
-+    def iter_fields(self, exclude=None, only=None):
-+        """This method iterates over all fields that are defined and yields
-+        ``(key, value)`` tuples.  By default all fields are returned, but
-+        it's possible to limit that to some fields by providing the `only`
-+        parameter or to exclude some using the `exclude` parameter.  Both
-+        should be sets or tuples of field names.
-+        """
-+        for name in self.fields:
-+            if (
-+                (exclude is only is None)
-+                or (exclude is not None and name not in exclude)
-+                or (only is not None and name in only)
-+            ):
-+                try:
-+                    yield name, getattr(self, name)
-+                except AttributeError:
-+                    pass
-+
-+    def iter_child_nodes(self, exclude=None, only=None):
-+        """Iterates over all direct child nodes of the node.  This iterates
-+        over all fields and yields the values if they are nodes.  If the value
-+        of a field is a list all the nodes in that list are returned.
-+        """
-+        for _, item in self.iter_fields(exclude, only):
-+            if isinstance(item, list):
-+                for n in item:
-+                    if isinstance(n, Node):
-+                        yield n
-+            elif isinstance(item, Node):
-+                yield item
-+
-+    def find(self, node_type):
-+        """Find the first node of a given type.  If no such node exists the
-+        return value is `None`.
-+        """
-+        for result in self.find_all(node_type):
-+            return result
-+
-+    def find_all(self, node_type):
-+        """Find all the nodes of a given type.  If the type is a tuple,
-+        the check is performed for any of the tuple items.
-+        """
-+        for child in self.iter_child_nodes():
-+            if isinstance(child, node_type):
-+                yield child
-+            for result in child.find_all(node_type):
-+                yield result
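-+
-+    # Illustrative sketch (not part of the library): given
-+    # ``ast = env.parse("{{ a }} {{ b }}")``, ``ast.find_all(Name)`` yields
-+    # the two Name nodes, while ``ast.find(Name)`` returns only the first
-+    # (or None if there is no match).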
-+
-+    def set_ctx(self, ctx):
-+        """Reset the context of a node and all child nodes.  Per default the
-+        parser will all generate nodes that have a 'load' context as it's the
-+        most common one.  This method is used in the parser to set assignment
-+        targets and other nodes to a store context.
-+        """
-+        todo = deque([self])
-+        while todo:
-+            node = todo.popleft()
-+            if "ctx" in node.fields:
-+                node.ctx = ctx
-+            todo.extend(node.iter_child_nodes())
-+        return self
-+
-+    def set_lineno(self, lineno, override=False):
-+        """Set the line numbers of the node and children."""
-+        todo = deque([self])
-+        while todo:
-+            node = todo.popleft()
-+            if "lineno" in node.attributes:
-+                if node.lineno is None or override:
-+                    node.lineno = lineno
-+            todo.extend(node.iter_child_nodes())
-+        return self
-+
-+    def set_environment(self, environment):
-+        """Set the environment for all nodes."""
-+        todo = deque([self])
-+        while todo:
-+            node = todo.popleft()
-+            node.environment = environment
-+            todo.extend(node.iter_child_nodes())
-+        return self
-+
-+    def __eq__(self, other):
-+        return type(self) is type(other) and tuple(self.iter_fields()) == tuple(
-+            other.iter_fields()
-+        )
-+
-+    def __ne__(self, other):
-+        return not self.__eq__(other)
-+
-+    # Restore Python 2 hashing behavior on Python 3
-+    __hash__ = object.__hash__
-+
-+    def __repr__(self):
-+        return "%s(%s)" % (
-+            self.__class__.__name__,
-+            ", ".join("%s=%r" % (arg, getattr(self, arg, None)) for arg in self.fields),
-+        )
-+
-+    def dump(self):
-+        def _dump(node):
-+            if not isinstance(node, Node):
-+                buf.append(repr(node))
-+                return
-+
-+            buf.append("nodes.%s(" % node.__class__.__name__)
-+            if not node.fields:
-+                buf.append(")")
-+                return
-+            for idx, field in enumerate(node.fields):
-+                if idx:
-+                    buf.append(", ")
-+                value = getattr(node, field)
-+                if isinstance(value, list):
-+                    buf.append("[")
-+                    for idx, item in enumerate(value):
-+                        if idx:
-+                            buf.append(", ")
-+                        _dump(item)
-+                    buf.append("]")
-+                else:
-+                    _dump(value)
-+            buf.append(")")
-+
-+        buf = []
-+        _dump(self)
-+        return "".join(buf)
-+
-+
-+class Stmt(Node):
-+    """Base node for all statements."""
-+
-+    abstract = True
-+
-+
-+class Helper(Node):
-+    """Nodes that exist in a specific context only."""
-+
-+    abstract = True
-+
-+
-+class Template(Node):
-+    """Node that represents a template.  This must be the outermost node that
-+    is passed to the compiler.
-+    """
-+
-+    fields = ("body",)
-+
-+
-+class Output(Stmt):
-+    """A node that holds multiple expressions which are then printed out.
-+    This is used both for the `print` statement and the regular template data.
-+    """
-+
-+    fields = ("nodes",)
-+
-+
-+class Extends(Stmt):
-+    """Represents an extends statement."""
-+
-+    fields = ("template",)
-+
-+
-+class For(Stmt):
-+    """The for loop.  `target` is the target for the iteration (usually a
-+    :class:`Name` or :class:`Tuple`), `iter` the iterable.  `body` is a list
-+    of nodes that are used as loop-body, and `else_` a list of nodes for the
-+    `else` block.  If no else node exists it has to be an empty list.
-+
-+    For filtered nodes an expression can be stored as `test`, otherwise `None`.
-+    """
-+
-+    fields = ("target", "iter", "body", "else_", "test", "recursive")
-+
-+
-+class If(Stmt):
-+    """If `test` is true, `body` is rendered, else `else_`."""
-+
-+    fields = ("test", "body", "elif_", "else_")
-+
-+
-+class Macro(Stmt):
-+    """A macro definition.  `name` is the name of the macro, `args` a list of
-+    arguments and `defaults` a list of defaults if there are any.  `body` is
-+    a list of nodes for the macro body.
-+    """
-+
-+    fields = ("name", "args", "defaults", "body")
-+
-+
-+class CallBlock(Stmt):
-+    """Like a macro without a name but a call instead.  `call` is called with
-+    the unnamed macro as `caller` argument this node holds.
-+    """
-+
-+    fields = ("call", "args", "defaults", "body")
-+
-+
-+class FilterBlock(Stmt):
-+    """Node for filter sections."""
-+
-+    fields = ("body", "filter")
-+
-+
-+class With(Stmt):
-+    """Specific node for with statements.  In older versions of Jinja the
-+    with statement was implemented on the base of the `Scope` node instead.
-+
-+    .. versionadded:: 2.9.3
-+    """
-+
-+    fields = ("targets", "values", "body")
-+
-+
-+class Block(Stmt):
-+    """A node that represents a block."""
-+
-+    fields = ("name", "body", "scoped")
-+
-+
-+class Include(Stmt):
-+    """A node that represents the include tag."""
-+
-+    fields = ("template", "with_context", "ignore_missing")
-+
-+
-+class Import(Stmt):
-+    """A node that represents the import tag."""
-+
-+    fields = ("template", "target", "with_context")
-+
-+
-+class FromImport(Stmt):
-+    """A node that represents the from import tag.  It's important to not
-+    pass unsafe names to the name attribute.  The compiler translates the
-+    attribute lookups directly into getattr calls and does *not* use the
-+    subscript callback of the interface.  As exported variables may not
-+    start with double underscores (which the parser asserts) this is not a
-+    problem for regular Jinja code, but if this node is used in an extension
-+    extra care must be taken.
-+
-+    The list of names may contain tuples if aliases are wanted.
-+    """
-+
-+    fields = ("template", "names", "with_context")
-+
-+
-+class ExprStmt(Stmt):
-+    """A statement that evaluates an expression and discards the result."""
-+
-+    fields = ("node",)
-+
-+
-+class Assign(Stmt):
-+    """Assigns an expression to a target."""
-+
-+    fields = ("target", "node")
-+
-+
-+class AssignBlock(Stmt):
-+    """Assigns a block to a target."""
-+
-+    fields = ("target", "filter", "body")
-+
-+
-+class Expr(Node):
-+    """Baseclass for all expressions."""
-+
-+    abstract = True
-+
-+    def as_const(self, eval_ctx=None):
-+        """Return the value of the expression as constant or raise
-+        :exc:`Impossible` if this was not possible.
-+
-+        An :class:`EvalContext` can be provided, if none is given
-+        a default context is created which requires the nodes to have
-+        an attached environment.
-+
-+        .. versionchanged:: 2.4
-+           the `eval_ctx` parameter was added.
-+        """
-+        raise Impossible()
-+
-+    def can_assign(self):
-+        """Check if it's possible to assign something to this node."""
-+        return False
-+
-+
-+class BinExpr(Expr):
-+    """Baseclass for all binary expressions."""
-+
-+    fields = ("left", "right")
-+    operator = None
-+    abstract = True
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        # intercepted operators cannot be folded at compile time
-+        if (
-+            self.environment.sandboxed
-+            and self.operator in self.environment.intercepted_binops
-+        ):
-+            raise Impossible()
-+        f = _binop_to_func[self.operator]
-+        try:
-+            return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
-+        except Exception:
-+            raise Impossible()
-+
-+
-+class UnaryExpr(Expr):
-+    """Baseclass for all unary expressions."""
-+
-+    fields = ("node",)
-+    operator = None
-+    abstract = True
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        # intercepted operators cannot be folded at compile time
-+        if (
-+            self.environment.sandboxed
-+            and self.operator in self.environment.intercepted_unops
-+        ):
-+            raise Impossible()
-+        f = _uaop_to_func[self.operator]
-+        try:
-+            return f(self.node.as_const(eval_ctx))
-+        except Exception:
-+            raise Impossible()
-+
-+
-+class Name(Expr):
-+    """Looks up a name or stores a value in a name.
-+    The `ctx` of the node can be one of the following values:
-+
-+    -   `store`: store a value in the name
-+    -   `load`: load that name
-+    -   `param`: like `store` but if the name was defined as function parameter.
-+    """
-+
-+    fields = ("name", "ctx")
-+
-+    def can_assign(self):
-+        return self.name not in ("true", "false", "none", "True", "False", "None")
-+
-+
-+class NSRef(Expr):
-+    """Reference to a namespace value assignment"""
-+
-+    fields = ("name", "attr")
-+
-+    def can_assign(self):
-+        # We don't need any special checks here; NSRef assignments have a
-+        # runtime check to ensure the target is a namespace object which will
-+        # have been checked already as it is created using a normal assignment
-+        # which goes through a `Name` node.
-+        return True
-+
-+
-+class Literal(Expr):
-+    """Baseclass for literals."""
-+
-+    abstract = True
-+
-+
-+class Const(Literal):
-+    """All constant values.  The parser will return this node for simple
-+    constants such as ``42`` or ``"foo"`` but it can be used to store more
-+    complex values such as lists too.  Only constants with a safe
-+    representation (objects where ``eval(repr(x)) == x`` is true) can be
-+    stored.
-+    """
-+
-+    fields = ("value",)
-+
-+    def as_const(self, eval_ctx=None):
-+        rv = self.value
-+        if (
-+            PY2
-+            and type(rv) is text_type
-+            and self.environment.policies["compiler.ascii_str"]
-+        ):
-+            try:
-+                rv = rv.encode("ascii")
-+            except UnicodeError:
-+                pass
-+        return rv
-+
-+    @classmethod
-+    def from_untrusted(cls, value, lineno=None, environment=None):
-+        """Return a const object if the value is representable as
-+        constant value in the generated code, otherwise it will raise
-+        an `Impossible` exception.
-+        """
-+        from .compiler import has_safe_repr
-+
-+        if not has_safe_repr(value):
-+            raise Impossible()
-+        return cls(value, lineno=lineno, environment=environment)
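-+
-+    # Illustrative sketch (not part of the library):
-+    #   Const.from_untrusted([1, 2])   -> Const(value=[1, 2])
-+    #   Const.from_untrusted(object()) -> raises Impossible (no safe repr)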
-+
-+
-+class TemplateData(Literal):
-+    """A constant template string."""
-+
-+    fields = ("data",)
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        if eval_ctx.volatile:
-+            raise Impossible()
-+        if eval_ctx.autoescape:
-+            return Markup(self.data)
-+        return self.data
-+
-+
-+class Tuple(Literal):
-+    """For loop unpacking and some other things like multiple arguments
-+    for subscripts.  Like for :class:`Name` `ctx` specifies if the tuple
-+    is used for loading the names or storing.
-+    """
-+
-+    fields = ("items", "ctx")
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        return tuple(x.as_const(eval_ctx) for x in self.items)
-+
-+    def can_assign(self):
-+        for item in self.items:
-+            if not item.can_assign():
-+                return False
-+        return True
-+
-+
-+class List(Literal):
-+    """Any list literal such as ``[1, 2, 3]``"""
-+
-+    fields = ("items",)
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        return [x.as_const(eval_ctx) for x in self.items]
-+
-+
-+class Dict(Literal):
-+    """Any dict literal such as ``{1: 2, 3: 4}``.  The items must be a list of
-+    :class:`Pair` nodes.
-+    """
-+
-+    fields = ("items",)
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        return dict(x.as_const(eval_ctx) for x in self.items)
-+
-+
-+class Pair(Helper):
-+    """A key, value pair for dicts."""
-+
-+    fields = ("key", "value")
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
-+
-+
-+class Keyword(Helper):
-+    """A key, value pair for keyword arguments where key is a string."""
-+
-+    fields = ("key", "value")
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        return self.key, self.value.as_const(eval_ctx)
-+
-+
-+class CondExpr(Expr):
-+    """A conditional expression (inline if expression).  (``{{
-+    foo if bar else baz }}``)
-+    """
-+
-+    fields = ("test", "expr1", "expr2")
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        if self.test.as_const(eval_ctx):
-+            return self.expr1.as_const(eval_ctx)
-+
-+        # if we evaluate to an undefined object, we better do that at runtime
-+        if self.expr2 is None:
-+            raise Impossible()
-+
-+        return self.expr2.as_const(eval_ctx)
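
A small sketch of constant-folding a hand-built conditional expression; `set_environment` attaches the environment that `as_const` needs (assuming jinja2 is importable):

    from jinja2 import Environment, nodes

    expr = nodes.CondExpr(nodes.Const(True), nodes.Const("yes"), nodes.Const("no"))
    expr.set_environment(Environment())
    print(expr.as_const())  # -> yes
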
-+
-+
-+def args_as_const(node, eval_ctx):
-+    args = [x.as_const(eval_ctx) for x in node.args]
-+    kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
-+
-+    if node.dyn_args is not None:
-+        try:
-+            args.extend(node.dyn_args.as_const(eval_ctx))
-+        except Exception:
-+            raise Impossible()
-+
-+    if node.dyn_kwargs is not None:
-+        try:
-+            kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
-+        except Exception:
-+            raise Impossible()
-+
-+    return args, kwargs
-+
-+
-+class Filter(Expr):
-+    """This node applies a filter on an expression.  `name` is the name of
-+    the filter, the rest of the fields are the same as for :class:`Call`.
-+
-+    If the `node` of a filter is `None` the contents of the last buffer are
-+    filtered.  Buffers are created by macros and filter blocks.
-+    """
-+
-+    fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+
-+        if eval_ctx.volatile or self.node is None:
-+            raise Impossible()
-+
-+        # we have to be careful here because we call filter_ below.
-+        # if this variable would be called filter, 2to3 would wrap the
-+        # call in a list because it is assuming we are talking about the
-+        # builtin filter function here which no longer returns a list in
-+        # python 3.  because of that, do not rename filter_ to filter!
-+        filter_ = self.environment.filters.get(self.name)
-+
-+        if filter_ is None or getattr(filter_, "contextfilter", False):
-+            raise Impossible()
-+
-+        # We cannot constant-fold async filters, so make sure not to go
-+        # down this path.
-+        if eval_ctx.environment.is_async and getattr(
-+            filter_, "asyncfiltervariant", False
-+        ):
-+            raise Impossible()
-+
-+        args, kwargs = args_as_const(self, eval_ctx)
-+        args.insert(0, self.node.as_const(eval_ctx))
-+
-+        if getattr(filter_, "evalcontextfilter", False):
-+            args.insert(0, eval_ctx)
-+        elif getattr(filter_, "environmentfilter", False):
-+            args.insert(0, self.environment)
-+
-+        try:
-+            return filter_(*args, **kwargs)
-+        except Exception:
-+            raise Impossible()
-+
-+
-+class Test(Expr):
-+    """Applies a test on an expression.  `name` is the name of the test, the
-+    rest of the fields are the same as for :class:`Call`.
-+    """
-+
-+    fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
-+
-+    def as_const(self, eval_ctx=None):
-+        test = self.environment.tests.get(self.name)
-+
-+        if test is None:
-+            raise Impossible()
-+
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        args, kwargs = args_as_const(self, eval_ctx)
-+        args.insert(0, self.node.as_const(eval_ctx))
-+
-+        try:
-+            return test(*args, **kwargs)
-+        except Exception:
-+            raise Impossible()
-+
-+
-+class Call(Expr):
-+    """Calls an expression.  `args` is a list of arguments, `kwargs` a list
-+    of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
-+    and `dyn_kwargs` have to be either `None` or a node that is used as
-+    node for dynamic positional (``*args``) or keyword (``**kwargs``)
-+    arguments.
-+    """
-+
-+    fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs")
-+
-+
-+class Getitem(Expr):
-+    """Get an attribute or item from an expression and prefer the item."""
-+
-+    fields = ("node", "arg", "ctx")
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        if self.ctx != "load":
-+            raise Impossible()
-+        try:
-+            return self.environment.getitem(
-+                self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)
-+            )
-+        except Exception:
-+            raise Impossible()
-+
-+    def can_assign(self):
-+        return False
-+
-+
-+class Getattr(Expr):
-+    """Get an attribute or item from an expression that is a ascii-only
-+    bytestring and prefer the attribute.
-+    """
-+
-+    fields = ("node", "attr", "ctx")
-+
-+    def as_const(self, eval_ctx=None):
-+        if self.ctx != "load":
-+            raise Impossible()
-+        try:
-+            eval_ctx = get_eval_context(self, eval_ctx)
-+            return self.environment.getattr(self.node.as_const(eval_ctx), self.attr)
-+        except Exception:
-+            raise Impossible()
-+
-+    def can_assign(self):
-+        return False
-+
-+
-+class Slice(Expr):
-+    """Represents a slice object.  This must only be used as argument for
-+    :class:`Subscript`.
-+    """
-+
-+    fields = ("start", "stop", "step")
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+
-+        def const(obj):
-+            if obj is None:
-+                return None
-+            return obj.as_const(eval_ctx)
-+
-+        return slice(const(self.start), const(self.stop), const(self.step))
-+
-+
-+class Concat(Expr):
-+    """Concatenates the list of expressions provided after converting them to
-+    unicode.
-+    """
-+
-+    fields = ("nodes",)
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        return "".join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
-+
-+
-+class Compare(Expr):
-+    """Compares an expression with some other expressions.  `ops` must be a
-+    list of :class:`Operand`\\s.
-+    """
-+
-+    fields = ("expr", "ops")
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        result = value = self.expr.as_const(eval_ctx)
-+
-+        try:
-+            for op in self.ops:
-+                new_value = op.expr.as_const(eval_ctx)
-+                result = _cmpop_to_func[op.op](value, new_value)
-+
-+                if not result:
-+                    return False
-+
-+                value = new_value
-+        except Exception:
-+            raise Impossible()
-+
-+        return result
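
Chained comparisons fold the same way Python evaluates them; a quick sketch through the public API (assuming jinja2 is importable):

    from jinja2 import Environment

    env = Environment()
    # One Compare node with two Operands; short-circuits like Python.
    print(env.from_string("{{ 1 < 2 < 3 }}").render())  # True
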
-+
-+
-+class Operand(Helper):
-+    """Holds an operator and an expression."""
-+
-+    fields = ("op", "expr")
-+
-+
-+if __debug__:
-+    Operand.__doc__ += "\nThe following operators are available: " + ", ".join(
-+        sorted(
-+            "``%s``" % x
-+            for x in set(_binop_to_func) | set(_uaop_to_func) | set(_cmpop_to_func)
-+        )
-+    )
-+
-+
-+class Mul(BinExpr):
-+    """Multiplies the left with the right node."""
-+
-+    operator = "*"
-+
-+
-+class Div(BinExpr):
-+    """Divides the left by the right node."""
-+
-+    operator = "/"
-+
-+
-+class FloorDiv(BinExpr):
-+    """Divides the left by the right node and truncates conver the
-+    result into an integer by truncating.
-+    """
-+
-+    operator = "//"
-+
-+
-+class Add(BinExpr):
-+    """Add the left to the right node."""
-+
-+    operator = "+"
-+
-+
-+class Sub(BinExpr):
-+    """Subtract the right from the left node."""
-+
-+    operator = "-"
-+
-+
-+class Mod(BinExpr):
-+    """Left modulo right."""
-+
-+    operator = "%"
-+
-+
-+class Pow(BinExpr):
-+    """Left to the power of right."""
-+
-+    operator = "**"
-+
-+
-+class And(BinExpr):
-+    """Short circuited AND."""
-+
-+    operator = "and"
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
-+
-+
-+class Or(BinExpr):
-+    """Short circuited OR."""
-+
-+    operator = "or"
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
-+
-+
-+class Not(UnaryExpr):
-+    """Negate the expression."""
-+
-+    operator = "not"
-+
-+
-+class Neg(UnaryExpr):
-+    """Make the expression negative."""
-+
-+    operator = "-"
-+
-+
-+class Pos(UnaryExpr):
-+    """Make the expression positive (noop for most expressions)"""
-+
-+    operator = "+"
-+
-+
-+# Helpers for extensions
-+
-+
-+class EnvironmentAttribute(Expr):
-+    """Loads an attribute from the environment object.  This is useful for
-+    extensions that want to call a callback stored on the environment.
-+    """
-+
-+    fields = ("name",)
-+
-+
-+class ExtensionAttribute(Expr):
-+    """Returns the attribute of an extension bound to the environment.
-+    The identifier is the identifier of the :class:`Extension`.
-+
-+    This node is usually constructed by calling the
-+    :meth:`~jinja2.ext.Extension.attr` method on an extension.
-+    """
-+
-+    fields = ("identifier", "name")
-+
-+
-+class ImportedName(Expr):
-+    """If created with an import name the import name is returned on node
-+    access.  For example ``ImportedName('cgi.escape')`` returns the `escape`
-+    function from the cgi module on evaluation.  Imports are optimized by the
-+    compiler so there is no need to assign them to local variables.
-+    """
-+
-+    fields = ("importname",)
-+
-+
-+class InternalName(Expr):
-+    """An internal name in the compiler.  You cannot create these nodes
-+    yourself but the parser provides a
-+    :meth:`~jinja2.parser.Parser.free_identifier` method that creates
-+    a new identifier for you.  This identifier is not available from the
-+    template and is not treated specially by the compiler.
-+    """
-+
-+    fields = ("name",)
-+
-+    def __init__(self):
-+        raise TypeError(
-+            "Can't create internal names.  Use the "
-+            "`free_identifier` method on a parser."
-+        )
-+
-+
-+class MarkSafe(Expr):
-+    """Mark the wrapped expression as safe (wrap it as `Markup`)."""
-+
-+    fields = ("expr",)
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        return Markup(self.expr.as_const(eval_ctx))
-+
-+
-+class MarkSafeIfAutoescape(Expr):
-+    """Mark the wrapped expression as safe (wrap it as `Markup`) but
-+    only if autoescaping is active.
-+
-+    .. versionadded:: 2.5
-+    """
-+
-+    fields = ("expr",)
-+
-+    def as_const(self, eval_ctx=None):
-+        eval_ctx = get_eval_context(self, eval_ctx)
-+        if eval_ctx.volatile:
-+            raise Impossible()
-+        expr = self.expr.as_const(eval_ctx)
-+        if eval_ctx.autoescape:
-+            return Markup(expr)
-+        return expr
-+
-+
-+class ContextReference(Expr):
-+    """Returns the current template context.  It can be used like a
-+    :class:`Name` node, with a ``'load'`` ctx and will return the
-+    current :class:`~jinja2.runtime.Context` object.
-+
-+    Here is an example that assigns the current template name to a
-+    variable named `foo`::
-+
-+        Assign(Name('foo', ctx='store'),
-+               Getattr(ContextReference(), 'name'))
-+
-+    This is basically equivalent to using the
-+    :func:`~jinja2.contextfunction` decorator when using the
-+    high-level API, which causes a reference to the context to be passed
-+    as the first argument to a function.
-+    """
-+
-+
-+class DerivedContextReference(Expr):
-+    """Return the current template context including locals. Behaves
-+    exactly like :class:`ContextReference`, but includes local
-+    variables, such as from a ``for`` loop.
-+
-+    .. versionadded:: 2.11
-+    """
-+
-+
-+class Continue(Stmt):
-+    """Continue a loop."""
-+
-+
-+class Break(Stmt):
-+    """Break a loop."""
-+
-+
-+class Scope(Stmt):
-+    """An artificial scope."""
-+
-+    fields = ("body",)
-+
-+
-+class OverlayScope(Stmt):
-+    """An overlay scope for extensions.  This is a largely unoptimized scope
-+    that however can be used to introduce completely arbitrary variables into
-+    a sub scope from a dictionary or dictionary like object.  The `context`
-+    field has to evaluate to a dictionary object.
-+
-+    Example usage::
-+
-+        OverlayScope(context=self.call_method('get_context'),
-+                     body=[...])
-+
-+    .. versionadded:: 2.10
-+    """
-+
-+    fields = ("context", "body")
-+
-+
-+class EvalContextModifier(Stmt):
-+    """Modifies the eval context.  For each option that should be modified,
-+    a :class:`Keyword` has to be added to the :attr:`options` list.
-+
-+    Example to change the `autoescape` setting::
-+
-+        EvalContextModifier(options=[Keyword('autoescape', Const(True))])
-+    """
-+
-+    fields = ("options",)
-+
-+
-+class ScopedEvalContextModifier(EvalContextModifier):
-+    """Modifies the eval context and reverts it later.  Works exactly like
-+    :class:`EvalContextModifier` but will only modify the
-+    :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
-+    """
-+
-+    fields = ("body",)
-+
-+
-+# make sure nobody creates custom nodes
-+def _failing_new(*args, **kwargs):
-+    raise TypeError("can't create custom node types")
-+
-+
-+NodeType.__new__ = staticmethod(_failing_new)
-+del _failing_new
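
A sketch of the guard above in action: defining a custom node type fails at class-creation time, because `NodeType.__new__` now raises (assuming jinja2 is importable):

    from jinja2 import nodes

    try:
        class MyNode(nodes.Stmt):  # class creation goes through NodeType.__new__
            fields = ("x",)
    except TypeError as exc:
        print(exc)  # can't create custom node types
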
-diff --git a/third_party/python/Jinja2/jinja2/optimizer.py b/third_party/python/Jinja2/jinja2/optimizer.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/optimizer.py
-@@ -0,0 +1,41 @@
-+# -*- coding: utf-8 -*-
-+"""The optimizer tries to constant fold expressions and modify the AST
-+in place so that it should be faster to evaluate.
-+
-+Because the AST does not contain all the scoping information and the
-+compiler has to find that out, we cannot do all the optimizations we
-+want. For example, loop unrolling doesn't work because unrolled loops
-+would have a different scope. The solution would be a second syntax tree
-+that stored the scoping rules.
-+"""
-+from . import nodes
-+from .visitor import NodeTransformer
-+
-+
-+def optimize(node, environment):
-+    """The context hint can be used to perform an static optimization
-+    based on the context given."""
-+    optimizer = Optimizer(environment)
-+    return optimizer.visit(node)
-+
-+
-+class Optimizer(NodeTransformer):
-+    def __init__(self, environment):
-+        self.environment = environment
-+
-+    def generic_visit(self, node, *args, **kwargs):
-+        node = super(Optimizer, self).generic_visit(node, *args, **kwargs)
-+
-+        # Do constant folding. Some other nodes besides Expr have
-+        # as_const, but folding them causes errors later on.
-+        if isinstance(node, nodes.Expr):
-+            try:
-+                return nodes.Const.from_untrusted(
-+                    node.as_const(args[0] if args else None),
-+                    lineno=node.lineno,
-+                    environment=self.environment,
-+                )
-+            except nodes.Impossible:
-+                pass
-+
-+        return node
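
A minimal sketch of the optimizer folding a constant subtree; `Environment.parse` produces the AST the optimizer expects (assuming jinja2 is importable):

    from jinja2 import Environment
    from jinja2.optimizer import optimize

    env = Environment()
    ast = env.parse("{{ 1 + 2 * 3 }}")
    folded = optimize(ast, env)
    # The arithmetic subtree is now a single Const(7) inside the Output node.
    print(folded)
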
-diff --git a/third_party/python/Jinja2/jinja2/parser.py b/third_party/python/Jinja2/jinja2/parser.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/parser.py
-@@ -0,0 +1,939 @@
-+# -*- coding: utf-8 -*-
-+"""Parse tokens from the lexer into nodes for the compiler."""
-+from . import nodes
-+from ._compat import imap
-+from .exceptions import TemplateAssertionError
-+from .exceptions import TemplateSyntaxError
-+from .lexer import describe_token
-+from .lexer import describe_token_expr
-+
-+_statement_keywords = frozenset(
-+    [
-+        "for",
-+        "if",
-+        "block",
-+        "extends",
-+        "print",
-+        "macro",
-+        "include",
-+        "from",
-+        "import",
-+        "set",
-+        "with",
-+        "autoescape",
-+    ]
-+)
-+_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])
-+
-+_math_nodes = {
-+    "add": nodes.Add,
-+    "sub": nodes.Sub,
-+    "mul": nodes.Mul,
-+    "div": nodes.Div,
-+    "floordiv": nodes.FloorDiv,
-+    "mod": nodes.Mod,
-+}
-+
-+
-+class Parser(object):
-+    """This is the central parsing class Jinja uses.  It's passed to
-+    extensions and can be used to parse expressions or statements.
-+    """
-+
-+    def __init__(self, environment, source, name=None, filename=None, state=None):
-+        self.environment = environment
-+        self.stream = environment._tokenize(source, name, filename, state)
-+        self.name = name
-+        self.filename = filename
-+        self.closed = False
-+        self.extensions = {}
-+        for extension in environment.iter_extensions():
-+            for tag in extension.tags:
-+                self.extensions[tag] = extension.parse
-+        self._last_identifier = 0
-+        self._tag_stack = []
-+        self._end_token_stack = []
-+
-+    def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
-+        """Convenience method that raises `exc` with the message, passed
-+        line number or last line number as well as the current name and
-+        filename.
-+        """
-+        if lineno is None:
-+            lineno = self.stream.current.lineno
-+        raise exc(msg, lineno, self.name, self.filename)
-+
-+    def _fail_ut_eof(self, name, end_token_stack, lineno):
-+        expected = []
-+        for exprs in end_token_stack:
-+            expected.extend(imap(describe_token_expr, exprs))
-+        if end_token_stack:
-+            currently_looking = " or ".join(
-+                "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]
-+            )
-+        else:
-+            currently_looking = None
-+
-+        if name is None:
-+            message = ["Unexpected end of template."]
-+        else:
-+            message = ["Encountered unknown tag '%s'." % name]
-+
-+        if currently_looking:
-+            if name is not None and name in expected:
-+                message.append(
-+                    "You probably made a nesting mistake. Jinja "
-+                    "is expecting this tag, but currently looking "
-+                    "for %s." % currently_looking
-+                )
-+            else:
-+                message.append(
-+                    "Jinja was looking for the following tags: "
-+                    "%s." % currently_looking
-+                )
-+
-+        if self._tag_stack:
-+            message.append(
-+                "The innermost block that needs to be "
-+                "closed is '%s'." % self._tag_stack[-1]
-+            )
-+
-+        self.fail(" ".join(message), lineno)
-+
-+    def fail_unknown_tag(self, name, lineno=None):
-+        """Called if the parser encounters an unknown tag.  Tries to fail
-+        with a human readable error message that could help to identify
-+        the problem.
-+        """
-+        return self._fail_ut_eof(name, self._end_token_stack, lineno)
-+
-+    def fail_eof(self, end_tokens=None, lineno=None):
-+        """Like fail_unknown_tag but for end of template situations."""
-+        stack = list(self._end_token_stack)
-+        if end_tokens is not None:
-+            stack.append(end_tokens)
-+        return self._fail_ut_eof(None, stack, lineno)
-+
-+    def is_tuple_end(self, extra_end_rules=None):
-+        """Are we at the end of a tuple?"""
-+        if self.stream.current.type in ("variable_end", "block_end", "rparen"):
-+            return True
-+        elif extra_end_rules is not None:
-+            return self.stream.current.test_any(extra_end_rules)
-+        return False
-+
-+    def free_identifier(self, lineno=None):
-+        """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
-+        self._last_identifier += 1
-+        rv = object.__new__(nodes.InternalName)
-+        nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno)
-+        return rv
-+
-+    def parse_statement(self):
-+        """Parse a single statement."""
-+        token = self.stream.current
-+        if token.type != "name":
-+            self.fail("tag name expected", token.lineno)
-+        self._tag_stack.append(token.value)
-+        pop_tag = True
-+        try:
-+            if token.value in _statement_keywords:
-+                return getattr(self, "parse_" + self.stream.current.value)()
-+            if token.value == "call":
-+                return self.parse_call_block()
-+            if token.value == "filter":
-+                return self.parse_filter_block()
-+            ext = self.extensions.get(token.value)
-+            if ext is not None:
-+                return ext(self)
-+
-+            # did not work out, remove the token we pushed by accident
-+            # from the stack so that the unknown tag fail function can
-+            # produce a proper error message.
-+            self._tag_stack.pop()
-+            pop_tag = False
-+            self.fail_unknown_tag(token.value, token.lineno)
-+        finally:
-+            if pop_tag:
-+                self._tag_stack.pop()
-+
-+    def parse_statements(self, end_tokens, drop_needle=False):
-+        """Parse multiple statements into a list until one of the end tokens
-+        is reached.  This is used to parse the body of statements as it also
-+        parses template data if appropriate.  The parser checks first if the
-+        current token is a colon and skips it if there is one.  Then it checks
-+        for the block end and parses until one of the `end_tokens` is
-+        reached.  By default the active token in the stream at the end of
-+        the call is the matched end token.  If this is not wanted, `drop_needle`
-+        can be set to `True` and the end token is removed.
-+        """
-+        # the first token may be a colon for python compatibility
-+        self.stream.skip_if("colon")
-+
-+        # in the future it would be possible to add whole code sections
-+        # by adding some sort of end of statement token and parsing those here.
-+        self.stream.expect("block_end")
-+        result = self.subparse(end_tokens)
-+
-+        # we reached the end of the template too early, the subparser
-+        # does not check for this, so we do that now
-+        if self.stream.current.type == "eof":
-+            self.fail_eof(end_tokens)
-+
-+        if drop_needle:
-+            next(self.stream)
-+        return result
-+
-+    def parse_set(self):
-+        """Parse an assign statement."""
-+        lineno = next(self.stream).lineno
-+        target = self.parse_assign_target(with_namespace=True)
-+        if self.stream.skip_if("assign"):
-+            expr = self.parse_tuple()
-+            return nodes.Assign(target, expr, lineno=lineno)
-+        filter_node = self.parse_filter(None)
-+        body = self.parse_statements(("name:endset",), drop_needle=True)
-+        return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
-+
-+    def parse_for(self):
-+        """Parse a for loop."""
-+        lineno = self.stream.expect("name:for").lineno
-+        target = self.parse_assign_target(extra_end_rules=("name:in",))
-+        self.stream.expect("name:in")
-+        iter = self.parse_tuple(
-+            with_condexpr=False, extra_end_rules=("name:recursive",)
-+        )
-+        test = None
-+        if self.stream.skip_if("name:if"):
-+            test = self.parse_expression()
-+        recursive = self.stream.skip_if("name:recursive")
-+        body = self.parse_statements(("name:endfor", "name:else"))
-+        if next(self.stream).value == "endfor":
-+            else_ = []
-+        else:
-+            else_ = self.parse_statements(("name:endfor",), drop_needle=True)
-+        return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
-+
-+    def parse_if(self):
-+        """Parse an if construct."""
-+        node = result = nodes.If(lineno=self.stream.expect("name:if").lineno)
-+        while 1:
-+            node.test = self.parse_tuple(with_condexpr=False)
-+            node.body = self.parse_statements(("name:elif", "name:else", "name:endif"))
-+            node.elif_ = []
-+            node.else_ = []
-+            token = next(self.stream)
-+            if token.test("name:elif"):
-+                node = nodes.If(lineno=self.stream.current.lineno)
-+                result.elif_.append(node)
-+                continue
-+            elif token.test("name:else"):
-+                result.else_ = self.parse_statements(("name:endif",), drop_needle=True)
-+            break
-+        return result
-+
-+    def parse_with(self):
-+        node = nodes.With(lineno=next(self.stream).lineno)
-+        targets = []
-+        values = []
-+        while self.stream.current.type != "block_end":
-+            if targets:
-+                self.stream.expect("comma")
-+            target = self.parse_assign_target()
-+            target.set_ctx("param")
-+            targets.append(target)
-+            self.stream.expect("assign")
-+            values.append(self.parse_expression())
-+        node.targets = targets
-+        node.values = values
-+        node.body = self.parse_statements(("name:endwith",), drop_needle=True)
-+        return node
-+
-+    def parse_autoescape(self):
-+        node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
-+        node.options = [nodes.Keyword("autoescape", self.parse_expression())]
-+        node.body = self.parse_statements(("name:endautoescape",), drop_needle=True)
-+        return nodes.Scope([node])
-+
-+    def parse_block(self):
-+        node = nodes.Block(lineno=next(self.stream).lineno)
-+        node.name = self.stream.expect("name").value
-+        node.scoped = self.stream.skip_if("name:scoped")
-+
-+        # common problem people encounter when switching from django
-+        # to jinja.  we do not support hyphens in block names, so let's
-+        # raise a nicer error message in that case.
-+        if self.stream.current.type == "sub":
-+            self.fail(
-+                "Block names in Jinja have to be valid Python "
-+                "identifiers and may not contain hyphens, use an "
-+                "underscore instead."
-+            )
-+
-+        node.body = self.parse_statements(("name:endblock",), drop_needle=True)
-+        self.stream.skip_if("name:" + node.name)
-+        return node
-+
-+    def parse_extends(self):
-+        node = nodes.Extends(lineno=next(self.stream).lineno)
-+        node.template = self.parse_expression()
-+        return node
-+
-+    def parse_import_context(self, node, default):
-+        if self.stream.current.test_any(
-+            "name:with", "name:without"
-+        ) and self.stream.look().test("name:context"):
-+            node.with_context = next(self.stream).value == "with"
-+            self.stream.skip()
-+        else:
-+            node.with_context = default
-+        return node
-+
-+    def parse_include(self):
-+        node = nodes.Include(lineno=next(self.stream).lineno)
-+        node.template = self.parse_expression()
-+        if self.stream.current.test("name:ignore") and self.stream.look().test(
-+            "name:missing"
-+        ):
-+            node.ignore_missing = True
-+            self.stream.skip(2)
-+        else:
-+            node.ignore_missing = False
-+        return self.parse_import_context(node, True)
-+
-+    def parse_import(self):
-+        node = nodes.Import(lineno=next(self.stream).lineno)
-+        node.template = self.parse_expression()
-+        self.stream.expect("name:as")
-+        node.target = self.parse_assign_target(name_only=True).name
-+        return self.parse_import_context(node, False)
-+
-+    def parse_from(self):
-+        node = nodes.FromImport(lineno=next(self.stream).lineno)
-+        node.template = self.parse_expression()
-+        self.stream.expect("name:import")
-+        node.names = []
-+
-+        def parse_context():
-+            if self.stream.current.value in (
-+                "with",
-+                "without",
-+            ) and self.stream.look().test("name:context"):
-+                node.with_context = next(self.stream).value == "with"
-+                self.stream.skip()
-+                return True
-+            return False
-+
-+        while 1:
-+            if node.names:
-+                self.stream.expect("comma")
-+            if self.stream.current.type == "name":
-+                if parse_context():
-+                    break
-+                target = self.parse_assign_target(name_only=True)
-+                if target.name.startswith("_"):
-+                    self.fail(
-+                        "names starting with an underline can not be imported",
-+                        target.lineno,
-+                        exc=TemplateAssertionError,
-+                    )
-+                if self.stream.skip_if("name:as"):
-+                    alias = self.parse_assign_target(name_only=True)
-+                    node.names.append((target.name, alias.name))
-+                else:
-+                    node.names.append(target.name)
-+                if parse_context() or self.stream.current.type != "comma":
-+                    break
-+            else:
-+                self.stream.expect("name")
-+        if not hasattr(node, "with_context"):
-+            node.with_context = False
-+        return node
-+
-+    def parse_signature(self, node):
-+        node.args = args = []
-+        node.defaults = defaults = []
-+        self.stream.expect("lparen")
-+        while self.stream.current.type != "rparen":
-+            if args:
-+                self.stream.expect("comma")
-+            arg = self.parse_assign_target(name_only=True)
-+            arg.set_ctx("param")
-+            if self.stream.skip_if("assign"):
-+                defaults.append(self.parse_expression())
-+            elif defaults:
-+                self.fail("non-default argument follows default argument")
-+            args.append(arg)
-+        self.stream.expect("rparen")
-+
-+    def parse_call_block(self):
-+        node = nodes.CallBlock(lineno=next(self.stream).lineno)
-+        if self.stream.current.type == "lparen":
-+            self.parse_signature(node)
-+        else:
-+            node.args = []
-+            node.defaults = []
-+
-+        node.call = self.parse_expression()
-+        if not isinstance(node.call, nodes.Call):
-+            self.fail("expected call", node.lineno)
-+        node.body = self.parse_statements(("name:endcall",), drop_needle=True)
-+        return node
-+
-+    def parse_filter_block(self):
-+        node = nodes.FilterBlock(lineno=next(self.stream).lineno)
-+        node.filter = self.parse_filter(None, start_inline=True)
-+        node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
-+        return node
-+
-+    def parse_macro(self):
-+        node = nodes.Macro(lineno=next(self.stream).lineno)
-+        node.name = self.parse_assign_target(name_only=True).name
-+        self.parse_signature(node)
-+        node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
-+        return node
-+
-+    def parse_print(self):
-+        node = nodes.Output(lineno=next(self.stream).lineno)
-+        node.nodes = []
-+        while self.stream.current.type != "block_end":
-+            if node.nodes:
-+                self.stream.expect("comma")
-+            node.nodes.append(self.parse_expression())
-+        return node
-+
-+    def parse_assign_target(
-+        self,
-+        with_tuple=True,
-+        name_only=False,
-+        extra_end_rules=None,
-+        with_namespace=False,
-+    ):
-+        """Parse an assignment target.  As Jinja allows assignments to
-+        tuples, this function can parse all allowed assignment targets.  By
-+        default assignments to tuples are parsed; that can be disabled by
-+        setting `with_tuple` to `False`.  If only assignments to names are
-+        wanted `name_only` can be set to `True`.  The `extra_end_rules`
-+        parameter is forwarded to the tuple parsing function.  If
-+        `with_namespace` is enabled, a namespace assignment may be parsed.
-+        """
-+        if with_namespace and self.stream.look().type == "dot":
-+            token = self.stream.expect("name")
-+            next(self.stream)  # dot
-+            attr = self.stream.expect("name")
-+            target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
-+        elif name_only:
-+            token = self.stream.expect("name")
-+            target = nodes.Name(token.value, "store", lineno=token.lineno)
-+        else:
-+            if with_tuple:
-+                target = self.parse_tuple(
-+                    simplified=True, extra_end_rules=extra_end_rules
-+                )
-+            else:
-+                target = self.parse_primary()
-+            target.set_ctx("store")
-+        if not target.can_assign():
-+            self.fail(
-+                "can't assign to %r" % target.__class__.__name__.lower(), target.lineno
-+            )
-+        return target
-+
-+    def parse_expression(self, with_condexpr=True):
-+        """Parse an expression.  Per default all expressions are parsed, if
-+        the optional `with_condexpr` parameter is set to `False` conditional
-+        expressions are not parsed.
-+        """
-+        if with_condexpr:
-+            return self.parse_condexpr()
-+        return self.parse_or()
-+
-+    def parse_condexpr(self):
-+        lineno = self.stream.current.lineno
-+        expr1 = self.parse_or()
-+        while self.stream.skip_if("name:if"):
-+            expr2 = self.parse_or()
-+            if self.stream.skip_if("name:else"):
-+                expr3 = self.parse_condexpr()
-+            else:
-+                expr3 = None
-+            expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
-+            lineno = self.stream.current.lineno
-+        return expr1
-+
-+    def parse_or(self):
-+        lineno = self.stream.current.lineno
-+        left = self.parse_and()
-+        while self.stream.skip_if("name:or"):
-+            right = self.parse_and()
-+            left = nodes.Or(left, right, lineno=lineno)
-+            lineno = self.stream.current.lineno
-+        return left
-+
-+    def parse_and(self):
-+        lineno = self.stream.current.lineno
-+        left = self.parse_not()
-+        while self.stream.skip_if("name:and"):
-+            right = self.parse_not()
-+            left = nodes.And(left, right, lineno=lineno)
-+            lineno = self.stream.current.lineno
-+        return left
-+
-+    def parse_not(self):
-+        if self.stream.current.test("name:not"):
-+            lineno = next(self.stream).lineno
-+            return nodes.Not(self.parse_not(), lineno=lineno)
-+        return self.parse_compare()
-+
-+    def parse_compare(self):
-+        lineno = self.stream.current.lineno
-+        expr = self.parse_math1()
-+        ops = []
-+        while 1:
-+            token_type = self.stream.current.type
-+            if token_type in _compare_operators:
-+                next(self.stream)
-+                ops.append(nodes.Operand(token_type, self.parse_math1()))
-+            elif self.stream.skip_if("name:in"):
-+                ops.append(nodes.Operand("in", self.parse_math1()))
-+            elif self.stream.current.test("name:not") and self.stream.look().test(
-+                "name:in"
-+            ):
-+                self.stream.skip(2)
-+                ops.append(nodes.Operand("notin", self.parse_math1()))
-+            else:
-+                break
-+            lineno = self.stream.current.lineno
-+        if not ops:
-+            return expr
-+        return nodes.Compare(expr, ops, lineno=lineno)
-+
-+    def parse_math1(self):
-+        lineno = self.stream.current.lineno
-+        left = self.parse_concat()
-+        while self.stream.current.type in ("add", "sub"):
-+            cls = _math_nodes[self.stream.current.type]
-+            next(self.stream)
-+            right = self.parse_concat()
-+            left = cls(left, right, lineno=lineno)
-+            lineno = self.stream.current.lineno
-+        return left
-+
-+    def parse_concat(self):
-+        lineno = self.stream.current.lineno
-+        args = [self.parse_math2()]
-+        while self.stream.current.type == "tilde":
-+            next(self.stream)
-+            args.append(self.parse_math2())
-+        if len(args) == 1:
-+            return args[0]
-+        return nodes.Concat(args, lineno=lineno)
-+
-+    def parse_math2(self):
-+        lineno = self.stream.current.lineno
-+        left = self.parse_pow()
-+        while self.stream.current.type in ("mul", "div", "floordiv", "mod"):
-+            cls = _math_nodes[self.stream.current.type]
-+            next(self.stream)
-+            right = self.parse_pow()
-+            left = cls(left, right, lineno=lineno)
-+            lineno = self.stream.current.lineno
-+        return left
-+
-+    def parse_pow(self):
-+        lineno = self.stream.current.lineno
-+        left = self.parse_unary()
-+        while self.stream.current.type == "pow":
-+            next(self.stream)
-+            right = self.parse_unary()
-+            left = nodes.Pow(left, right, lineno=lineno)
-+            lineno = self.stream.current.lineno
-+        return left
-+
-+    def parse_unary(self, with_filter=True):
-+        token_type = self.stream.current.type
-+        lineno = self.stream.current.lineno
-+        if token_type == "sub":
-+            next(self.stream)
-+            node = nodes.Neg(self.parse_unary(False), lineno=lineno)
-+        elif token_type == "add":
-+            next(self.stream)
-+            node = nodes.Pos(self.parse_unary(False), lineno=lineno)
-+        else:
-+            node = self.parse_primary()
-+        node = self.parse_postfix(node)
-+        if with_filter:
-+            node = self.parse_filter_expr(node)
-+        return node
-+
-+    def parse_primary(self):
-+        token = self.stream.current
-+        if token.type == "name":
-+            if token.value in ("true", "false", "True", "False"):
-+                node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno)
-+            elif token.value in ("none", "None"):
-+                node = nodes.Const(None, lineno=token.lineno)
-+            else:
-+                node = nodes.Name(token.value, "load", lineno=token.lineno)
-+            next(self.stream)
-+        elif token.type == "string":
-+            next(self.stream)
-+            buf = [token.value]
-+            lineno = token.lineno
-+            while self.stream.current.type == "string":
-+                buf.append(self.stream.current.value)
-+                next(self.stream)
-+            node = nodes.Const("".join(buf), lineno=lineno)
-+        elif token.type in ("integer", "float"):
-+            next(self.stream)
-+            node = nodes.Const(token.value, lineno=token.lineno)
-+        elif token.type == "lparen":
-+            next(self.stream)
-+            node = self.parse_tuple(explicit_parentheses=True)
-+            self.stream.expect("rparen")
-+        elif token.type == "lbracket":
-+            node = self.parse_list()
-+        elif token.type == "lbrace":
-+            node = self.parse_dict()
-+        else:
-+            self.fail("unexpected '%s'" % describe_token(token), token.lineno)
-+        return node
-+
-+    def parse_tuple(
-+        self,
-+        simplified=False,
-+        with_condexpr=True,
-+        extra_end_rules=None,
-+        explicit_parentheses=False,
-+    ):
-+        """Works like `parse_expression` but if multiple expressions are
-+        delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
-+        This method may also return an ordinary expression node instead of
-+        a tuple if no commas were found.
-+
-+        The default parsing mode is a full tuple.  If `simplified` is `True`
-+        only names and literals are parsed.  The `with_condexpr` parameter is
-+        forwarded to :meth:`parse_expression`.
-+
-+        Because tuples do not require delimiters and may end in a bogus comma
-+        an extra hint is needed that marks the end of a tuple.  For example
-+        for loops support tuples between `for` and `in`.  In that case the
-+        `extra_end_rules` is set to ``['name:in']``.
-+
-+        `explicit_parentheses` is true if the parsing was triggered by an
-+        expression in parentheses.  This is used to figure out if an empty
-+        tuple is a valid expression or not.
-+        """
-+        lineno = self.stream.current.lineno
-+        if simplified:
-+            parse = self.parse_primary
-+        elif with_condexpr:
-+            parse = self.parse_expression
-+        else:
-+
-+            def parse():
-+                return self.parse_expression(with_condexpr=False)
-+
-+        args = []
-+        is_tuple = False
-+        while 1:
-+            if args:
-+                self.stream.expect("comma")
-+            if self.is_tuple_end(extra_end_rules):
-+                break
-+            args.append(parse())
-+            if self.stream.current.type == "comma":
-+                is_tuple = True
-+            else:
-+                break
-+            lineno = self.stream.current.lineno
-+
-+        if not is_tuple:
-+            if args:
-+                return args[0]
-+
-+            # if we don't have explicit parentheses, an empty tuple is
-+            # not a valid expression.  This would mean nothing (literally
-+            # nothing) in the spot of an expression would be an empty
-+            # tuple.
-+            if not explicit_parentheses:
-+                self.fail(
-+                    "Expected an expression, got '%s'"
-+                    % describe_token(self.stream.current)
-+                )
-+
-+        return nodes.Tuple(args, "load", lineno=lineno)
-+
-+    def parse_list(self):
-+        token = self.stream.expect("lbracket")
-+        items = []
-+        while self.stream.current.type != "rbracket":
-+            if items:
-+                self.stream.expect("comma")
-+            if self.stream.current.type == "rbracket":
-+                break
-+            items.append(self.parse_expression())
-+        self.stream.expect("rbracket")
-+        return nodes.List(items, lineno=token.lineno)
-+
-+    def parse_dict(self):
-+        token = self.stream.expect("lbrace")
-+        items = []
-+        while self.stream.current.type != "rbrace":
-+            if items:
-+                self.stream.expect("comma")
-+            if self.stream.current.type == "rbrace":
-+                break
-+            key = self.parse_expression()
-+            self.stream.expect("colon")
-+            value = self.parse_expression()
-+            items.append(nodes.Pair(key, value, lineno=key.lineno))
-+        self.stream.expect("rbrace")
-+        return nodes.Dict(items, lineno=token.lineno)
-+
-+    def parse_postfix(self, node):
-+        while 1:
-+            token_type = self.stream.current.type
-+            if token_type == "dot" or token_type == "lbracket":
-+                node = self.parse_subscript(node)
-+            # calls are valid both after postfix expressions (getattr
-+            # and getitem) as well as filters and tests
-+            elif token_type == "lparen":
-+                node = self.parse_call(node)
-+            else:
-+                break
-+        return node
-+
-+    def parse_filter_expr(self, node):
-+        while 1:
-+            token_type = self.stream.current.type
-+            if token_type == "pipe":
-+                node = self.parse_filter(node)
-+            elif token_type == "name" and self.stream.current.value == "is":
-+                node = self.parse_test(node)
-+            # calls are valid both after postfix expressions (getattr
-+            # and getitem) as well as filters and tests
-+            elif token_type == "lparen":
-+                node = self.parse_call(node)
-+            else:
-+                break
-+        return node
-+
-+    def parse_subscript(self, node):
-+        token = next(self.stream)
-+        if token.type == "dot":
-+            attr_token = self.stream.current
-+            next(self.stream)
-+            if attr_token.type == "name":
-+                return nodes.Getattr(
-+                    node, attr_token.value, "load", lineno=token.lineno
-+                )
-+            elif attr_token.type != "integer":
-+                self.fail("expected name or number", attr_token.lineno)
-+            arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
-+            return nodes.Getitem(node, arg, "load", lineno=token.lineno)
-+        if token.type == "lbracket":
-+            args = []
-+            while self.stream.current.type != "rbracket":
-+                if args:
-+                    self.stream.expect("comma")
-+                args.append(self.parse_subscribed())
-+            self.stream.expect("rbracket")
-+            if len(args) == 1:
-+                arg = args[0]
-+            else:
-+                arg = nodes.Tuple(args, "load", lineno=token.lineno)
-+            return nodes.Getitem(node, arg, "load", lineno=token.lineno)
-+        self.fail("expected subscript expression", token.lineno)
-+
-+    def parse_subscribed(self):
-+        lineno = self.stream.current.lineno
-+
-+        if self.stream.current.type == "colon":
-+            next(self.stream)
-+            args = [None]
-+        else:
-+            node = self.parse_expression()
-+            if self.stream.current.type != "colon":
-+                return node
-+            next(self.stream)
-+            args = [node]
-+
-+        if self.stream.current.type == "colon":
-+            args.append(None)
-+        elif self.stream.current.type not in ("rbracket", "comma"):
-+            args.append(self.parse_expression())
-+        else:
-+            args.append(None)
-+
-+        if self.stream.current.type == "colon":
-+            next(self.stream)
-+            if self.stream.current.type not in ("rbracket", "comma"):
-+                args.append(self.parse_expression())
-+            else:
-+                args.append(None)
-+        else:
-+            args.append(None)
-+
-+        return nodes.Slice(lineno=lineno, *args)
-+
-+    def parse_call(self, node):
-+        token = self.stream.expect("lparen")
-+        args = []
-+        kwargs = []
-+        dyn_args = dyn_kwargs = None
-+        require_comma = False
-+
-+        def ensure(expr):
-+            if not expr:
-+                self.fail("invalid syntax for function call expression", token.lineno)
-+
-+        while self.stream.current.type != "rparen":
-+            if require_comma:
-+                self.stream.expect("comma")
-+                # support for trailing comma
-+                if self.stream.current.type == "rparen":
-+                    break
-+            if self.stream.current.type == "mul":
-+                ensure(dyn_args is None and dyn_kwargs is None)
-+                next(self.stream)
-+                dyn_args = self.parse_expression()
-+            elif self.stream.current.type == "pow":
-+                ensure(dyn_kwargs is None)
-+                next(self.stream)
-+                dyn_kwargs = self.parse_expression()
-+            else:
-+                if (
-+                    self.stream.current.type == "name"
-+                    and self.stream.look().type == "assign"
-+                ):
-+                    # Parsing a kwarg
-+                    ensure(dyn_kwargs is None)
-+                    key = self.stream.current.value
-+                    self.stream.skip(2)
-+                    value = self.parse_expression()
-+                    kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
-+                else:
-+                    # Parsing an arg
-+                    ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
-+                    args.append(self.parse_expression())
-+
-+            require_comma = True
-+        self.stream.expect("rparen")
-+
-+        if node is None:
-+            return args, kwargs, dyn_args, dyn_kwargs
-+        return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)
-+
-+    def parse_filter(self, node, start_inline=False):
-+        while self.stream.current.type == "pipe" or start_inline:
-+            if not start_inline:
-+                next(self.stream)
-+            token = self.stream.expect("name")
-+            name = token.value
-+            while self.stream.current.type == "dot":
-+                next(self.stream)
-+                name += "." + self.stream.expect("name").value
-+            if self.stream.current.type == "lparen":
-+                args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
-+            else:
-+                args = []
-+                kwargs = []
-+                dyn_args = dyn_kwargs = None
-+            node = nodes.Filter(
-+                node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
-+            )
-+            start_inline = False
-+        return node
-+
-+    def parse_test(self, node):
-+        token = next(self.stream)
-+        if self.stream.current.test("name:not"):
-+            next(self.stream)
-+            negated = True
-+        else:
-+            negated = False
-+        name = self.stream.expect("name").value
-+        while self.stream.current.type == "dot":
-+            next(self.stream)
-+            name += "." + self.stream.expect("name").value
-+        dyn_args = dyn_kwargs = None
-+        kwargs = []
-+        if self.stream.current.type == "lparen":
-+            args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
-+        elif self.stream.current.type in (
-+            "name",
-+            "string",
-+            "integer",
-+            "float",
-+            "lparen",
-+            "lbracket",
-+            "lbrace",
-+        ) and not self.stream.current.test_any("name:else", "name:or", "name:and"):
-+            if self.stream.current.test("name:is"):
-+                self.fail("You cannot chain multiple tests with is")
-+            arg_node = self.parse_primary()
-+            arg_node = self.parse_postfix(arg_node)
-+            args = [arg_node]
-+        else:
-+            args = []
-+        node = nodes.Test(
-+            node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
-+        )
-+        if negated:
-+            node = nodes.Not(node, lineno=token.lineno)
-+        return node
-+
-+    def subparse(self, end_tokens=None):
-+        body = []
-+        data_buffer = []
-+        add_data = data_buffer.append
-+
-+        if end_tokens is not None:
-+            self._end_token_stack.append(end_tokens)
-+
-+        def flush_data():
-+            if data_buffer:
-+                lineno = data_buffer[0].lineno
-+                body.append(nodes.Output(data_buffer[:], lineno=lineno))
-+                del data_buffer[:]
-+
-+        try:
-+            while self.stream:
-+                token = self.stream.current
-+                if token.type == "data":
-+                    if token.value:
-+                        add_data(nodes.TemplateData(token.value, lineno=token.lineno))
-+                    next(self.stream)
-+                elif token.type == "variable_begin":
-+                    next(self.stream)
-+                    add_data(self.parse_tuple(with_condexpr=True))
-+                    self.stream.expect("variable_end")
-+                elif token.type == "block_begin":
-+                    flush_data()
-+                    next(self.stream)
-+                    if end_tokens is not None and self.stream.current.test_any(
-+                        *end_tokens
-+                    ):
-+                        return body
-+                    rv = self.parse_statement()
-+                    if isinstance(rv, list):
-+                        body.extend(rv)
-+                    else:
-+                        body.append(rv)
-+                    self.stream.expect("block_end")
-+                else:
-+                    raise AssertionError("internal parsing error")
-+
-+            flush_data()
-+        finally:
-+            if end_tokens is not None:
-+                self._end_token_stack.pop()
-+
-+        return body
-+
-+    def parse(self):
-+        """Parse the whole template into a `Template` node."""
-+        result = nodes.Template(self.subparse(), lineno=1)
-+        result.set_environment(self.environment)
-+        return result
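
A sketch of driving the parser by hand; normally `Environment.parse` wraps exactly this (assuming jinja2 is importable):

    from jinja2 import Environment
    from jinja2.parser import Parser

    env = Environment()
    template_ast = Parser(env, "Hello {{ name }}!").parse()
    print(template_ast)  # Template(body=[Output(nodes=[...])], ...)
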
-diff --git a/third_party/python/Jinja2/jinja2/runtime.py b/third_party/python/Jinja2/jinja2/runtime.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/runtime.py
-@@ -0,0 +1,1011 @@
-+# -*- coding: utf-8 -*-
-+"""The runtime functions and state used by compiled templates."""
-+import sys
-+from itertools import chain
-+from types import MethodType
-+
-+from markupsafe import escape  # noqa: F401
-+from markupsafe import Markup
-+from markupsafe import soft_unicode
-+
-+from ._compat import abc
-+from ._compat import imap
-+from ._compat import implements_iterator
-+from ._compat import implements_to_string
-+from ._compat import iteritems
-+from ._compat import PY2
-+from ._compat import string_types
-+from ._compat import text_type
-+from ._compat import with_metaclass
-+from .exceptions import TemplateNotFound  # noqa: F401
-+from .exceptions import TemplateRuntimeError  # noqa: F401
-+from .exceptions import UndefinedError
-+from .nodes import EvalContext
-+from .utils import concat
-+from .utils import evalcontextfunction
-+from .utils import internalcode
-+from .utils import missing
-+from .utils import Namespace  # noqa: F401
-+from .utils import object_type_repr
-+
-+# these variables are exported to the template runtime
-+exported = [
-+    "LoopContext",
-+    "TemplateReference",
-+    "Macro",
-+    "Markup",
-+    "TemplateRuntimeError",
-+    "missing",
-+    "concat",
-+    "escape",
-+    "markup_join",
-+    "unicode_join",
-+    "to_string",
-+    "identity",
-+    "TemplateNotFound",
-+    "Namespace",
-+    "Undefined",
-+]
-+
-+#: the name of the function that is used to convert something into
-+#: a string.  We can just use the text type here.
-+to_string = text_type
-+
-+
-+def identity(x):
-+    """Returns its argument. Useful for certain things in the
-+    environment.
-+    """
-+    return x
-+
-+
-+def markup_join(seq):
-+    """Concatenation that escapes if necessary and converts to unicode."""
-+    buf = []
-+    iterator = imap(soft_unicode, seq)
-+    for arg in iterator:
-+        buf.append(arg)
-+        if hasattr(arg, "__html__"):
-+            return Markup(u"").join(chain(buf, iterator))
-+    return concat(buf)
-+
-+
-+def unicode_join(seq):
-+    """Simple args to unicode conversion and concatenation."""
-+    return concat(imap(text_type, seq))
-+
-+
-+def new_context(
-+    environment,
-+    template_name,
-+    blocks,
-+    vars=None,
-+    shared=None,
-+    globals=None,
-+    locals=None,
-+):
-+    """Internal helper for context creation."""
-+    if vars is None:
-+        vars = {}
-+    if shared:
-+        parent = vars
-+    else:
-+        parent = dict(globals or (), **vars)
-+    if locals:
-+        # if the parent is shared a copy should be created because
-+        # we don't want to modify the dict passed
-+        if shared:
-+            parent = dict(parent)
-+        for key, value in iteritems(locals):
-+            if value is not missing:
-+                parent[key] = value
-+    return environment.context_class(environment, parent, template_name, blocks)
-+
-+
-+class TemplateReference(object):
-+    """The `self` in templates."""
-+
-+    def __init__(self, context):
-+        self.__context = context
-+
-+    def __getitem__(self, name):
-+        blocks = self.__context.blocks[name]
-+        return BlockReference(name, self.__context, blocks, 0)
-+
-+    def __repr__(self):
-+        return "<%s %r>" % (self.__class__.__name__, self.__context.name)
-+
-+
-+def _get_func(x):
-+    return getattr(x, "__func__", x)
-+
-+
-+class ContextMeta(type):
-+    def __new__(mcs, name, bases, d):
-+        rv = type.__new__(mcs, name, bases, d)
-+        if bases == ():
-+            return rv
-+
-+        resolve = _get_func(rv.resolve)
-+        default_resolve = _get_func(Context.resolve)
-+        resolve_or_missing = _get_func(rv.resolve_or_missing)
-+        default_resolve_or_missing = _get_func(Context.resolve_or_missing)
-+
-+        # If we have a changed resolve but no changed default or missing
-+        # resolve we invert the call logic.
-+        if (
-+            resolve is not default_resolve
-+            and resolve_or_missing is default_resolve_or_missing
-+        ):
-+            rv._legacy_resolve_mode = True
-+        elif (
-+            resolve is default_resolve
-+            and resolve_or_missing is default_resolve_or_missing
-+        ):
-+            rv._fast_resolve_mode = True
-+
-+        return rv
-+
-+
-+def resolve_or_missing(context, key, missing=missing):
-+    if key in context.vars:
-+        return context.vars[key]
-+    if key in context.parent:
-+        return context.parent[key]
-+    return missing
-+
-+
-+class Context(with_metaclass(ContextMeta)):
-+    """The template context holds the variables of a template.  It stores the
-+    values passed to the template and also the names the template exports.
-+    Creating instances is neither supported nor useful as it's created
-+    automatically at various stages of the template evaluation and should not
-+    be created by hand.
-+
-+    The context is immutable.  Modifications on :attr:`parent` **must not**
-+    happen and modifications on :attr:`vars` are allowed from generated
-+    template code only.  Template filters and global functions marked as
-+    :func:`contextfunction`\\s get the active context passed as first argument
-+    and are allowed to access the context read-only.
-+
-+    The template context supports read-only dict operations (`get`,
-+    `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
-+    `__getitem__`, `__contains__`).  Additionally there is a :meth:`resolve`
-+    method that doesn't fail with a `KeyError` but returns an
-+    :class:`Undefined` object for missing variables.
-+    """
-+
-+    # XXX: we want to eventually make this be a deprecation warning and
-+    # remove it.
-+    _legacy_resolve_mode = False
-+    _fast_resolve_mode = False
-+
-+    def __init__(self, environment, parent, name, blocks):
-+        self.parent = parent
-+        self.vars = {}
-+        self.environment = environment
-+        self.eval_ctx = EvalContext(self.environment, name)
-+        self.exported_vars = set()
-+        self.name = name
-+
-+        # create the initial mapping of blocks.  Whenever template inheritance
-+        # takes place the runtime will update this mapping with the new blocks
-+        # from the template.
-+        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
-+
-+        # In case we detect the fast resolve mode we can set up an alias
-+        # here that bypasses the legacy code logic.
-+        if self._fast_resolve_mode:
-+            self.resolve_or_missing = MethodType(resolve_or_missing, self)
-+
-+    def super(self, name, current):
-+        """Render a parent block."""
-+        try:
-+            blocks = self.blocks[name]
-+            index = blocks.index(current) + 1
-+            blocks[index]
-+        except LookupError:
-+            return self.environment.undefined(
-+                "there is no parent block called %r." % name, name="super"
-+            )
-+        return BlockReference(name, self, blocks, index)
-+
-+    def get(self, key, default=None):
-+        """Returns an item from the template context, if it doesn't exist
-+        `default` is returned.
-+        """
-+        try:
-+            return self[key]
-+        except KeyError:
-+            return default
-+
-+    def resolve(self, key):
-+        """Looks up a variable like `__getitem__` or `get` but returns an
-+        :class:`Undefined` object with the name of the variable looked up.
-+        """
-+        if self._legacy_resolve_mode:
-+            rv = resolve_or_missing(self, key)
-+        else:
-+            rv = self.resolve_or_missing(key)
-+        if rv is missing:
-+            return self.environment.undefined(name=key)
-+        return rv
-+
-+    def resolve_or_missing(self, key):
-+        """Resolves a variable like :meth:`resolve` but returns the
-+        special `missing` value if it cannot be found.
-+        """
-+        if self._legacy_resolve_mode:
-+            rv = self.resolve(key)
-+            if isinstance(rv, Undefined):
-+                rv = missing
-+            return rv
-+        return resolve_or_missing(self, key)
-+
-+    def get_exported(self):
-+        """Get a new dict with the exported variables."""
-+        return dict((k, self.vars[k]) for k in self.exported_vars)
-+
-+    def get_all(self):
-+        """Return the complete context as dict including the exported
-+        variables.  For optimization reasons this might not return an
-+        actual copy so be careful with using it.
-+        """
-+        if not self.vars:
-+            return self.parent
-+        if not self.parent:
-+            return self.vars
-+        return dict(self.parent, **self.vars)
-+
-+    @internalcode
-+    def call(__self, __obj, *args, **kwargs):  # noqa: B902
-+        """Call the callable with the arguments and keyword arguments
-+        provided but inject the active context or environment as first
-+        argument if the callable is a :func:`contextfunction` or
-+        :func:`environmentfunction`.
-+        """
-+        if __debug__:
-+            __traceback_hide__ = True  # noqa
-+
-+        # Allow callable classes to take a context
-+        if hasattr(__obj, "__call__"):  # noqa: B004
-+            fn = __obj.__call__
-+            for fn_type in (
-+                "contextfunction",
-+                "evalcontextfunction",
-+                "environmentfunction",
-+            ):
-+                if hasattr(fn, fn_type):
-+                    __obj = fn
-+                    break
-+
-+        if callable(__obj):
-+            if getattr(__obj, "contextfunction", 0):
-+                args = (__self,) + args
-+            elif getattr(__obj, "evalcontextfunction", 0):
-+                args = (__self.eval_ctx,) + args
-+            elif getattr(__obj, "environmentfunction", 0):
-+                args = (__self.environment,) + args
-+        try:
-+            return __obj(*args, **kwargs)
-+        except StopIteration:
-+            return __self.environment.undefined(
-+                "value was undefined because "
-+                "a callable raised a "
-+                "StopIteration exception"
-+            )
-+
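Context.call above is the hook that makes the contextfunction family work: it inspects the marker attributes set by the decorators and prepends the matching object to the arguments. A small runnable sketch (function name and template are illustrative), assuming the vendored Jinja2 2.11 API:

from jinja2 import Environment, contextfunction

@contextfunction
def exported(context):
    # Context.call injects the active Context as the first argument.
    return sorted(context.exported_vars)

env = Environment()
env.globals["exported"] = exported
# A top-level {% set %} marks the name as exported before the call runs.
print(env.from_string("{% set greeting = 'hi' %}{{ exported() }}").render())
# ['greeting']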
-+    def derived(self, locals=None):
-+        """Internal helper function to create a derived context.  This is
-+        used in situations where the system needs a new context in the same
-+        template that is independent.
-+        """
-+        context = new_context(
-+            self.environment, self.name, {}, self.get_all(), True, None, locals
-+        )
-+        context.eval_ctx = self.eval_ctx
-+        context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
-+        return context
-+
-+    def _all(meth):  # noqa: B902
-+        def proxy(self):
-+            return getattr(self.get_all(), meth)()
-+
-+        proxy.__doc__ = getattr(dict, meth).__doc__
-+        proxy.__name__ = meth
-+        return proxy
-+
-+    keys = _all("keys")
-+    values = _all("values")
-+    items = _all("items")
-+
-+    # not available on python 3
-+    if PY2:
-+        iterkeys = _all("iterkeys")
-+        itervalues = _all("itervalues")
-+        iteritems = _all("iteritems")
-+    del _all
-+
-+    def __contains__(self, name):
-+        return name in self.vars or name in self.parent
-+
-+    def __getitem__(self, key):
-+        """Lookup a variable or raise `KeyError` if the variable is
-+        undefined.
-+        """
-+        item = self.resolve_or_missing(key)
-+        if item is missing:
-+            raise KeyError(key)
-+        return item
-+
-+    def __repr__(self):
-+        return "<%s %s of %r>" % (
-+            self.__class__.__name__,
-+            repr(self.get_all()),
-+            self.name,
-+        )
-+
-+
-+abc.Mapping.register(Context)
-+
-+
-+class BlockReference(object):
-+    """One block on a template reference."""
-+
-+    def __init__(self, name, context, stack, depth):
-+        self.name = name
-+        self._context = context
-+        self._stack = stack
-+        self._depth = depth
-+
-+    @property
-+    def super(self):
-+        """Super the block."""
-+        if self._depth + 1 >= len(self._stack):
-+            return self._context.environment.undefined(
-+                "there is no parent block called %r." % self.name, name="super"
-+            )
-+        return BlockReference(self.name, self._context, self._stack, self._depth + 1)
-+
-+    @internalcode
-+    def __call__(self):
-+        rv = concat(self._stack[self._depth](self._context))
-+        if self._context.eval_ctx.autoescape:
-+            rv = Markup(rv)
-+        return rv
-+
-+
-+@implements_iterator
-+class LoopContext:
-+    """A wrapper iterable for dynamic ``for`` loops, with information
-+    about the loop and iteration.
-+    """
-+
-+    #: Current iteration of the loop, starting at 0.
-+    index0 = -1
-+
-+    _length = None
-+    _after = missing
-+    _current = missing
-+    _before = missing
-+    _last_changed_value = missing
-+
-+    def __init__(self, iterable, undefined, recurse=None, depth0=0):
-+        """
-+        :param iterable: Iterable to wrap.
-+        :param undefined: :class:`Undefined` class to use for next and
-+            previous items.
-+        :param recurse: The function to render the loop body when the
-+            loop is marked recursive.
-+        :param depth0: Incremented when looping recursively.
-+        """
-+        self._iterable = iterable
-+        self._iterator = self._to_iterator(iterable)
-+        self._undefined = undefined
-+        self._recurse = recurse
-+        #: How many levels deep a recursive loop currently is, starting at 0.
-+        self.depth0 = depth0
-+
-+    @staticmethod
-+    def _to_iterator(iterable):
-+        return iter(iterable)
-+
-+    @property
-+    def length(self):
-+        """Length of the iterable.
-+
-+        If the iterable is a generator or otherwise does not have a
-+        size, it is eagerly evaluated to get a size.
-+        """
-+        if self._length is not None:
-+            return self._length
-+
-+        try:
-+            self._length = len(self._iterable)
-+        except TypeError:
-+            iterable = list(self._iterator)
-+            self._iterator = self._to_iterator(iterable)
-+            self._length = len(iterable) + self.index + (self._after is not missing)
-+
-+        return self._length
-+
-+    def __len__(self):
-+        return self.length
-+
-+    @property
-+    def depth(self):
-+        """How many levels deep a recursive loop currently is, starting at 1."""
-+        return self.depth0 + 1
-+
-+    @property
-+    def index(self):
-+        """Current iteration of the loop, starting at 1."""
-+        return self.index0 + 1
-+
-+    @property
-+    def revindex0(self):
-+        """Number of iterations from the end of the loop, ending at 0.
-+
-+        Requires calculating :attr:`length`.
-+        """
-+        return self.length - self.index
-+
-+    @property
-+    def revindex(self):
-+        """Number of iterations from the end of the loop, ending at 1.
-+
-+        Requires calculating :attr:`length`.
-+        """
-+        return self.length - self.index0
-+
-+    @property
-+    def first(self):
-+        """Whether this is the first iteration of the loop."""
-+        return self.index0 == 0
-+
-+    def _peek_next(self):
-+        """Return the next element in the iterable, or :data:`missing`
-+        if the iterable is exhausted. Only peeks one item ahead, caching
-+        the result in :attr:`_after` for use in subsequent checks. The
-+        cache is reset when :meth:`__next__` is called.
-+        """
-+        if self._after is not missing:
-+            return self._after
-+
-+        self._after = next(self._iterator, missing)
-+        return self._after
-+
-+    @property
-+    def last(self):
-+        """Whether this is the last iteration of the loop.
-+
-+        Causes the iterable to advance early. See
-+        :func:`itertools.groupby` for issues this can cause.
-+        The :func:`groupby` filter avoids that issue.
-+        """
-+        return self._peek_next() is missing
-+
-+    @property
-+    def previtem(self):
-+        """The item in the previous iteration. Undefined during the
-+        first iteration.
-+        """
-+        if self.first:
-+            return self._undefined("there is no previous item")
-+
-+        return self._before
-+
-+    @property
-+    def nextitem(self):
-+        """The item in the next iteration. Undefined during the last
-+        iteration.
-+
-+        Causes the iterable to advance early. See
-+        :func:`itertools.groupby` for issues this can cause.
-+        The :func:`groupby` filter avoids that issue.
-+        """
-+        rv = self._peek_next()
-+
-+        if rv is missing:
-+            return self._undefined("there is no next item")
-+
-+        return rv
-+
-+    def cycle(self, *args):
-+        """Return a value from the given args, cycling through based on
-+        the current :attr:`index0`.
-+
-+        :param args: One or more values to cycle through.
-+        """
-+        if not args:
-+            raise TypeError("no items for cycling given")
-+
-+        return args[self.index0 % len(args)]
-+
-+    def changed(self, *value):
-+        """Return ``True`` if previously called with a different value
-+        (including when called for the first time).
-+
-+        :param value: One or more values to compare to the last call.
-+        """
-+        if self._last_changed_value != value:
-+            self._last_changed_value = value
-+            return True
-+
-+        return False
-+
-+    def __iter__(self):
-+        return self
-+
-+    def __next__(self):
-+        if self._after is not missing:
-+            rv = self._after
-+            self._after = missing
-+        else:
-+            rv = next(self._iterator)
-+
-+        self.index0 += 1
-+        self._before = self._current
-+        self._current = rv
-+        return rv, self
-+
-+    @internalcode
-+    def __call__(self, iterable):
-+        """When iterating over nested data, render the body of the loop
-+        recursively with the given inner iterable data.
-+
-+        The loop must have the ``recursive`` marker for this to work.
-+        """
-+        if self._recurse is None:
-+            raise TypeError(
-+                "The loop must have the 'recursive' marker to be called recursively."
-+            )
-+
-+        return self._recurse(iterable, self._recurse, depth=self.depth)
-+
-+    def __repr__(self):
-+        return "<%s %d/%d>" % (self.__class__.__name__, self.index, self.length)
-+
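LoopContext is what templates see as `loop`. A short sketch of the common attributes (template and data are illustrative); note that `loop.length` and `loop.last` force eager evaluation or a one-item look-ahead, exactly as the docstrings above warn:

from jinja2 import Environment

tmpl = Environment().from_string(
    "{% for item in items %}"
    "{{ loop.index }}/{{ loop.length }}:{{ item }}"
    "{{ ', ' if not loop.last }}"
    "{% endfor %}"
)
print(tmpl.render(items=["a", "b", "c"]))
# 1/3:a, 2/3:b, 3/3:c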
-+
-+class Macro(object):
-+    """Wraps a macro function."""
-+
-+    def __init__(
-+        self,
-+        environment,
-+        func,
-+        name,
-+        arguments,
-+        catch_kwargs,
-+        catch_varargs,
-+        caller,
-+        default_autoescape=None,
-+    ):
-+        self._environment = environment
-+        self._func = func
-+        self._argument_count = len(arguments)
-+        self.name = name
-+        self.arguments = arguments
-+        self.catch_kwargs = catch_kwargs
-+        self.catch_varargs = catch_varargs
-+        self.caller = caller
-+        self.explicit_caller = "caller" in arguments
-+        if default_autoescape is None:
-+            default_autoescape = environment.autoescape
-+        self._default_autoescape = default_autoescape
-+
-+    @internalcode
-+    @evalcontextfunction
-+    def __call__(self, *args, **kwargs):
-+        # This requires a bit of explanation.  In the past we used to
-+        # decide largely based on compile-time information if a macro is
-+        # safe or unsafe.  While there was a volatile mode it was largely
-+        # unused for deciding on escaping.  This turns out to be
-+        # problematic for macros because whether a macro is safe depends not
-+        # on the escape mode when it was defined, but rather when it was used.
-+        #
-+        # Because however we export macros from the module system and
-+        # there are historic callers that do not pass an eval context (and
-+        # will continue to not pass one), we need to perform an instance
-+        # check here.
-+        #
-+        # This is considered safe because an eval context is not a valid
-+        # argument to callables otherwise anyway.  Worst case here is
-+        # that if no eval context is passed we fall back to the compile
-+        # time autoescape flag.
-+        if args and isinstance(args[0], EvalContext):
-+            autoescape = args[0].autoescape
-+            args = args[1:]
-+        else:
-+            autoescape = self._default_autoescape
-+
-+        # try to consume the positional arguments
-+        arguments = list(args[: self._argument_count])
-+        off = len(arguments)
-+
-+        # For information why this is necessary refer to the handling
-+        # of caller in the `macro_body` handler in the compiler.
-+        found_caller = False
-+
-+        # if the number of arguments consumed is not the number of
-+        # arguments expected we start filling in keyword arguments
-+        # and defaults.
-+        if off != self._argument_count:
-+            for name in self.arguments[len(arguments) :]:
-+                try:
-+                    value = kwargs.pop(name)
-+                except KeyError:
-+                    value = missing
-+                if name == "caller":
-+                    found_caller = True
-+                arguments.append(value)
-+        else:
-+            found_caller = self.explicit_caller
-+
-+        # it's important that the order of these arguments does not change
-+        # unless it is also changed in the compiler's `function_scoping` method.
-+        # the order is caller, keyword arguments, positional arguments!
-+        if self.caller and not found_caller:
-+            caller = kwargs.pop("caller", None)
-+            if caller is None:
-+                caller = self._environment.undefined("No caller defined", name="caller")
-+            arguments.append(caller)
-+
-+        if self.catch_kwargs:
-+            arguments.append(kwargs)
-+        elif kwargs:
-+            if "caller" in kwargs:
-+                raise TypeError(
-+                    "macro %r was invoked with two values for "
-+                    "the special caller argument.  This is "
-+                    "most likely a bug." % self.name
-+                )
-+            raise TypeError(
-+                "macro %r takes no keyword argument %r"
-+                % (self.name, next(iter(kwargs)))
-+            )
-+        if self.catch_varargs:
-+            arguments.append(args[self._argument_count :])
-+        elif len(args) > self._argument_count:
-+            raise TypeError(
-+                "macro %r takes not more than %d argument(s)"
-+                % (self.name, len(self.arguments))
-+            )
-+
-+        return self._invoke(arguments, autoescape)
-+
-+    def _invoke(self, arguments, autoescape):
-+        """This method is being swapped out by the async implementation."""
-+        rv = self._func(*arguments)
-+        if autoescape:
-+            rv = Markup(rv)
-+        return rv
-+
-+    def __repr__(self):
-+        return "<%s %s>" % (
-+            self.__class__.__name__,
-+            self.name is None and "anonymous" or repr(self.name),
-+        )
-+
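For reference, instances of Macro are what {% macro %} definitions compile to; defaults and keyword arguments flow through the __call__ logic above. A minimal usage sketch (names illustrative):

from jinja2 import Environment

tmpl = Environment().from_string(
    "{% macro field(name, value='') -%}"
    '<input name="{{ name }}" value="{{ value }}">'
    "{%- endmacro %}"
    "{{ field('user') }} {{ field('age', 42) }}"
)
print(tmpl.render())
# <input name="user" value=""> <input name="age" value="42">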
-+
-+@implements_to_string
-+class Undefined(object):
-+    """The default undefined type.  This undefined type can be printed and
-+    iterated over, but every other access will raise an :exc:`UndefinedError`:
-+
-+    >>> foo = Undefined(name='foo')
-+    >>> str(foo)
-+    ''
-+    >>> not foo
-+    True
-+    >>> foo + 42
-+    Traceback (most recent call last):
-+      ...
-+    jinja2.exceptions.UndefinedError: 'foo' is undefined
-+    """
-+
-+    __slots__ = (
-+        "_undefined_hint",
-+        "_undefined_obj",
-+        "_undefined_name",
-+        "_undefined_exception",
-+    )
-+
-+    def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
-+        self._undefined_hint = hint
-+        self._undefined_obj = obj
-+        self._undefined_name = name
-+        self._undefined_exception = exc
-+
-+    @property
-+    def _undefined_message(self):
-+        """Build a message about the undefined value based on how it was
-+        accessed.
-+        """
-+        if self._undefined_hint:
-+            return self._undefined_hint
-+
-+        if self._undefined_obj is missing:
-+            return "%r is undefined" % self._undefined_name
-+
-+        if not isinstance(self._undefined_name, string_types):
-+            return "%s has no element %r" % (
-+                object_type_repr(self._undefined_obj),
-+                self._undefined_name,
-+            )
-+
-+        return "%r has no attribute %r" % (
-+            object_type_repr(self._undefined_obj),
-+            self._undefined_name,
-+        )
-+
-+    @internalcode
-+    def _fail_with_undefined_error(self, *args, **kwargs):
-+        """Raise an :exc:`UndefinedError` when operations are performed
-+        on the undefined value.
-+        """
-+        raise self._undefined_exception(self._undefined_message)
-+
-+    @internalcode
-+    def __getattr__(self, name):
-+        if name[:2] == "__":
-+            raise AttributeError(name)
-+        return self._fail_with_undefined_error()
-+
-+    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
-+        __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
-+        __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
-+        __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \
-+        __int__ = __float__ = __complex__ = __pow__ = __rpow__ = \
-+        __sub__ = __rsub__ = _fail_with_undefined_error
-+
-+    def __eq__(self, other):
-+        return type(self) is type(other)
-+
-+    def __ne__(self, other):
-+        return not self.__eq__(other)
-+
-+    def __hash__(self):
-+        return id(type(self))
-+
-+    def __str__(self):
-+        return u""
-+
-+    def __len__(self):
-+        return 0
-+
-+    def __iter__(self):
-+        if 0:
-+            yield None
-+
-+    def __nonzero__(self):
-+        return False
-+
-+    __bool__ = __nonzero__
-+
-+    def __repr__(self):
-+        return "Undefined"
-+
-+
-+def make_logging_undefined(logger=None, base=None):
-+    """Given a logger object this returns a new undefined class that will
-+    log certain failures.  It will log iterations and printing.  If no
-+    logger is given a default logger is created.
-+
-+    Example::
-+
-+        logger = logging.getLogger(__name__)
-+        LoggingUndefined = make_logging_undefined(
-+            logger=logger,
-+            base=Undefined
-+        )
-+
-+    .. versionadded:: 2.8
-+
-+    :param logger: the logger to use.  If not provided, a default logger
-+                   is created.
-+    :param base: the base class to add logging functionality to.  This
-+                 defaults to :class:`Undefined`.
-+    """
-+    if logger is None:
-+        import logging
-+
-+        logger = logging.getLogger(__name__)
-+        logger.addHandler(logging.StreamHandler(sys.stderr))
-+    if base is None:
-+        base = Undefined
-+
-+    def _log_message(undef):
-+        if undef._undefined_hint is None:
-+            if undef._undefined_obj is missing:
-+                hint = "%s is undefined" % undef._undefined_name
-+            elif not isinstance(undef._undefined_name, string_types):
-+                hint = "%s has no element %s" % (
-+                    object_type_repr(undef._undefined_obj),
-+                    undef._undefined_name,
-+                )
-+            else:
-+                hint = "%s has no attribute %s" % (
-+                    object_type_repr(undef._undefined_obj),
-+                    undef._undefined_name,
-+                )
-+        else:
-+            hint = undef._undefined_hint
-+        logger.warning("Template variable warning: %s", hint)
-+
-+    class LoggingUndefined(base):
-+        def _fail_with_undefined_error(self, *args, **kwargs):
-+            try:
-+                return base._fail_with_undefined_error(self, *args, **kwargs)
-+            except self._undefined_exception as e:
-+                logger.error("Template variable error: %s", str(e))
-+                raise e
-+
-+        def __str__(self):
-+            rv = base.__str__(self)
-+            _log_message(self)
-+            return rv
-+
-+        def __iter__(self):
-+            rv = base.__iter__(self)
-+            _log_message(self)
-+            return rv
-+
-+        if PY2:
-+
-+            def __nonzero__(self):
-+                rv = base.__nonzero__(self)
-+                _log_message(self)
-+                return rv
-+
-+            def __unicode__(self):
-+                rv = base.__unicode__(self)
-+                _log_message(self)
-+                return rv
-+
-+        else:
-+
-+            def __bool__(self):
-+                rv = base.__bool__(self)
-+                _log_message(self)
-+                return rv
-+
-+    return LoggingUndefined
-+
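Typical use of the factory above: build an undefined class that logs, then install it on an environment. A sketch assuming the vendored 2.11 API (the missing variable name is illustrative):

from jinja2 import Environment
from jinja2.runtime import make_logging_undefined

# With no arguments, a default logger with a stderr handler is created.
LoggingUndefined = make_logging_undefined()
env = Environment(undefined=LoggingUndefined)
env.from_string("{{ nope }}").render()
# renders to '' but logs: Template variable warning: nope is undefined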
-+
-+# No @implements_to_string decorator here because __str__ is not
-+# overridden from Undefined in this class; adding the decorator
-+# would cause a recursion error in Python 2.
-+class ChainableUndefined(Undefined):
-+    """An undefined that is chainable, where both ``__getattr__`` and
-+    ``__getitem__`` return itself rather than raising an
-+    :exc:`UndefinedError`.
-+
-+    >>> foo = ChainableUndefined(name='foo')
-+    >>> str(foo.bar['baz'])
-+    ''
-+    >>> foo.bar['baz'] + 42
-+    Traceback (most recent call last):
-+      ...
-+    jinja2.exceptions.UndefinedError: 'foo' is undefined
-+
-+    .. versionadded:: 2.11.0
-+    """
-+
-+    __slots__ = ()
-+
-+    def __html__(self):
-+        return self.__str__()
-+
-+    def __getattr__(self, _):
-+        return self
-+
-+    __getitem__ = __getattr__
-+
-+
-+@implements_to_string
-+class DebugUndefined(Undefined):
-+    """An undefined that returns the debug info when printed.
-+
-+    >>> foo = DebugUndefined(name='foo')
-+    >>> str(foo)
-+    '{{ foo }}'
-+    >>> not foo
-+    True
-+    >>> foo + 42
-+    Traceback (most recent call last):
-+      ...
-+    jinja2.exceptions.UndefinedError: 'foo' is undefined
-+    """
-+
-+    __slots__ = ()
-+
-+    def __str__(self):
-+        if self._undefined_hint is None:
-+            if self._undefined_obj is missing:
-+                return u"{{ %s }}" % self._undefined_name
-+            return "{{ no such element: %s[%r] }}" % (
-+                object_type_repr(self._undefined_obj),
-+                self._undefined_name,
-+            )
-+        return u"{{ undefined value printed: %s }}" % self._undefined_hint
-+
-+
-+@implements_to_string
-+class StrictUndefined(Undefined):
-+    """An undefined that barks on print and iteration as well as boolean
-+    tests and all kinds of comparisons.  In other words: you can do nothing
-+    with it except checking if it's defined using the `defined` test.
-+
-+    >>> foo = StrictUndefined(name='foo')
-+    >>> str(foo)
-+    Traceback (most recent call last):
-+      ...
-+    jinja2.exceptions.UndefinedError: 'foo' is undefined
-+    >>> not foo
-+    Traceback (most recent call last):
-+      ...
-+    jinja2.exceptions.UndefinedError: 'foo' is undefined
-+    >>> foo + 42
-+    Traceback (most recent call last):
-+      ...
-+    jinja2.exceptions.UndefinedError: 'foo' is undefined
-+    """
-+
-+    __slots__ = ()
-+    __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
-+        __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
-+
-+
-+# remove remaining slots attributes, after the metaclass did the magic they
-+# are unneeded and irritating as they contain wrong data for the subclasses.
-+del (
-+    Undefined.__slots__,
-+    ChainableUndefined.__slots__,
-+    DebugUndefined.__slots__,
-+    StrictUndefined.__slots__,
-+)
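To summarize how the undefined flavors defined in this file differ in practice, a small comparison sketch (variable names illustrative):

from jinja2 import (
    ChainableUndefined,
    DebugUndefined,
    Environment,
    StrictUndefined,
)
from jinja2.exceptions import UndefinedError

# ChainableUndefined: attribute chains on a missing name stay silent.
print(Environment(undefined=ChainableUndefined)
      .from_string("{{ user.name }}").render())   # ''
# DebugUndefined: echoes the original lookup instead of printing nothing.
print(Environment(undefined=DebugUndefined)
      .from_string("{{ user }}").render())        # {{ user }}
# StrictUndefined: even printing an undefined name raises.
try:
    Environment(undefined=StrictUndefined).from_string("{{ user }}").render()
except UndefinedError as e:
    print(e)                                      # 'user' is undefined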
-diff --git a/third_party/python/Jinja2/jinja2/sandbox.py b/third_party/python/Jinja2/jinja2/sandbox.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/sandbox.py
-@@ -0,0 +1,510 @@
-+# -*- coding: utf-8 -*-
-+"""A sandbox layer that ensures unsafe operations cannot be performed.
-+Useful when the template itself comes from an untrusted source.
-+"""
-+import operator
-+import types
-+import warnings
-+from collections import deque
-+from string import Formatter
-+
-+from markupsafe import EscapeFormatter
-+from markupsafe import Markup
-+
-+from ._compat import abc
-+from ._compat import PY2
-+from ._compat import range_type
-+from ._compat import string_types
-+from .environment import Environment
-+from .exceptions import SecurityError
-+
-+#: maximum number of items a range may produce
-+MAX_RANGE = 100000
-+
-+#: attributes of function objects that are considered unsafe.
-+if PY2:
-+    UNSAFE_FUNCTION_ATTRIBUTES = {
-+        "func_closure",
-+        "func_code",
-+        "func_dict",
-+        "func_defaults",
-+        "func_globals",
-+    }
-+else:
-+    # On Python 3 the special attributes on functions are gone,
-+    # but they remain on methods and generators for whatever reason.
-+    UNSAFE_FUNCTION_ATTRIBUTES = set()
-+
-+#: unsafe method attributes.  function attributes are unsafe for methods too
-+UNSAFE_METHOD_ATTRIBUTES = {"im_class", "im_func", "im_self"}
-+
-+#: unsafe generator attributes.
-+UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
-+
-+#: unsafe attributes on coroutines
-+UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
-+
-+#: unsafe attributes on async generators
-+UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
-+
-+# make sure we don't warn in python 2.6 about stuff we don't care about
-+warnings.filterwarnings(
-+    "ignore", "the sets module", DeprecationWarning, module=__name__
-+)
-+
-+_mutable_set_types = (set,)
-+_mutable_mapping_types = (dict,)
-+_mutable_sequence_types = (list,)
-+
-+# on python 2.x we can register the user collection types
-+try:
-+    from UserDict import UserDict, DictMixin
-+    from UserList import UserList
-+
-+    _mutable_mapping_types += (UserDict, DictMixin)
-+    _mutable_set_types += (UserList,)
-+except ImportError:
-+    pass
-+
-+# if sets is still available, register the mutable set from there as well
-+try:
-+    from sets import Set
-+
-+    _mutable_set_types += (Set,)
-+except ImportError:
-+    pass
-+
-+#: register Python 2.6 abstract base classes
-+_mutable_set_types += (abc.MutableSet,)
-+_mutable_mapping_types += (abc.MutableMapping,)
-+_mutable_sequence_types += (abc.MutableSequence,)
-+
-+_mutable_spec = (
-+    (
-+        _mutable_set_types,
-+        frozenset(
-+            [
-+                "add",
-+                "clear",
-+                "difference_update",
-+                "discard",
-+                "pop",
-+                "remove",
-+                "symmetric_difference_update",
-+                "update",
-+            ]
-+        ),
-+    ),
-+    (
-+        _mutable_mapping_types,
-+        frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
-+    ),
-+    (
-+        _mutable_sequence_types,
-+        frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
-+    ),
-+    (
-+        deque,
-+        frozenset(
-+            [
-+                "append",
-+                "appendleft",
-+                "clear",
-+                "extend",
-+                "extendleft",
-+                "pop",
-+                "popleft",
-+                "remove",
-+                "rotate",
-+            ]
-+        ),
-+    ),
-+)
-+
-+
-+class _MagicFormatMapping(abc.Mapping):
-+    """This class implements a dummy wrapper to fix a bug in the Python
-+    standard library for string formatting.
-+
-+    See https://bugs.python.org/issue13598 for information about why
-+    this is necessary.
-+    """
-+
-+    def __init__(self, args, kwargs):
-+        self._args = args
-+        self._kwargs = kwargs
-+        self._last_index = 0
-+
-+    def __getitem__(self, key):
-+        if key == "":
-+            idx = self._last_index
-+            self._last_index += 1
-+            try:
-+                return self._args[idx]
-+            except LookupError:
-+                pass
-+            key = str(idx)
-+        return self._kwargs[key]
-+
-+    def __iter__(self):
-+        return iter(self._kwargs)
-+
-+    def __len__(self):
-+        return len(self._kwargs)
-+
-+
-+def inspect_format_method(callable):
-+    if not isinstance(
-+        callable, (types.MethodType, types.BuiltinMethodType)
-+    ) or callable.__name__ not in ("format", "format_map"):
-+        return None
-+    obj = callable.__self__
-+    if isinstance(obj, string_types):
-+        return obj
-+
-+
-+def safe_range(*args):
-+    """A range that can't generate ranges with a length of more than
-+    MAX_RANGE items.
-+    """
-+    rng = range_type(*args)
-+
-+    if len(rng) > MAX_RANGE:
-+        raise OverflowError(
-+            "Range too big. The sandbox blocks ranges larger than"
-+            " MAX_RANGE (%d)." % MAX_RANGE
-+        )
-+
-+    return rng
-+
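SandboxedEnvironment.__init__ (further down) installs safe_range as the template-level range(), so oversized ranges fail fast instead of exhausting memory. A quick sketch:

from jinja2.sandbox import SandboxedEnvironment

env = SandboxedEnvironment()
print(env.from_string("{{ range(3) | list }}").render())   # [0, 1, 2]
try:
    env.from_string("{{ range(10 ** 9) | list }}").render()
except OverflowError as e:
    print(e)   # Range too big. The sandbox blocks ranges larger than ...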
-+
-+def unsafe(f):
-+    """Marks a function or method as unsafe.
-+
-+    ::
-+
-+        @unsafe
-+        def delete(self):
-+            pass
-+    """
-+    f.unsafe_callable = True
-+    return f
-+
-+
-+def is_internal_attribute(obj, attr):
-+    """Test if the attribute given is an internal python attribute.  For
-+    example this function returns `True` for the `func_code` attribute of
-+    python objects.  This is useful if the environment method
-+    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
-+
-+    >>> from jinja2.sandbox import is_internal_attribute
-+    >>> is_internal_attribute(str, "mro")
-+    True
-+    >>> is_internal_attribute(str, "upper")
-+    False
-+    """
-+    if isinstance(obj, types.FunctionType):
-+        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
-+            return True
-+    elif isinstance(obj, types.MethodType):
-+        if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
-+            return True
-+    elif isinstance(obj, type):
-+        if attr == "mro":
-+            return True
-+    elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
-+        return True
-+    elif isinstance(obj, types.GeneratorType):
-+        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
-+            return True
-+    elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
-+        if attr in UNSAFE_COROUTINE_ATTRIBUTES:
-+            return True
-+    elif hasattr(types, "AsyncGeneratorType") and isinstance(
-+        obj, types.AsyncGeneratorType
-+    ):
-+        if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
-+            return True
-+    return attr.startswith("__")
-+
-+
-+def modifies_known_mutable(obj, attr):
-+    """This function checks if an attribute on a builtin mutable object
-+    (list, dict, set or deque) would modify it if called.  It also supports
-+    the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
-+    with Python 2.6 onwards the abstract base classes `MutableSet`,
-+    `MutableMapping`, and `MutableSequence`.
-+
-+    >>> modifies_known_mutable({}, "clear")
-+    True
-+    >>> modifies_known_mutable({}, "keys")
-+    False
-+    >>> modifies_known_mutable([], "append")
-+    True
-+    >>> modifies_known_mutable([], "index")
-+    False
-+
-+    If called with an unsupported object (such as unicode) `False` is
-+    returned.
-+
-+    >>> modifies_known_mutable("foo", "upper")
-+    False
-+    """
-+    for typespec, unsafe in _mutable_spec:
-+        if isinstance(obj, typespec):
-+            return attr in unsafe
-+    return False
-+
-+
-+class SandboxedEnvironment(Environment):
-+    """The sandboxed environment.  It works like the regular environment but
-+    tells the compiler to generate sandboxed code.  Additionally subclasses of
-+    this environment may override the methods that tell the runtime what
-+    attributes or functions are safe to access.
-+
-+    If the template tries to access insecure code a :exc:`SecurityError` is
-+    raised.  However, other exceptions may also occur during rendering, so
-+    the caller has to ensure that all exceptions are caught.
-+    """
-+
-+    sandboxed = True
-+
-+    #: default callback table for the binary operators.  A copy of this is
-+    #: available on each instance of a sandboxed environment as
-+    #: :attr:`binop_table`
-+    default_binop_table = {
-+        "+": operator.add,
-+        "-": operator.sub,
-+        "*": operator.mul,
-+        "/": operator.truediv,
-+        "//": operator.floordiv,
-+        "**": operator.pow,
-+        "%": operator.mod,
-+    }
-+
-+    #: default callback table for the unary operators.  A copy of this is
-+    #: available on each instance of a sandboxed environment as
-+    #: :attr:`unop_table`
-+    default_unop_table = {"+": operator.pos, "-": operator.neg}
-+
-+    #: a set of binary operators that should be intercepted.  Each operator
-+    #: that is added to this set (empty by default) is delegated to the
-+    #: :meth:`call_binop` method that will perform the operator.  The default
-+    #: operator callback is specified by :attr:`binop_table`.
-+    #:
-+    #: The following binary operators are interceptable:
-+    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
-+    #:
-+    #: The default operation from the operator table corresponds to the
-+    #: builtin function.  Intercepted calls are always slower than the native
-+    #: operator call, so make sure only to intercept the ones you are
-+    #: interested in.
-+    #:
-+    #: .. versionadded:: 2.6
-+    intercepted_binops = frozenset()
-+
-+    #: a set of unary operators that should be intercepted.  Each operator
-+    #: that is added to this set (empty by default) is delegated to the
-+    #: :meth:`call_unop` method that will perform the operator.  The default
-+    #: operator callback is specified by :attr:`unop_table`.
-+    #:
-+    #: The following unary operators are interceptable: ``+``, ``-``
-+    #:
-+    #: The default operation from the operator table corresponds to the
-+    #: builtin function.  Intercepted calls are always slower than the native
-+    #: operator call, so make sure only to intercept the ones you are
-+    #: interested in.
-+    #:
-+    #: .. versionadded:: 2.6
-+    intercepted_unops = frozenset()
-+
-+    def intercept_unop(self, operator):
-+        """Called during template compilation with the name of a unary
-+        operator to check if it should be intercepted at runtime.  If this
-+        method returns `True`, :meth:`call_unop` is executed for this unary
-+        operator.  The default implementation of :meth:`call_unop` will use
-+        the :attr:`unop_table` dictionary to perform the operator with the
-+        same logic as the builtin one.
-+
-+        The following unary operators are interceptable: ``+`` and ``-``
-+
-+        Intercepted calls are always slower than the native operator call,
-+        so make sure only to intercept the ones you are interested in.
-+
-+        .. versionadded:: 2.6
-+        """
-+        return False
-+
-+    def __init__(self, *args, **kwargs):
-+        Environment.__init__(self, *args, **kwargs)
-+        self.globals["range"] = safe_range
-+        self.binop_table = self.default_binop_table.copy()
-+        self.unop_table = self.default_unop_table.copy()
-+
-+    def is_safe_attribute(self, obj, attr, value):
-+        """The sandboxed environment will call this method to check if the
-+        attribute of an object is safe to access.  By default all attributes
-+        starting with an underscore are considered private as well as the
-+        special attributes of internal python objects as returned by the
-+        :func:`is_internal_attribute` function.
-+        """
-+        return not (attr.startswith("_") or is_internal_attribute(obj, attr))
-+
-+    def is_safe_callable(self, obj):
-+        """Check if an object is safely callable.  Per default a function is
-+        considered safe unless the `unsafe_callable` attribute exists and is
-+        True.  Override this method to alter the behavior, but this won't
-+        affect the `unsafe` decorator from this module.
-+        """
-+        return not (
-+            getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
-+        )
-+
-+    def call_binop(self, context, operator, left, right):
-+        """For intercepted binary operator calls (:meth:`intercepted_binops`)
-+        this function is executed instead of the builtin operator.  This can
-+        be used to fine tune the behavior of certain operators.
-+
-+        .. versionadded:: 2.6
-+        """
-+        return self.binop_table[operator](left, right)
-+
-+    def call_unop(self, context, operator, arg):
-+        """For intercepted unary operator calls (:meth:`intercepted_unops`)
-+        this function is executed instead of the builtin operator.  This can
-+        be used to fine tune the behavior of certain operators.
-+
-+        .. versionadded:: 2.6
-+        """
-+        return self.unop_table[operator](arg)
-+
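Putting intercepted_binops and call_binop together: a subclass can opt specific operators into interception and veto them at runtime. A sketch in the spirit of the attribute docs above (the class name and the exponent cap are illustrative choices):

from jinja2.sandbox import SandboxedEnvironment, SecurityError

class PowSandbox(SandboxedEnvironment):
    # Route ** through call_binop so oversized exponents can be rejected.
    intercepted_binops = frozenset(["**"])

    def call_binop(self, context, operator, left, right):
        if operator == "**" and right > 100:
            raise SecurityError("exponent too large")
        return SandboxedEnvironment.call_binop(
            self, context, operator, left, right)

env = PowSandbox()
print(env.from_string("{{ 2 ** 8 }}").render())   # 256
try:
    env.from_string("{{ 2 ** 999 }}").render()
except SecurityError as e:
    print(e)                                      # exponent too large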
-+    def getitem(self, obj, argument):
-+        """Subscribe an object from sandboxed code."""
-+        try:
-+            return obj[argument]
-+        except (TypeError, LookupError):
-+            if isinstance(argument, string_types):
-+                try:
-+                    attr = str(argument)
-+                except Exception:
-+                    pass
-+                else:
-+                    try:
-+                        value = getattr(obj, attr)
-+                    except AttributeError:
-+                        pass
-+                    else:
-+                        if self.is_safe_attribute(obj, argument, value):
-+                            return value
-+                        return self.unsafe_undefined(obj, argument)
-+        return self.undefined(obj=obj, name=argument)
-+
-+    def getattr(self, obj, attribute):
-+        """Subscribe an object from sandboxed code and prefer the
-+        attribute.  The attribute passed *must* be a bytestring.
-+        """
-+        try:
-+            value = getattr(obj, attribute)
-+        except AttributeError:
-+            try:
-+                return obj[attribute]
-+            except (TypeError, LookupError):
-+                pass
-+        else:
-+            if self.is_safe_attribute(obj, attribute, value):
-+                return value
-+            return self.unsafe_undefined(obj, attribute)
-+        return self.undefined(obj=obj, name=attribute)
-+
-+    def unsafe_undefined(self, obj, attribute):
-+        """Return an undefined object for unsafe attributes."""
-+        return self.undefined(
-+            "access to attribute %r of %r "
-+            "object is unsafe." % (attribute, obj.__class__.__name__),
-+            name=attribute,
-+            obj=obj,
-+            exc=SecurityError,
-+        )
-+
-+    def format_string(self, s, args, kwargs, format_func=None):
-+        """If a format call is detected, then this is routed through this
-+        method so that our safety sandbox can be used for it.
-+        """
-+        if isinstance(s, Markup):
-+            formatter = SandboxedEscapeFormatter(self, s.escape)
-+        else:
-+            formatter = SandboxedFormatter(self)
-+
-+        if format_func is not None and format_func.__name__ == "format_map":
-+            if len(args) != 1 or kwargs:
-+                raise TypeError(
-+                    "format_map() takes exactly one argument %d given"
-+                    % (len(args) + (kwargs is not None))
-+                )
-+
-+            kwargs = args[0]
-+            args = None
-+
-+        kwargs = _MagicFormatMapping(args, kwargs)
-+        rv = formatter.vformat(s, args, kwargs)
-+        return type(s)(rv)
-+
-+    def call(__self, __context, __obj, *args, **kwargs):  # noqa: B902
-+        """Call an object from sandboxed code."""
-+        fmt = inspect_format_method(__obj)
-+        if fmt is not None:
-+            return __self.format_string(fmt, args, kwargs, __obj)
-+
-+        # the double prefixes are to avoid double keyword argument
-+        # errors when proxying the call.
-+        if not __self.is_safe_callable(__obj):
-+            raise SecurityError("%r is not safely callable" % (__obj,))
-+        return __context.call(__obj, *args, **kwargs)
-+
-+
-+class ImmutableSandboxedEnvironment(SandboxedEnvironment):
-+    """Works exactly like the regular `SandboxedEnvironment` but does not
-+    permit modifications on the builtin mutable objects `list`, `set`, and
-+    `dict` by using the :func:`modifies_known_mutable` function.
-+    """
-+
-+    def is_safe_attribute(self, obj, attr, value):
-+        if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
-+            return False
-+        return not modifies_known_mutable(obj, attr)
-+
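A short sketch of the difference this subclass makes: read access to builtin containers still works, but any method listed in _mutable_spec comes back as an unsafe undefined and raises when called (data values are illustrative):

from jinja2.sandbox import ImmutableSandboxedEnvironment, SecurityError

env = ImmutableSandboxedEnvironment()
# Read-only access is still fine:
print(env.from_string("{{ items.index(2) }}").render(items=[1, 2]))   # 1
try:
    # 'append' would mutate the list, so calling it is rejected.
    env.from_string("{{ items.append(3) }}").render(items=[1, 2])
except SecurityError as e:
    print(e)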
-+
-+# This really is not a public API apparently.
-+try:
-+    from _string import formatter_field_name_split
-+except ImportError:
-+
-+    def formatter_field_name_split(field_name):
-+        return field_name._formatter_field_name_split()
-+
-+
-+class SandboxedFormatterMixin(object):
-+    def __init__(self, env):
-+        self._env = env
-+
-+    def get_field(self, field_name, args, kwargs):
-+        first, rest = formatter_field_name_split(field_name)
-+        obj = self.get_value(first, args, kwargs)
-+        for is_attr, i in rest:
-+            if is_attr:
-+                obj = self._env.getattr(obj, i)
-+            else:
-+                obj = self._env.getitem(obj, i)
-+        return obj, first
-+
-+
-+class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
-+    def __init__(self, env):
-+        SandboxedFormatterMixin.__init__(self, env)
-+        Formatter.__init__(self)
-+
-+
-+class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
-+    def __init__(self, env, escape):
-+        SandboxedFormatterMixin.__init__(self, env)
-+        EscapeFormatter.__init__(self, escape)
-diff --git a/third_party/python/Jinja2/jinja2/tests.py b/third_party/python/Jinja2/jinja2/tests.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/tests.py
-@@ -0,0 +1,215 @@
-+# -*- coding: utf-8 -*-
-+"""Built-in template tests used with the ``is`` operator."""
-+import decimal
-+import operator
-+import re
-+
-+from ._compat import abc
-+from ._compat import integer_types
-+from ._compat import string_types
-+from ._compat import text_type
-+from .runtime import Undefined
-+
-+number_re = re.compile(r"^-?\d+(\.\d+)?$")
-+regex_type = type(number_re)
-+test_callable = callable
-+
-+
-+def test_odd(value):
-+    """Return true if the variable is odd."""
-+    return value % 2 == 1
-+
-+
-+def test_even(value):
-+    """Return true if the variable is even."""
-+    return value % 2 == 0
-+
-+
-+def test_divisibleby(value, num):
-+    """Check if a variable is divisible by a number."""
-+    return value % num == 0
-+
-+
-+def test_defined(value):
-+    """Return true if the variable is defined:
-+
-+    .. sourcecode:: jinja
-+
-+        {% if variable is defined %}
-+            value of variable: {{ variable }}
-+        {% else %}
-+            variable is not defined
-+        {% endif %}
-+
-+    See the :func:`default` filter for a simple way to set undefined
-+    variables.
-+    """
-+    return not isinstance(value, Undefined)
-+
-+
-+def test_undefined(value):
-+    """Like :func:`defined` but the other way round."""
-+    return isinstance(value, Undefined)
-+
-+
-+def test_none(value):
-+    """Return true if the variable is none."""
-+    return value is None
-+
-+
-+def test_boolean(value):
-+    """Return true if the object is a boolean value.
-+
-+    .. versionadded:: 2.11
-+    """
-+    return value is True or value is False
-+
-+
-+def test_false(value):
-+    """Return true if the object is False.
-+
-+    .. versionadded:: 2.11
-+    """
-+    return value is False
-+
-+
-+def test_true(value):
-+    """Return true if the object is True.
-+
-+    .. versionadded:: 2.11
-+    """
-+    return value is True
-+
-+
-+# NOTE: The existing 'number' test matches booleans and floats
-+def test_integer(value):
-+    """Return true if the object is an integer.
-+
-+    .. versionadded:: 2.11
-+    """
-+    return isinstance(value, integer_types) and value is not True and value is not False
-+
-+
-+# NOTE: The existing 'number' test matches booleans and integers
-+def test_float(value):
-+    """Return true if the object is a float.
-+
-+    .. versionadded:: 2.11
-+    """
-+    return isinstance(value, float)
-+
-+
-+def test_lower(value):
-+    """Return true if the variable is lowercased."""
-+    return text_type(value).islower()
-+
-+
-+def test_upper(value):
-+    """Return true if the variable is uppercased."""
-+    return text_type(value).isupper()
-+
-+
-+def test_string(value):
-+    """Return true if the object is a string."""
-+    return isinstance(value, string_types)
-+
-+
-+def test_mapping(value):
-+    """Return true if the object is a mapping (dict etc.).
-+
-+    .. versionadded:: 2.6
-+    """
-+    return isinstance(value, abc.Mapping)
-+
-+
-+def test_number(value):
-+    """Return true if the variable is a number."""
-+    return isinstance(value, integer_types + (float, complex, decimal.Decimal))
-+
-+
-+def test_sequence(value):
-+    """Return true if the variable is a sequence. Sequences are variables
-+    that are iterable.
-+    """
-+    try:
-+        len(value)
-+        value.__getitem__
-+    except Exception:
-+        return False
-+    return True
-+
-+
-+def test_sameas(value, other):
-+    """Check if an object points to the same memory address than another
-+    object:
-+
-+    .. sourcecode:: jinja
-+
-+        {% if foo.attribute is sameas false %}
-+            the foo attribute really is the `False` singleton
-+        {% endif %}
-+    """
-+    return value is other
-+
-+
-+def test_iterable(value):
-+    """Check if it's possible to iterate over an object."""
-+    try:
-+        iter(value)
-+    except TypeError:
-+        return False
-+    return True
-+
-+
-+def test_escaped(value):
-+    """Check if the value is escaped."""
-+    return hasattr(value, "__html__")
-+
-+
-+def test_in(value, seq):
-+    """Check if value is in seq.
-+
-+    .. versionadded:: 2.10
-+    """
-+    return value in seq
-+
-+
-+TESTS = {
-+    "odd": test_odd,
-+    "even": test_even,
-+    "divisibleby": test_divisibleby,
-+    "defined": test_defined,
-+    "undefined": test_undefined,
-+    "none": test_none,
-+    "boolean": test_boolean,
-+    "false": test_false,
-+    "true": test_true,
-+    "integer": test_integer,
-+    "float": test_float,
-+    "lower": test_lower,
-+    "upper": test_upper,
-+    "string": test_string,
-+    "mapping": test_mapping,
-+    "number": test_number,
-+    "sequence": test_sequence,
-+    "iterable": test_iterable,
-+    "callable": test_callable,
-+    "sameas": test_sameas,
-+    "escaped": test_escaped,
-+    "in": test_in,
-+    "==": operator.eq,
-+    "eq": operator.eq,
-+    "equalto": operator.eq,
-+    "!=": operator.ne,
-+    "ne": operator.ne,
-+    ">": operator.gt,
-+    "gt": operator.gt,
-+    "greaterthan": operator.gt,
-+    "ge": operator.ge,
-+    ">=": operator.ge,
-+    "<": operator.lt,
-+    "lt": operator.lt,
-+    "lessthan": operator.lt,
-+    "<=": operator.le,
-+    "le": operator.le,
-+}
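These functions back the template-level `is` operator. A quick usage sketch (template and values are illustrative):

from jinja2 import Environment

tmpl = Environment().from_string(
    "{{ n }} is {{ 'even' if n is even else 'odd' }}"
    "{{ ' and divisible by 3' if n is divisibleby 3 }}"
)
print(tmpl.render(n=9))   # 9 is odd and divisible by 3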
-diff --git a/third_party/python/Jinja2/jinja2/utils.py b/third_party/python/Jinja2/jinja2/utils.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/utils.py
-@@ -0,0 +1,727 @@
-+# -*- coding: utf-8 -*-
-+import json
-+import os
-+import re
-+import warnings
-+from collections import deque
-+from random import choice
-+from random import randrange
-+from threading import Lock
-+
-+from markupsafe import escape
-+from markupsafe import Markup
-+
-+from ._compat import abc
-+from ._compat import string_types
-+from ._compat import text_type
-+from ._compat import url_quote
-+
-+_word_split_re = re.compile(r"(\s+)")
-+_punctuation_re = re.compile(
-+    "^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$"
-+    % (
-+        "|".join(map(re.escape, ("(", "<", "&lt;"))),
-+        "|".join(map(re.escape, (".", ",", ")", ">", "\n", "&gt;"))),
-+    )
-+)
-+_simple_email_re = re.compile(r"^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$")
-+_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
-+_entity_re = re.compile(r"&([^;]+);")
-+_letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
-+_digits = "0123456789"
-+
-+# special singleton representing missing values for the runtime
-+missing = type("MissingType", (), {"__repr__": lambda x: "missing"})()
-+
-+# internal code
-+internal_code = set()
-+
-+concat = u"".join
-+
-+_slash_escape = "\\/" not in json.dumps("/")
-+
-+
-+def contextfunction(f):
-+    """This decorator can be used to mark a function or method context callable.
-+    A context callable is passed the active :class:`Context` as first argument when
-+    called from the template.  This is useful if a function wants to get access
-+    to the context or functions provided on the context object.  For example
-+    a function that returns a sorted list of template variables the current
-+    template exports could look like this::
-+
-+        @contextfunction
-+        def get_exported_names(context):
-+            return sorted(context.exported_vars)
-+    """
-+    f.contextfunction = True
-+    return f
-+
-+
-+def evalcontextfunction(f):
-+    """This decorator can be used to mark a function or method as an eval
-+    context callable.  This is similar to the :func:`contextfunction`
-+    but instead of passing the context, an evaluation context object is
-+    passed.  For more information about the eval context, see
-+    :ref:`eval-context`.
-+
-+    .. versionadded:: 2.4
-+    """
-+    f.evalcontextfunction = True
-+    return f
-+
-+
-+def environmentfunction(f):
-+    """This decorator can be used to mark a function or method as environment
-+    callable.  This decorator works exactly like the :func:`contextfunction`
-+    decorator just that the first argument is the active :class:`Environment`
-+    and not context.
-+    """
-+    f.environmentfunction = True
-+    return f
-+
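A classic use of evalcontextfunction is a helper that honours the active autoescape setting via the eval context. A sketch (nl2br is the customary docs example, not part of this file):

from jinja2 import Environment, evalcontextfunction
from markupsafe import Markup, escape

@evalcontextfunction
def nl2br(eval_ctx, value):
    result = escape(value).replace("\n", Markup("<br>\n"))
    # Only promise safety when autoescaping is actually on.
    return Markup(result) if eval_ctx.autoescape else result

env = Environment(autoescape=True)
env.globals["nl2br"] = nl2br
print(env.from_string("{{ nl2br('a\nb') }}").render())
# a<br>
# b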
-+
-+def internalcode(f):
-+    """Marks the function as internally used"""
-+    internal_code.add(f.__code__)
-+    return f
-+
-+
-+def is_undefined(obj):
-+    """Check if the object passed is undefined.  This does nothing more than
-+    performing an instance check against :class:`Undefined` but looks nicer.
-+    This can be used for custom filters or tests that want to react to
-+    undefined variables.  For example a custom default filter can look like
-+    this::
-+
-+        def default(var, default=''):
-+            if is_undefined(var):
-+                return default
-+            return var
-+    """
-+    from .runtime import Undefined
-+
-+    return isinstance(obj, Undefined)
-+
-+
-+def consume(iterable):
-+    """Consumes an iterable without doing anything with it."""
-+    for _ in iterable:
-+        pass
-+
-+
-+def clear_caches():
-+    """Jinja keeps internal caches for environments and lexers.  These are
-+    used so that Jinja doesn't have to recreate environments and lexers all
-+    the time.  Normally you don't have to care about that, but if you are
-+    measuring memory consumption you may want to clean the caches.
-+    """
-+    from .environment import _spontaneous_environments
-+    from .lexer import _lexer_cache
-+
-+    _spontaneous_environments.clear()
-+    _lexer_cache.clear()
-+
-+
-+def import_string(import_name, silent=False):
-+    """Imports an object based on a string.  This is useful if you want to
-+    use import paths as endpoints or something similar.  An import path can
-+    be specified either in dotted notation (``xml.sax.saxutils.escape``)
-+    or with a colon as object delimiter (``xml.sax.saxutils:escape``).
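-+
-+    Both notations resolve to the same object; for example::
-+
-+        xml_escape = import_string("xml.sax.saxutils:escape")
-+        xml_escape = import_string("xml.sax.saxutils.escape")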
-+
-+    If `silent` is True, the return value will be `None` if the import
-+    fails.
-+
-+    :return: imported object
-+    """
-+    try:
-+        if ":" in import_name:
-+            module, obj = import_name.split(":", 1)
-+        elif "." in import_name:
-+            module, _, obj = import_name.rpartition(".")
-+        else:
-+            return __import__(import_name)
-+        return getattr(__import__(module, None, None, [obj]), obj)
-+    except (ImportError, AttributeError):
-+        if not silent:
-+            raise
-+
-+
-+def open_if_exists(filename, mode="rb"):
-+    """Returns a file descriptor for the filename if that file exists,
-+    otherwise ``None``.
-+    """
-+    if not os.path.isfile(filename):
-+        return None
-+
-+    return open(filename, mode)
-+
-+
-+def object_type_repr(obj):
-+    """Returns the name of the object's type.  For some recognized
-+    singletons the name of the object is returned instead. (For
-+    example for `None` and `Ellipsis`).
-+    """
-+    if obj is None:
-+        return "None"
-+    elif obj is Ellipsis:
-+        return "Ellipsis"
-+    # __builtin__ in 2.x, builtins in 3.x
-+    if obj.__class__.__module__ in ("__builtin__", "builtins"):
-+        name = obj.__class__.__name__
-+    else:
-+        name = obj.__class__.__module__ + "." + obj.__class__.__name__
-+    return "%s object" % name
-+
-+
-+def pformat(obj, verbose=False):
-+    """Prettyprint an object.  Either use the `pretty` library or the
-+    builtin `pprint`.
-+    """
-+    try:
-+        from pretty import pretty
-+
-+        return pretty(obj, verbose=verbose)
-+    except ImportError:
-+        from pprint import pformat
-+
-+        return pformat(obj)
-+
-+
-+def urlize(text, trim_url_limit=None, rel=None, target=None):
-+    """Converts any URLs in text into clickable links. Works on http://,
-+    https:// and www. links. Links can have trailing punctuation (periods,
-+    commas, close-parens) and leading punctuation (opening parens) and
-+    it'll still do the right thing.
-+
-+    If trim_url_limit is not None, the URLs in link text will be limited
-+    to trim_url_limit characters.
-+
-+    If rel is not None, the links will get a ``rel`` attribute with that
-+    value (for example ``rel="nofollow"``).
-+
-+    If target is not None, a target attribute will be added to the link.
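-+
-+    A short sketch (output shown for illustration)::
-+
-+        urlize(u"visit www.example.com today")
-+        # -> u'visit <a href="http://www.example.com">www.example.com</a> today'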
-+    """
-+    trim_url = (
-+        lambda x, limit=trim_url_limit: limit is not None
-+        and (x[:limit] + (len(x) >= limit and "..." or ""))
-+        or x
-+    )
-+    words = _word_split_re.split(text_type(escape(text)))
-+    rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ""
-+    target_attr = target and ' target="%s"' % escape(target) or ""
-+
-+    for i, word in enumerate(words):
-+        match = _punctuation_re.match(word)
-+        if match:
-+            lead, middle, trail = match.groups()
-+            if middle.startswith("www.") or (
-+                "@" not in middle
-+                and not middle.startswith("http://")
-+                and not middle.startswith("https://")
-+                and len(middle) > 0
-+                and middle[0] in _letters + _digits
-+                and (
-+                    middle.endswith(".org")
-+                    or middle.endswith(".net")
-+                    or middle.endswith(".com")
-+                )
-+            ):
-+                middle = '<a href="http://%s"%s%s>%s</a>' % (
-+                    middle,
-+                    rel_attr,
-+                    target_attr,
-+                    trim_url(middle),
-+                )
-+            if middle.startswith("http://") or middle.startswith("https://"):
-+                middle = '<a href="%s"%s%s>%s</a>' % (
-+                    middle,
-+                    rel_attr,
-+                    target_attr,
-+                    trim_url(middle),
-+                )
-+            if (
-+                "@" in middle
-+                and not middle.startswith("www.")
-+                and ":" not in middle
-+                and _simple_email_re.match(middle)
-+            ):
-+                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
-+            if lead + middle + trail != word:
-+                words[i] = lead + middle + trail
-+    return u"".join(words)
-+
-+
-+def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
-+    """Generate some lorem ipsum for the template."""
-+    from .constants import LOREM_IPSUM_WORDS
-+
-+    words = LOREM_IPSUM_WORDS.split()
-+    result = []
-+
-+    for _ in range(n):
-+        next_capitalized = True
-+        last_comma = last_fullstop = 0
-+        word = None
-+        last = None
-+        p = []
-+
-+        # each paragraph consists of 20 to 100 words.
-+        for idx, _ in enumerate(range(randrange(min, max))):
-+            while True:
-+                word = choice(words)
-+                if word != last:
-+                    last = word
-+                    break
-+            if next_capitalized:
-+                word = word.capitalize()
-+                next_capitalized = False
-+            # add commas
-+            if idx - randrange(3, 8) > last_comma:
-+                last_comma = idx
-+                last_fullstop += 2
-+                word += ","
-+            # add end of sentences
-+            if idx - randrange(10, 20) > last_fullstop:
-+                last_comma = last_fullstop = idx
-+                word += "."
-+                next_capitalized = True
-+            p.append(word)
-+
-+        # ensure that the paragraph ends with a dot.
-+        p = u" ".join(p)
-+        if p.endswith(","):
-+            p = p[:-1] + "."
-+        elif not p.endswith("."):
-+            p += "."
-+        result.append(p)
-+
-+    if not html:
-+        return u"\n\n".join(result)
-+    return Markup(u"\n".join(u"<p>%s</p>" % escape(x) for x in result))
-+
-+
-+def unicode_urlencode(obj, charset="utf-8", for_qs=False):
-+    """Quote a string for use in a URL using the given charset.
-+
-+    This function is misnamed; it is a wrapper around
-+    :func:`urllib.parse.quote`.
-+
-+    :param obj: String or bytes to quote. Other types are converted to
-+        string then encoded to bytes using the given charset.
-+    :param charset: Encode text to bytes using this charset.
-+    :param for_qs: Quote "/" and use "+" for spaces.
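-+
-+    A short sketch of both modes::
-+
-+        unicode_urlencode(u"a b/c")               # -> 'a%20b/c'
-+        unicode_urlencode(u"a b/c", for_qs=True)  # -> 'a+b%2Fc'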
-+    """
-+    if not isinstance(obj, string_types):
-+        obj = text_type(obj)
-+
-+    if isinstance(obj, text_type):
-+        obj = obj.encode(charset)
-+
-+    safe = b"" if for_qs else b"/"
-+    rv = url_quote(obj, safe)
-+
-+    if not isinstance(rv, text_type):
-+        rv = rv.decode("utf-8")
-+
-+    if for_qs:
-+        rv = rv.replace("%20", "+")
-+
-+    return rv
-+
-+
-+class LRUCache(object):
-+    """A simple LRU Cache implementation."""
-+
-+    # this is fast for small capacities (something below 1000) but doesn't
-+    # scale.  As long as it's only used as storage for templates, this
-+    # won't do any harm.
-+
-+    def __init__(self, capacity):
-+        self.capacity = capacity
-+        self._mapping = {}
-+        self._queue = deque()
-+        self._postinit()
-+
-+    def _postinit(self):
-+        # alias all queue methods for faster lookup
-+        self._popleft = self._queue.popleft
-+        self._pop = self._queue.pop
-+        self._remove = self._queue.remove
-+        self._wlock = Lock()
-+        self._append = self._queue.append
-+
-+    def __getstate__(self):
-+        return {
-+            "capacity": self.capacity,
-+            "_mapping": self._mapping,
-+            "_queue": self._queue,
-+        }
-+
-+    def __setstate__(self, d):
-+        self.__dict__.update(d)
-+        self._postinit()
-+
-+    def __getnewargs__(self):
-+        return (self.capacity,)
-+
-+    def copy(self):
-+        """Return a shallow copy of the instance."""
-+        rv = self.__class__(self.capacity)
-+        rv._mapping.update(self._mapping)
-+        rv._queue.extend(self._queue)
-+        return rv
-+
-+    def get(self, key, default=None):
-+        """Return an item from the cache dict or `default`"""
-+        try:
-+            return self[key]
-+        except KeyError:
-+            return default
-+
-+    def setdefault(self, key, default=None):
-+        """Set `default` if the key is not in the cache otherwise
-+        leave unchanged. Return the value of this key.
-+        """
-+        try:
-+            return self[key]
-+        except KeyError:
-+            self[key] = default
-+            return default
-+
-+    def clear(self):
-+        """Clear the cache."""
-+        self._wlock.acquire()
-+        try:
-+            self._mapping.clear()
-+            self._queue.clear()
-+        finally:
-+            self._wlock.release()
-+
-+    def __contains__(self, key):
-+        """Check if a key exists in this cache."""
-+        return key in self._mapping
-+
-+    def __len__(self):
-+        """Return the current size of the cache."""
-+        return len(self._mapping)
-+
-+    def __repr__(self):
-+        return "<%s %r>" % (self.__class__.__name__, self._mapping)
-+
-+    def __getitem__(self, key):
-+        """Get an item from the cache. Moves the item up so that it has the
-+        highest priority then.
-+
-+        Raise a `KeyError` if it does not exist.
-+        """
-+        self._wlock.acquire()
-+        try:
-+            rv = self._mapping[key]
-+            if self._queue[-1] != key:
-+                try:
-+                    self._remove(key)
-+                except ValueError:
-+                    # if something removed the key from the container
-+                    # when we read, ignore the ValueError that we would
-+                    # get otherwise.
-+                    pass
-+                self._append(key)
-+            return rv
-+        finally:
-+            self._wlock.release()
-+
-+    def __setitem__(self, key, value):
-+        """Sets the value for an item. Moves the item up so that it
-+        has the highest priority then.
-+        """
-+        self._wlock.acquire()
-+        try:
-+            if key in self._mapping:
-+                self._remove(key)
-+            elif len(self._mapping) == self.capacity:
-+                del self._mapping[self._popleft()]
-+            self._append(key)
-+            self._mapping[key] = value
-+        finally:
-+            self._wlock.release()
-+
-+    def __delitem__(self, key):
-+        """Remove an item from the cache dict.
-+        Raise a `KeyError` if it does not exist.
-+        """
-+        self._wlock.acquire()
-+        try:
-+            del self._mapping[key]
-+            try:
-+                self._remove(key)
-+            except ValueError:
-+                pass
-+        finally:
-+            self._wlock.release()
-+
-+    def items(self):
-+        """Return a list of items."""
-+        result = [(key, self._mapping[key]) for key in list(self._queue)]
-+        result.reverse()
-+        return result
-+
-+    def iteritems(self):
-+        """Iterate over all items."""
-+        warnings.warn(
-+            "'iteritems()' will be removed in version 3.0. Use"
-+            " 'iter(cache.items())' instead.",
-+            DeprecationWarning,
-+            stacklevel=2,
-+        )
-+        return iter(self.items())
-+
-+    def values(self):
-+        """Return a list of all values."""
-+        return [x[1] for x in self.items()]
-+
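-+    # Note: both ``itervalue`` (a historical misspelling) and ``itervalues``
-+    # are defined below; presumably the misspelled name is kept for
-+    # backwards compatibility.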
-+    def itervalue(self):
-+        """Iterate over all values."""
-+        warnings.warn(
-+            "'itervalue()' will be removed in version 3.0. Use"
-+            " 'iter(cache.values())' instead.",
-+            DeprecationWarning,
-+            stacklevel=2,
-+        )
-+        return iter(self.values())
-+
-+    def itervalues(self):
-+        """Iterate over all values."""
-+        warnings.warn(
-+            "'itervalues()' will be removed in version 3.0. Use"
-+            " 'iter(cache.values())' instead.",
-+            DeprecationWarning,
-+            stacklevel=2,
-+        )
-+        return iter(self.values())
-+
-+    def keys(self):
-+        """Return a list of all keys ordered by most recent usage."""
-+        return list(self)
-+
-+    def iterkeys(self):
-+        """Iterate over all keys in the cache dict, ordered by
-+        the most recent usage.
-+        """
-+        warnings.warn(
-+            "'iterkeys()' will be removed in version 3.0. Use"
-+            " 'iter(cache.keys())' instead.",
-+            DeprecationWarning,
-+            stacklevel=2,
-+        )
-+        return iter(self)
-+
-+    def __iter__(self):
-+        return reversed(tuple(self._queue))
-+
-+    def __reversed__(self):
-+        """Iterate over the keys in the cache dict, oldest items
-+        coming first.
-+        """
-+        return iter(tuple(self._queue))
-+
-+    __copy__ = copy
-+
-+
-+abc.MutableMapping.register(LRUCache)
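-+
-+
-+# A small illustrative sketch (not itself part of the module): reads refresh
-+# an entry's recency, and inserting past ``capacity`` evicts the least
-+# recently used key.
-+#
-+#     cache = LRUCache(2)
-+#     cache["a"] = 1
-+#     cache["b"] = 2
-+#     cache["a"]        # "a" becomes the most recently used key
-+#     cache["c"] = 3    # evicts "b"
-+#     assert "b" not in cache and "a" in cache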
-+
-+
-+def select_autoescape(
-+    enabled_extensions=("html", "htm", "xml"),
-+    disabled_extensions=(),
-+    default_for_string=True,
-+    default=False,
-+):
-+    """Intelligently sets the initial value of autoescaping based on the
-+    filename of the template.  This is the recommended way to configure
-+    autoescaping if you do not want to write a custom function yourself.
-+
-+    If you want to enable it for all templates created from strings or
-+    for all templates with `.html` and `.xml` extensions::
-+
-+        from jinja2 import Environment, select_autoescape
-+        env = Environment(autoescape=select_autoescape(
-+            enabled_extensions=('html', 'xml'),
-+            default_for_string=True,
-+        ))
-+
-+    Example configuration to turn it on at all times except if the template
-+    ends with `.txt`::
-+
-+        from jinja2 import Environment, select_autoescape
-+        env = Environment(autoescape=select_autoescape(
-+            disabled_extensions=('txt',),
-+            default_for_string=True,
-+            default=True,
-+        ))
-+
-+    The `enabled_extensions` is an iterable of all the extensions that
-+    autoescaping should be enabled for.  Likewise `disabled_extensions` is
-+    a list of all templates it should be disabled for.  If a template is
-+    loaded from a string then the default from `default_for_string` is used.
-+    If nothing matches then the initial value of autoescaping is set to the
-+    value of `default`.
-+
-+    For security reasons this function operates case-insensitively.
-+
-+    .. versionadded:: 2.9
-+    """
-+    enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions)
-+    disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions)
-+
-+    def autoescape(template_name):
-+        if template_name is None:
-+            return default_for_string
-+        template_name = template_name.lower()
-+        if template_name.endswith(enabled_patterns):
-+            return True
-+        if template_name.endswith(disabled_patterns):
-+            return False
-+        return default
-+
-+    return autoescape
-+
-+
-+def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
-+    """Works exactly like :func:`dumps` but is safe for use in ``<script>``
-+    tags.  It accepts the same arguments and returns a JSON string.  Note that
-+    this is available in templates through the ``|tojson`` filter which will
-+    also mark the result as safe.  Due to how this function escapes certain
-+    characters this is safe even if used outside of ``<script>`` tags.
-+
-+    The following characters are escaped in strings:
-+
-+    -   ``<``
-+    -   ``>``
-+    -   ``&``
-+    -   ``'``
-+
-+    This makes it safe to embed such strings in any place in HTML with the
-+    notable exception of double-quoted attributes.  In that case, single
-+    quote your attributes or additionally HTML-escape the value.
-+    """
-+    if dumper is None:
-+        dumper = json.dumps
-+    rv = (
-+        dumper(obj, **kwargs)
-+        .replace(u"<", u"\\u003c")
-+        .replace(u">", u"\\u003e")
-+        .replace(u"&", u"\\u0026")
-+        .replace(u"'", u"\\u0027")
-+    )
-+    return Markup(rv)
-+
-+
-+class Cycler(object):
-+    """Cycle through values by yield them one at a time, then restarting
-+    once the end is reached. Available as ``cycler`` in templates.
-+
-+    Similar to ``loop.cycle``, but can be used outside loops or across
-+    multiple loops. For example, render a list of folders and files in a
-+    list, alternating giving them "odd" and "even" classes.
-+
-+    .. code-block:: html+jinja
-+
-+        {% set row_class = cycler("odd", "even") %}
-+        <ul class="browser">
-+        {% for folder in folders %}
-+          <li class="folder {{ row_class.next() }}">{{ folder }}
-+        {% endfor %}
-+        {% for file in files %}
-+          <li class="file {{ row_class.next() }}">{{ file }}
-+        {% endfor %}
-+        </ul>
-+
-+    :param items: Each positional argument will be yielded in the order
-+        given for each cycle.
-+
-+    .. versionadded:: 2.1
-+    """
-+
-+    def __init__(self, *items):
-+        if not items:
-+            raise RuntimeError("at least one item has to be provided")
-+        self.items = items
-+        self.pos = 0
-+
-+    def reset(self):
-+        """Resets the current item to the first item."""
-+        self.pos = 0
-+
-+    @property
-+    def current(self):
-+        """Return the current item. Equivalent to the item that will be
-+        returned next time :meth:`next` is called.
-+        """
-+        return self.items[self.pos]
-+
-+    def next(self):
-+        """Return the current item, then advance :attr:`current` to the
-+        next item.
-+        """
-+        rv = self.current
-+        self.pos = (self.pos + 1) % len(self.items)
-+        return rv
-+
-+    __next__ = next
-+
-+
-+class Joiner(object):
-+    """A joining helper for templates."""
-+
-+    def __init__(self, sep=u", "):
-+        self.sep = sep
-+        self.used = False
-+
-+    def __call__(self):
-+        if not self.used:
-+            self.used = True
-+            return u""
-+        return self.sep
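-+
-+
-+# Illustrative template usage (``joiner`` is exposed as a global in the
-+# default environment; the variables here are assumed).  The first call
-+# returns an empty string, subsequent calls return the separator:
-+#
-+#     {% set pipe = joiner("|") %}
-+#     {% if categories %}{{ pipe() }}Categories: {{ categories|join(", ") }}{% endif %}
-+#     {% if author %}{{ pipe() }}Author: {{ author }}{% endif %}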
-+
-+
-+class Namespace(object):
-+    """A namespace object that can hold arbitrary attributes.  It may be
-+    initialized from a dictionary or with keyword arguments."""
-+
-+    def __init__(*args, **kwargs):  # noqa: B902
-+        self, args = args[0], args[1:]
-+        self.__attrs = dict(*args, **kwargs)
-+
-+    def __getattribute__(self, name):
-+        if name == "_Namespace__attrs":
-+            return object.__getattribute__(self, name)
-+        try:
-+            return self.__attrs[name]
-+        except KeyError:
-+            raise AttributeError(name)
-+
-+    def __setitem__(self, name, value):
-+        self.__attrs[name] = value
-+
-+    def __repr__(self):
-+        return "<Namespace %r>" % self.__attrs
-+
-+
-+# does this python version support async for in and async generators?
-+try:
-+    exec("async def _():\n async for _ in ():\n  yield _")
-+    have_async_gen = True
-+except SyntaxError:
-+    have_async_gen = False
-+
-+
-+def soft_unicode(s):
-+    from markupsafe import soft_unicode
-+
-+    warnings.warn(
-+        "'jinja2.utils.soft_unicode' will be removed in version 3.0."
-+        " Use 'markupsafe.soft_unicode' instead.",
-+        DeprecationWarning,
-+        stacklevel=2,
-+    )
-+    return soft_unicode(s)
-diff --git a/third_party/python/Jinja2/jinja2/visitor.py b/third_party/python/Jinja2/jinja2/visitor.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/Jinja2/jinja2/visitor.py
-@@ -0,0 +1,81 @@
-+# -*- coding: utf-8 -*-
-+"""API for traversing the AST nodes. Implemented by the compiler and
-+meta introspection.
-+"""
-+from .nodes import Node
-+
-+
-+class NodeVisitor(object):
-+    """Walks the abstract syntax tree and call visitor functions for every
-+    node found.  The visitor functions may return values which will be
-+    forwarded by the `visit` method.
-+
-+    Per default the visitor functions for the nodes are ``'visit_'`` +
-+    class name of the node.  So a `TryFinally` node visit function would
-+    be `visit_TryFinally`.  This behavior can be changed by overriding
-+    the `get_visitor` function.  If no visitor function exists for a node
-+    (return value `None`) the `generic_visit` visitor is used instead.
-+    """
-+
-+    def get_visitor(self, node):
-+        """Return the visitor function for this node or `None` if no visitor
-+        exists for this node.  In that case the generic visit function is
-+        used instead.
-+        """
-+        method = "visit_" + node.__class__.__name__
-+        return getattr(self, method, None)
-+
-+    def visit(self, node, *args, **kwargs):
-+        """Visit a node."""
-+        f = self.get_visitor(node)
-+        if f is not None:
-+            return f(node, *args, **kwargs)
-+        return self.generic_visit(node, *args, **kwargs)
-+
-+    def generic_visit(self, node, *args, **kwargs):
-+        """Called if no explicit visitor function exists for a node."""
-+        for node in node.iter_child_nodes():
-+            self.visit(node, *args, **kwargs)
-+
-+
-+class NodeTransformer(NodeVisitor):
-+    """Walks the abstract syntax tree and allows modifications of nodes.
-+
-+    The `NodeTransformer` will walk the AST and use the return value of the
-+    visitor functions to replace or remove the old node.  If the return
-+    value of the visitor function is `None`, the node is removed from its
-+    previous location; otherwise it is replaced with the return value.
-+    The return value may be the original node, in which case no
-+    replacement takes place.
-+    """
-+
-+    def generic_visit(self, node, *args, **kwargs):
-+        for field, old_value in node.iter_fields():
-+            if isinstance(old_value, list):
-+                new_values = []
-+                for value in old_value:
-+                    if isinstance(value, Node):
-+                        value = self.visit(value, *args, **kwargs)
-+                        if value is None:
-+                            continue
-+                        elif not isinstance(value, Node):
-+                            new_values.extend(value)
-+                            continue
-+                    new_values.append(value)
-+                old_value[:] = new_values
-+            elif isinstance(old_value, Node):
-+                new_node = self.visit(old_value, *args, **kwargs)
-+                if new_node is None:
-+                    delattr(node, field)
-+                else:
-+                    setattr(node, field, new_node)
-+        return node
-+
-+    def visit_list(self, node, *args, **kwargs):
-+        """As transformers may return lists in some places this method
-+        can be used to enforce a list as return value.
-+        """
-+        rv = self.visit(node, *args, **kwargs)
-+        if not isinstance(rv, list):
-+            rv = [rv]
-+        return rv
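-+
-+
-+# A small illustrative subclass (hypothetical, not part of this module):
-+# collect the template expressions referenced by {% include %} tags.
-+#
-+#     class IncludeFinder(NodeVisitor):
-+#         def __init__(self):
-+#             self.templates = []
-+#
-+#         def visit_Include(self, node):
-+#             self.templates.append(node.template)
-+#             self.generic_visit(node)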
-diff --git a/third_party/python/MarkupSafe/CHANGES.rst b/third_party/python/MarkupSafe/CHANGES.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/CHANGES.rst
-@@ -0,0 +1,97 @@
-+Version 1.1.1
-+-------------
-+
-+Released 2019-02-23
-+
-+-   Fix a segfault when the ``__html__`` method raises an exception while
-+    using the C speedups. The exception is now propagated correctly. (`#109`_)
-+
-+.. _#109: https://github.com/pallets/markupsafe/pull/109
-+
-+
-+Version 1.1.0
-+-------------
-+
-+Released 2018-11-05
-+
-+-   Drop support for Python 2.6 and 3.3.
-+-   Build wheels for Linux, Mac, and Windows, allowing systems without
-+    a compiler to take advantage of the C extension speedups. (`#104`_)
-+-   Use newer CPython API on Python 3, resulting in a 1.5x speedup.
-+    (`#64`_)
-+-   ``escape`` wraps ``__html__`` result in ``Markup``, consistent with
-+    documented behavior. (`#69`_)
-+
-+.. _#64: https://github.com/pallets/markupsafe/pull/64
-+.. _#69: https://github.com/pallets/markupsafe/pull/69
-+.. _#104: https://github.com/pallets/markupsafe/pull/104
-+
-+
-+Version 1.0
-+-----------
-+
-+Released 2017-03-07
-+
-+-   Fixed custom types not invoking ``__unicode__`` when used with
-+    ``format()``.
-+-   Added ``__version__`` module attribute.
-+-   Improve unescape code to leave lone ampersands alone.
-+
-+
-+Version 0.18
-+------------
-+
-+Released 2013-05-22
-+
-+-   Fixed ``__mul__`` and string splitting on Python 3.
-+
-+
-+Version 0.17
-+------------
-+
-+Released 2013-05-21
-+
-+-   Fixed a bug with broken interpolation on tuples.
-+
-+
-+Version 0.16
-+------------
-+
-+Released 2013-05-20
-+
-+-   Improved Python 3 Support and removed 2to3.
-+-   Removed support for Python 3.2 and 2.5.
-+
-+
-+Version 0.15
-+------------
-+
-+Released 2011-07-20
-+
-+-   Fixed a typo that caused the library to fail to install on pypy and
-+    jython.
-+
-+
-+Version 0.14
-+------------
-+
-+Released 2011-07-20
-+
-+-   Release fix for 0.13.
-+
-+
-+Version 0.13
-+------------
-+
-+Released 2011-07-20
-+
-+-   Do not attempt to compile extension for PyPy or Jython.
-+-   Work around some 64bit Windows issues.
-+
-+
-+Version 0.12
-+------------
-+
-+Released 2011-02-17
-+
-+-   Improved PyPy compatibility.
-diff --git a/third_party/python/MarkupSafe/LICENSE.rst b/third_party/python/MarkupSafe/LICENSE.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/LICENSE.rst
-@@ -0,0 +1,28 @@
-+Copyright 2010 Pallets
-+
-+Redistribution and use in source and binary forms, with or without
-+modification, are permitted provided that the following conditions are
-+met:
-+
-+1.  Redistributions of source code must retain the above copyright
-+    notice, this list of conditions and the following disclaimer.
-+
-+2.  Redistributions in binary form must reproduce the above copyright
-+    notice, this list of conditions and the following disclaimer in the
-+    documentation and/or other materials provided with the distribution.
-+
-+3.  Neither the name of the copyright holder nor the names of its
-+    contributors may be used to endorse or promote products derived from
-+    this software without specific prior written permission.
-+
-+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/third_party/python/MarkupSafe/MANIFEST.in b/third_party/python/MarkupSafe/MANIFEST.in
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/MANIFEST.in
-@@ -0,0 +1,8 @@
-+include CHANGES.rst
-+include LICENSE.rst
-+include README.rst
-+include tox.ini
-+graft docs
-+prune docs/_build
-+graft tests
-+global-exclude *.py[co]
-diff --git a/third_party/python/MarkupSafe/PKG-INFO b/third_party/python/MarkupSafe/PKG-INFO
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/PKG-INFO
-@@ -0,0 +1,101 @@
-+Metadata-Version: 1.2
-+Name: MarkupSafe
-+Version: 1.1.1
-+Summary: Safely add untrusted strings to HTML/XML markup.
-+Home-page: https://palletsprojects.com/p/markupsafe/
-+Author: Armin Ronacher
-+Author-email: armin.ronacher@active-4.com
-+Maintainer: The Pallets Team
-+Maintainer-email: contact@palletsprojects.com
-+License: BSD-3-Clause
-+Project-URL: Documentation, https://markupsafe.palletsprojects.com/
-+Project-URL: Code, https://github.com/pallets/markupsafe
-+Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues
-+Description: MarkupSafe
-+        ==========
-+        
-+        MarkupSafe implements a text object that escapes characters so it is
-+        safe to use in HTML and XML. Characters that have special meanings are
-+        replaced so that they display as the actual characters. This mitigates
-+        injection attacks, meaning untrusted user input can safely be displayed
-+        on a page.
-+        
-+        
-+        Installing
-+        ----------
-+        
-+        Install and update using `pip`_:
-+        
-+        .. code-block:: text
-+        
-+            pip install -U MarkupSafe
-+        
-+        .. _pip: https://pip.pypa.io/en/stable/quickstart/
-+        
-+        
-+        Examples
-+        --------
-+        
-+        .. code-block:: pycon
-+        
-+            >>> from markupsafe import Markup, escape
-+            >>> # escape replaces special characters and wraps in Markup
-+            >>> escape('<script>alert(document.cookie);</script>')
-+            Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
-+            >>> # wrap in Markup to mark text "safe" and prevent escaping
-+            >>> Markup('<strong>Hello</strong>')
-+            Markup('<strong>Hello</strong>')
-+            >>> escape(Markup('<strong>Hello</strong>'))
-+            Markup('<strong>Hello</strong>')
-+            >>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
-+            >>> # methods and operators escape their arguments
-+            >>> template = Markup("Hello <em>%s</em>")
-+            >>> template % '"World"'
-+            Markup('Hello <em>&#34;World&#34;</em>')
-+        
-+        
-+        Donate
-+        ------
-+        
-+        The Pallets organization develops and supports MarkupSafe and other
-+        libraries that use it. In order to grow the community of contributors
-+        and users, and allow the maintainers to devote more time to the
-+        projects, `please donate today`_.
-+        
-+        .. _please donate today: https://palletsprojects.com/donate
-+        
-+        
-+        Links
-+        -----
-+        
-+        *   Website: https://palletsprojects.com/p/markupsafe/
-+        *   Documentation: https://markupsafe.palletsprojects.com/
-+        *   License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_
-+        *   Releases: https://pypi.org/project/MarkupSafe/
-+        *   Code: https://github.com/pallets/markupsafe
-+        *   Issue tracker: https://github.com/pallets/markupsafe/issues
-+        *   Test status:
-+        
-+            *   Linux, Mac: https://travis-ci.org/pallets/markupsafe
-+            *   Windows: https://ci.appveyor.com/project/pallets/markupsafe
-+        
-+        *   Test coverage: https://codecov.io/gh/pallets/markupsafe
-+        
-+Platform: UNKNOWN
-+Classifier: Development Status :: 5 - Production/Stable
-+Classifier: Environment :: Web Environment
-+Classifier: Intended Audience :: Developers
-+Classifier: License :: OSI Approved :: BSD License
-+Classifier: Operating System :: OS Independent
-+Classifier: Programming Language :: Python
-+Classifier: Programming Language :: Python :: 2
-+Classifier: Programming Language :: Python :: 2.7
-+Classifier: Programming Language :: Python :: 3
-+Classifier: Programming Language :: Python :: 3.4
-+Classifier: Programming Language :: Python :: 3.5
-+Classifier: Programming Language :: Python :: 3.6
-+Classifier: Programming Language :: Python :: 3.7
-+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
-+Classifier: Topic :: Software Development :: Libraries :: Python Modules
-+Classifier: Topic :: Text Processing :: Markup :: HTML
-+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
-diff --git a/third_party/python/MarkupSafe/README.rst b/third_party/python/MarkupSafe/README.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/README.rst
-@@ -0,0 +1,69 @@
-+MarkupSafe
-+==========
-+
-+MarkupSafe implements a text object that escapes characters so it is
-+safe to use in HTML and XML. Characters that have special meanings are
-+replaced so that they display as the actual characters. This mitigates
-+injection attacks, meaning untrusted user input can safely be displayed
-+on a page.
-+
-+
-+Installing
-+----------
-+
-+Install and update using `pip`_:
-+
-+.. code-block:: text
-+
-+    pip install -U MarkupSafe
-+
-+.. _pip: https://pip.pypa.io/en/stable/quickstart/
-+
-+
-+Examples
-+--------
-+
-+.. code-block:: pycon
-+
-+    >>> from markupsafe import Markup, escape
-+    >>> # escape replaces special characters and wraps in Markup
-+    >>> escape('<script>alert(document.cookie);</script>')
-+    Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
-+    >>> # wrap in Markup to mark text "safe" and prevent escaping
-+    >>> Markup('<strong>Hello</strong>')
-+    Markup('<strong>Hello</strong>')
-+    >>> escape(Markup('<strong>Hello</strong>'))
-+    Markup('<strong>Hello</strong>')
-+    >>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
-+    >>> # methods and operators escape their arguments
-+    >>> template = Markup("Hello <em>%s</em>")
-+    >>> template % '"World"'
-+    Markup('Hello <em>&#34;World&#34;</em>')
-+
-+
-+Donate
-+------
-+
-+The Pallets organization develops and supports MarkupSafe and other
-+libraries that use it. In order to grow the community of contributors
-+and users, and allow the maintainers to devote more time to the
-+projects, `please donate today`_.
-+
-+.. _please donate today: https://palletsprojects.com/donate
-+
-+
-+Links
-+-----
-+
-+*   Website: https://palletsprojects.com/p/markupsafe/
-+*   Documentation: https://markupsafe.palletsprojects.com/
-+*   License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_
-+*   Releases: https://pypi.org/project/MarkupSafe/
-+*   Code: https://github.com/pallets/markupsafe
-+*   Issue tracker: https://github.com/pallets/markupsafe/issues
-+*   Test status:
-+
-+    *   Linux, Mac: https://travis-ci.org/pallets/markupsafe
-+    *   Windows: https://ci.appveyor.com/project/pallets/markupsafe
-+
-+*   Test coverage: https://codecov.io/gh/pallets/markupsafe
-diff --git a/third_party/python/MarkupSafe/docs/Makefile b/third_party/python/MarkupSafe/docs/Makefile
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/docs/Makefile
-@@ -0,0 +1,19 @@
-+# Minimal makefile for Sphinx documentation
-+#
-+
-+# You can set these variables from the command line.
-+SPHINXOPTS    =
-+SPHINXBUILD   = sphinx-build
-+SOURCEDIR     = .
-+BUILDDIR      = _build
-+
-+# Put it first so that "make" without argument is like "make help".
-+help:
-+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-+
-+.PHONY: help Makefile
-+
-+# Catch-all target: route all unknown targets to Sphinx using the new
-+# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
-+%: Makefile
-+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-diff --git a/third_party/python/MarkupSafe/docs/changes.rst b/third_party/python/MarkupSafe/docs/changes.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/docs/changes.rst
-@@ -0,0 +1,4 @@
-+Changes
-+=======
-+
-+.. include:: ../CHANGES.rst
-diff --git a/third_party/python/MarkupSafe/docs/conf.py b/third_party/python/MarkupSafe/docs/conf.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/docs/conf.py
-@@ -0,0 +1,42 @@
-+from pallets_sphinx_themes import get_version
-+from pallets_sphinx_themes import ProjectLink
-+
-+# Project --------------------------------------------------------------
-+
-+project = "MarkupSafe"
-+copyright = "2010 Pallets Team"
-+author = "Pallets Team"
-+release, version = get_version("MarkupSafe")
-+
-+# General --------------------------------------------------------------
-+
-+master_doc = "index"
-+extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "pallets_sphinx_themes"]
-+intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
-+
-+# HTML -----------------------------------------------------------------
-+
-+html_theme = "flask"
-+html_theme_options = {"index_sidebar_logo": False}
-+html_context = {
-+    "project_links": [
-+        ProjectLink("Donate to Pallets", "https://palletsprojects.com/donate"),
-+        ProjectLink("Website", "https://palletsprojects.com/p/markupsafe/"),
-+        ProjectLink("PyPI releases", "https://pypi.org/project/MarkupSafe/"),
-+        ProjectLink("Source Code", "https://github.com/pallets/markupsafe/"),
-+        ProjectLink("Issue Tracker", "https://github.com/pallets/markupsafe/issues/"),
-+    ]
-+}
-+html_sidebars = {
-+    "index": ["project.html", "localtoc.html", "searchbox.html"],
-+    "**": ["localtoc.html", "relations.html", "searchbox.html"],
-+}
-+singlehtml_sidebars = {"index": ["project.html", "localtoc.html"]}
-+html_title = "MarkupSafe Documentation ({})".format(version)
-+html_show_sourcelink = False
-+
-+# LaTeX ----------------------------------------------------------------
-+
-+latex_documents = [
-+    (master_doc, "MarkupSafe-{}.tex".format(version), html_title, author, "manual")
-+]
-diff --git a/third_party/python/MarkupSafe/docs/escaping.rst b/third_party/python/MarkupSafe/docs/escaping.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/docs/escaping.rst
-@@ -0,0 +1,21 @@
-+.. module:: markupsafe
-+
-+Working With Safe Text
-+======================
-+
-+.. autofunction:: escape
-+
-+.. autoclass:: Markup
-+    :members: escape, unescape, striptags
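-+
-+A short sketch of both (mirroring the project README)::
-+
-+    >>> from markupsafe import Markup, escape
-+    >>> escape("<em>Hi</em>")
-+    Markup('&lt;em&gt;Hi&lt;/em&gt;')
-+    >>> Markup("<em>%s</em>") % "<script>"
-+    Markup('<em>&lt;script&gt;</em>')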
-+
-+
-+Optional Values
-+---------------
-+
-+.. autofunction:: escape_silent
-+
-+
-+Convert an Object to a String
-+-----------------------------
-+
-+.. autofunction:: soft_unicode
-diff --git a/third_party/python/MarkupSafe/docs/formatting.rst b/third_party/python/MarkupSafe/docs/formatting.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/docs/formatting.rst
-@@ -0,0 +1,77 @@
-+.. currentmodule:: markupsafe
-+
-+String Formatting
-+=================
-+
-+The :class:`Markup` class can be used as a format string. Objects
-+formatted into a markup string will be escaped first.
-+
-+
-+Format Method
-+-------------
-+
-+The ``format`` method extends the standard :meth:`str.format` behavior
-+to use an ``__html_format__`` method.
-+
-+#.  If an object has an ``__html_format__`` method, it is called as a
-+    replacement for the ``__format__`` method. It is passed a format
-+    specifier if it's given. The method must return a string or
-+    :class:`Markup` instance.
-+
-+#.  If an object has an ``__html__`` method, it is called. If a format
-+    specifier was passed and the class defined ``__html__`` but not
-+    ``__html_format__``, a ``ValueError`` is raised.
-+
-+#.  Otherwise Python's default format behavior is used and the result
-+    is escaped.
-+
-+For example, to implement a ``User`` that wraps its ``name`` in a
-+``span`` tag, and adds a link when using the ``'link'`` format
-+specifier:
-+
-+.. code-block:: python
-+
-+    class User(object):
-+        def __init__(self, id, name):
-+            self.id = id
-+            self.name = name
-+
-+        def __html_format__(self, format_spec):
-+            if format_spec == 'link':
-+                return Markup(
-+                    '<a href="/user/{}">{}</a>'
-+                ).format(self.id, self.__html__())
-+            elif format_spec:
-+                raise ValueError('Invalid format spec')
-+            return self.__html__()
-+
-+        def __html__(self):
-+            return Markup(
-+                '<span class="user">{0}</span>'
-+            ).format(self.name)
-+
-+
-+.. code-block:: pycon
-+
-+    >>> user = User(3, '<script>')
-+    >>> escape(user)
-+    Markup('<span class="user">&lt;script&gt;</span>')
-+    >>> Markup('<p>User: {user:link}').format(user=user)
-+    Markup('<p>User: <a href="/user/3"><span class="user">&lt;script&gt;</span></a>')
-+
-+See Python's docs on :ref:`format string syntax <python:formatstrings>`.
-+
-+
-+printf-style Formatting
-+-----------------------
-+
-+Besides escaping, there's no special behavior involved with percent
-+formatting.
-+
-+.. code-block:: pycon
-+
-+    >>> user = User(3, '<script>')
-+    >>> Markup('<a href="/user/%d">"%s</a>') % (user.id, user.name)
-+    Markup('<a href="/user/3">&lt;script&gt;</a>')
-+
-+See Python's docs on :ref:`printf-style formatting <python:old-string-formatting>`.
-diff --git a/third_party/python/MarkupSafe/docs/html.rst b/third_party/python/MarkupSafe/docs/html.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/docs/html.rst
-@@ -0,0 +1,51 @@
-+.. currentmodule:: markupsafe
-+
-+HTML Representations
-+====================
-+
-+In many frameworks, if a class implements an ``__html__`` method it
-+will be used to get the object's representation in HTML. MarkupSafe's
-+:func:`escape` function and :class:`Markup` class understand and
-+implement this method. If an object has an ``__html__`` method it will
-+be called rather than converting the object to a string, and the result
-+will be assumed safe and not escaped.
-+
-+For example, an ``Image`` class might automatically generate an
-+``<img>`` tag:
-+
-+.. code-block:: python
-+
-+    class Image:
-+        def __init__(self, url):
-+            self.url = url
-+
-+        def __html__(self):
-+            return '<img src="%s">' % self.url
-+
-+.. code-block:: pycon
-+
-+    >>> img = Image('/static/logo.png')
-+    >>> Markup(img)
-+    Markup('<img src="/static/logo.png">')
-+
-+Since this bypasses escaping, you need to be careful about using
-+user-provided data in the output. For example, a user's display name
-+should still be escaped:
-+
-+.. code-block:: python
-+
-+    class User:
-+        def __init__(self, id, name):
-+            self.id = id
-+            self.name = name
-+
-+        def __html__(self):
-+            return '<a href="/user/{}">{}</a>'.format(
-+                self.id, escape(self.name)
-+            )
-+
-+.. code-block:: pycon
-+
-+    >>> user = User(3, '<script>')
-+    >>> escape(user)
-+    Markup('<a href="/users/3">&lt;script&gt;</a>')
-diff --git a/third_party/python/MarkupSafe/docs/index.rst b/third_party/python/MarkupSafe/docs/index.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/docs/index.rst
-@@ -0,0 +1,53 @@
-+.. currentmodule:: markupsafe
-+
-+MarkupSafe
-+==========
-+
-+MarkupSafe escapes characters so text is safe to use in HTML and XML.
-+Characters that have special meanings are replaced so that they display
-+as the actual characters. This mitigates injection attacks, meaning
-+untrusted user input can safely be displayed on a page.
-+
-+The :func:`escape` function escapes text and returns a :class:`Markup`
-+object. The object won't be escaped anymore, but any text that is used
-+with it will be, ensuring that the result remains safe to use in HTML.
-+
-+>>> from markupsafe import escape
-+>>> hello = escape('<em>Hello</em>')
-+>>> hello
-+Markup('&lt;em&gt;Hello&lt;/em&gt;')
-+>>> escape(hello)
-+Markup('&lt;em&gt;Hello&lt;/em&gt;')
-+>>> hello + ' <strong>World</strong>'
-+Markup('&lt;em&gt;Hello&lt;/em&gt; &lt;strong&gt;World&lt;/strong&gt;')
-+
-+.. note::
-+
-+    The docs assume you're using Python 3. The terms "text" and "string"
-+    refer to the :class:`str` class. In Python 2, this would be the
-+    ``unicode`` class instead.
-+
-+
-+Installing
-+----------
-+
-+Install and update using `pip`_:
-+
-+.. code-block:: text
-+
-+    pip install -U MarkupSafe
-+
-+.. _pip: https://pip.pypa.io/en/stable/quickstart/
-+
-+
-+Table of Contents
-+-----------------
-+
-+.. toctree::
-+    :maxdepth: 2
-+
-+    escaping
-+    html
-+    formatting
-+    license
-+    changes
-diff --git a/third_party/python/MarkupSafe/docs/license.rst b/third_party/python/MarkupSafe/docs/license.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/docs/license.rst
-@@ -0,0 +1,4 @@
-+License
-+=======
-+
-+.. include:: ../LICENSE.rst
-diff --git a/third_party/python/MarkupSafe/docs/make.bat b/third_party/python/MarkupSafe/docs/make.bat
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/docs/make.bat
-@@ -0,0 +1,35 @@
-+@ECHO OFF
-+
-+pushd %~dp0
-+
-+REM Command file for Sphinx documentation
-+
-+if "%SPHINXBUILD%" == "" (
-+	set SPHINXBUILD=sphinx-build
-+)
-+set SOURCEDIR=.
-+set BUILDDIR=_build
-+
-+if "%1" == "" goto help
-+
-+%SPHINXBUILD% >NUL 2>NUL
-+if errorlevel 9009 (
-+	echo.
-+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
-+	echo.installed, then set the SPHINXBUILD environment variable to point
-+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
-+	echo.may add the Sphinx directory to PATH.
-+	echo.
-+	echo.If you don't have Sphinx installed, grab it from
-+	echo.http://sphinx-doc.org/
-+	exit /b 1
-+)
-+
-+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
-+goto end
-+
-+:help
-+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
-+
-+:end
-+popd
-diff --git a/third_party/python/MarkupSafe/docs/requirements.txt b/third_party/python/MarkupSafe/docs/requirements.txt
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/docs/requirements.txt
-@@ -0,0 +1,2 @@
-+Sphinx~=1.8.0
-+Pallets-Sphinx-Themes~=1.1.0
-diff --git a/third_party/python/MarkupSafe/setup.cfg b/third_party/python/MarkupSafe/setup.cfg
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/setup.cfg
-@@ -0,0 +1,27 @@
-+[metadata]
-+license_file = LICENSE.rst
-+
-+[tool:pytest]
-+testpaths = tests
-+
-+[coverage:run]
-+branch = True
-+source = 
-+	markupsafe
-+
-+[coverage:paths]
-+source = 
-+	src/markupsafe
-+	.tox/*/lib/python*/site-packages/markupsafe
-+	.tox/*/site-packages/markupsafe
-+
-+[flake8]
-+select = B, E, F, W, B9
-+ignore = E203, E501, W503
-+max-line-length = 80
-+exclude = src/markupsafe/_compat.py
-+
-+[egg_info]
-+tag_build = 
-+tag_date = 0
-+
-diff --git a/third_party/python/MarkupSafe/setup.py b/third_party/python/MarkupSafe/setup.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/setup.py
-@@ -0,0 +1,125 @@
-+from __future__ import print_function
-+
-+import io
-+import re
-+import sys
-+from distutils.errors import CCompilerError
-+from distutils.errors import DistutilsExecError
-+from distutils.errors import DistutilsPlatformError
-+
-+from setuptools import Extension
-+from setuptools import find_packages
-+from setuptools import setup
-+from setuptools.command.build_ext import build_ext
-+
-+with io.open("README.rst", "rt", encoding="utf8") as f:
-+    readme = f.read()
-+
-+with io.open("src/markupsafe/__init__.py", "rt", encoding="utf8") as f:
-+    version = re.search(r'__version__ = "(.*?)"', f.read()).group(1)
-+
-+is_jython = "java" in sys.platform
-+is_pypy = hasattr(sys, "pypy_version_info")
-+
-+ext_modules = [Extension("markupsafe._speedups", ["src/markupsafe/_speedups.c"])]
-+
-+
-+class BuildFailed(Exception):
-+    pass
-+
-+
-+class ve_build_ext(build_ext):
-+    """This class allows C extension building to fail."""
-+
-+    def run(self):
-+        try:
-+            build_ext.run(self)
-+        except DistutilsPlatformError:
-+            raise BuildFailed()
-+
-+    def build_extension(self, ext):
-+        try:
-+            build_ext.build_extension(self, ext)
-+        except (CCompilerError, DistutilsExecError, DistutilsPlatformError):
-+            raise BuildFailed()
-+        except ValueError:
-+            # this can happen on Windows 64 bit, see Python issue 7511
-+            if "'path'" in str(sys.exc_info()[1]):  # works with Python 2 and 3
-+                raise BuildFailed()
-+            raise
-+
-+
-+def run_setup(with_binary):
-+    setup(
-+        name="MarkupSafe",
-+        version=version,
-+        url="https://palletsprojects.com/p/markupsafe/",
-+        project_urls={
-+            "Documentation": "https://markupsafe.palletsprojects.com/",
-+            "Code": "https://github.com/pallets/markupsafe",
-+            "Issue tracker": "https://github.com/pallets/markupsafe/issues",
-+        },
-+        license="BSD-3-Clause",
-+        author="Armin Ronacher",
-+        author_email="armin.ronacher@active-4.com",
-+        maintainer="The Pallets Team",
-+        maintainer_email="contact@palletsprojects.com",
-+        description="Safely add untrusted strings to HTML/XML markup.",
-+        long_description=readme,
-+        classifiers=[
-+            "Development Status :: 5 - Production/Stable",
-+            "Environment :: Web Environment",
-+            "Intended Audience :: Developers",
-+            "License :: OSI Approved :: BSD License",
-+            "Operating System :: OS Independent",
-+            "Programming Language :: Python",
-+            "Programming Language :: Python :: 2",
-+            "Programming Language :: Python :: 2.7",
-+            "Programming Language :: Python :: 3",
-+            "Programming Language :: Python :: 3.4",
-+            "Programming Language :: Python :: 3.5",
-+            "Programming Language :: Python :: 3.6",
-+            "Programming Language :: Python :: 3.7",
-+            "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
-+            "Topic :: Software Development :: Libraries :: Python Modules",
-+            "Topic :: Text Processing :: Markup :: HTML",
-+        ],
-+        packages=find_packages("src"),
-+        package_dir={"": "src"},
-+        include_package_data=True,
-+        python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
-+        cmdclass={"build_ext": ve_build_ext},
-+        ext_modules=ext_modules if with_binary else [],
-+    )
-+
-+
-+def show_message(*lines):
-+    print("=" * 74)
-+    for line in lines:
-+        print(line)
-+    print("=" * 74)
-+
-+
-+if not (is_pypy or is_jython):
-+    try:
-+        run_setup(True)
-+    except BuildFailed:
-+        show_message(
-+            "WARNING: The C extension could not be compiled, speedups"
-+            " are not enabled.",
-+            "Failure information, if any, is above.",
-+            "Retrying the build without the C extension now.",
-+        )
-+        run_setup(False)
-+        show_message(
-+            "WARNING: The C extension could not be compiled, speedups"
-+            " are not enabled.",
-+            "Plain-Python build succeeded.",
-+        )
-+else:
-+    run_setup(False)
-+    show_message(
-+        "WARNING: C extensions are not supported on this Python"
-+        " platform, speedups are not enabled.",
-+        "Plain-Python build succeeded.",
-+    )
-diff --git a/third_party/python/MarkupSafe/src/markupsafe/__init__.py b/third_party/python/MarkupSafe/src/markupsafe/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/src/markupsafe/__init__.py
-@@ -0,0 +1,327 @@
-+# -*- coding: utf-8 -*-
-+"""
-+markupsafe
-+~~~~~~~~~~
-+
-+Implements an escape function and a Markup string to replace HTML
-+special characters with safe representations.
-+
-+:copyright: 2010 Pallets
-+:license: BSD-3-Clause
-+"""
-+import re
-+import string
-+
-+from ._compat import int_types
-+from ._compat import iteritems
-+from ._compat import Mapping
-+from ._compat import PY2
-+from ._compat import string_types
-+from ._compat import text_type
-+from ._compat import unichr
-+
-+__version__ = "1.1.1"
-+
-+__all__ = ["Markup", "soft_unicode", "escape", "escape_silent"]
-+
-+_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
-+_entity_re = re.compile(r"&([^& ;]+);")
-+
-+
-+class Markup(text_type):
-+    """A string that is ready to be safely inserted into an HTML or XML
-+    document, either because it was escaped or because it was marked
-+    safe.
-+
-+    Passing an object to the constructor converts it to text and wraps
-+    it to mark it safe without escaping. To escape the text, use the
-+    :meth:`escape` class method instead.
-+
-+    >>> Markup('Hello, <em>World</em>!')
-+    Markup('Hello, <em>World</em>!')
-+    >>> Markup(42)
-+    Markup('42')
-+    >>> Markup.escape('Hello, <em>World</em>!')
-+    Markup('Hello, &lt;em&gt;World&lt;/em&gt;!')
-+
-+    This implements the ``__html__()`` interface that some frameworks
-+    use. Passing an object that implements ``__html__()`` will wrap the
-+    output of that method, marking it safe.
-+
-+    >>> class Foo:
-+    ...     def __html__(self):
-+    ...         return '<a href="/foo">foo</a>'
-+    ...
-+    >>> Markup(Foo())
-+    Markup('<a href="/foo">foo</a>')
-+
-+    This is a subclass of the text type (``str`` in Python 3,
-+    ``unicode`` in Python 2). It has the same methods as that type, but
-+    all methods escape their arguments and return a ``Markup`` instance.
-+
-+    >>> Markup('<em>%s</em>') % 'foo & bar'
-+    Markup('<em>foo &amp; bar</em>')
-+    >>> Markup('<em>Hello</em> ') + '<foo>'
-+    Markup('<em>Hello</em> &lt;foo&gt;')
-+    """
-+
-+    __slots__ = ()
-+
-+    def __new__(cls, base=u"", encoding=None, errors="strict"):
-+        if hasattr(base, "__html__"):
-+            base = base.__html__()
-+        if encoding is None:
-+            return text_type.__new__(cls, base)
-+        return text_type.__new__(cls, base, encoding, errors)
-+
-+    def __html__(self):
-+        return self
-+
-+    def __add__(self, other):
-+        if isinstance(other, string_types) or hasattr(other, "__html__"):
-+            return self.__class__(super(Markup, self).__add__(self.escape(other)))
-+        return NotImplemented
-+
-+    def __radd__(self, other):
-+        if hasattr(other, "__html__") or isinstance(other, string_types):
-+            return self.escape(other).__add__(self)
-+        return NotImplemented
-+
-+    def __mul__(self, num):
-+        if isinstance(num, int_types):
-+            return self.__class__(text_type.__mul__(self, num))
-+        return NotImplemented
-+
-+    __rmul__ = __mul__
-+
-+    def __mod__(self, arg):
-+        if isinstance(arg, tuple):
-+            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
-+        else:
-+            arg = _MarkupEscapeHelper(arg, self.escape)
-+        return self.__class__(text_type.__mod__(self, arg))
-+
-+    def __repr__(self):
-+        return "%s(%s)" % (self.__class__.__name__, text_type.__repr__(self))
-+
-+    def join(self, seq):
-+        return self.__class__(text_type.join(self, map(self.escape, seq)))
-+
-+    join.__doc__ = text_type.join.__doc__
-+
-+    def split(self, *args, **kwargs):
-+        return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
-+
-+    split.__doc__ = text_type.split.__doc__
-+
-+    def rsplit(self, *args, **kwargs):
-+        return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
-+
-+    rsplit.__doc__ = text_type.rsplit.__doc__
-+
-+    def splitlines(self, *args, **kwargs):
-+        return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
-+
-+    splitlines.__doc__ = text_type.splitlines.__doc__
-+
-+    def unescape(self):
-+        """Convert escaped markup back into a text string. This replaces
-+        HTML entities with the characters they represent.
-+
-+        >>> Markup('Main &raquo; <em>About</em>').unescape()
-+        'Main » <em>About</em>'
-+        """
-+        from ._constants import HTML_ENTITIES
-+
-+        def handle_match(m):
-+            name = m.group(1)
-+            if name in HTML_ENTITIES:
-+                return unichr(HTML_ENTITIES[name])
-+            try:
-+                if name[:2] in ("#x", "#X"):
-+                    return unichr(int(name[2:], 16))
-+                elif name.startswith("#"):
-+                    return unichr(int(name[1:]))
-+            except ValueError:
-+                pass
-+            # Don't modify unexpected input.
-+            return m.group()
-+
-+        return _entity_re.sub(handle_match, text_type(self))
-+
-+    def striptags(self):
-+        """:meth:`unescape` the markup, remove tags, and normalize
-+        whitespace to single spaces.
-+
-+        >>> Markup('Main &raquo;\t<em>About</em>').striptags()
-+        'Main » About'
-+        """
-+        stripped = u" ".join(_striptags_re.sub("", self).split())
-+        return Markup(stripped).unescape()
-+
-+    @classmethod
-+    def escape(cls, s):
-+        """Escape a string. Calls :func:`escape` and ensures that for
-+        subclasses the correct type is returned.
-+        """
-+        rv = escape(s)
-+        if rv.__class__ is not cls:
-+            return cls(rv)
-+        return rv
-+
-+    def make_simple_escaping_wrapper(name):  # noqa: B902
-+        orig = getattr(text_type, name)
-+
-+        def func(self, *args, **kwargs):
-+            args = _escape_argspec(list(args), enumerate(args), self.escape)
-+            _escape_argspec(kwargs, iteritems(kwargs), self.escape)
-+            return self.__class__(orig(self, *args, **kwargs))
-+
-+        func.__name__ = orig.__name__
-+        func.__doc__ = orig.__doc__
-+        return func
-+
-+    for method in (
-+        "__getitem__",
-+        "capitalize",
-+        "title",
-+        "lower",
-+        "upper",
-+        "replace",
-+        "ljust",
-+        "rjust",
-+        "lstrip",
-+        "rstrip",
-+        "center",
-+        "strip",
-+        "translate",
-+        "expandtabs",
-+        "swapcase",
-+        "zfill",
-+    ):
-+        locals()[method] = make_simple_escaping_wrapper(method)
-+
-+    def partition(self, sep):
-+        return tuple(map(self.__class__, text_type.partition(self, self.escape(sep))))
-+
-+    def rpartition(self, sep):
-+        return tuple(map(self.__class__, text_type.rpartition(self, self.escape(sep))))
-+
-+    def format(self, *args, **kwargs):
-+        formatter = EscapeFormatter(self.escape)
-+        kwargs = _MagicFormatMapping(args, kwargs)
-+        return self.__class__(formatter.vformat(self, args, kwargs))
-+
-+    def __html_format__(self, format_spec):
-+        if format_spec:
-+            raise ValueError("Unsupported format specification " "for Markup.")
-+        return self
-+
-+    # not in python 3
-+    if hasattr(text_type, "__getslice__"):
-+        __getslice__ = make_simple_escaping_wrapper("__getslice__")
-+
-+    del method, make_simple_escaping_wrapper
-+
-+
-+class _MagicFormatMapping(Mapping):
-+    """This class implements a dummy wrapper to fix a bug in the Python
-+    standard library for string formatting.
-+
-+    See http://bugs.python.org/issue13598 for information about why
-+    this is necessary.
-+    """
-+
-+    def __init__(self, args, kwargs):
-+        self._args = args
-+        self._kwargs = kwargs
-+        self._last_index = 0
-+
-+    def __getitem__(self, key):
-+        if key == "":
-+            idx = self._last_index
-+            self._last_index += 1
-+            try:
-+                return self._args[idx]
-+            except LookupError:
-+                pass
-+            key = str(idx)
-+        return self._kwargs[key]
-+
-+    def __iter__(self):
-+        return iter(self._kwargs)
-+
-+    def __len__(self):
-+        return len(self._kwargs)
-+
-+
-+if hasattr(text_type, "format"):
-+
-+    class EscapeFormatter(string.Formatter):
-+        def __init__(self, escape):
-+            self.escape = escape
-+
-+        def format_field(self, value, format_spec):
-+            if hasattr(value, "__html_format__"):
-+                rv = value.__html_format__(format_spec)
-+            elif hasattr(value, "__html__"):
-+                if format_spec:
-+                    raise ValueError(
-+                        "Format specifier {0} given, but {1} does not"
-+                        " define __html_format__. A class that defines"
-+                        " __html__ must define __html_format__ to work"
-+                        " with format specifiers.".format(format_spec, type(value))
-+                    )
-+                rv = value.__html__()
-+            else:
-+                # We need to make sure the format spec is unicode here as
-+                # otherwise the wrong callback methods are invoked.  For
-+                # instance a byte string there would invoke __str__ and
-+                # not __unicode__.
-+                rv = string.Formatter.format_field(self, value, text_type(format_spec))
-+            return text_type(self.escape(rv))
-+
-+
-+def _escape_argspec(obj, iterable, escape):
-+    """Helper for various string-wrapped functions."""
-+    for key, value in iterable:
-+        if hasattr(value, "__html__") or isinstance(value, string_types):
-+            obj[key] = escape(value)
-+    return obj
-+
-+
-+class _MarkupEscapeHelper(object):
-+    """Helper for Markup.__mod__"""
-+
-+    def __init__(self, obj, escape):
-+        self.obj = obj
-+        self.escape = escape
-+
-+    def __getitem__(self, item):
-+        return _MarkupEscapeHelper(self.obj[item], self.escape)
-+
-+    def __str__(self):
-+        return text_type(self.escape(self.obj))
-+
-+    __unicode__ = __str__
-+
-+    def __repr__(self):
-+        return str(self.escape(repr(self.obj)))
-+
-+    def __int__(self):
-+        return int(self.obj)
-+
-+    def __float__(self):
-+        return float(self.obj)
-+
-+
-+# We have to import these down here because the speedups and native
-+# modules import the Markup type, which is defined above.
-+try:
-+    from ._speedups import escape, escape_silent, soft_unicode
-+except ImportError:
-+    from ._native import escape, escape_silent, soft_unicode
-+
-+if not PY2:
-+    soft_str = soft_unicode
-+    __all__.append("soft_str")
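
For reference, a minimal sketch of how the Markup type vendored above behaves; the strings are illustrative placeholders, and this assumes the vendored markupsafe package is importable:

    from markupsafe import Markup

    # Interpolation through __mod__ escapes the argument, neutralizing markup.
    template = Markup("<p>Hello, %s!</p>")
    rendered = template % '<script>alert("x")</script>'
    assert "&lt;script&gt;" in rendered

    # Wrapped str methods escape their arguments and return Markup again.
    joined = Markup("<br>").join(["a & b", Markup("<em>ok</em>")])
    assert str(joined) == "a &amp; b<br><em>ok</em>"
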
-diff --git a/third_party/python/MarkupSafe/src/markupsafe/_compat.py b/third_party/python/MarkupSafe/src/markupsafe/_compat.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/src/markupsafe/_compat.py
-@@ -0,0 +1,33 @@
-+# -*- coding: utf-8 -*-
-+"""
-+markupsafe._compat
-+~~~~~~~~~~~~~~~~~~
-+
-+:copyright: 2010 Pallets
-+:license: BSD-3-Clause
-+"""
-+import sys
-+
-+PY2 = sys.version_info[0] == 2
-+
-+if not PY2:
-+    text_type = str
-+    string_types = (str,)
-+    unichr = chr
-+    int_types = (int,)
-+
-+    def iteritems(x):
-+        return iter(x.items())
-+
-+    from collections.abc import Mapping
-+
-+else:
-+    text_type = unicode
-+    string_types = (str, unicode)
-+    unichr = unichr
-+    int_types = (int, long)
-+
-+    def iteritems(x):
-+        return x.iteritems()
-+
-+    from collections import Mapping
-diff --git a/third_party/python/MarkupSafe/src/markupsafe/_constants.py b/third_party/python/MarkupSafe/src/markupsafe/_constants.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/src/markupsafe/_constants.py
-@@ -0,0 +1,264 @@
-+# -*- coding: utf-8 -*-
-+"""
-+markupsafe._constants
-+~~~~~~~~~~~~~~~~~~~~~
-+
-+:copyright: 2010 Pallets
-+:license: BSD-3-Clause
-+"""
-+
-+HTML_ENTITIES = {
-+    "AElig": 198,
-+    "Aacute": 193,
-+    "Acirc": 194,
-+    "Agrave": 192,
-+    "Alpha": 913,
-+    "Aring": 197,
-+    "Atilde": 195,
-+    "Auml": 196,
-+    "Beta": 914,
-+    "Ccedil": 199,
-+    "Chi": 935,
-+    "Dagger": 8225,
-+    "Delta": 916,
-+    "ETH": 208,
-+    "Eacute": 201,
-+    "Ecirc": 202,
-+    "Egrave": 200,
-+    "Epsilon": 917,
-+    "Eta": 919,
-+    "Euml": 203,
-+    "Gamma": 915,
-+    "Iacute": 205,
-+    "Icirc": 206,
-+    "Igrave": 204,
-+    "Iota": 921,
-+    "Iuml": 207,
-+    "Kappa": 922,
-+    "Lambda": 923,
-+    "Mu": 924,
-+    "Ntilde": 209,
-+    "Nu": 925,
-+    "OElig": 338,
-+    "Oacute": 211,
-+    "Ocirc": 212,
-+    "Ograve": 210,
-+    "Omega": 937,
-+    "Omicron": 927,
-+    "Oslash": 216,
-+    "Otilde": 213,
-+    "Ouml": 214,
-+    "Phi": 934,
-+    "Pi": 928,
-+    "Prime": 8243,
-+    "Psi": 936,
-+    "Rho": 929,
-+    "Scaron": 352,
-+    "Sigma": 931,
-+    "THORN": 222,
-+    "Tau": 932,
-+    "Theta": 920,
-+    "Uacute": 218,
-+    "Ucirc": 219,
-+    "Ugrave": 217,
-+    "Upsilon": 933,
-+    "Uuml": 220,
-+    "Xi": 926,
-+    "Yacute": 221,
-+    "Yuml": 376,
-+    "Zeta": 918,
-+    "aacute": 225,
-+    "acirc": 226,
-+    "acute": 180,
-+    "aelig": 230,
-+    "agrave": 224,
-+    "alefsym": 8501,
-+    "alpha": 945,
-+    "amp": 38,
-+    "and": 8743,
-+    "ang": 8736,
-+    "apos": 39,
-+    "aring": 229,
-+    "asymp": 8776,
-+    "atilde": 227,
-+    "auml": 228,
-+    "bdquo": 8222,
-+    "beta": 946,
-+    "brvbar": 166,
-+    "bull": 8226,
-+    "cap": 8745,
-+    "ccedil": 231,
-+    "cedil": 184,
-+    "cent": 162,
-+    "chi": 967,
-+    "circ": 710,
-+    "clubs": 9827,
-+    "cong": 8773,
-+    "copy": 169,
-+    "crarr": 8629,
-+    "cup": 8746,
-+    "curren": 164,
-+    "dArr": 8659,
-+    "dagger": 8224,
-+    "darr": 8595,
-+    "deg": 176,
-+    "delta": 948,
-+    "diams": 9830,
-+    "divide": 247,
-+    "eacute": 233,
-+    "ecirc": 234,
-+    "egrave": 232,
-+    "empty": 8709,
-+    "emsp": 8195,
-+    "ensp": 8194,
-+    "epsilon": 949,
-+    "equiv": 8801,
-+    "eta": 951,
-+    "eth": 240,
-+    "euml": 235,
-+    "euro": 8364,
-+    "exist": 8707,
-+    "fnof": 402,
-+    "forall": 8704,
-+    "frac12": 189,
-+    "frac14": 188,
-+    "frac34": 190,
-+    "frasl": 8260,
-+    "gamma": 947,
-+    "ge": 8805,
-+    "gt": 62,
-+    "hArr": 8660,
-+    "harr": 8596,
-+    "hearts": 9829,
-+    "hellip": 8230,
-+    "iacute": 237,
-+    "icirc": 238,
-+    "iexcl": 161,
-+    "igrave": 236,
-+    "image": 8465,
-+    "infin": 8734,
-+    "int": 8747,
-+    "iota": 953,
-+    "iquest": 191,
-+    "isin": 8712,
-+    "iuml": 239,
-+    "kappa": 954,
-+    "lArr": 8656,
-+    "lambda": 955,
-+    "lang": 9001,
-+    "laquo": 171,
-+    "larr": 8592,
-+    "lceil": 8968,
-+    "ldquo": 8220,
-+    "le": 8804,
-+    "lfloor": 8970,
-+    "lowast": 8727,
-+    "loz": 9674,
-+    "lrm": 8206,
-+    "lsaquo": 8249,
-+    "lsquo": 8216,
-+    "lt": 60,
-+    "macr": 175,
-+    "mdash": 8212,
-+    "micro": 181,
-+    "middot": 183,
-+    "minus": 8722,
-+    "mu": 956,
-+    "nabla": 8711,
-+    "nbsp": 160,
-+    "ndash": 8211,
-+    "ne": 8800,
-+    "ni": 8715,
-+    "not": 172,
-+    "notin": 8713,
-+    "nsub": 8836,
-+    "ntilde": 241,
-+    "nu": 957,
-+    "oacute": 243,
-+    "ocirc": 244,
-+    "oelig": 339,
-+    "ograve": 242,
-+    "oline": 8254,
-+    "omega": 969,
-+    "omicron": 959,
-+    "oplus": 8853,
-+    "or": 8744,
-+    "ordf": 170,
-+    "ordm": 186,
-+    "oslash": 248,
-+    "otilde": 245,
-+    "otimes": 8855,
-+    "ouml": 246,
-+    "para": 182,
-+    "part": 8706,
-+    "permil": 8240,
-+    "perp": 8869,
-+    "phi": 966,
-+    "pi": 960,
-+    "piv": 982,
-+    "plusmn": 177,
-+    "pound": 163,
-+    "prime": 8242,
-+    "prod": 8719,
-+    "prop": 8733,
-+    "psi": 968,
-+    "quot": 34,
-+    "rArr": 8658,
-+    "radic": 8730,
-+    "rang": 9002,
-+    "raquo": 187,
-+    "rarr": 8594,
-+    "rceil": 8969,
-+    "rdquo": 8221,
-+    "real": 8476,
-+    "reg": 174,
-+    "rfloor": 8971,
-+    "rho": 961,
-+    "rlm": 8207,
-+    "rsaquo": 8250,
-+    "rsquo": 8217,
-+    "sbquo": 8218,
-+    "scaron": 353,
-+    "sdot": 8901,
-+    "sect": 167,
-+    "shy": 173,
-+    "sigma": 963,
-+    "sigmaf": 962,
-+    "sim": 8764,
-+    "spades": 9824,
-+    "sub": 8834,
-+    "sube": 8838,
-+    "sum": 8721,
-+    "sup": 8835,
-+    "sup1": 185,
-+    "sup2": 178,
-+    "sup3": 179,
-+    "supe": 8839,
-+    "szlig": 223,
-+    "tau": 964,
-+    "there4": 8756,
-+    "theta": 952,
-+    "thetasym": 977,
-+    "thinsp": 8201,
-+    "thorn": 254,
-+    "tilde": 732,
-+    "times": 215,
-+    "trade": 8482,
-+    "uArr": 8657,
-+    "uacute": 250,
-+    "uarr": 8593,
-+    "ucirc": 251,
-+    "ugrave": 249,
-+    "uml": 168,
-+    "upsih": 978,
-+    "upsilon": 965,
-+    "uuml": 252,
-+    "weierp": 8472,
-+    "xi": 958,
-+    "yacute": 253,
-+    "yen": 165,
-+    "yuml": 255,
-+    "zeta": 950,
-+    "zwj": 8205,
-+    "zwnj": 8204,
-+}
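
The entity table above is what drives Markup.unescape(); a brief sketch of that round trip, assuming the vendored package is on sys.path:

    from markupsafe import Markup

    # Named entities resolve through HTML_ENTITIES; numeric forms decode directly.
    assert Markup("Caf&eacute; &amp; more").unescape() == u"Caf\xe9 & more"
    assert Markup("&#x41;&#66;").unescape() == "AB"

    # Unrecognized entity names are left as-is rather than guessed at.
    assert Markup("&nosuchentity;").unescape() == "&nosuchentity;"
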
-diff --git a/third_party/python/MarkupSafe/src/markupsafe/_native.py b/third_party/python/MarkupSafe/src/markupsafe/_native.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/src/markupsafe/_native.py
-@@ -0,0 +1,69 @@
-+# -*- coding: utf-8 -*-
-+"""
-+markupsafe._native
-+~~~~~~~~~~~~~~~~~~
-+
-+Native Python implementation used when the C module is not compiled.
-+
-+:copyright: 2010 Pallets
-+:license: BSD-3-Clause
-+"""
-+from . import Markup
-+from ._compat import text_type
-+
-+
-+def escape(s):
-+    """Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
-+    the string with HTML-safe sequences. Use this if you need to display
-+    text that might contain such characters in HTML.
-+
-+    If the object has an ``__html__`` method, it is called and the
-+    return value is assumed to already be safe for HTML.
-+
-+    :param s: An object to be converted to a string and escaped.
-+    :return: A :class:`Markup` string with the escaped text.
-+    """
-+    if hasattr(s, "__html__"):
-+        return Markup(s.__html__())
-+    return Markup(
-+        text_type(s)
-+        .replace("&", "&amp;")
-+        .replace(">", "&gt;")
-+        .replace("<", "&lt;")
-+        .replace("'", "&#39;")
-+        .replace('"', "&#34;")
-+    )
-+
-+
-+def escape_silent(s):
-+    """Like :func:`escape` but treats ``None`` as the empty string.
-+    Useful with optional values, as otherwise you get the string
-+    ``'None'`` when the value is ``None``.
-+
-+    >>> escape(None)
-+    Markup('None')
-+    >>> escape_silent(None)
-+    Markup('')
-+    """
-+    if s is None:
-+        return Markup()
-+    return escape(s)
-+
-+
-+def soft_unicode(s):
-+    """Convert an object to a string if it isn't already. This preserves
-+    a :class:`Markup` string rather than converting it back to a basic
-+    string, so it will still be marked as safe and won't be escaped
-+    again.
-+
-+    >>> value = escape('<User 1>')
-+    >>> value
-+    Markup('&lt;User 1&gt;')
-+    >>> escape(str(value))
-+    Markup('&amp;lt;User 1&amp;gt;')
-+    >>> escape(soft_unicode(value))
-+    Markup('&lt;User 1&gt;')
-+    """
-+    if not isinstance(s, text_type):
-+        s = text_type(s)
-+    return s
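
A quick sketch of the contracts of the three functions above, exercised directly against the pure-Python module (the import path assumes this vendored layout):

    from markupsafe._native import escape, escape_silent, soft_unicode

    assert str(escape('<a href="#">x</a>')) == "&lt;a href=&#34;#&#34;&gt;x&lt;/a&gt;"

    # escape() renders None literally; escape_silent() maps it to the empty string.
    assert str(escape(None)) == "None"
    assert str(escape_silent(None)) == ""

    # soft_unicode() stringifies without undoing an already-safe Markup value.
    safe = escape("1 < 2")
    assert soft_unicode(safe) is safe
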
-diff --git a/third_party/python/MarkupSafe/src/markupsafe/_speedups.c b/third_party/python/MarkupSafe/src/markupsafe/_speedups.c
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/src/markupsafe/_speedups.c
-@@ -0,0 +1,423 @@
-+/**
-+ * markupsafe._speedups
-+ * ~~~~~~~~~~~~~~~~~~~~
-+ *
-+ * C implementation of escaping for better performance. Used instead of
-+ * the native Python implementation when compiled.
-+ *
-+ * :copyright: 2010 Pallets
-+ * :license: BSD-3-Clause
-+ */
-+#include <Python.h>
-+
-+#if PY_MAJOR_VERSION < 3
-+#define ESCAPED_CHARS_TABLE_SIZE 63
-+#define UNICHR(x) (PyUnicode_AS_UNICODE((PyUnicodeObject*)PyUnicode_DecodeASCII(x, strlen(x), NULL)));
-+
-+static Py_ssize_t escaped_chars_delta_len[ESCAPED_CHARS_TABLE_SIZE];
-+static Py_UNICODE *escaped_chars_repl[ESCAPED_CHARS_TABLE_SIZE];
-+#endif
-+
-+static PyObject* markup;
-+
-+static int
-+init_constants(void)
-+{
-+	PyObject *module;
-+
-+#if PY_MAJOR_VERSION < 3
-+	/* mapping of characters to replace */
-+	escaped_chars_repl['"'] = UNICHR("&#34;");
-+	escaped_chars_repl['\''] = UNICHR("&#39;");
-+	escaped_chars_repl['&'] = UNICHR("&amp;");
-+	escaped_chars_repl['<'] = UNICHR("&lt;");
-+	escaped_chars_repl['>'] = UNICHR("&gt;");
-+
-+	/* lengths of those characters when replaced - 1 */
-+	memset(escaped_chars_delta_len, 0, sizeof (escaped_chars_delta_len));
-+	escaped_chars_delta_len['"'] = escaped_chars_delta_len['\''] = \
-+		escaped_chars_delta_len['&'] = 4;
-+	escaped_chars_delta_len['<'] = escaped_chars_delta_len['>'] = 3;
-+#endif
-+
-+	/* import markup type so that we can mark the return value */
-+	module = PyImport_ImportModule("markupsafe");
-+	if (!module)
-+		return 0;
-+	markup = PyObject_GetAttrString(module, "Markup");
-+	Py_DECREF(module);
-+
-+	return 1;
-+}
-+
-+#if PY_MAJOR_VERSION < 3
-+static PyObject*
-+escape_unicode(PyUnicodeObject *in)
-+{
-+	PyUnicodeObject *out;
-+	Py_UNICODE *inp = PyUnicode_AS_UNICODE(in);
-+	const Py_UNICODE *inp_end = PyUnicode_AS_UNICODE(in) + PyUnicode_GET_SIZE(in);
-+	Py_UNICODE *next_escp;
-+	Py_UNICODE *outp;
-+	Py_ssize_t delta=0, erepl=0, delta_len=0;
-+
-+	/* First we need to figure out how long the escaped string will be */
-+	while (*(inp) || inp < inp_end) {
-+		if (*inp < ESCAPED_CHARS_TABLE_SIZE) {
-+			delta += escaped_chars_delta_len[*inp];
-+			erepl += !!escaped_chars_delta_len[*inp];
-+		}
-+		++inp;
-+	}
-+
-+	/* Do we need to escape anything at all? */
-+	if (!erepl) {
-+		Py_INCREF(in);
-+		return (PyObject*)in;
-+	}
-+
-+	out = (PyUnicodeObject*)PyUnicode_FromUnicode(NULL, PyUnicode_GET_SIZE(in) + delta);
-+	if (!out)
-+		return NULL;
-+
-+	outp = PyUnicode_AS_UNICODE(out);
-+	inp = PyUnicode_AS_UNICODE(in);
-+	while (erepl-- > 0) {
-+		/* look for the next substitution */
-+		next_escp = inp;
-+		while (next_escp < inp_end) {
-+			if (*next_escp < ESCAPED_CHARS_TABLE_SIZE &&
-+			    (delta_len = escaped_chars_delta_len[*next_escp])) {
-+				++delta_len;
-+				break;
-+			}
-+			++next_escp;
-+		}
-+
-+		if (next_escp > inp) {
-+			/* copy unescaped chars between inp and next_escp */
-+			Py_UNICODE_COPY(outp, inp, next_escp-inp);
-+			outp += next_escp - inp;
-+		}
-+
-+		/* escape 'next_escp' */
-+		Py_UNICODE_COPY(outp, escaped_chars_repl[*next_escp], delta_len);
-+		outp += delta_len;
-+
-+		inp = next_escp + 1;
-+	}
-+	if (inp < inp_end)
-+		Py_UNICODE_COPY(outp, inp, PyUnicode_GET_SIZE(in) - (inp - PyUnicode_AS_UNICODE(in)));
-+
-+	return (PyObject*)out;
-+}
-+#else /* PY_MAJOR_VERSION < 3 */
-+
-+#define GET_DELTA(inp, inp_end, delta) \
-+	while (inp < inp_end) {	 \
-+		switch (*inp++) {	   \
-+		case '"':			   \
-+		case '\'':			  \
-+		case '&':			   \
-+			delta += 4;		 \
-+			break;			  \
-+		case '<':			   \
-+		case '>':			   \
-+			delta += 3;		 \
-+			break;			  \
-+		}					   \
-+	}
-+
-+#define DO_ESCAPE(inp, inp_end, outp) \
-+	{  \
-+		Py_ssize_t ncopy = 0;  \
-+		while (inp < inp_end) {  \
-+			switch (*inp) {  \
-+			case '"':  \
-+				memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
-+				outp += ncopy; ncopy = 0; \
-+				*outp++ = '&';  \
-+				*outp++ = '#';  \
-+				*outp++ = '3';  \
-+				*outp++ = '4';  \
-+				*outp++ = ';';  \
-+				break;  \
-+			case '\'':  \
-+				memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
-+				outp += ncopy; ncopy = 0; \
-+				*outp++ = '&';  \
-+				*outp++ = '#';  \
-+				*outp++ = '3';  \
-+				*outp++ = '9';  \
-+				*outp++ = ';';  \
-+				break;  \
-+			case '&':  \
-+				memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
-+				outp += ncopy; ncopy = 0; \
-+				*outp++ = '&';  \
-+				*outp++ = 'a';  \
-+				*outp++ = 'm';  \
-+				*outp++ = 'p';  \
-+				*outp++ = ';';  \
-+				break;  \
-+			case '<':  \
-+				memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
-+				outp += ncopy; ncopy = 0; \
-+				*outp++ = '&';  \
-+				*outp++ = 'l';  \
-+				*outp++ = 't';  \
-+				*outp++ = ';';  \
-+				break;  \
-+			case '>':  \
-+				memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
-+				outp += ncopy; ncopy = 0; \
-+				*outp++ = '&';  \
-+				*outp++ = 'g';  \
-+				*outp++ = 't';  \
-+				*outp++ = ';';  \
-+				break;  \
-+			default:  \
-+				ncopy++; \
-+			}  \
-+			inp++; \
-+		}  \
-+		memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
-+	}
-+
-+static PyObject*
-+escape_unicode_kind1(PyUnicodeObject *in)
-+{
-+	Py_UCS1 *inp = PyUnicode_1BYTE_DATA(in);
-+	Py_UCS1 *inp_end = inp + PyUnicode_GET_LENGTH(in);
-+	Py_UCS1 *outp;
-+	PyObject *out;
-+	Py_ssize_t delta = 0;
-+
-+	GET_DELTA(inp, inp_end, delta);
-+	if (!delta) {
-+		Py_INCREF(in);
-+		return (PyObject*)in;
-+	}
-+
-+	out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta,
-+						PyUnicode_IS_ASCII(in) ? 127 : 255);
-+	if (!out)
-+		return NULL;
-+
-+	inp = PyUnicode_1BYTE_DATA(in);
-+	outp = PyUnicode_1BYTE_DATA(out);
-+	DO_ESCAPE(inp, inp_end, outp);
-+	return out;
-+}
-+
-+static PyObject*
-+escape_unicode_kind2(PyUnicodeObject *in)
-+{
-+	Py_UCS2 *inp = PyUnicode_2BYTE_DATA(in);
-+	Py_UCS2 *inp_end = inp + PyUnicode_GET_LENGTH(in);
-+	Py_UCS2 *outp;
-+	PyObject *out;
-+	Py_ssize_t delta = 0;
-+
-+	GET_DELTA(inp, inp_end, delta);
-+	if (!delta) {
-+		Py_INCREF(in);
-+		return (PyObject*)in;
-+	}
-+
-+	out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 65535);
-+	if (!out)
-+		return NULL;
-+
-+	inp = PyUnicode_2BYTE_DATA(in);
-+	outp = PyUnicode_2BYTE_DATA(out);
-+	DO_ESCAPE(inp, inp_end, outp);
-+	return out;
-+}
-+
-+
-+static PyObject*
-+escape_unicode_kind4(PyUnicodeObject *in)
-+{
-+	Py_UCS4 *inp = PyUnicode_4BYTE_DATA(in);
-+	Py_UCS4 *inp_end = inp + PyUnicode_GET_LENGTH(in);
-+	Py_UCS4 *outp;
-+	PyObject *out;
-+	Py_ssize_t delta = 0;
-+
-+	GET_DELTA(inp, inp_end, delta);
-+	if (!delta) {
-+		Py_INCREF(in);
-+		return (PyObject*)in;
-+	}
-+
-+	out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 1114111);
-+	if (!out)
-+		return NULL;
-+
-+	inp = PyUnicode_4BYTE_DATA(in);
-+	outp = PyUnicode_4BYTE_DATA(out);
-+	DO_ESCAPE(inp, inp_end, outp);
-+	return out;
-+}
-+
-+static PyObject*
-+escape_unicode(PyUnicodeObject *in)
-+{
-+	if (PyUnicode_READY(in))
-+		return NULL;
-+
-+	switch (PyUnicode_KIND(in)) {
-+	case PyUnicode_1BYTE_KIND:
-+		return escape_unicode_kind1(in);
-+	case PyUnicode_2BYTE_KIND:
-+		return escape_unicode_kind2(in);
-+	case PyUnicode_4BYTE_KIND:
-+		return escape_unicode_kind4(in);
-+	}
-+	assert(0);  /* shouldn't happen */
-+	return NULL;
-+}
-+#endif /* PY_MAJOR_VERSION < 3 */
-+
-+static PyObject*
-+escape(PyObject *self, PyObject *text)
-+{
-+	static PyObject *id_html;
-+	PyObject *s = NULL, *rv = NULL, *html;
-+
-+	if (id_html == NULL) {
-+#if PY_MAJOR_VERSION < 3
-+		id_html = PyString_InternFromString("__html__");
-+#else
-+		id_html = PyUnicode_InternFromString("__html__");
-+#endif
-+		if (id_html == NULL) {
-+			return NULL;
-+		}
-+	}
-+
-+	/* we don't have to escape integers, bools or floats */
-+	if (PyLong_CheckExact(text) ||
-+#if PY_MAJOR_VERSION < 3
-+	    PyInt_CheckExact(text) ||
-+#endif
-+	    PyFloat_CheckExact(text) || PyBool_Check(text) ||
-+	    text == Py_None)
-+		return PyObject_CallFunctionObjArgs(markup, text, NULL);
-+
-+	/* if the object has an __html__ method that performs the escaping */
-+	html = PyObject_GetAttr(text, id_html);
-+	if (html) {
-+		s = PyObject_CallObject(html, NULL);
-+		Py_DECREF(html);
-+		if (s == NULL) {
-+			return NULL;
-+		}
-+		/* Convert to Markup object */
-+		rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
-+		Py_DECREF(s);
-+		return rv;
-+	}
-+
-+	/* otherwise make the object unicode if it isn't, then escape */
-+	PyErr_Clear();
-+	if (!PyUnicode_Check(text)) {
-+#if PY_MAJOR_VERSION < 3
-+		PyObject *unicode = PyObject_Unicode(text);
-+#else
-+		PyObject *unicode = PyObject_Str(text);
-+#endif
-+		if (!unicode)
-+			return NULL;
-+		s = escape_unicode((PyUnicodeObject*)unicode);
-+		Py_DECREF(unicode);
-+	}
-+	else
-+		s = escape_unicode((PyUnicodeObject*)text);
-+
-+	/* convert the unicode string into a markup object. */
-+	rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
-+	Py_DECREF(s);
-+	return rv;
-+}
-+
-+
-+static PyObject*
-+escape_silent(PyObject *self, PyObject *text)
-+{
-+	if (text != Py_None)
-+		return escape(self, text);
-+	return PyObject_CallFunctionObjArgs(markup, NULL);
-+}
-+
-+
-+static PyObject*
-+soft_unicode(PyObject *self, PyObject *s)
-+{
-+	if (!PyUnicode_Check(s))
-+#if PY_MAJOR_VERSION < 3
-+		return PyObject_Unicode(s);
-+#else
-+		return PyObject_Str(s);
-+#endif
-+	Py_INCREF(s);
-+	return s;
-+}
-+
-+
-+static PyMethodDef module_methods[] = {
-+	{"escape", (PyCFunction)escape, METH_O,
-+	 "escape(s) -> markup\n\n"
-+	 "Convert the characters &, <, >, ', and \" in string s to HTML-safe\n"
-+	 "sequences.  Use this if you need to display text that might contain\n"
-+	 "such characters in HTML.  Marks return value as markup string."},
-+	{"escape_silent", (PyCFunction)escape_silent, METH_O,
-+	 "escape_silent(s) -> markup\n\n"
-+	 "Like escape but converts None to an empty string."},
-+	{"soft_unicode", (PyCFunction)soft_unicode, METH_O,
-+	 "soft_unicode(object) -> string\n\n"
-+         "Make a string unicode if it isn't already.  That way a markup\n"
-+         "string is not converted back to unicode."},
-+	{NULL, NULL, 0, NULL}		/* Sentinel */
-+};
-+
-+
-+#if PY_MAJOR_VERSION < 3
-+
-+#ifndef PyMODINIT_FUNC	/* declarations for DLL import/export */
-+#define PyMODINIT_FUNC void
-+#endif
-+PyMODINIT_FUNC
-+init_speedups(void)
-+{
-+	if (!init_constants())
-+		return;
-+
-+	Py_InitModule3("markupsafe._speedups", module_methods, "");
-+}
-+
-+#else /* Python 3.x module initialization */
-+
-+static struct PyModuleDef module_definition = {
-+        PyModuleDef_HEAD_INIT,
-+	"markupsafe._speedups",
-+	NULL,
-+	-1,
-+	module_methods,
-+	NULL,
-+	NULL,
-+	NULL,
-+	NULL
-+};
-+
-+PyMODINIT_FUNC
-+PyInit__speedups(void)
-+{
-+	if (!init_constants())
-+		return NULL;
-+
-+	return PyModule_Create(&module_definition);
-+}
-+
-+#endif
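
When the C extension compiles, it is meant to be a drop-in replacement for _native; a rough parity check follows (hedged: in a source-only checkout _speedups may simply be unbuilt, in which case there is nothing to compare):

    from markupsafe import _native

    try:
        from markupsafe import _speedups
    except ImportError:
        _speedups = None  # extension not built; skip the comparison

    if _speedups is not None:
        for s in ["plain", "a < b & c > d", 'mixed "quotes\'"', u"\u2603 <snow>"]:
            # Both implementations must escape byte-for-byte identically.
            assert str(_speedups.escape(s)) == str(_native.escape(s))
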
-diff --git a/third_party/python/MarkupSafe/tox.ini b/third_party/python/MarkupSafe/tox.ini
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/MarkupSafe/tox.ini
-@@ -0,0 +1,44 @@
-+[tox]
-+envlist =
-+    py{37,36,35,34,27,py3,py}
-+    stylecheck
-+    docs-html
-+    coverage-report
-+skip_missing_interpreters = true
-+
-+[testenv]
-+setenv =
-+    COVERAGE_FILE = .coverage.{envname}
-+deps =
-+    pytest-cov
-+commands = pytest --tb=short --cov --cov-report= {posargs}
-+
-+[testenv:stylecheck]
-+deps = pre-commit
-+skip_install = true
-+commands = pre-commit run --all-files --show-diff-on-failure
-+
-+[testenv:docs-html]
-+deps = -r docs/requirements.txt
-+commands = sphinx-build -W -b html -d {envtmpdir}/doctrees docs {envtmpdir}/html
-+
-+[testenv:coverage-report]
-+setenv =
-+    COVERAGE_FILE = .coverage
-+deps = coverage
-+skip_install = true
-+commands =
-+    coverage combine
-+    coverage html
-+    coverage report
-+
-+[testenv:codecov]
-+passenv = CI TRAVIS TRAVIS_*
-+setenv =
-+    COVERAGE_FILE = .coverage
-+deps = codecov
-+skip_install = true
-+commands =
-+    coverage combine
-+    codecov
-+    coverage report
-diff --git a/third_party/python/appdirs/appdirs.py b/third_party/python/appdirs/appdirs.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/appdirs/appdirs.py
-@@ -0,0 +1,608 @@
-+#!/usr/bin/env python
-+# -*- coding: utf-8 -*-
-+# Copyright (c) 2005-2010 ActiveState Software Inc.
-+# Copyright (c) 2013 Eddy Petrișor
-+
-+"""Utilities for determining application-specific dirs.
-+
-+See <http://github.com/ActiveState/appdirs> for details and usage.
-+"""
-+# Dev Notes:
-+# - MSDN on where to store app data files:
-+#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
-+# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
-+# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-+
-+__version_info__ = (1, 4, 3)
-+__version__ = '.'.join(map(str, __version_info__))
-+
-+
-+import sys
-+import os
-+
-+PY3 = sys.version_info[0] == 3
-+
-+if PY3:
-+    unicode = str
-+
-+if sys.platform.startswith('java'):
-+    import platform
-+    os_name = platform.java_ver()[3][0]
-+    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
-+        system = 'win32'
-+    elif os_name.startswith('Mac'): # "Mac OS X", etc.
-+        system = 'darwin'
-+    else: # "Linux", "SunOS", "FreeBSD", etc.
-+        # Setting this to "linux2" is not ideal, but only Windows or Mac
-+        # are actually checked for and the rest of the module expects
-+        # *sys.platform* style strings.
-+        system = 'linux2'
-+else:
-+    system = sys.platform
-+
-+
-+
-+def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
-+    r"""Return full path to the user-specific data dir for this application.
-+
-+        "appname" is the name of application.
-+            If None, just the system directory is returned.
-+        "appauthor" (only used on Windows) is the name of the
-+            appauthor or distributing body for this application. Typically
-+            it is the owning company name. This falls back to appname. You may
-+            pass False to disable it.
-+        "version" is an optional version path element to append to the
-+            path. You might want to use this if you want multiple versions
-+            of your app to be able to run independently. If used, this
-+            would typically be "<major>.<minor>".
-+            Only applied when appname is present.
-+        "roaming" (boolean, default False) can be set True to use the Windows
-+            roaming appdata directory. That means that for users on a Windows
-+            network setup for roaming profiles, this user data will be
-+            sync'd on login. See
-+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-+            for a discussion of issues.
-+
-+    Typical user data directories are:
-+        Mac OS X:               ~/Library/Application Support/<AppName>
-+        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
-+        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
-+        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
-+        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
-+        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
-+
-+    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
-+    That means, by default "~/.local/share/<AppName>".
-+    """
-+    if system == "win32":
-+        if appauthor is None:
-+            appauthor = appname
-+        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
-+        path = os.path.normpath(_get_win_folder(const))
-+        if appname:
-+            if appauthor is not False:
-+                path = os.path.join(path, appauthor, appname)
-+            else:
-+                path = os.path.join(path, appname)
-+    elif system == 'darwin':
-+        path = os.path.expanduser('~/Library/Application Support/')
-+        if appname:
-+            path = os.path.join(path, appname)
-+    else:
-+        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
-+        if appname:
-+            path = os.path.join(path, appname)
-+    if appname and version:
-+        path = os.path.join(path, version)
-+    return path
-+
-+
-+def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
-+    r"""Return full path to the user-shared data dir for this application.
-+
-+        "appname" is the name of application.
-+            If None, just the system directory is returned.
-+        "appauthor" (only used on Windows) is the name of the
-+            appauthor or distributing body for this application. Typically
-+            it is the owning company name. This falls back to appname. You may
-+            pass False to disable it.
-+        "version" is an optional version path element to append to the
-+            path. You might want to use this if you want multiple versions
-+            of your app to be able to run independently. If used, this
-+            would typically be "<major>.<minor>".
-+            Only applied when appname is present.
-+        "multipath" is an optional parameter only applicable to *nix
-+            which indicates that the entire list of data dirs should be
-+            returned. By default, the first item from XDG_DATA_DIRS is
-+            returned, or '/usr/local/share/<AppName>',
-+            if XDG_DATA_DIRS is not set
-+
-+    Typical site data directories are:
-+        Mac OS X:   /Library/Application Support/<AppName>
-+        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
-+        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
-+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-+        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.
-+
-+    For Unix, this is using the $XDG_DATA_DIRS[0] default.
-+
-+    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
-+    """
-+    if system == "win32":
-+        if appauthor is None:
-+            appauthor = appname
-+        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
-+        if appname:
-+            if appauthor is not False:
-+                path = os.path.join(path, appauthor, appname)
-+            else:
-+                path = os.path.join(path, appname)
-+    elif system == 'darwin':
-+        path = os.path.expanduser('/Library/Application Support')
-+        if appname:
-+            path = os.path.join(path, appname)
-+    else:
-+        # XDG default for $XDG_DATA_DIRS
-+        # only first, if multipath is False
-+        path = os.getenv('XDG_DATA_DIRS',
-+                         os.pathsep.join(['/usr/local/share', '/usr/share']))
-+        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
-+        if appname:
-+            if version:
-+                appname = os.path.join(appname, version)
-+            pathlist = [os.sep.join([x, appname]) for x in pathlist]
-+
-+        if multipath:
-+            path = os.pathsep.join(pathlist)
-+        else:
-+            path = pathlist[0]
-+        return path
-+
-+    if appname and version:
-+        path = os.path.join(path, version)
-+    return path
-+
-+
-+def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
-+    r"""Return full path to the user-specific config dir for this application.
-+
-+        "appname" is the name of application.
-+            If None, just the system directory is returned.
-+        "appauthor" (only used on Windows) is the name of the
-+            appauthor or distributing body for this application. Typically
-+            it is the owning company name. This falls back to appname. You may
-+            pass False to disable it.
-+        "version" is an optional version path element to append to the
-+            path. You might want to use this if you want multiple versions
-+            of your app to be able to run independently. If used, this
-+            would typically be "<major>.<minor>".
-+            Only applied when appname is present.
-+        "roaming" (boolean, default False) can be set True to use the Windows
-+            roaming appdata directory. That means that for users on a Windows
-+            network setup for roaming profiles, this user data will be
-+            sync'd on login. See
-+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-+            for a discussion of issues.
-+
-+    Typical user config directories are:
-+        Mac OS X:               same as user_data_dir
-+        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
-+        Win *:                  same as user_data_dir
-+
-+    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
-+    That means, by default "~/.config/<AppName>".
-+    """
-+    if system in ["win32", "darwin"]:
-+        path = user_data_dir(appname, appauthor, None, roaming)
-+    else:
-+        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
-+        if appname:
-+            path = os.path.join(path, appname)
-+    if appname and version:
-+        path = os.path.join(path, version)
-+    return path
-+
-+
-+def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
-+    r"""Return full path to the user-shared data dir for this application.
-+
-+        "appname" is the name of application.
-+            If None, just the system directory is returned.
-+        "appauthor" (only used on Windows) is the name of the
-+            appauthor or distributing body for this application. Typically
-+            it is the owning company name. This falls back to appname. You may
-+            pass False to disable it.
-+        "version" is an optional version path element to append to the
-+            path. You might want to use this if you want multiple versions
-+            of your app to be able to run independently. If used, this
-+            would typically be "<major>.<minor>".
-+            Only applied when appname is present.
-+        "multipath" is an optional parameter only applicable to *nix
-+            which indicates that the entire list of config dirs should be
-+            returned. By default, the first item from XDG_CONFIG_DIRS is
-+            returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
-+
-+    Typical site config directories are:
-+        Mac OS X:   same as site_data_dir
-+        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
-+                    $XDG_CONFIG_DIRS
-+        Win *:      same as site_data_dir
-+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-+
-+    For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
-+
-+    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
-+    """
-+    if system in ["win32", "darwin"]:
-+        path = site_data_dir(appname, appauthor)
-+        if appname and version:
-+            path = os.path.join(path, version)
-+    else:
-+        # XDG default for $XDG_CONFIG_DIRS
-+        # only first, if multipath is False
-+        path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
-+        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
-+        if appname:
-+            if version:
-+                appname = os.path.join(appname, version)
-+            pathlist = [os.sep.join([x, appname]) for x in pathlist]
-+
-+        if multipath:
-+            path = os.pathsep.join(pathlist)
-+        else:
-+            path = pathlist[0]
-+    return path
-+
-+
-+def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
-+    r"""Return full path to the user-specific cache dir for this application.
-+
-+        "appname" is the name of application.
-+            If None, just the system directory is returned.
-+        "appauthor" (only used on Windows) is the name of the
-+            appauthor or distributing body for this application. Typically
-+            it is the owning company name. This falls back to appname. You may
-+            pass False to disable it.
-+        "version" is an optional version path element to append to the
-+            path. You might want to use this if you want multiple versions
-+            of your app to be able to run independently. If used, this
-+            would typically be "<major>.<minor>".
-+            Only applied when appname is present.
-+        "opinion" (boolean) can be False to disable the appending of
-+            "Cache" to the base app data dir for Windows. See
-+            discussion below.
-+
-+    Typical user cache directories are:
-+        Mac OS X:   ~/Library/Caches/<AppName>
-+        Unix:       ~/.cache/<AppName> (XDG default)
-+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
-+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
-+
-+    On Windows the only suggestion in the MSDN docs is that local settings go in
-+    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
-+    app data dir (the default returned by `user_data_dir` above). Apps typically
-+    put cache data somewhere *under* the given dir here. Some examples:
-+        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
-+        ...\Acme\SuperApp\Cache\1.0
-+    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
-+    This can be disabled with the `opinion=False` option.
-+    """
-+    if system == "win32":
-+        if appauthor is None:
-+            appauthor = appname
-+        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
-+        if appname:
-+            if appauthor is not False:
-+                path = os.path.join(path, appauthor, appname)
-+            else:
-+                path = os.path.join(path, appname)
-+            if opinion:
-+                path = os.path.join(path, "Cache")
-+    elif system == 'darwin':
-+        path = os.path.expanduser('~/Library/Caches')
-+        if appname:
-+            path = os.path.join(path, appname)
-+    else:
-+        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
-+        if appname:
-+            path = os.path.join(path, appname)
-+    if appname and version:
-+        path = os.path.join(path, version)
-+    return path
-+
-+
-+def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
-+    r"""Return full path to the user-specific state dir for this application.
-+
-+        "appname" is the name of application.
-+            If None, just the system directory is returned.
-+        "appauthor" (only used on Windows) is the name of the
-+            appauthor or distributing body for this application. Typically
-+            it is the owning company name. This falls back to appname. You may
-+            pass False to disable it.
-+        "version" is an optional version path element to append to the
-+            path. You might want to use this if you want multiple versions
-+            of your app to be able to run independently. If used, this
-+            would typically be "<major>.<minor>".
-+            Only applied when appname is present.
-+        "roaming" (boolean, default False) can be set True to use the Windows
-+            roaming appdata directory. That means that for users on a Windows
-+            network setup for roaming profiles, this user data will be
-+            sync'd on login. See
-+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-+            for a discussion of issues.
-+
-+    Typical user state directories are:
-+        Mac OS X:  same as user_data_dir
-+        Unix:      ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
-+        Win *:     same as user_data_dir
-+
-+    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
-+    to extend the XDG spec and support $XDG_STATE_HOME.
-+
-+    That means, by default "~/.local/state/<AppName>".
-+    """
-+    if system in ["win32", "darwin"]:
-+        path = user_data_dir(appname, appauthor, None, roaming)
-+    else:
-+        path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
-+        if appname:
-+            path = os.path.join(path, appname)
-+    if appname and version:
-+        path = os.path.join(path, version)
-+    return path
-+
-+
-+def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
-+    r"""Return full path to the user-specific log dir for this application.
-+
-+        "appname" is the name of application.
-+            If None, just the system directory is returned.
-+        "appauthor" (only used on Windows) is the name of the
-+            appauthor or distributing body for this application. Typically
-+            it is the owning company name. This falls back to appname. You may
-+            pass False to disable it.
-+        "version" is an optional version path element to append to the
-+            path. You might want to use this if you want multiple versions
-+            of your app to be able to run independently. If used, this
-+            would typically be "<major>.<minor>".
-+            Only applied when appname is present.
-+        "opinion" (boolean) can be False to disable the appending of
-+            "Logs" to the base app data dir for Windows, and "log" to the
-+            base cache dir for Unix. See discussion below.
-+
-+    Typical user log directories are:
-+        Mac OS X:   ~/Library/Logs/<AppName>
-+        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
-+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
-+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
-+
-+    On Windows the only suggestion in the MSDN docs is that local settings
-+    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
-+    examples of what some windows apps use for a logs dir.)
-+
-+    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
-+    value for Windows and appends "log" to the user cache dir for Unix.
-+    This can be disabled with the `opinion=False` option.
-+    """
-+    if system == "darwin":
-+        path = os.path.join(
-+            os.path.expanduser('~/Library/Logs'),
-+            appname)
-+    elif system == "win32":
-+        path = user_data_dir(appname, appauthor, version)
-+        version = False
-+        if opinion:
-+            path = os.path.join(path, "Logs")
-+    else:
-+        path = user_cache_dir(appname, appauthor, version)
-+        version = False
-+        if opinion:
-+            path = os.path.join(path, "log")
-+    if appname and version:
-+        path = os.path.join(path, version)
-+    return path
-+
-+
-+class AppDirs(object):
-+    """Convenience wrapper for getting application dirs."""
-+    def __init__(self, appname=None, appauthor=None, version=None,
-+            roaming=False, multipath=False):
-+        self.appname = appname
-+        self.appauthor = appauthor
-+        self.version = version
-+        self.roaming = roaming
-+        self.multipath = multipath
-+
-+    @property
-+    def user_data_dir(self):
-+        return user_data_dir(self.appname, self.appauthor,
-+                             version=self.version, roaming=self.roaming)
-+
-+    @property
-+    def site_data_dir(self):
-+        return site_data_dir(self.appname, self.appauthor,
-+                             version=self.version, multipath=self.multipath)
-+
-+    @property
-+    def user_config_dir(self):
-+        return user_config_dir(self.appname, self.appauthor,
-+                               version=self.version, roaming=self.roaming)
-+
-+    @property
-+    def site_config_dir(self):
-+        return site_config_dir(self.appname, self.appauthor,
-+                             version=self.version, multipath=self.multipath)
-+
-+    @property
-+    def user_cache_dir(self):
-+        return user_cache_dir(self.appname, self.appauthor,
-+                              version=self.version)
-+
-+    @property
-+    def user_state_dir(self):
-+        return user_state_dir(self.appname, self.appauthor,
-+                              version=self.version)
-+
-+    @property
-+    def user_log_dir(self):
-+        return user_log_dir(self.appname, self.appauthor,
-+                            version=self.version)
-+
-+
-+#---- internal support stuff
-+
-+def _get_win_folder_from_registry(csidl_name):
-+    """This is a fallback technique at best. I'm not sure if using the
-+    registry for this guarantees us the correct answer for all CSIDL_*
-+    names.
-+    """
-+    if PY3:
-+        import winreg as _winreg
-+    else:
-+        import _winreg
-+
-+    shell_folder_name = {
-+        "CSIDL_APPDATA": "AppData",
-+        "CSIDL_COMMON_APPDATA": "Common AppData",
-+        "CSIDL_LOCAL_APPDATA": "Local AppData",
-+    }[csidl_name]
-+
-+    key = _winreg.OpenKey(
-+        _winreg.HKEY_CURRENT_USER,
-+        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
-+    )
-+    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
-+    return dir
-+
-+
-+def _get_win_folder_with_pywin32(csidl_name):
-+    from win32com.shell import shellcon, shell
-+    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
-+    # Try to make this a unicode path because SHGetFolderPath does
-+    # not return unicode strings when there is unicode data in the
-+    # path.
-+    try:
-+        dir = unicode(dir)
-+
-+        # Downgrade to short path name if have highbit chars. See
-+        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-+        has_high_char = False
-+        for c in dir:
-+            if ord(c) > 255:
-+                has_high_char = True
-+                break
-+        if has_high_char:
-+            try:
-+                import win32api
-+                dir = win32api.GetShortPathName(dir)
-+            except ImportError:
-+                pass
-+    except UnicodeError:
-+        pass
-+    return dir
-+
-+
-+def _get_win_folder_with_ctypes(csidl_name):
-+    import ctypes
-+
-+    csidl_const = {
-+        "CSIDL_APPDATA": 26,
-+        "CSIDL_COMMON_APPDATA": 35,
-+        "CSIDL_LOCAL_APPDATA": 28,
-+    }[csidl_name]
-+
-+    buf = ctypes.create_unicode_buffer(1024)
-+    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-+
-+    # Downgrade to short path name if have highbit chars. See
-+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-+    has_high_char = False
-+    for c in buf:
-+        if ord(c) > 255:
-+            has_high_char = True
-+            break
-+    if has_high_char:
-+        buf2 = ctypes.create_unicode_buffer(1024)
-+        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
-+            buf = buf2
-+
-+    return buf.value
-+
-+def _get_win_folder_with_jna(csidl_name):
-+    import array
-+    from com.sun import jna
-+    from com.sun.jna.platform import win32
-+
-+    buf_size = win32.WinDef.MAX_PATH * 2
-+    buf = array.zeros('c', buf_size)
-+    shell = win32.Shell32.INSTANCE
-+    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
-+    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-+
-+    # Downgrade to short path name if have highbit chars. See
-+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-+    has_high_char = False
-+    for c in dir:
-+        if ord(c) > 255:
-+            has_high_char = True
-+            break
-+    if has_high_char:
-+        buf = array.zeros('c', buf_size)
-+        kernel = win32.Kernel32.INSTANCE
-+        if kernel.GetShortPathName(dir, buf, buf_size):
-+            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-+
-+    return dir
-+
-+if system == "win32":
-+    try:
-+        import win32com.shell
-+        _get_win_folder = _get_win_folder_with_pywin32
-+    except ImportError:
-+        try:
-+            from ctypes import windll
-+            _get_win_folder = _get_win_folder_with_ctypes
-+        except ImportError:
-+            try:
-+                import com.sun.jna
-+                _get_win_folder = _get_win_folder_with_jna
-+            except ImportError:
-+                _get_win_folder = _get_win_folder_from_registry
-+
-+
-+#---- self test code
-+
-+if __name__ == "__main__":
-+    appname = "MyApp"
-+    appauthor = "MyCompany"
-+
-+    props = ("user_data_dir",
-+             "user_config_dir",
-+             "user_cache_dir",
-+             "user_state_dir",
-+             "user_log_dir",
-+             "site_data_dir",
-+             "site_config_dir")
-+
-+    print("-- app dirs %s --" % __version__)
-+
-+    print("-- app dirs (with optional 'version')")
-+    dirs = AppDirs(appname, appauthor, version="1.0")
-+    for prop in props:
-+        print("%s: %s" % (prop, getattr(dirs, prop)))
-+
-+    print("\n-- app dirs (without optional 'version')")
-+    dirs = AppDirs(appname, appauthor)
-+    for prop in props:
-+        print("%s: %s" % (prop, getattr(dirs, prop)))
-+
-+    print("\n-- app dirs (without optional 'appauthor')")
-+    dirs = AppDirs(appname)
-+    for prop in props:
-+        print("%s: %s" % (prop, getattr(dirs, prop)))
-+
-+    print("\n-- app dirs (with disabled 'appauthor')")
-+    dirs = AppDirs(appname, appauthor=False)
-+    for prop in props:
-+        print("%s: %s" % (prop, getattr(dirs, prop)))
-diff --git a/third_party/python/diskcache/diskcache/__init__.py b/third_party/python/diskcache/diskcache/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/diskcache/diskcache/__init__.py
-@@ -0,0 +1,51 @@
-+"""
-+DiskCache API Reference
-+=======================
-+
-+The :doc:`tutorial` provides a helpful walkthrough of most methods.
-+
-+"""
-+
-+from .core import Cache, Disk, EmptyDirWarning, JSONDisk, UnknownFileWarning, Timeout
-+from .core import DEFAULT_SETTINGS, ENOVAL, EVICTION_POLICY, UNKNOWN
-+from .fanout import FanoutCache
-+from .persistent import Deque, Index
-+from .recipes import Averager, BoundedSemaphore, Lock, RLock
-+from .recipes import barrier, memoize_stampede, throttle
-+
-+__all__ = [
-+    'Averager',
-+    'BoundedSemaphore',
-+    'Cache',
-+    'DEFAULT_SETTINGS',
-+    'Deque',
-+    'Disk',
-+    'ENOVAL',
-+    'EVICTION_POLICY',
-+    'EmptyDirWarning',
-+    'FanoutCache',
-+    'Index',
-+    'JSONDisk',
-+    'Lock',
-+    'RLock',
-+    'Timeout',
-+    'UNKNOWN',
-+    'UnknownFileWarning',
-+    'barrier',
-+    'memoize_stampede',
-+    'throttle',
-+]
-+
-+try:
-+    from .djangocache import DjangoCache  # pylint: disable=wrong-import-position
-+    __all__.append('DjangoCache')
-+except Exception:  # pylint: disable=broad-except
-+    # Django not installed or not setup so ignore.
-+    pass
-+
-+__title__ = 'diskcache'
-+__version__ = '4.1.0'
-+__build__ = 0x040100
-+__author__ = 'Grant Jenks'
-+__license__ = 'Apache 2.0'
-+__copyright__ = 'Copyright 2016-2018 Grant Jenks'
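
A short, hedged sketch of the core Cache API re-exported above (the directory is a throwaway temp dir; nothing here is specific to this tree):

    import tempfile

    from diskcache import Cache

    # Cache persists entries in a SQLite-backed directory and supports expiry.
    with Cache(tempfile.mkdtemp()) as cache:
        cache.set("answer", 42, expire=60)  # evicted after 60 seconds
        assert cache.get("answer") == 42
        assert cache.get("missing", default="n/a") == "n/a"
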
-diff --git a/third_party/python/diskcache/diskcache/cli.py b/third_party/python/diskcache/diskcache/cli.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/diskcache/diskcache/cli.py
-@@ -0,0 +1,1 @@
-+"Command line interface to disk cache."
-diff --git a/third_party/python/diskcache/diskcache/core.py b/third_party/python/diskcache/diskcache/core.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/diskcache/diskcache/core.py
-@@ -0,0 +1,2481 @@
-+"""Core disk and file backed cache API.
-+
-+"""
-+
-+import codecs
-+import contextlib as cl
-+import errno
-+import functools as ft
-+import io
-+import json
-+import os
-+import os.path as op
-+import pickletools
-+import sqlite3
-+import struct
-+import sys
-+import tempfile
-+import threading
-+import time
-+import warnings
-+import zlib
-+
-+############################################################################
-+# BEGIN Python 2/3 Shims
-+############################################################################
-+
-+if sys.hexversion < 0x03000000:
-+    import cPickle as pickle  # pylint: disable=import-error
-+    # ISSUE #25 Fix for http://bugs.python.org/issue10211
-+    from cStringIO import StringIO as BytesIO  # pylint: disable=import-error
-+    from thread import get_ident  # pylint: disable=import-error,no-name-in-module
-+    TextType = unicode  # pylint: disable=invalid-name,undefined-variable
-+    BytesType = str
-+    INT_TYPES = int, long  # pylint: disable=undefined-variable
-+    range = xrange  # pylint: disable=redefined-builtin,invalid-name,undefined-variable
-+    io_open = io.open  # pylint: disable=invalid-name
-+else:
-+    import pickle
-+    from io import BytesIO  # pylint: disable=ungrouped-imports
-+    from threading import get_ident
-+    TextType = str
-+    BytesType = bytes
-+    INT_TYPES = (int,)
-+    io_open = open  # pylint: disable=invalid-name
-+
-+def full_name(func):
-+    "Return full name of `func` by adding the module and function name."
-+    try:
-+        # The __qualname__ attribute is only available in Python 3.3 and later.
-+        # GrantJ 2019-03-29 Remove after support for Python 2 is dropped.
-+        name = func.__qualname__
-+    except AttributeError:
-+        name = func.__name__
-+    return func.__module__ + '.' + name
-+
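-+# Editor's note (illustration, not upstream code): full_name(json.dumps)
-+# returns 'json.dumps' -- the module name joined to the qualified function
-+# name -- which can serve as a stable, human-readable cache-key prefix.
-+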
-+############################################################################
-+# END Python 2/3 Shims
-+############################################################################
-+
-+try:
-+    WindowsError
-+except NameError:
-+    class WindowsError(Exception):
-+        "Windows error place-holder on platforms without support."
-+
-+class Constant(tuple):
-+    "Pretty display of immutable constant."
-+    def __new__(cls, name):
-+        return tuple.__new__(cls, (name,))
-+
-+    def __repr__(self):
-+        return '%s' % self[0]
-+
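-+# Editor's note (illustration, not upstream code): Constant gives sentinel
-+# values a clean repr, e.g. repr(Constant('ENOVAL')) == 'ENOVAL', so they
-+# read as bare names in debug output while remaining unique tuple objects.
-+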
-+DBNAME = 'cache.db'
-+ENOVAL = Constant('ENOVAL')
-+UNKNOWN = Constant('UNKNOWN')
-+
-+MODE_NONE = 0
-+MODE_RAW = 1
-+MODE_BINARY = 2
-+MODE_TEXT = 3
-+MODE_PICKLE = 4
-+
-+DEFAULT_SETTINGS = {
-+    u'statistics': 0,  # False
-+    u'tag_index': 0,   # False
-+    u'eviction_policy': u'least-recently-stored',
-+    u'size_limit': 2 ** 30,  # 1gb
-+    u'cull_limit': 10,
-+    u'sqlite_auto_vacuum': 1,        # FULL
-+    u'sqlite_cache_size': 2 ** 13,   # 8,192 pages
-+    u'sqlite_journal_mode': u'wal',
-+    u'sqlite_mmap_size': 2 ** 26,    # 64mb
-+    u'sqlite_synchronous': 1,        # NORMAL
-+    u'disk_min_file_size': 2 ** 15,  # 32kb
-+    u'disk_pickle_protocol': pickle.HIGHEST_PROTOCOL,
-+}
-+
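-+# Editor's note (illustration, not upstream code): any of the settings above
-+# may be overridden per instance via keyword arguments, e.g.
-+# Cache(size_limit=2 ** 32) caps the cache at 4gb; 'sqlite_'-prefixed
-+# settings are applied as per-connection pragmas.
-+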
-+METADATA = {
-+    u'count': 0,
-+    u'size': 0,
-+    u'hits': 0,
-+    u'misses': 0,
-+}
-+
-+EVICTION_POLICY = {
-+    'none': {
-+        'init': None,
-+        'get': None,
-+        'cull': None,
-+    },
-+    'least-recently-stored': {
-+        'init': (
-+            'CREATE INDEX IF NOT EXISTS Cache_store_time ON'
-+            ' Cache (store_time)'
-+        ),
-+        'get': None,
-+        'cull': 'SELECT {fields} FROM Cache ORDER BY store_time LIMIT ?',
-+    },
-+    'least-recently-used': {
-+        'init': (
-+            'CREATE INDEX IF NOT EXISTS Cache_access_time ON'
-+            ' Cache (access_time)'
-+        ),
-+        'get': 'access_time = {now}',
-+        'cull': 'SELECT {fields} FROM Cache ORDER BY access_time LIMIT ?',
-+    },
-+    'least-frequently-used': {
-+        'init': (
-+            'CREATE INDEX IF NOT EXISTS Cache_access_count ON'
-+            ' Cache (access_count)'
-+        ),
-+        'get': 'access_count = access_count + 1',
-+        'cull': 'SELECT {fields} FROM Cache ORDER BY access_count LIMIT ?',
-+    },
-+}
-+
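-+# Editor's note (illustration, not upstream code): the 'cull' entries above
-+# are SQL templates; Cache._cull() formats them twice, once with
-+# fields='filename' to collect files for cleanup and once with fields='rowid'
-+# to delete rows, e.g.
-+#   EVICTION_POLICY['least-recently-stored']['cull'].format(fields='rowid')
-+#   # -> 'SELECT rowid FROM Cache ORDER BY store_time LIMIT ?'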
-+
-+class Disk(object):
-+    "Cache key and value serialization for SQLite database and files."
-+    def __init__(self, directory, min_file_size=0, pickle_protocol=0):
-+        """Initialize disk instance.
-+
-+        :param str directory: directory path
-+        :param int min_file_size: minimum size for file use
-+        :param int pickle_protocol: pickle protocol for serialization
-+
-+        """
-+        self._directory = directory
-+        self.min_file_size = min_file_size
-+        self.pickle_protocol = pickle_protocol
-+
-+
-+    def hash(self, key):
-+        """Compute portable hash for `key`.
-+
-+        :param key: key to hash
-+        :return: hash value
-+
-+        """
-+        mask = 0xFFFFFFFF
-+        disk_key, _ = self.put(key)
-+        type_disk_key = type(disk_key)
-+
-+        if type_disk_key is sqlite3.Binary:
-+            return zlib.adler32(disk_key) & mask
-+        elif type_disk_key is TextType:
-+            return zlib.adler32(disk_key.encode('utf-8')) & mask  # pylint: disable=no-member
-+        elif type_disk_key in INT_TYPES:
-+            return disk_key % mask
-+        else:
-+            assert type_disk_key is float
-+            return zlib.adler32(struct.pack('!d', disk_key)) & mask
-+
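-+    # Editor's note (illustration, not upstream code; path is hypothetical):
-+    # for a bytes key the hash is simply the masked Adler-32 checksum, e.g.
-+    #   Disk('/tmp').hash(b'abc') == zlib.adler32(b'abc') & 0xFFFFFFFF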
-+
-+    def put(self, key):
-+        """Convert `key` to fields key and raw for Cache table.
-+
-+        :param key: key to convert
-+        :return: (database key, raw boolean) pair
-+
-+        """
-+        # pylint: disable=bad-continuation,unidiomatic-typecheck
-+        type_key = type(key)
-+
-+        if type_key is BytesType:
-+            return sqlite3.Binary(key), True
-+        elif ((type_key is TextType)
-+                or (type_key in INT_TYPES
-+                    and -9223372036854775808 <= key <= 9223372036854775807)
-+                or (type_key is float)):
-+            return key, True
-+        else:
-+            data = pickle.dumps(key, protocol=self.pickle_protocol)
-+            result = pickletools.optimize(data)
-+            return sqlite3.Binary(result), False
-+
-+
-+    def get(self, key, raw):
-+        """Convert fields `key` and `raw` from Cache table to key.
-+
-+        :param key: database key to convert
-+        :param bool raw: flag indicating raw database storage
-+        :return: corresponding Python key
-+
-+        """
-+        # pylint: disable=no-self-use,unidiomatic-typecheck
-+        if raw:
-+            return BytesType(key) if type(key) is sqlite3.Binary else key
-+        else:
-+            return pickle.load(BytesIO(key))
-+
-+
-+    def store(self, value, read, key=UNKNOWN):
-+        """Convert `value` to fields size, mode, filename, and value for Cache
-+        table.
-+
-+        :param value: value to convert
-+        :param bool read: True when value is file-like object
-+        :param key: key for item (default UNKNOWN)
-+        :return: (size, mode, filename, value) tuple for Cache table
-+
-+        """
-+        # pylint: disable=unidiomatic-typecheck
-+        type_value = type(value)
-+        min_file_size = self.min_file_size
-+
-+        if ((type_value is TextType and len(value) < min_file_size)
-+                or (type_value in INT_TYPES
-+                    and -9223372036854775808 <= value <= 9223372036854775807)
-+                or (type_value is float)):
-+            return 0, MODE_RAW, None, value
-+        elif type_value is BytesType:
-+            if len(value) < min_file_size:
-+                return 0, MODE_RAW, None, sqlite3.Binary(value)
-+            else:
-+                filename, full_path = self.filename(key, value)
-+
-+                with open(full_path, 'wb') as writer:
-+                    writer.write(value)
-+
-+                return len(value), MODE_BINARY, filename, None
-+        elif type_value is TextType:
-+            filename, full_path = self.filename(key, value)
-+
-+            with io_open(full_path, 'w', encoding='UTF-8') as writer:
-+                writer.write(value)
-+
-+            size = op.getsize(full_path)
-+            return size, MODE_TEXT, filename, None
-+        elif read:
-+            size = 0
-+            reader = ft.partial(value.read, 2 ** 22)
-+            filename, full_path = self.filename(key, value)
-+
-+            with open(full_path, 'wb') as writer:
-+                for chunk in iter(reader, b''):
-+                    size += len(chunk)
-+                    writer.write(chunk)
-+
-+            return size, MODE_BINARY, filename, None
-+        else:
-+            result = pickle.dumps(value, protocol=self.pickle_protocol)
-+
-+            if len(result) < min_file_size:
-+                return 0, MODE_PICKLE, None, sqlite3.Binary(result)
-+            else:
-+                filename, full_path = self.filename(key, value)
-+
-+                with open(full_path, 'wb') as writer:
-+                    writer.write(result)
-+
-+                return len(result), MODE_PICKLE, filename, None
-+
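-+    # Editor's note (summary, not upstream code): store() keeps ints in the
-+    # 64-bit range, floats, and strings shorter than min_file_size inline in
-+    # SQLite (MODE_RAW); larger bytes and text spill to randomly named .val
-+    # files; anything else is pickled, inline when small and on disk when
-+    # large.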
-+
-+    def fetch(self, mode, filename, value, read):
-+        """Convert fields `mode`, `filename`, and `value` from Cache table to
-+        value.
-+
-+        :param int mode: value mode raw, binary, text, or pickle
-+        :param str filename: filename of corresponding value
-+        :param value: database value
-+        :param bool read: when True, return an open file handle
-+        :return: corresponding Python value
-+
-+        """
-+        # pylint: disable=no-self-use,unidiomatic-typecheck
-+        if mode == MODE_RAW:
-+            return BytesType(value) if type(value) is sqlite3.Binary else value
-+        elif mode == MODE_BINARY:
-+            if read:
-+                return open(op.join(self._directory, filename), 'rb')
-+            else:
-+                with open(op.join(self._directory, filename), 'rb') as reader:
-+                    return reader.read()
-+        elif mode == MODE_TEXT:
-+            full_path = op.join(self._directory, filename)
-+            with io_open(full_path, 'r', encoding='UTF-8') as reader:
-+                return reader.read()
-+        elif mode == MODE_PICKLE:
-+            if value is None:
-+                with open(op.join(self._directory, filename), 'rb') as reader:
-+                    return pickle.load(reader)
-+            else:
-+                return pickle.load(BytesIO(value))
-+
-+
-+    def filename(self, key=UNKNOWN, value=UNKNOWN):
-+        """Return filename and full-path tuple for file storage.
-+
-+        Filename will be a randomly generated 28 character hexadecimal string
-+        with ".val" suffixed. Two levels of sub-directories will be used to
-+        reduce the size of directories. On older filesystems, lookups in
-+        directories with many files may be slow.
-+
-+        The default implementation ignores the `key` and `value` parameters.
-+
-+        In some scenarios, for example :meth:`Cache.push
-+        <diskcache.Cache.push>`, the `key` or `value` may not be known when the
-+        item is stored in the cache.
-+
-+        :param key: key for item (default UNKNOWN)
-+        :param value: value for item (default UNKNOWN)
-+
-+        """
-+        # pylint: disable=unused-argument
-+        hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
-+        sub_dir = op.join(hex_name[:2], hex_name[2:4])
-+        name = hex_name[4:] + '.val'
-+        directory = op.join(self._directory, sub_dir)
-+
-+        try:
-+            os.makedirs(directory)
-+        except OSError as error:
-+            if error.errno != errno.EEXIST:
-+                raise
-+
-+        filename = op.join(sub_dir, name)
-+        full_path = op.join(self._directory, filename)
-+        return filename, full_path
-+
-+
-+    def remove(self, filename):
-+        """Remove a file given by `filename`.
-+
-+        This method is cross-thread and cross-process safe. If an "error no
-+        entry" occurs, it is suppressed.
-+
-+        :param str filename: relative path to file
-+
-+        """
-+        full_path = op.join(self._directory, filename)
-+
-+        try:
-+            os.remove(full_path)
-+        except WindowsError:
-+            pass
-+        except OSError as error:
-+            if error.errno != errno.ENOENT:
-+                # ENOENT may occur if two caches attempt to delete the same
-+                # file at the same time.
-+                raise
-+
-+
-+class JSONDisk(Disk):
-+    "Cache key and value using JSON serialization with zlib compression."
-+    def __init__(self, directory, compress_level=1, **kwargs):
-+        """Initialize JSON disk instance.
-+
-+        Keys and values are compressed using the zlib library. The
-+        `compress_level` is an integer from 0 to 9 controlling the level of
-+        compression; 1 is fastest and produces the least compression, 9 is
-+        slowest and produces the most compression, and 0 is no compression.
-+
-+        :param str directory: directory path
-+        :param int compress_level: zlib compression level (default 1)
-+        :param kwargs: super class arguments
-+
-+        """
-+        self.compress_level = compress_level
-+        super(JSONDisk, self).__init__(directory, **kwargs)
-+
-+
-+    def put(self, key):
-+        json_bytes = json.dumps(key).encode('utf-8')
-+        data = zlib.compress(json_bytes, self.compress_level)
-+        return super(JSONDisk, self).put(data)
-+
-+
-+    def get(self, key, raw):
-+        data = super(JSONDisk, self).get(key, raw)
-+        return json.loads(zlib.decompress(data).decode('utf-8'))
-+
-+
-+    def store(self, value, read, key=UNKNOWN):
-+        if not read:
-+            json_bytes = json.dumps(value).encode('utf-8')
-+            value = zlib.compress(json_bytes, self.compress_level)
-+        return super(JSONDisk, self).store(value, read, key=key)
-+
-+
-+    def fetch(self, mode, filename, value, read):
-+        data = super(JSONDisk, self).fetch(mode, filename, value, read)
-+        if not read:
-+            data = json.loads(zlib.decompress(data).decode('utf-8'))
-+        return data
-+
-+
-+class Timeout(Exception):
-+    "Database timeout expired."
-+
-+
-+class UnknownFileWarning(UserWarning):
-+    "Warning used by Cache.check for unknown files."
-+
-+
-+class EmptyDirWarning(UserWarning):
-+    "Warning used by Cache.check for empty directories."
-+
-+
-+def args_to_key(base, args, kwargs, typed):
-+    """Create cache key out of function arguments.
-+
-+    :param tuple base: base of key
-+    :param tuple args: function arguments
-+    :param dict kwargs: function keyword arguments
-+    :param bool typed: include types in cache key
-+    :return: cache key tuple
-+
-+    """
-+    key = base + args
-+
-+    if kwargs:
-+        key += (ENOVAL,)
-+        sorted_items = sorted(kwargs.items())
-+
-+        for item in sorted_items:
-+            key += item
-+
-+    if typed:
-+        key += tuple(type(arg) for arg in args)
-+
-+        if kwargs:
-+            key += tuple(type(value) for _, value in sorted_items)
-+
-+    return key
-+
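-+# Editor's note (worked example, not upstream code): for base ('mod', 'f'),
-+# args (1, 2), and kwargs {'b': 3} with typed=False, the key is
-+# ('mod', 'f', 1, 2, ENOVAL, 'b', 3) -- the ENOVAL sentinel separates the
-+# positional arguments from the sorted keyword items.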
-+
-+class Cache(object):
-+    "Disk and file backed cache."
-+    # pylint: disable=bad-continuation
-+    def __init__(self, directory=None, timeout=60, disk=Disk, **settings):
-+        """Initialize cache instance.
-+
-+        :param str directory: cache directory
-+        :param float timeout: SQLite connection timeout
-+        :param disk: Disk type or subclass for serialization
-+        :param settings: any of DEFAULT_SETTINGS
-+
-+        """
-+        try:
-+            assert issubclass(disk, Disk)
-+        except (TypeError, AssertionError):
-+            raise ValueError('disk must subclass diskcache.Disk')
-+
-+        if directory is None:
-+            directory = tempfile.mkdtemp(prefix='diskcache-')
-+        directory = op.expanduser(directory)
-+        directory = op.expandvars(directory)
-+
-+        self._directory = directory
-+        self._timeout = 0  # Manually handle retries during initialization.
-+        self._local = threading.local()
-+        self._txn_id = None
-+
-+        if not op.isdir(directory):
-+            try:
-+                os.makedirs(directory, 0o755)
-+            except OSError as error:
-+                if error.errno != errno.EEXIST:
-+                    raise EnvironmentError(
-+                        error.errno,
-+                        'Cache directory "%s" does not exist'
-+                        ' and could not be created' % self._directory
-+                    )
-+
-+        sql = self._sql_retry
-+
-+        # Setup Settings table.
-+
-+        try:
-+            current_settings = dict(sql(
-+                'SELECT key, value FROM Settings'
-+            ).fetchall())
-+        except sqlite3.OperationalError:
-+            current_settings = {}
-+
-+        sets = DEFAULT_SETTINGS.copy()
-+        sets.update(current_settings)
-+        sets.update(settings)
-+
-+        for key in METADATA:
-+            sets.pop(key, None)
-+
-+        # Chance to set pragmas before any tables are created.
-+
-+        for key, value in sorted(sets.items()):
-+            if key.startswith('sqlite_'):
-+                self.reset(key, value, update=False)
-+
-+        sql('CREATE TABLE IF NOT EXISTS Settings ('
-+            ' key TEXT NOT NULL UNIQUE,'
-+            ' value)'
-+        )
-+
-+        # Setup Disk object (must happen after settings initialized).
-+
-+        kwargs = {
-+            key[5:]: value for key, value in sets.items()
-+            if key.startswith('disk_')
-+        }
-+        self._disk = disk(directory, **kwargs)
-+
-+        # Set cached attributes: updates settings and sets pragmas.
-+
-+        for key, value in sets.items():
-+            query = 'INSERT OR REPLACE INTO Settings VALUES (?, ?)'
-+            sql(query, (key, value))
-+            self.reset(key, value)
-+
-+        for key, value in METADATA.items():
-+            query = 'INSERT OR IGNORE INTO Settings VALUES (?, ?)'
-+            sql(query, (key, value))
-+            self.reset(key)
-+
-+        (self._page_size,), = sql('PRAGMA page_size').fetchall()
-+
-+        # Setup Cache table.
-+
-+        sql('CREATE TABLE IF NOT EXISTS Cache ('
-+            ' rowid INTEGER PRIMARY KEY,'
-+            ' key BLOB,'
-+            ' raw INTEGER,'
-+            ' store_time REAL,'
-+            ' expire_time REAL,'
-+            ' access_time REAL,'
-+            ' access_count INTEGER DEFAULT 0,'
-+            ' tag BLOB,'
-+            ' size INTEGER DEFAULT 0,'
-+            ' mode INTEGER DEFAULT 0,'
-+            ' filename TEXT,'
-+            ' value BLOB)'
-+        )
-+
-+        sql('CREATE UNIQUE INDEX IF NOT EXISTS Cache_key_raw ON'
-+            ' Cache(key, raw)'
-+        )
-+
-+        sql('CREATE INDEX IF NOT EXISTS Cache_expire_time ON'
-+            ' Cache (expire_time)'
-+        )
-+
-+        query = EVICTION_POLICY[self.eviction_policy]['init']
-+
-+        if query is not None:
-+            sql(query)
-+
-+        # Use triggers to keep Metadata updated.
-+
-+        sql('CREATE TRIGGER IF NOT EXISTS Settings_count_insert'
-+            ' AFTER INSERT ON Cache FOR EACH ROW BEGIN'
-+            ' UPDATE Settings SET value = value + 1'
-+            ' WHERE key = "count"; END'
-+        )
-+
-+        sql('CREATE TRIGGER IF NOT EXISTS Settings_count_delete'
-+            ' AFTER DELETE ON Cache FOR EACH ROW BEGIN'
-+            ' UPDATE Settings SET value = value - 1'
-+            ' WHERE key = "count"; END'
-+        )
-+
-+        sql('CREATE TRIGGER IF NOT EXISTS Settings_size_insert'
-+            ' AFTER INSERT ON Cache FOR EACH ROW BEGIN'
-+            ' UPDATE Settings SET value = value + NEW.size'
-+            ' WHERE key = "size"; END'
-+        )
-+
-+        sql('CREATE TRIGGER IF NOT EXISTS Settings_size_update'
-+            ' AFTER UPDATE ON Cache FOR EACH ROW BEGIN'
-+            ' UPDATE Settings'
-+            ' SET value = value + NEW.size - OLD.size'
-+            ' WHERE key = "size"; END'
-+        )
-+
-+        sql('CREATE TRIGGER IF NOT EXISTS Settings_size_delete'
-+            ' AFTER DELETE ON Cache FOR EACH ROW BEGIN'
-+            ' UPDATE Settings SET value = value - OLD.size'
-+            ' WHERE key = "size"; END'
-+        )
-+
-+        # Create tag index if requested.
-+
-+        if self.tag_index:  # pylint: disable=no-member
-+            self.create_tag_index()
-+        else:
-+            self.drop_tag_index()
-+
-+        # Close and re-open database connection with given timeout.
-+
-+        self.close()
-+        self._timeout = timeout
-+        self._sql  # pylint: disable=pointless-statement
-+
-+
-+    @property
-+    def directory(self):
-+        """Cache directory."""
-+        return self._directory
-+
-+
-+    @property
-+    def timeout(self):
-+        """SQLite connection timeout value in seconds."""
-+        return self._timeout
-+
-+
-+    @property
-+    def disk(self):
-+        """Disk used for serialization."""
-+        return self._disk
-+
-+
-+    @property
-+    def _con(self):
-+        # Check process ID to support process forking. If the process
-+        # ID changes, close the connection and update the process ID.
-+
-+        local_pid = getattr(self._local, 'pid', None)
-+        pid = os.getpid()
-+
-+        if local_pid != pid:
-+            self.close()
-+            self._local.pid = pid
-+
-+        con = getattr(self._local, 'con', None)
-+
-+        if con is None:
-+            con = self._local.con = sqlite3.connect(
-+                op.join(self._directory, DBNAME),
-+                timeout=self._timeout,
-+                isolation_level=None,
-+            )
-+
-+            # Some SQLite pragmas work on a per-connection basis so
-+            # query the Settings table and reset the pragmas. The
-+            # Settings table may not exist so catch and ignore the
-+            # OperationalError that may occur.
-+
-+            try:
-+                select = 'SELECT key, value FROM Settings'
-+                settings = con.execute(select).fetchall()
-+            except sqlite3.OperationalError:
-+                pass
-+            else:
-+                for key, value in settings:
-+                    if key.startswith('sqlite_'):
-+                        self.reset(key, value, update=False)
-+
-+        return con
-+
-+
-+    @property
-+    def _sql(self):
-+        return self._con.execute
-+
-+
-+    @property
-+    def _sql_retry(self):
-+        sql = self._sql
-+
-+        # 2018-11-01 GrantJ - Some SQLite builds/versions handle
-+        # the SQLITE_BUSY return value and connection parameter
-+        # "timeout" differently. For a more reliable duration,
-+        # manually retry the statement for 60 seconds. Only used
-+        # by statements which modify the database and do not use
-+        # a transaction (like those in ``__init__`` or ``reset``).
-+        # See Issue #85 and tests/issue_85.py for more details.
-+
-+        def _execute_with_retry(statement, *args, **kwargs):
-+            start = time.time()
-+            while True:
-+                try:
-+                    return sql(statement, *args, **kwargs)
-+                except sqlite3.OperationalError as exc:
-+                    if str(exc) != 'database is locked':
-+                        raise
-+                    diff = time.time() - start
-+                    if diff > 60:
-+                        raise
-+                    time.sleep(0.001)
-+
-+        return _execute_with_retry
-+
-+
-+    @cl.contextmanager
-+    def transact(self, retry=False):
-+        """Context manager to perform a transaction by locking the cache.
-+
-+        While the cache is locked, no other write operation is permitted.
-+        Transactions should therefore be as short as possible. Read and write
-+        operations performed in a transaction are atomic. Read operations may
-+        occur concurrently with a transaction.
-+
-+        Transactions may be nested and may not be shared between threads.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        >>> cache = Cache()
-+        >>> with cache.transact():  # Atomically increment two keys.
-+        ...     _ = cache.incr('total', 123.4)
-+        ...     _ = cache.incr('count', 1)
-+        >>> with cache.transact():  # Atomically calculate average.
-+        ...     average = cache['total'] / cache['count']
-+        >>> average
-+        123.4
-+
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: context manager for use in `with` statement
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        with self._transact(retry=retry):
-+            yield
-+
-+
-+    @cl.contextmanager
-+    def _transact(self, retry=False, filename=None):
-+        sql = self._sql
-+        filenames = []
-+        _disk_remove = self._disk.remove
-+        tid = get_ident()
-+        txn_id = self._txn_id
-+
-+        if tid == txn_id:
-+            begin = False
-+        else:
-+            while True:
-+                try:
-+                    sql('BEGIN IMMEDIATE')
-+                    begin = True
-+                    self._txn_id = tid
-+                    break
-+                except sqlite3.OperationalError:
-+                    if retry:
-+                        continue
-+                    if filename is not None:
-+                        _disk_remove(filename)
-+                    raise Timeout
-+
-+        try:
-+            yield sql, filenames.append
-+        except BaseException:
-+            if begin:
-+                assert self._txn_id == tid
-+                self._txn_id = None
-+                sql('ROLLBACK')
-+            raise
-+        else:
-+            if begin:
-+                assert self._txn_id == tid
-+                self._txn_id = None
-+                sql('COMMIT')
-+            for name in filenames:
-+                if name is not None:
-+                    _disk_remove(name)
-+
-+
-+    def set(self, key, value, expire=None, read=False, tag=None, retry=False):
-+        """Set `key` and `value` item in cache.
-+
-+        When `read` is `True`, `value` should be a file-like object opened
-+        for reading in binary mode.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        :param key: key for item
-+        :param value: value for item
-+        :param float expire: seconds until item expires
-+            (default None, no expiry)
-+        :param bool read: read value as bytes from file (default False)
-+        :param str tag: text to associate with key (default None)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: True if item was set
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        now = time.time()
-+        db_key, raw = self._disk.put(key)
-+        expire_time = None if expire is None else now + expire
-+        size, mode, filename, db_value = self._disk.store(value, read, key=key)
-+        columns = (expire_time, tag, size, mode, filename, db_value)
-+
-+        # The order of SELECT, UPDATE, and INSERT is important below.
-+        #
-+        # Typical cache usage pattern is:
-+        #
-+        # value = cache.get(key)
-+        # if value is None:
-+        #     value = expensive_calculation()
-+        #     cache.set(key, value)
-+        #
-+        # Cache.get does not evict expired keys to avoid writes during lookups.
-+        # Commonly used/expired keys will therefore remain in the cache making
-+        # an UPDATE the preferred path.
-+        #
-+        # The alternative is to assume the key is not present by first trying
-+        # to INSERT and then handling the IntegrityError that occurs from
-+        # violating the UNIQUE constraint. This optimistic approach was
-+        # rejected based on the common cache usage pattern.
-+        #
-+        # INSERT OR REPLACE aka UPSERT is not used because the old filename may
-+        # need cleanup.
-+
-+        with self._transact(retry, filename) as (sql, cleanup):
-+            rows = sql(
-+                'SELECT rowid, filename FROM Cache'
-+                ' WHERE key = ? AND raw = ?',
-+                (db_key, raw),
-+            ).fetchall()
-+
-+            if rows:
-+                (rowid, old_filename), = rows
-+                cleanup(old_filename)
-+                self._row_update(rowid, now, columns)
-+            else:
-+                self._row_insert(db_key, raw, now, columns)
-+
-+            self._cull(now, sql, cleanup)
-+
-+            return True
-+
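-+    # Editor's note (usage sketch, not upstream code; path is hypothetical):
-+    #   cache = Cache('/tmp/demo')
-+    #   cache.set('answer', 42, expire=60, tag='demo')  # -> True
-+    #   cache.get('answer')                             # -> 42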
-+
-+    def __setitem__(self, key, value):
-+        """Set corresponding `value` for `key` in cache.
-+
-+        :param key: key for item
-+        :param value: value for item
-+
-+        """
-+        self.set(key, value, retry=True)
-+
-+
-+    def _row_update(self, rowid, now, columns):
-+        sql = self._sql
-+        expire_time, tag, size, mode, filename, value = columns
-+        sql('UPDATE Cache SET'
-+            ' store_time = ?,'
-+            ' expire_time = ?,'
-+            ' access_time = ?,'
-+            ' access_count = ?,'
-+            ' tag = ?,'
-+            ' size = ?,'
-+            ' mode = ?,'
-+            ' filename = ?,'
-+            ' value = ?'
-+            ' WHERE rowid = ?', (
-+                now,          # store_time
-+                expire_time,
-+                now,          # access_time
-+                0,            # access_count
-+                tag,
-+                size,
-+                mode,
-+                filename,
-+                value,
-+                rowid,
-+            ),
-+        )
-+
-+
-+    def _row_insert(self, key, raw, now, columns):
-+        sql = self._sql
-+        expire_time, tag, size, mode, filename, value = columns
-+        sql('INSERT INTO Cache('
-+            ' key, raw, store_time, expire_time, access_time,'
-+            ' access_count, tag, size, mode, filename, value'
-+            ') VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (
-+                key,
-+                raw,
-+                now,         # store_time
-+                expire_time,
-+                now,         # access_time
-+                0,           # access_count
-+                tag,
-+                size,
-+                mode,
-+                filename,
-+                value,
-+            ),
-+        )
-+
-+
-+    def _cull(self, now, sql, cleanup, limit=None):
-+        cull_limit = self.cull_limit if limit is None else limit
-+
-+        if cull_limit == 0:
-+            return
-+
-+        # Evict expired keys.
-+
-+        select_expired_template = (
-+            'SELECT %s FROM Cache'
-+            ' WHERE expire_time IS NOT NULL AND expire_time < ?'
-+            ' ORDER BY expire_time LIMIT ?'
-+        )
-+
-+        select_expired = select_expired_template % 'filename'
-+        rows = sql(select_expired, (now, cull_limit)).fetchall()
-+
-+        if rows:
-+            delete_expired = (
-+                'DELETE FROM Cache WHERE rowid IN (%s)'
-+                % (select_expired_template % 'rowid')
-+            )
-+            sql(delete_expired, (now, cull_limit))
-+
-+            for filename, in rows:
-+                cleanup(filename)
-+
-+            cull_limit -= len(rows)
-+
-+            if cull_limit == 0:
-+                return
-+
-+        # Evict keys by policy.
-+
-+        select_policy = EVICTION_POLICY[self.eviction_policy]['cull']
-+
-+        if select_policy is None or self.volume() < self.size_limit:
-+            return
-+
-+        select_filename = select_policy.format(fields='filename', now=now)
-+        rows = sql(select_filename, (cull_limit,)).fetchall()
-+
-+        if rows:
-+            delete = (
-+                'DELETE FROM Cache WHERE rowid IN (%s)'
-+                % (select_policy.format(fields='rowid', now=now))
-+            )
-+            sql(delete, (cull_limit,))
-+
-+            for filename, in rows:
-+                cleanup(filename)
-+
-+
-+    def touch(self, key, expire=None, retry=False):
-+        """Touch `key` in cache and update `expire` time.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        :param key: key for item
-+        :param float expire: seconds until item expires
-+            (default None, no expiry)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: True if key was touched
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        now = time.time()
-+        db_key, raw = self._disk.put(key)
-+        expire_time = None if expire is None else now + expire
-+
-+        with self._transact(retry) as (sql, _):
-+            rows = sql(
-+                'SELECT rowid, expire_time FROM Cache'
-+                ' WHERE key = ? AND raw = ?',
-+                (db_key, raw),
-+            ).fetchall()
-+
-+            if rows:
-+                (rowid, old_expire_time), = rows
-+
-+                if old_expire_time is None or old_expire_time > now:
-+                    sql('UPDATE Cache SET expire_time = ? WHERE rowid = ?',
-+                        (expire_time, rowid),
-+                    )
-+                    return True
-+
-+        return False
-+
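-+    # Editor's note (illustration, not upstream code): touch() refreshes the
-+    # expiry without rewriting the value, e.g. cache.touch('answer',
-+    # expire=300) returns True only if the key exists and has not expired.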
-+
-+    def add(self, key, value, expire=None, read=False, tag=None, retry=False):
-+        """Add `key` and `value` item to cache.
-+
-+        Similar to `set`, but only adds to the cache if the key is not present.
-+
-+        Operation is atomic. Only one concurrent add operation for a given key
-+        will succeed.
-+
-+        When `read` is `True`, `value` should be a file-like object opened
-+        for reading in binary mode.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        :param key: key for item
-+        :param value: value for item
-+        :param float expire: seconds until the key expires
-+            (default None, no expiry)
-+        :param bool read: read value as bytes from file (default False)
-+        :param str tag: text to associate with key (default None)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: True if item was added
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        now = time.time()
-+        db_key, raw = self._disk.put(key)
-+        expire_time = None if expire is None else now + expire
-+        size, mode, filename, db_value = self._disk.store(value, read, key=key)
-+        columns = (expire_time, tag, size, mode, filename, db_value)
-+
-+        with self._transact(retry, filename) as (sql, cleanup):
-+            rows = sql(
-+                'SELECT rowid, filename, expire_time FROM Cache'
-+                ' WHERE key = ? AND raw = ?',
-+                (db_key, raw),
-+            ).fetchall()
-+
-+            if rows:
-+                (rowid, old_filename, old_expire_time), = rows
-+
-+                if old_expire_time is None or old_expire_time > now:
-+                    cleanup(filename)
-+                    return False
-+
-+                cleanup(old_filename)
-+                self._row_update(rowid, now, columns)
-+            else:
-+                self._row_insert(db_key, raw, now, columns)
-+
-+            self._cull(now, sql, cleanup)
-+
-+            return True
-+
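-+    # Editor's note (illustration, not upstream code): because add() is an
-+    # atomic set-if-absent, it can serve as a crude lock, e.g.
-+    #   cache.add('job-lock', os.getpid(), expire=30)
-+    # returns True for exactly one concurrent caller until the key expires.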
-+
-+    def incr(self, key, delta=1, default=0, retry=False):
-+        """Increment value by delta for item with key.
-+
-+        If key is missing and default is None then raise KeyError. Else if key
-+        is missing and default is not None then start from default (the new
-+        value is default plus delta).
-+
-+        Operation is atomic. All concurrent increment operations will be
-+        counted individually.
-+
-+        Assumes value may be stored in a SQLite column. Most builds that target
-+        machines with 64-bit pointer widths will support 64-bit signed
-+        integers.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        :param key: key for item
-+        :param int delta: amount to increment (default 1)
-+        :param int default: value if key is missing (default 0)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: new value for item
-+        :raises KeyError: if key is not found and default is None
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        now = time.time()
-+        db_key, raw = self._disk.put(key)
-+        select = (
-+            'SELECT rowid, expire_time, filename, value FROM Cache'
-+            ' WHERE key = ? AND raw = ?'
-+        )
-+
-+        with self._transact(retry) as (sql, cleanup):
-+            rows = sql(select, (db_key, raw)).fetchall()
-+
-+            if not rows:
-+                if default is None:
-+                    raise KeyError(key)
-+
-+                value = default + delta
-+                columns = (None, None) + self._disk.store(value, False, key=key)
-+                self._row_insert(db_key, raw, now, columns)
-+                self._cull(now, sql, cleanup)
-+                return value
-+
-+            (rowid, expire_time, filename, value), = rows
-+
-+            if expire_time is not None and expire_time < now:
-+                if default is None:
-+                    raise KeyError(key)
-+
-+                value = default + delta
-+                columns = (None, None) + self._disk.store(value, False, key=key)
-+                self._row_update(rowid, now, columns)
-+                self._cull(now, sql, cleanup)
-+                cleanup(filename)
-+                return value
-+
-+            value += delta
-+
-+            columns = 'store_time = ?, value = ?'
-+            update_column = EVICTION_POLICY[self.eviction_policy]['get']
-+
-+            if update_column is not None:
-+                columns += ', ' + update_column.format(now=now)
-+
-+            update = 'UPDATE Cache SET %s WHERE rowid = ?' % columns
-+            sql(update, (now, value, rowid))
-+
-+            return value
-+
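-+    # Editor's note (illustration, not upstream code): with the default of 0,
-+    # counters spring into existence on first use, e.g.
-+    #   cache.incr('hits')     # -> 1
-+    #   cache.incr('hits', 5)  # -> 6
-+    # Pass default=None to require that the counter already exists.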
-+
-+    def decr(self, key, delta=1, default=0, retry=False):
-+        """Decrement value by delta for item with key.
-+
-+        If key is missing and default is None then raise KeyError. Else if key
-+        is missing and default is not None then start from default (the new
-+        value is default minus delta).
-+
-+        Operation is atomic. All concurrent decrement operations will be
-+        counted individually.
-+
-+        Unlike Memcached, negative values are supported. Value may be
-+        decremented below zero.
-+
-+        Assumes value may be stored in a SQLite column. Most builds that target
-+        machines with 64-bit pointer widths will support 64-bit signed
-+        integers.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        :param key: key for item
-+        :param int delta: amount to decrement (default 1)
-+        :param int default: value if key is missing (default 0)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: new value for item
-+        :raises KeyError: if key is not found and default is None
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        return self.incr(key, -delta, default, retry)
-+
-+
-+    def get(self, key, default=None, read=False, expire_time=False, tag=False,
-+            retry=False):
-+        """Retrieve value from cache. If `key` is missing, return `default`.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        :param key: key for item
-+        :param default: value to return if key is missing (default None)
-+        :param bool read: if True, return file handle to value
-+            (default False)
-+        :param bool expire_time: if True, return expire_time in tuple
-+            (default False)
-+        :param bool tag: if True, return tag in tuple (default False)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: value for item or default if key not found
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        db_key, raw = self._disk.put(key)
-+        update_column = EVICTION_POLICY[self.eviction_policy]['get']
-+        select = (
-+            'SELECT rowid, expire_time, tag, mode, filename, value'
-+            ' FROM Cache WHERE key = ? AND raw = ?'
-+            ' AND (expire_time IS NULL OR expire_time > ?)'
-+        )
-+
-+        if expire_time and tag:
-+            default = (default, None, None)
-+        elif expire_time or tag:
-+            default = (default, None)
-+
-+        if not self.statistics and update_column is None:
-+            # Fast path, no transaction necessary.
-+
-+            rows = self._sql(select, (db_key, raw, time.time())).fetchall()
-+
-+            if not rows:
-+                return default
-+
-+            (rowid, db_expire_time, db_tag, mode, filename, db_value), = rows
-+
-+            try:
-+                value = self._disk.fetch(mode, filename, db_value, read)
-+            except IOError:
-+                # Key was deleted before we could retrieve result.
-+                return default
-+
-+        else:  # Slow path, transaction required.
-+            cache_hit = (
-+                'UPDATE Settings SET value = value + 1 WHERE key = "hits"'
-+            )
-+            cache_miss = (
-+                'UPDATE Settings SET value = value + 1 WHERE key = "misses"'
-+            )
-+
-+            with self._transact(retry) as (sql, _):
-+                rows = sql(select, (db_key, raw, time.time())).fetchall()
-+
-+                if not rows:
-+                    if self.statistics:
-+                        sql(cache_miss)
-+                    return default
-+
-+                (rowid, db_expire_time, db_tag,
-+                     mode, filename, db_value), = rows
-+
-+                try:
-+                    value = self._disk.fetch(mode, filename, db_value, read)
-+                except IOError as error:
-+                    if error.errno == errno.ENOENT:
-+                        # Key was deleted before we could retrieve result.
-+                        if self.statistics:
-+                            sql(cache_miss)
-+                        return default
-+                    else:
-+                        raise
-+
-+                if self.statistics:
-+                    sql(cache_hit)
-+
-+                now = time.time()
-+                update = 'UPDATE Cache SET %s WHERE rowid = ?'
-+
-+                if update_column is not None:
-+                    sql(update % update_column.format(now=now), (rowid,))
-+
-+        if expire_time and tag:
-+            return (value, db_expire_time, db_tag)
-+        elif expire_time:
-+            return (value, db_expire_time)
-+        elif tag:
-+            return (value, db_tag)
-+        else:
-+            return value
-+
-+
-+    def __getitem__(self, key):
-+        """Return corresponding value for `key` from cache.
-+
-+        :param key: key matching item
-+        :return: corresponding value
-+        :raises KeyError: if key is not found
-+
-+        """
-+        value = self.get(key, default=ENOVAL, retry=True)
-+        if value is ENOVAL:
-+            raise KeyError(key)
-+        return value
-+
-+
-+    def read(self, key, retry=False):
-+        """Return file handle value corresponding to `key` from cache.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        :param key: key matching item
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: file open for reading in binary mode
-+        :raises KeyError: if key is not found
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        handle = self.get(key, default=ENOVAL, read=True, retry=retry)
-+        if handle is ENOVAL:
-+            raise KeyError(key)
-+        return handle
-+
-+
-+    def __contains__(self, key):
-+        """Return `True` if `key` matching item is found in cache.
-+
-+        :param key: key matching item
-+        :return: True if key matching item is found
-+
-+        """
-+        sql = self._sql
-+        db_key, raw = self._disk.put(key)
-+        select = (
-+            'SELECT rowid FROM Cache'
-+            ' WHERE key = ? AND raw = ?'
-+            ' AND (expire_time IS NULL OR expire_time > ?)'
-+        )
-+
-+        rows = sql(select, (db_key, raw, time.time())).fetchall()
-+
-+        return bool(rows)
-+
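-+    # Editor's note (illustration, not upstream code): this enables the `in`
-+    # operator, e.g. `'answer' in cache`, and never updates access statistics
-+    # or eviction metadata.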
-+
-+    def pop(self, key, default=None, expire_time=False, tag=False, retry=False):
-+        """Remove corresponding item for `key` from cache and return value.
-+
-+        If `key` is missing, return `default`.
-+
-+        Operation is atomic. Concurrent operations will be serialized.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        :param key: key for item
-+        :param default: value to return if key is missing (default None)
-+        :param bool expire_time: if True, return expire_time in tuple
-+            (default False)
-+        :param bool tag: if True, return tag in tuple (default False)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: value for item or default if key not found
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        db_key, raw = self._disk.put(key)
-+        select = (
-+            'SELECT rowid, expire_time, tag, mode, filename, value'
-+            ' FROM Cache WHERE key = ? AND raw = ?'
-+            ' AND (expire_time IS NULL OR expire_time > ?)'
-+        )
-+
-+        if expire_time and tag:
-+            default = default, None, None
-+        elif expire_time or tag:
-+            default = default, None
-+
-+        with self._transact(retry) as (sql, _):
-+            rows = sql(select, (db_key, raw, time.time())).fetchall()
-+
-+            if not rows:
-+                return default
-+
-+            (rowid, db_expire_time, db_tag, mode, filename, db_value), = rows
-+
-+            sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
-+
-+        try:
-+            value = self._disk.fetch(mode, filename, db_value, False)
-+        except IOError as error:
-+            if error.errno == errno.ENOENT:
-+                # Key was deleted before we could retrieve result.
-+                return default
-+            else:
-+                raise
-+        finally:
-+            if filename is not None:
-+                self._disk.remove(filename)
-+
-+        if expire_time and tag:
-+            return value, db_expire_time, db_tag
-+        elif expire_time:
-+            return value, db_expire_time
-+        elif tag:
-+            return value, db_tag
-+        else:
-+            return value
-+
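-+    # Editor's note (illustration, not upstream code): pop() is an atomic
-+    # get-and-delete, e.g. cache.pop('answer') returns 42 and removes the
-+    # item; a second call returns the default (None).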
-+
-+    def __delitem__(self, key, retry=True):
-+        """Delete corresponding item for `key` from cache.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default `True`).
-+
-+        :param key: key matching item
-+        :param bool retry: retry if database timeout occurs (default True)
-+        :return: True if item was deleted
-+        :raises KeyError: if key is not found
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        db_key, raw = self._disk.put(key)
-+
-+        with self._transact(retry) as (sql, cleanup):
-+            rows = sql(
-+                'SELECT rowid, filename FROM Cache'
-+                ' WHERE key = ? AND raw = ?'
-+                ' AND (expire_time IS NULL OR expire_time > ?)',
-+                (db_key, raw, time.time()),
-+            ).fetchall()
-+
-+            if not rows:
-+                raise KeyError(key)
-+
-+            (rowid, filename), = rows
-+            sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
-+            cleanup(filename)
-+
-+            return True
-+
-+
-+    def delete(self, key, retry=False):
-+        """Delete corresponding item for `key` from cache.
-+
-+        Missing keys are ignored.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
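-+# Editor's note (usage sketch, not upstream code; path is hypothetical): a
-+# custom Disk is selected through the Cache constructor, and 'disk_'-prefixed
-+# settings are forwarded to it as keyword arguments, e.g.
-+#   cache = Cache('/tmp/jsondemo', disk=JSONDisk, disk_compress_level=6)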
-+
-+        :param key: key matching item
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: True if item was deleted
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        try:
-+            return self.__delitem__(key, retry=retry)
-+        except KeyError:
-+            return False
-+
-+
-+    def push(self, value, prefix=None, side='back', expire=None, read=False,
-+             tag=None, retry=False):
-+        """Push `value` onto `side` of queue identified by `prefix` in cache.
-+
-+        When prefix is None, integer keys are used. Otherwise, string keys are
-+        used in the format "prefix-integer". Integer starts at 500 trillion.
-+
-+        Defaults to pushing value on back of queue. Set side to 'front' to push
-+        value on front of queue. Side must be one of 'back' or 'front'.
-+
-+        Operation is atomic. Concurrent operations will be serialized.
-+
-+        When `read` is `True`, `value` should be a file-like object opened
-+        for reading in binary mode.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        See also `Cache.pull`.
-+
-+        >>> cache = Cache()
-+        >>> print(cache.push('first value'))
-+        500000000000000
-+        >>> cache.get(500000000000000)
-+        'first value'
-+        >>> print(cache.push('second value'))
-+        500000000000001
-+        >>> print(cache.push('third value', side='front'))
-+        499999999999999
-+        >>> cache.push(1234, prefix='userids')
-+        'userids-500000000000000'
-+
-+        :param value: value for item
-+        :param str prefix: key prefix (default None, key is integer)
-+        :param str side: either 'back' or 'front' (default 'back')
-+        :param float expire: seconds until the key expires
-+            (default None, no expiry)
-+        :param bool read: read value as bytes from file (default False)
-+        :param str tag: text to associate with key (default None)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: key for item in cache
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        if prefix is None:
-+            min_key = 0
-+            max_key = 999999999999999
-+        else:
-+            min_key = prefix + '-000000000000000'
-+            max_key = prefix + '-999999999999999'
-+
-+        now = time.time()
-+        raw = True
-+        expire_time = None if expire is None else now + expire
-+        size, mode, filename, db_value = self._disk.store(value, read)
-+        columns = (expire_time, tag, size, mode, filename, db_value)
-+        order = {'back': 'DESC', 'front': 'ASC'}
-+        select = (
-+            'SELECT key FROM Cache'
-+            ' WHERE ? < key AND key < ? AND raw = ?'
-+            ' ORDER BY key %s LIMIT 1'
-+        ) % order[side]
-+
-+        with self._transact(retry, filename) as (sql, cleanup):
-+            rows = sql(select, (min_key, max_key, raw)).fetchall()
-+
-+            if rows:
-+                (key,), = rows
-+
-+                if prefix is not None:
-+                    num = int(key[(key.rfind('-') + 1):])
-+                else:
-+                    num = key
-+
-+                if side == 'back':
-+                    num += 1
-+                else:
-+                    assert side == 'front'
-+                    num -= 1
-+            else:
-+                num = 500000000000000
-+
-+            if prefix is not None:
-+                db_key = '{0}-{1:015d}'.format(prefix, num)
-+            else:
-+                db_key = num
-+
-+            self._row_insert(db_key, raw, now, columns)
-+            self._cull(now, sql, cleanup)
-+
-+            return db_key
-+
-+
-+    def pull(self, prefix=None, default=(None, None), side='front',
-+             expire_time=False, tag=False, retry=False):
-+        """Pull key and value item pair from `side` of queue in cache.
-+
-+        When prefix is None, integer keys are used. Otherwise, string keys are
-+        used in the format "prefix-integer". Integer starts at 500 trillion.
-+
-+        If queue is empty, return default.
-+
-+        Defaults to pulling key and value item pairs from front of queue. Set
-+        side to 'back' to pull from back of queue. Side must be one of 'front'
-+        or 'back'.
-+
-+        Operation is atomic. Concurrent operations will be serialized.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        See also `Cache.push` and `Cache.get`.
-+
-+        >>> cache = Cache()
-+        >>> cache.pull()
-+        (None, None)
-+        >>> for letter in 'abc':
-+        ...     print(cache.push(letter))
-+        500000000000000
-+        500000000000001
-+        500000000000002
-+        >>> key, value = cache.pull()
-+        >>> print(key)
-+        500000000000000
-+        >>> value
-+        'a'
-+        >>> _, value = cache.pull(side='back')
-+        >>> value
-+        'c'
-+        >>> cache.push(1234, 'userids')
-+        'userids-500000000000000'
-+        >>> _, value = cache.pull('userids')
-+        >>> value
-+        1234
-+
-+        :param str prefix: key prefix (default None, key is integer)
-+        :param default: value to return if key is missing
-+            (default (None, None))
-+        :param str side: either 'front' or 'back' (default 'front')
-+        :param bool expire_time: if True, return expire_time in tuple
-+            (default False)
-+        :param bool tag: if True, return tag in tuple (default False)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: key and value item pair or default if queue is empty
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        # Caution: Nearly identical code exists in Cache.peek
-+        if prefix is None:
-+            min_key = 0
-+            max_key = 999999999999999
-+        else:
-+            min_key = prefix + '-000000000000000'
-+            max_key = prefix + '-999999999999999'
-+
-+        order = {'front': 'ASC', 'back': 'DESC'}
-+        select = (
-+            'SELECT rowid, key, expire_time, tag, mode, filename, value'
-+            ' FROM Cache WHERE ? < key AND key < ? AND raw = 1'
-+            ' ORDER BY key %s LIMIT 1'
-+        ) % order[side]
-+
-+        if expire_time and tag:
-+            default = default, None, None
-+        elif expire_time or tag:
-+            default = default, None
-+
-+        while True:
-+            while True:
-+                with self._transact(retry) as (sql, cleanup):
-+                    rows = sql(select, (min_key, max_key)).fetchall()
-+
-+                    if not rows:
-+                        return default
-+
-+                    (rowid, key, db_expire, db_tag, mode, name,
-+                     db_value), = rows
-+
-+                    sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
-+
-+                    if db_expire is not None and db_expire < time.time():
-+                        cleanup(name)
-+                    else:
-+                        break
-+
-+            try:
-+                value = self._disk.fetch(mode, name, db_value, False)
-+            except IOError as error:
-+                if error.errno == errno.ENOENT:
-+                    # Key was deleted before we could retrieve result.
-+                    continue
-+                else:
-+                    raise
-+            finally:
-+                if name is not None:
-+                    self._disk.remove(name)
-+            break
-+
-+        if expire_time and tag:
-+            return (key, value), db_expire, db_tag
-+        elif expire_time:
-+            return (key, value), db_expire
-+        elif tag:
-+            return (key, value), db_tag
-+        else:
-+            return key, value
-+
-+
-+    def peek(self, prefix=None, default=(None, None), side='front',
-+             expire_time=False, tag=False, retry=False):
-+        """Peek at key and value item pair from `side` of queue in cache.
-+
-+        When prefix is None, integer keys are used. Otherwise, string keys are
-+        used in the format "prefix-integer". Integer starts at 500 trillion.
-+
-+        If queue is empty, return default.
-+
-+        Defaults to peeking at key and value item pairs from front of queue.
-+        Set side to 'back' to peek at back of queue. Side must be one of
-+        'front' or 'back'.
-+
-+        Expired items are deleted from cache. Operation is atomic. Concurrent
-+        operations will be serialized.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        See also `Cache.pull` and `Cache.push`.
-+
-+        >>> cache = Cache()
-+        >>> for letter in 'abc':
-+        ...     print(cache.push(letter))
-+        500000000000000
-+        500000000000001
-+        500000000000002
-+        >>> key, value = cache.peek()
-+        >>> print(key)
-+        500000000000000
-+        >>> value
-+        'a'
-+        >>> key, value = cache.peek(side='back')
-+        >>> print(key)
-+        500000000000002
-+        >>> value
-+        'c'
-+
-+        :param str prefix: key prefix (default None, key is integer)
-+        :param default: value to return if key is missing
-+            (default (None, None))
-+        :param str side: either 'front' or 'back' (default 'front')
-+        :param bool expire_time: if True, return expire_time in tuple
-+            (default False)
-+        :param bool tag: if True, return tag in tuple (default False)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: key and value item pair or default if queue is empty
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        # Caution: Nearly identical code exists in Cache.pull
-+        if prefix is None:
-+            min_key = 0
-+            max_key = 999999999999999
-+        else:
-+            min_key = prefix + '-000000000000000'
-+            max_key = prefix + '-999999999999999'
-+
-+        order = {'front': 'ASC', 'back': 'DESC'}
-+        select = (
-+            'SELECT rowid, key, expire_time, tag, mode, filename, value'
-+            ' FROM Cache WHERE ? < key AND key < ? AND raw = 1'
-+            ' ORDER BY key %s LIMIT 1'
-+        ) % order[side]
-+
-+        if expire_time and tag:
-+            default = default, None, None
-+        elif expire_time or tag:
-+            default = default, None
-+
-+        while True:
-+            while True:
-+                with self._transact(retry) as (sql, cleanup):
-+                    rows = sql(select, (min_key, max_key)).fetchall()
-+
-+                    if not rows:
-+                        return default
-+
-+                    (rowid, key, db_expire, db_tag, mode, name,
-+                     db_value), = rows
-+
-+                    if db_expire is not None and db_expire < time.time():
-+                        sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
-+                        cleanup(name)
-+                    else:
-+                        break
-+
-+            try:
-+                value = self._disk.fetch(mode, name, db_value, False)
-+            except IOError as error:
-+                if error.errno == errno.ENOENT:
-+                    # Key was deleted before we could retrieve result.
-+                    continue
-+                else:
-+                    raise
-+            break
-+
-+        if expire_time and tag:
-+            return (key, value), db_expire, db_tag
-+        elif expire_time:
-+            return (key, value), db_expire
-+        elif tag:
-+            return (key, value), db_tag
-+        else:
-+            return key, value
-+
-+
-+    def peekitem(self, last=True, expire_time=False, tag=False, retry=False):
-+        """Peek at key and value item pair in cache based on iteration order.
-+
-+        Expired items are deleted from cache. Operation is atomic. Concurrent
-+        operations will be serialized.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
-+        >>> cache = Cache()
-+        >>> for num, letter in enumerate('abc'):
-+        ...     cache[letter] = num
-+        >>> cache.peekitem()
-+        ('c', 2)
-+        >>> cache.peekitem(last=False)
-+        ('a', 0)
-+
-+        :param bool last: last item in iteration order (default True)
-+        :param bool expire_time: if True, return expire_time in tuple
-+            (default False)
-+        :param bool tag: if True, return tag in tuple (default False)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: key and value item pair
-+        :raises KeyError: if cache is empty
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        order = ('ASC', 'DESC')
-+        select = (
-+            'SELECT rowid, key, raw, expire_time, tag, mode, filename, value'
-+            ' FROM Cache ORDER BY rowid %s LIMIT 1'
-+        ) % order[last]
-+
-+        while True:
-+            while True:
-+                with self._transact(retry) as (sql, cleanup):
-+                    rows = sql(select).fetchall()
-+
-+                    if not rows:
-+                        raise KeyError('dictionary is empty')
-+
-+                    (rowid, db_key, raw, db_expire, db_tag, mode, name,
-+                     db_value), = rows
-+
-+                    if db_expire is not None and db_expire < time.time():
-+                        sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
-+                        cleanup(name)
-+                    else:
-+                        break
-+
-+            key = self._disk.get(db_key, raw)
-+
-+            try:
-+                value = self._disk.fetch(mode, name, db_value, False)
-+            except IOError as error:
-+                if error.errno == errno.ENOENT:
-+                    # Key was deleted before we could retrieve result.
-+                    continue
-+                else:
-+                    raise
-+            break
-+
-+        if expire_time and tag:
-+            return (key, value), db_expire, db_tag
-+        elif expire_time:
-+            return (key, value), db_expire
-+        elif tag:
-+            return (key, value), db_tag
-+        else:
-+            return key, value
-+
-+
-+    def memoize(self, name=None, typed=False, expire=None, tag=None):
-+        """Memoizing cache decorator.
-+
-+        Decorator to wrap callable with memoizing function using cache.
-+        Repeated calls with the same arguments will look up the result in the
-+        cache and avoid function evaluation.
-+
-+        If name is set to None (default), the callable name will be determined
-+        automatically.
-+
-+        When expire is set to zero, function results will not be set in the
-+        cache. Cache lookups still occur, however. Read
-+        :doc:`case-study-landing-page-caching` for example usage.
-+
-+        If typed is set to True, function arguments of different types will be
-+        cached separately. For example, f(3) and f(3.0) will be treated as
-+        distinct calls with distinct results.
-+
-+        The original underlying function is accessible through the __wrapped__
-+        attribute. This is useful for introspection, for bypassing the cache,
-+        or for rewrapping the function with a different cache.
-+
-+        >>> from diskcache import Cache
-+        >>> cache = Cache()
-+        >>> @cache.memoize(expire=1, tag='fib')
-+        ... def fibonacci(number):
-+        ...     if number == 0:
-+        ...         return 0
-+        ...     elif number == 1:
-+        ...         return 1
-+        ...     else:
-+        ...         return fibonacci(number - 1) + fibonacci(number - 2)
-+        >>> print(fibonacci(100))
-+        354224848179261915075
-+
-+        An additional `__cache_key__` attribute can be used to generate the
-+        cache key used for the given arguments.
-+
-+        >>> key = fibonacci.__cache_key__(100)
-+        >>> print(cache[key])
-+        354224848179261915075
-+
-+        Remember to call memoize when decorating a callable. If you forget,
-+        then a TypeError will occur. Note the lack of parentheses after
-+        memoize below:
-+
-+        >>> @cache.memoize
-+        ... def test():
-+        ...     pass
-+        Traceback (most recent call last):
-+            ...
-+        TypeError: name cannot be callable
-+
-+        :param str name: name given for callable (default None, automatic)
-+        :param bool typed: cache different types separately (default False)
-+        :param float expire: seconds until arguments expire
-+            (default None, no expiry)
-+        :param str tag: text to associate with arguments (default None)
-+        :return: callable decorator
-+
-+        """
-+        # Caution: Nearly identical code exists in DjangoCache.memoize
-+        if callable(name):
-+            raise TypeError('name cannot be callable')
-+
-+        def decorator(func):
-+            "Decorator created by memoize() for callable `func`."
-+            base = (full_name(func),) if name is None else (name,)
-+
-+            @ft.wraps(func)
-+            def wrapper(*args, **kwargs):
-+                "Wrapper for callable to cache arguments and return values."
-+                key = wrapper.__cache_key__(*args, **kwargs)
-+                result = self.get(key, default=ENOVAL, retry=True)
-+
-+                if result is ENOVAL:
-+                    result = func(*args, **kwargs)
-+                    if expire is None or expire > 0:
-+                        self.set(key, result, expire, tag=tag, retry=True)
-+
-+                return result
-+
-+            def __cache_key__(*args, **kwargs):
-+                "Make key for cache given function arguments."
-+                return args_to_key(base, args, kwargs, typed)
-+
-+            wrapper.__cache_key__ = __cache_key__
-+            return wrapper
-+
-+        return decorator
-+
-+
-+    def check(self, fix=False, retry=False):
-+        """Check database and file system consistency.
-+
-+        Intended for use in testing and post-mortem error analysis.
-+
-+        While checking the Cache table for consistency, a writer lock is held
-+        on the database. The lock blocks other cache clients from writing to
-+        the database. For caches with many file references, the lock may be
-+        held for a long time. For example, local benchmarking shows that a
-+        cache with 1,000 file references takes ~60ms to check.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
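-+        For example (a minimal sketch; a freshly created cache should
-+        report no inconsistencies, so the returned list of warnings is
-+        empty):
-+
-+        >>> cache = Cache()
-+        >>> cache.check(fix=True)
-+        []
-+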
-+        :param bool fix: correct inconsistencies
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: list of warnings
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        # pylint: disable=access-member-before-definition,W0201
-+        with warnings.catch_warnings(record=True) as warns:
-+            sql = self._sql
-+
-+            # Check integrity of database.
-+
-+            rows = sql('PRAGMA integrity_check').fetchall()
-+
-+            if len(rows) != 1 or rows[0][0] != u'ok':
-+                for message, in rows:
-+                    warnings.warn(message)
-+
-+            if fix:
-+                sql('VACUUM')
-+
-+            with self._transact(retry) as (sql, _):
-+
-+                # Check Cache.filename against file system.
-+
-+                filenames = set()
-+                select = (
-+                    'SELECT rowid, size, filename FROM Cache'
-+                    ' WHERE filename IS NOT NULL'
-+                )
-+
-+                rows = sql(select).fetchall()
-+
-+                for rowid, size, filename in rows:
-+                    full_path = op.join(self._directory, filename)
-+                    filenames.add(full_path)
-+
-+                    if op.exists(full_path):
-+                        real_size = op.getsize(full_path)
-+
-+                        if size != real_size:
-+                            message = 'wrong file size: %s, %d != %d'
-+                            args = full_path, real_size, size
-+                            warnings.warn(message % args)
-+
-+                            if fix:
-+                                sql('UPDATE Cache SET size = ?'
-+                                    ' WHERE rowid = ?',
-+                                    (real_size, rowid),
-+                                )
-+
-+                        continue
-+
-+                    warnings.warn('file not found: %s' % full_path)
-+
-+                    if fix:
-+                        sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
-+
-+                # Check file system against Cache.filename.
-+
-+                for dirpath, _, files in os.walk(self._directory):
-+                    paths = [op.join(dirpath, filename) for filename in files]
-+                    error = set(paths) - filenames
-+
-+                    for full_path in error:
-+                        if DBNAME in full_path:
-+                            continue
-+
-+                        message = 'unknown file: %s' % full_path
-+                        warnings.warn(message, UnknownFileWarning)
-+
-+                        if fix:
-+                            os.remove(full_path)
-+
-+                # Check for empty directories.
-+
-+                for dirpath, dirs, files in os.walk(self._directory):
-+                    if not (dirs or files):
-+                        message = 'empty directory: %s' % dirpath
-+                        warnings.warn(message, EmptyDirWarning)
-+
-+                        if fix:
-+                            os.rmdir(dirpath)
-+
-+                # Check Settings.count against count of Cache rows.
-+
-+                self.reset('count')
-+                (count,), = sql('SELECT COUNT(key) FROM Cache').fetchall()
-+
-+                if self.count != count:
-+                    message = 'Settings.count != COUNT(Cache.key); %d != %d'
-+                    warnings.warn(message % (self.count, count))
-+
-+                    if fix:
-+                        sql('UPDATE Settings SET value = ? WHERE key = ?',
-+                            (count, 'count'),
-+                        )
-+
-+                # Check Settings.size against sum of Cache.size column.
-+
-+                self.reset('size')
-+                select_size = 'SELECT COALESCE(SUM(size), 0) FROM Cache'
-+                (size,), = sql(select_size).fetchall()
-+
-+                if self.size != size:
-+                    message = 'Settings.size != SUM(Cache.size); %d != %d'
-+                    warnings.warn(message % (self.size, size))
-+
-+                    if fix:
-+                        sql('UPDATE Settings SET value = ? WHERE key = ?',
-+                            (size, 'size'),
-+                        )
-+
-+            return warns
-+
-+
-+    def create_tag_index(self):
-+        """Create tag index on cache database.
-+
-+        It is better to initialize the cache with `tag_index=True` than to
-+        use this method.
-+
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        sql = self._sql
-+        sql('CREATE INDEX IF NOT EXISTS Cache_tag_rowid ON Cache(tag, rowid)')
-+        self.reset('tag_index', 1)
-+
-+
-+    def drop_tag_index(self):
-+        """Drop tag index on cache database.
-+
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        sql = self._sql
-+        sql('DROP INDEX IF EXISTS Cache_tag_rowid')
-+        self.reset('tag_index', 0)
-+
-+
-+    def evict(self, tag, retry=False):
-+        """Remove items with matching `tag` from cache.
-+
-+        Removing items is an iterative process. In each iteration, a subset of
-+        items is removed. Concurrent writes may occur between iterations.
-+
-+        If a :exc:`Timeout` occurs, the first element of the exception's
-+        `args` attribute will be the number of items removed before the
-+        exception occurred.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
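-+        For example (a minimal sketch; the tag name is arbitrary):
-+
-+        >>> cache = Cache()
-+        >>> for name in ['cod', 'eel', 'gar']:
-+        ...     _ = cache.set(name, None, tag='fish')
-+        >>> cache.evict('fish')
-+        3
-+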
-+        :param str tag: tag identifying items
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: count of rows removed
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        select = (
-+            'SELECT rowid, filename FROM Cache'
-+            ' WHERE tag = ? AND rowid > ?'
-+            ' ORDER BY rowid LIMIT ?'
-+        )
-+        args = [tag, 0, 100]
-+        return self._select_delete(select, args, arg_index=1, retry=retry)
-+
-+
-+    def expire(self, now=None, retry=False):
-+        """Remove expired items from cache.
-+
-+        Removing items is an iterative process. In each iteration, a subset of
-+        items is removed. Concurrent writes may occur between iterations.
-+
-+        If a :exc:`Timeout` occurs, the first element of the exception's
-+        `args` attribute will be the number of items removed before the
-+        exception occurred.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
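-+        For example (a minimal sketch; automatic culling is disabled so
-+        that all three expired items remain for `expire` to remove):
-+
-+        >>> import time
-+        >>> cache = Cache()
-+        >>> _ = cache.reset('cull_limit', 0)
-+        >>> for num in range(3):
-+        ...     _ = cache.set(num, num, expire=1e-9)
-+        >>> time.sleep(0.01)
-+        >>> cache.expire()
-+        3
-+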
-+        :param float now: current time (default None, ``time.time()`` used)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: count of items removed
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        select = (
-+            'SELECT rowid, expire_time, filename FROM Cache'
-+            ' WHERE ? < expire_time AND expire_time < ?'
-+            ' ORDER BY expire_time LIMIT ?'
-+        )
-+        args = [0, now or time.time(), 100]
-+        return self._select_delete(select, args, row_index=1, retry=retry)
-+
-+
-+    def cull(self, retry=False):
-+        """Cull items from cache until volume is less than size limit.
-+
-+        Removing items is an iterative process. In each iteration, a subset of
-+        items is removed. Concurrent writes may occur between iterations.
-+
-+        If a :exc:`Timeout` occurs, the first element of the exception's
-+        `args` attribute will be the number of items removed before the
-+        exception occurred.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
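-+        For example (a sketch; `cull_limit=0` disables culling during
-+        `set` so the explicit call does the work, and the count removed
-+        depends on on-disk sizes, so it is bound to a name rather than
-+        shown):
-+
-+        >>> cache = Cache(size_limit=10000, cull_limit=0)
-+        >>> for num in range(100):
-+        ...     _ = cache.set(num, 'a' * 1000)
-+        >>> removed = cache.cull()
-+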
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: count of items removed
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        now = time.time()
-+
-+        # Remove expired items.
-+
-+        count = self.expire(now)
-+
-+        # Remove items by policy.
-+
-+        select_policy = EVICTION_POLICY[self.eviction_policy]['cull']
-+
-+        if select_policy is None:
-+            return count
-+
-+        select_filename = select_policy.format(fields='filename', now=now)
-+
-+        try:
-+            while self.volume() > self.size_limit:
-+                with self._transact(retry) as (sql, cleanup):
-+                    rows = sql(select_filename, (10,)).fetchall()
-+
-+                    if not rows:
-+                        break
-+
-+                    count += len(rows)
-+                    delete = (
-+                        'DELETE FROM Cache WHERE rowid IN (%s)'
-+                        % select_policy.format(fields='rowid', now=now)
-+                    )
-+                    sql(delete, (10,))
-+
-+                    for filename, in rows:
-+                        cleanup(filename)
-+        except Timeout:
-+            raise Timeout(count)
-+
-+        return count
-+
-+
-+    def clear(self, retry=False):
-+        """Remove all items from cache.
-+
-+        Removing items is an iterative process. In each iteration, a subset of
-+        items is removed. Concurrent writes may occur between iterations.
-+
-+        If a :exc:`Timeout` occurs, the first element of the exception's
-+        `args` attribute will be the number of items removed before the
-+        exception occurred.
-+
-+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
-+        `False` (default).
-+
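-+        For example (a minimal sketch):
-+
-+        >>> cache = Cache()
-+        >>> for num in range(10):
-+        ...     _ = cache.set(num, num)
-+        >>> cache.clear()
-+        10
-+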
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: count of rows removed
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        select = (
-+            'SELECT rowid, filename FROM Cache'
-+            ' WHERE rowid > ?'
-+            ' ORDER BY rowid LIMIT ?'
-+        )
-+        args = [0, 100]
-+        return self._select_delete(select, args, retry=retry)
-+
-+
-+    def _select_delete(self, select, args, row_index=0, arg_index=0,
-+                       retry=False):
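-+        "Delete rows matched by `select` in batches; return count removed."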
-+        count = 0
-+        delete = 'DELETE FROM Cache WHERE rowid IN (%s)'
-+
-+        try:
-+            while True:
-+                with self._transact(retry) as (sql, cleanup):
-+                    rows = sql(select, args).fetchall()
-+
-+                    if not rows:
-+                        break
-+
-+                    count += len(rows)
-+                    sql(delete % ','.join(str(row[0]) for row in rows))
-+
-+                    for row in rows:
-+                        args[arg_index] = row[row_index]
-+                        cleanup(row[-1])
-+
-+        except Timeout:
-+            raise Timeout(count)
-+
-+        return count
-+
-+
-+    def iterkeys(self, reverse=False):
-+        """Iterate Cache keys in database sort order.
-+
-+        >>> cache = Cache()
-+        >>> for key in [4, 1, 3, 0, 2]:
-+        ...     cache[key] = key
-+        >>> list(cache.iterkeys())
-+        [0, 1, 2, 3, 4]
-+        >>> list(cache.iterkeys(reverse=True))
-+        [4, 3, 2, 1, 0]
-+
-+        :param bool reverse: reverse sort order (default False)
-+        :return: iterator of Cache keys
-+
-+        """
-+        sql = self._sql
-+        limit = 100
-+        _disk_get = self._disk.get
-+
-+        if reverse:
-+            select = (
-+                'SELECT key, raw FROM Cache'
-+                ' ORDER BY key DESC, raw DESC LIMIT 1'
-+            )
-+            iterate = (
-+                'SELECT key, raw FROM Cache'
-+                ' WHERE key = ? AND raw < ? OR key < ?'
-+                ' ORDER BY key DESC, raw DESC LIMIT ?'
-+            )
-+        else:
-+            select = (
-+                'SELECT key, raw FROM Cache'
-+                ' ORDER BY key ASC, raw ASC LIMIT 1'
-+            )
-+            iterate = (
-+                'SELECT key, raw FROM Cache'
-+                ' WHERE key = ? AND raw > ? OR key > ?'
-+                ' ORDER BY key ASC, raw ASC LIMIT ?'
-+            )
-+
-+        row = sql(select).fetchall()
-+
-+        if row:
-+            (key, raw), = row
-+        else:
-+            return
-+
-+        yield _disk_get(key, raw)
-+
-+        while True:
-+            rows = sql(iterate, (key, raw, key, limit)).fetchall()
-+
-+            if not rows:
-+                break
-+
-+            for key, raw in rows:
-+                yield _disk_get(key, raw)
-+
-+
-+    def _iter(self, ascending=True):
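-+        "Iterate keys in rowid order; yield None once to signal ready."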
-+        sql = self._sql
-+        rows = sql('SELECT MAX(rowid) FROM Cache').fetchall()
-+        (max_rowid,), = rows
-+        yield  # Signal ready.
-+
-+        if max_rowid is None:
-+            return
-+
-+        bound = max_rowid + 1
-+        limit = 100
-+        _disk_get = self._disk.get
-+        rowid = 0 if ascending else bound
-+        select = (
-+            'SELECT rowid, key, raw FROM Cache'
-+            ' WHERE ? < rowid AND rowid < ?'
-+            ' ORDER BY rowid %s LIMIT ?'
-+        ) % ('ASC' if ascending else 'DESC')
-+
-+        while True:
-+            if ascending:
-+                args = (rowid, bound, limit)
-+            else:
-+                args = (0, rowid, limit)
-+
-+            rows = sql(select, args).fetchall()
-+
-+            if not rows:
-+                break
-+
-+            for rowid, key, raw in rows:
-+                yield _disk_get(key, raw)
-+
-+
-+    def __iter__(self):
-+        "Iterate keys in cache including expired items."
-+        iterator = self._iter()
-+        next(iterator)
-+        return iterator
-+
-+
-+    def __reversed__(self):
-+        "Reverse iterate keys in cache including expired items."
-+        iterator = self._iter(ascending=False)
-+        next(iterator)
-+        return iterator
-+
-+
-+    def stats(self, enable=True, reset=False):
-+        """Return cache statistics hits and misses.
-+
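-+        For example (a minimal sketch; hits and misses start at zero):
-+
-+        >>> cache = Cache()
-+        >>> cache.stats(enable=True, reset=True)
-+        (0, 0)
-+        >>> _ = cache.set('key', 'value')
-+        >>> for _ in range(10):
-+        ...     _ = cache.get('key')
-+        >>> _ = cache.get('missing')
-+        >>> cache.stats()
-+        (10, 1)
-+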
-+        :param bool enable: enable collecting statistics (default True)
-+        :param bool reset: reset hits and misses to 0 (default False)
-+        :return: (hits, misses)
-+
-+        """
-+        # pylint: disable=E0203,W0201
-+        result = (self.reset('hits'), self.reset('misses'))
-+
-+        if reset:
-+            self.reset('hits', 0)
-+            self.reset('misses', 0)
-+
-+        self.reset('statistics', enable)
-+
-+        return result
-+
-+
-+    def volume(self):
-+        """Return estimated total size of cache on disk.
-+
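-+        For example (the exact size depends on the SQLite page layout,
-+        so only a lower bound is shown):
-+
-+        >>> cache = Cache()
-+        >>> cache.volume() > 0
-+        True
-+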
-+        :return: size in bytes
-+
-+        """
-+        (page_count,), = self._sql('PRAGMA page_count').fetchall()
-+        total_size = self._page_size * page_count + self.reset('size')
-+        return total_size
-+
-+
-+    def close(self):
-+        """Close database connection.
-+
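-+        For example (a minimal sketch; operations after `close` reopen
-+        the connection automatically):
-+
-+        >>> cache = Cache()
-+        >>> cache.close()
-+        >>> cache['key'] = 'value'
-+        >>> cache['key']
-+        'value'
-+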
-+        """
-+        con = getattr(self._local, 'con', None)
-+
-+        if con is None:
-+            return
-+
-+        con.close()
-+
-+        try:
-+            delattr(self._local, 'con')
-+        except AttributeError:
-+            pass
-+
-+
-+    def __enter__(self):
-+        # Create connection in thread.
-+        connection = self._con  # pylint: disable=unused-variable
-+        return self
-+
-+
-+    def __exit__(self, *exception):
-+        self.close()
-+
-+
-+    def __len__(self):
-+        "Count of items in cache including expired items."
-+        return self.reset('count')
-+
-+
-+    def __getstate__(self):
-+        return (self.directory, self.timeout, type(self.disk))
-+
-+
-+    def __setstate__(self, state):
-+        self.__init__(*state)
-+
-+
-+    def reset(self, key, value=ENOVAL, update=True):
-+        """Reset `key` and `value` item from Settings table.
-+
-+        Use `reset` to update the value of Cache settings correctly. Cache
-+        settings are stored in the Settings table of the SQLite database. If
-+        `update` is ``False`` then no attempt is made to update the database.
-+
-+        If `value` is not given, it is reloaded from the Settings
-+        table. Otherwise, the Settings table is updated.
-+
-+        Settings with the ``disk_`` prefix correspond to Disk
-+        attributes. Updating the value will change the unprefixed attribute on
-+        the associated Disk instance.
-+
-+        Settings with the ``sqlite_`` prefix correspond to SQLite
-+        pragmas. Updating the value will execute the corresponding PRAGMA
-+        statement.
-+
-+        SQLite PRAGMA statements may be executed before the Settings table
-+        exists in the database by setting `update` to ``False``.
-+
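-+        For example (a minimal sketch; `cull_limit` is one of the
-+        default cache settings):
-+
-+        >>> cache = Cache()
-+        >>> cache.reset('cull_limit', 0)  # Disable automatic culling.
-+        0
-+        >>> cache.reset('cull_limit')  # Reload the value from Settings.
-+        0
-+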
-+        :param str key: Settings key for item
-+        :param value: value for item (optional)
-+        :param bool update: update database Settings table (default True)
-+        :return: updated value for item
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        sql = self._sql
-+        sql_retry = self._sql_retry
-+
-+        if value is ENOVAL:
-+            select = 'SELECT value FROM Settings WHERE key = ?'
-+            (value,), = sql_retry(select, (key,)).fetchall()
-+            setattr(self, key, value)
-+            return value
-+
-+        if update:
-+            statement = 'UPDATE Settings SET value = ? WHERE key = ?'
-+            sql_retry(statement, (value, key))
-+
-+        if key.startswith('sqlite_'):
-+            pragma = key[7:]
-+
-+            # 2016-02-17 GrantJ - PRAGMA and isolation_level=None
-+            # don't always play nicely together. Retry setting the
-+            # PRAGMA. I think some PRAGMA statements expect to
-+            # immediately take an EXCLUSIVE lock on the database. I
-+            # can't find any documentation for this but without the
-+            # retry, stress will intermittently fail with multiple
-+            # processes.
-+
-+            # 2018-11-05 GrantJ - Avoid setting pragma values that
-+            # are already set. Pragma settings like auto_vacuum and
-+            # journal_mode can take a long time or may not work after
-+            # tables have been created.
-+
-+            start = time.time()
-+            while True:
-+                try:
-+                    try:
-+                        (old_value,), = sql('PRAGMA %s' % (pragma)).fetchall()
-+                        update = old_value != value
-+                    except ValueError:
-+                        update = True
-+                    if update:
-+                        sql('PRAGMA %s = %s' % (pragma, value)).fetchall()
-+                    break
-+                except sqlite3.OperationalError as exc:
-+                    if str(exc) != 'database is locked':
-+                        raise
-+                    diff = time.time() - start
-+                    if diff > 60:
-+                        raise
-+                    time.sleep(0.001)
-+        elif key.startswith('disk_'):
-+            attr = key[5:]
-+            setattr(self._disk, attr, value)
-+
-+        setattr(self, key, value)
-+        return value
-diff --git a/third_party/python/diskcache/diskcache/djangocache.py b/third_party/python/diskcache/diskcache/djangocache.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/diskcache/diskcache/djangocache.py
-@@ -0,0 +1,433 @@
-+"Django-compatible disk and file backed cache."
-+
-+from functools import wraps
-+from django.core.cache.backends.base import BaseCache
-+
-+try:
-+    from django.core.cache.backends.base import DEFAULT_TIMEOUT
-+except ImportError:
-+    # For older versions of Django simply use 300 seconds.
-+    DEFAULT_TIMEOUT = 300
-+
-+from .core import ENOVAL, args_to_key, full_name
-+from .fanout import FanoutCache
-+
-+
-+class DjangoCache(BaseCache):
-+    "Django-compatible disk and file backed cache."
-+    def __init__(self, directory, params):
-+        """Initialize DjangoCache instance.
-+
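-+        A typical ``settings.py`` entry might look like this (a sketch;
-+        the location path is illustrative and the remaining keys show
-+        the defaults)::
-+
-+            CACHES = {
-+                'default': {
-+                    'BACKEND': 'diskcache.DjangoCache',
-+                    'LOCATION': '/tmp/django-cache',
-+                    'SHARDS': 8,
-+                    'DATABASE_TIMEOUT': 0.010,
-+                }
-+            }
-+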
-+        :param str directory: cache directory
-+        :param dict params: cache parameters
-+
-+        """
-+        super(DjangoCache, self).__init__(params)
-+        shards = params.get('SHARDS', 8)
-+        timeout = params.get('DATABASE_TIMEOUT', 0.010)
-+        options = params.get('OPTIONS', {})
-+        self._cache = FanoutCache(directory, shards, timeout, **options)
-+
-+
-+    @property
-+    def directory(self):
-+        """Cache directory."""
-+        return self._cache.directory
-+
-+
-+    def cache(self, name):
-+        """Return Cache with given `name` in subdirectory.
-+
-+        :param str name: subdirectory name for Cache
-+        :return: Cache with given name
-+
-+        """
-+        return self._cache.cache(name)
-+
-+
-+    def deque(self, name):
-+        """Return Deque with given `name` in subdirectory.
-+
-+        :param str name: subdirectory name for Deque
-+        :return: Deque with given name
-+
-+        """
-+        return self._cache.deque(name)
-+
-+
-+    def index(self, name):
-+        """Return Index with given `name` in subdirectory.
-+
-+        :param str name: subdirectory name for Index
-+        :return: Index with given name
-+
-+        """
-+        return self._cache.index(name)
-+
-+
-+    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None,
-+            read=False, tag=None, retry=True):
-+        """Set a value in the cache if the key does not already exist. If
-+        timeout is given, that timeout will be used for the key; otherwise the
-+        default cache timeout will be used.
-+
-+        Return True if the value was stored, False otherwise.
-+
-+        :param key: key for item
-+        :param value: value for item
-+        :param float timeout: seconds until the item expires
-+            (default 300 seconds)
-+        :param int version: key version number (default None, cache parameter)
-+        :param bool read: read value as bytes from file (default False)
-+        :param str tag: text to associate with key (default None)
-+        :param bool retry: retry if database timeout occurs (default True)
-+        :return: True if item was added
-+
-+        """
-+        # pylint: disable=arguments-differ
-+        key = self.make_key(key, version=version)
-+        timeout = self.get_backend_timeout(timeout=timeout)
-+        return self._cache.add(key, value, timeout, read, tag, retry)
-+
-+
-+    def get(self, key, default=None, version=None, read=False,
-+            expire_time=False, tag=False, retry=False):
-+        """Fetch a given key from the cache. If the key does not exist, return
-+        default, which itself defaults to None.
-+
-+        :param key: key for item
-+        :param default: return value if key is missing (default None)
-+        :param int version: key version number (default None, cache parameter)
-+        :param bool read: if True, return file handle to value
-+            (default False)
-+        :param float expire_time: if True, return expire_time in tuple
-+            (default False)
-+        :param tag: if True, return tag in tuple (default False)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: value for item if key is found else default
-+
-+        """
-+        # pylint: disable=arguments-differ
-+        key = self.make_key(key, version=version)
-+        return self._cache.get(key, default, read, expire_time, tag, retry)
-+
-+
-+    def read(self, key, version=None):
-+        """Return file handle corresponding to `key` from Cache.
-+
-+        :param key: Python key to retrieve
-+        :param int version: key version number (default None, cache parameter)
-+        :return: file open for reading in binary mode
-+        :raises KeyError: if key is not found
-+
-+        """
-+        key = self.make_key(key, version=version)
-+        return self._cache.read(key)
-+
-+
-+    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None,
-+            read=False, tag=None, retry=True):
-+        """Set a value in the cache. If timeout is given, that timeout will be
-+        used for the key; otherwise the default cache timeout will be used.
-+
-+        :param key: key for item
-+        :param value: value for item
-+        :param float timeout: seconds until the item expires
-+            (default 300 seconds)
-+        :param int version: key version number (default None, cache parameter)
-+        :param bool read: read value as bytes from file (default False)
-+        :param str tag: text to associate with key (default None)
-+        :param bool retry: retry if database timeout occurs (default True)
-+        :return: True if item was set
-+
-+        """
-+        # pylint: disable=arguments-differ
-+        key = self.make_key(key, version=version)
-+        timeout = self.get_backend_timeout(timeout=timeout)
-+        return self._cache.set(key, value, timeout, read, tag, retry)
-+
-+
-+    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None, retry=True):
-+        """Touch a key in the cache. If timeout is given, that timeout will be
-+        used for the key; otherwise the default cache timeout will be used.
-+
-+        :param key: key for item
-+        :param float timeout: seconds until the item expires
-+            (default 300 seconds)
-+        :param int version: key version number (default None, cache parameter)
-+        :param bool retry: retry if database timeout occurs (default True)
-+        :return: True if key was touched
-+
-+        """
-+        # pylint: disable=arguments-differ
-+        key = self.make_key(key, version=version)
-+        timeout = self.get_backend_timeout(timeout=timeout)
-+        return self._cache.touch(key, timeout, retry)
-+
-+
-+    def pop(self, key, default=None, version=None, expire_time=False,
-+            tag=False, retry=True):
-+        """Remove corresponding item for `key` from cache and return value.
-+
-+        If `key` is missing, return `default`.
-+
-+        Operation is atomic. Concurrent operations will be serialized.
-+
-+        :param key: key for item
-+        :param default: return value if key is missing (default None)
-+        :param int version: key version number (default None, cache parameter)
-+        :param float expire_time: if True, return expire_time in tuple
-+            (default False)
-+        :param tag: if True, return tag in tuple (default False)
-+        :param bool retry: retry if database timeout occurs (default True)
-+        :return: value for item if key is found else default
-+
-+        """
-+        key = self.make_key(key, version=version)
-+        return self._cache.pop(key, default, expire_time, tag, retry)
-+
-+
-+    def delete(self, key, version=None, retry=True):
-+        """Delete a key from the cache, failing silently.
-+
-+        :param key: key for item
-+        :param int version: key version number (default None, cache parameter)
-+        :param bool retry: retry if database timeout occurs (default True)
-+        :return: True if item was deleted
-+
-+        """
-+        # pylint: disable=arguments-differ
-+        key = self.make_key(key, version=version)
-+        return self._cache.delete(key, retry)
-+
-+
-+    def incr(self, key, delta=1, version=None, default=None, retry=True):
-+        """Increment value by delta for item with key.
-+
-+        If key is missing and default is None then raise ValueError. Else if
-+        key is missing and default is not None then use default for value.
-+
-+        Operation is atomic. All concurrent increment operations will be
-+        counted individually.
-+
-+        Assumes value may be stored in a SQLite column. Most builds that target
-+        machines with 64-bit pointer widths will support 64-bit signed
-+        integers.
-+
-+        :param key: key for item
-+        :param int delta: amount to increment (default 1)
-+        :param int version: key version number (default None, cache parameter)
-+        :param int default: value if key is missing (default None)
-+        :param bool retry: retry if database timeout occurs (default True)
-+        :return: new value for item on success else None
-+        :raises ValueError: if key is not found and default is None
-+
-+        """
-+        # pylint: disable=arguments-differ
-+        key = self.make_key(key, version=version)
-+        try:
-+            return self._cache.incr(key, delta, default, retry)
-+        except KeyError:
-+            raise ValueError("Key '%s' not found" % key)
-+
-+
-+    def decr(self, key, delta=1, version=None, default=None, retry=True):
-+        """Decrement value by delta for item with key.
-+
-+        If key is missing and default is None then raise ValueError. Else if
-+        key is missing and default is not None then use default for value.
-+
-+        Operation is atomic. All concurrent decrement operations will be
-+        counted individually.
-+
-+        Unlike Memcached, negative values are supported. Value may be
-+        decremented below zero.
-+
-+        Assumes value may be stored in a SQLite column. Most builds that target
-+        machines with 64-bit pointer widths will support 64-bit signed
-+        integers.
-+
-+        :param key: key for item
-+        :param int delta: amount to decrement (default 1)
-+        :param int version: key version number (default None, cache parameter)
-+        :param int default: value if key is missing (default None)
-+        :param bool retry: retry if database timeout occurs (default True)
-+        :return: new value for item on success else None
-+        :raises ValueError: if key is not found and default is None
-+
-+        """
-+        # pylint: disable=arguments-differ
-+        return self.incr(key, -delta, version, default, retry)
-+
-+
-+    def has_key(self, key, version=None):
-+        """Returns True if the key is in the cache and has not expired.
-+
-+        :param key: key for item
-+        :param int version: key version number (default None, cache parameter)
-+        :return: True if key is found
-+
-+        """
-+        key = self.make_key(key, version=version)
-+        return key in self._cache
-+
-+
-+    def expire(self):
-+        """Remove expired items from cache.
-+
-+        :return: count of items removed
-+
-+        """
-+        return self._cache.expire()
-+
-+
-+    def stats(self, enable=True, reset=False):
-+        """Return cache statistics hits and misses.
-+
-+        :param bool enable: enable collecting statistics (default True)
-+        :param bool reset: reset hits and misses to 0 (default False)
-+        :return: (hits, misses)
-+
-+        """
-+        return self._cache.stats(enable=enable, reset=reset)
-+
-+
-+    def create_tag_index(self):
-+        """Create tag index on cache database.
-+
-+        It is better to initialize the cache with `tag_index=True` than to
-+        use this method.
-+
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        self._cache.create_tag_index()
-+
-+
-+    def drop_tag_index(self):
-+        """Drop tag index on cache database.
-+
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        self._cache.drop_tag_index()
-+
-+
-+    def evict(self, tag):
-+        """Remove items with matching `tag` from cache.
-+
-+        :param str tag: tag identifying items
-+        :return: count of items removed
-+
-+        """
-+        return self._cache.evict(tag)
-+
-+
-+    def cull(self):
-+        """Cull items from cache until volume is less than size limit.
-+
-+        :return: count of items removed
-+
-+        """
-+        return self._cache.cull()
-+
-+
-+    def clear(self):
-+        "Remove *all* values from the cache at once."
-+        return self._cache.clear()
-+
-+
-+    def close(self, **kwargs):
-+        "Close the cache connection."
-+        # pylint: disable=unused-argument
-+        self._cache.close()
-+
-+
-+    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
-+        """Return seconds to expiration.
-+
-+        :param float timeout: seconds until the item expires
-+            (default 300 seconds)
-+
-+        """
-+        if timeout == DEFAULT_TIMEOUT:
-+            timeout = self.default_timeout
-+        elif timeout == 0:
-+            # ticket 21147 - avoid time.time() related precision issues
-+            timeout = -1
-+        return None if timeout is None else timeout
-+
-+
-+    def memoize(self, name=None, timeout=DEFAULT_TIMEOUT, version=None,
-+                typed=False, tag=None):
-+        """Memoizing cache decorator.
-+
-+        Decorator to wrap callable with memoizing function using cache.
-+        Repeated calls with the same arguments will look up the result in the
-+        cache and avoid function evaluation.
-+
-+        If name is set to None (default), the callable name will be determined
-+        automatically.
-+
-+        When timeout is set to zero, function results will not be set in the
-+        cache. Cache lookups still occur, however. Read
-+        :doc:`case-study-landing-page-caching` for example usage.
-+
-+        If typed is set to True, function arguments of different types will be
-+        cached separately. For example, f(3) and f(3.0) will be treated as
-+        distinct calls with distinct results.
-+
-+        The original underlying function is accessible through the __wrapped__
-+        attribute. This is useful for introspection, for bypassing the cache,
-+        or for rewrapping the function with a different cache.
-+
-+        An additional `__cache_key__` attribute can be used to generate the
-+        cache key used for the given arguments.
-+
-+        Remember to call memoize when decorating a callable. If you forget,
-+        then a TypeError will occur.
-+
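-+        For example (a sketch; assumes Django is configured with this
-+        backend as the ``default`` cache)::
-+
-+            from django.core.cache import cache
-+
-+            @cache.memoize(timeout=60, tag='squares')
-+            def square(number):
-+                return number * number
-+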
-+        :param str name: name given for callable (default None, automatic)
-+        :param float timeout: seconds until the item expires
-+            (default 300 seconds)
-+        :param int version: key version number (default None, cache parameter)
-+        :param bool typed: cache different types separately (default False)
-+        :param str tag: text to associate with arguments (default None)
-+        :return: callable decorator
-+
-+        """
-+        # Caution: Nearly identical code exists in Cache.memoize
-+        if callable(name):
-+            raise TypeError('name cannot be callable')
-+
-+        def decorator(func):
-+            "Decorator created by memoize() for callable `func`."
-+            base = (full_name(func),) if name is None else (name,)
-+
-+            @wraps(func)
-+            def wrapper(*args, **kwargs):
-+                "Wrapper for callable to cache arguments and return values."
-+                key = wrapper.__cache_key__(*args, **kwargs)
-+                result = self.get(key, ENOVAL, version, retry=True)
-+
-+                if result is ENOVAL:
-+                    result = func(*args, **kwargs)
-+                    valid_timeout = (
-+                        timeout is None
-+                        or timeout == DEFAULT_TIMEOUT
-+                        or timeout > 0
-+                    )
-+                    if valid_timeout:
-+                        self.set(
-+                            key, result, timeout, version, tag=tag, retry=True,
-+                        )
-+
-+                return result
-+
-+            def __cache_key__(*args, **kwargs):
-+                "Make key for cache given function arguments."
-+                return args_to_key(base, args, kwargs, typed)
-+
-+            wrapper.__cache_key__ = __cache_key__
-+            return wrapper
-+
-+        return decorator
-diff --git a/third_party/python/diskcache/diskcache/fanout.py b/third_party/python/diskcache/diskcache/fanout.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/diskcache/diskcache/fanout.py
-@@ -0,0 +1,677 @@
-+"Fanout cache automatically shards keys and values."
-+
-+import itertools as it
-+import operator
-+import os.path as op
-+import sqlite3
-+import sys
-+import tempfile
-+import time
-+
-+from .core import ENOVAL, DEFAULT_SETTINGS, Cache, Disk, Timeout
-+from .persistent import Deque, Index
-+
-+############################################################################
-+# BEGIN Python 2/3 Shims
-+############################################################################
-+
-+if sys.hexversion >= 0x03000000:
-+    from functools import reduce
-+
-+############################################################################
-+# END Python 2/3 Shims
-+############################################################################
-+
-+
-+class FanoutCache(object):
-+    "Cache that shards keys and values."
-+    def __init__(self, directory=None, shards=8, timeout=0.010, disk=Disk,
-+                 **settings):
-+        """Initialize cache instance.
-+
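-+        For example (a minimal sketch; the shard count and timeout are
-+        illustrative):
-+
-+        >>> cache = FanoutCache(shards=4, timeout=1)
-+        >>> cache.set('key', 'value')
-+        True
-+        >>> cache.get('key')
-+        'value'
-+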
-+        :param str directory: cache directory
-+        :param int shards: number of shards to distribute writes
-+        :param float timeout: SQLite connection timeout
-+        :param disk: `Disk` instance for serialization
-+        :param settings: any of `DEFAULT_SETTINGS`
-+
-+        """
-+        if directory is None:
-+            directory = tempfile.mkdtemp(prefix='diskcache-')
-+        directory = op.expanduser(directory)
-+        directory = op.expandvars(directory)
-+
-+        default_size_limit = DEFAULT_SETTINGS['size_limit']
-+        size_limit = settings.pop('size_limit', default_size_limit) / shards
-+
-+        self._count = shards
-+        self._directory = directory
-+        self._shards = tuple(
-+            Cache(
-+                directory=op.join(directory, '%03d' % num),
-+                timeout=timeout,
-+                disk=disk,
-+                size_limit=size_limit,
-+                **settings
-+            )
-+            for num in range(shards)
-+        )
-+        self._hash = self._shards[0].disk.hash
-+        self._caches = {}
-+        self._deques = {}
-+        self._indexes = {}
-+
-+
-+    @property
-+    def directory(self):
-+        """Cache directory."""
-+        return self._directory
-+
-+
-+    def __getattr__(self, name):
-+        return getattr(self._shards[0], name)
-+
-+
-+    def set(self, key, value, expire=None, read=False, tag=None, retry=False):
-+        """Set `key` and `value` item in cache.
-+
-+        When `read` is `True`, `value` should be a file-like object opened
-+        for reading in binary mode.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param key: key for item
-+        :param value: value for item
-+        :param float expire: seconds until the key expires
-+            (default None, no expiry)
-+        :param bool read: read value as raw bytes from file (default False)
-+        :param str tag: text to associate with key (default None)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: True if item was set
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        try:
-+            return shard.set(key, value, expire, read, tag, retry)
-+        except Timeout:
-+            return False
-+
-+
-+    def __setitem__(self, key, value):
-+        """Set `key` and `value` item in cache.
-+
-+        Calls :func:`FanoutCache.set` internally with `retry` set to `True`.
-+
-+        :param key: key for item
-+        :param value: value for item
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        shard[key] = value
-+
-+
-+    def touch(self, key, expire=None, retry=False):
-+        """Touch `key` in cache and update `expire` time.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param key: key for item
-+        :param float expire: seconds until the key expires
-+            (default None, no expiry)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: True if key was touched
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        try:
-+            return shard.touch(key, expire, retry)
-+        except Timeout:
-+            return False
-+
-+
-+    def add(self, key, value, expire=None, read=False, tag=None, retry=False):
-+        """Add `key` and `value` item to cache.
-+
-+        Similar to `set`, but only add to cache if key not present.
-+
-+        This operation is atomic. Only one concurrent add operation for given
-+        key from separate threads or processes will succeed.
-+
-+        When `read` is `True`, `value` should be a file-like object opened
-+        for reading in binary mode.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param key: key for item
-+        :param value: value for item
-+        :param float expire: seconds until the key expires
-+            (default None, no expiry)
-+        :param bool read: read value as bytes from file (default False)
-+        :param str tag: text to associate with key (default None)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: True if item was added
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        try:
-+            return shard.add(key, value, expire, read, tag, retry)
-+        except Timeout:
-+            return False
-+
-+
-+    def incr(self, key, delta=1, default=0, retry=False):
-+        """Increment value by delta for item with key.
-+
-+        If key is missing and default is None then raise KeyError. Else if key
-+        is missing and default is not None then use default for value.
-+
-+        Operation is atomic. All concurrent increment operations will be
-+        counted individually.
-+
-+        Assumes value may be stored in a SQLite column. Most builds that target
-+        machines with 64-bit pointer widths will support 64-bit signed
-+        integers.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param key: key for item
-+        :param int delta: amount to increment (default 1)
-+        :param int default: value if key is missing (default 0)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: new value for item on success else None
-+        :raises KeyError: if key is not found and default is None
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        try:
-+            return shard.incr(key, delta, default, retry)
-+        except Timeout:
-+            return None
-+
-+
-+    def decr(self, key, delta=1, default=0, retry=False):
-+        """Decrement value by delta for item with key.
-+
-+        If key is missing and default is None then raise KeyError. Else if key
-+        is missing and default is not None then use default for value.
-+
-+        Operation is atomic. All concurrent decrement operations will be
-+        counted individually.
-+
-+        Unlike Memcached, negative values are supported. Value may be
-+        decremented below zero.
-+
-+        Assumes value may be stored in a SQLite column. Most builds that target
-+        machines with 64-bit pointer widths will support 64-bit signed
-+        integers.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param key: key for item
-+        :param int delta: amount to decrement (default 1)
-+        :param int default: value if key is missing (default 0)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: new value for item on success else None
-+        :raises KeyError: if key is not found and default is None
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        try:
-+            return shard.decr(key, delta, default, retry)
-+        except Timeout:
-+            return None
-+
-+
-+    def get(self, key, default=None, read=False, expire_time=False, tag=False,
-+            retry=False):
-+        """Retrieve value from cache. If `key` is missing, return `default`.
-+
-+        If database timeout occurs then returns `default` unless `retry` is set
-+        to `True` (default `False`).
-+
-+        :param key: key for item
-+        :param default: return value if key is missing (default None)
-+        :param bool read: if True, return file handle to value
-+            (default False)
-+        :param float expire_time: if True, return expire_time in tuple
-+            (default False)
-+        :param tag: if True, return tag in tuple (default False)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: value for item if key is found else default
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        try:
-+            return shard.get(key, default, read, expire_time, tag, retry)
-+        except (Timeout, sqlite3.OperationalError):
-+            return default
-+
-+
-+    def __getitem__(self, key):
-+        """Return corresponding value for `key` from cache.
-+
-+        Calls :func:`FanoutCache.get` internally with `retry` set to `True`.
-+
-+        :param key: key for item
-+        :return: value for item
-+        :raises KeyError: if key is not found
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        return shard[key]
-+
-+
-+    def read(self, key):
-+        """Return file handle corresponding to `key` from cache.
-+
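-+        For example (a minimal sketch using an in-memory byte stream):
-+
-+        >>> from io import BytesIO
-+        >>> cache = FanoutCache()
-+        >>> _ = cache.set('key', BytesIO(b'value'), read=True)
-+        >>> cache.read('key').read() == b'value'
-+        True
-+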
-+        :param key: key for item
-+        :return: file open for reading in binary mode
-+        :raises KeyError: if key is not found
-+
-+        """
-+        handle = self.get(key, default=ENOVAL, read=True, retry=True)
-+        if handle is ENOVAL:
-+            raise KeyError(key)
-+        return handle
-+
-+
-+    def __contains__(self, key):
-+        """Return `True` if `key` matching item is found in cache.
-+
-+        :param key: key for item
-+        :return: True if key is found
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        return key in shard
-+
-+
-+    def pop(self, key, default=None, expire_time=False, tag=False, retry=False):
-+        """Remove corresponding item for `key` from cache and return value.
-+
-+        If `key` is missing, return `default`.
-+
-+        Operation is atomic. Concurrent operations will be serialized.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param key: key for item
-+        :param default: return value if key is missing (default None)
-+        :param float expire_time: if True, return expire_time in tuple
-+            (default False)
-+        :param tag: if True, return tag in tuple (default False)
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: value for item if key is found else default
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        try:
-+            return shard.pop(key, default, expire_time, tag, retry)
-+        except Timeout:
-+            return default
-+
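-+    # A minimal usage sketch for `pop` (illustrative values):
-+    #
-+    # >>> cache = FanoutCache()
-+    # >>> cache.set('token', 'abc123')
-+    # True
-+    # >>> cache.pop('token')
-+    # 'abc123'
-+    # >>> cache.pop('token', default='gone')
-+    # 'gone'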
-+
-+    def delete(self, key, retry=False):
-+        """Delete corresponding item for `key` from cache.
-+
-+        Missing keys are ignored.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param key: key for item
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: True if item was deleted
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        try:
-+            return shard.delete(key, retry)
-+        except Timeout:
-+            return False
-+
-+
-+    def __delitem__(self, key):
-+        """Delete corresponding item for `key` from cache.
-+
-+        Delegates to :func:`Cache.__delitem__` on the shard for `key`, which
-+        retries if a database timeout occurs.
-+
-+        :param key: key for item
-+        :raises KeyError: if key is not found
-+
-+        """
-+        index = self._hash(key) % self._count
-+        shard = self._shards[index]
-+        del shard[key]
-+
-+
-+    def check(self, fix=False, retry=False):
-+        """Check database and file system consistency.
-+
-+        Intended for use in testing and post-mortem error analysis.
-+
-+        While checking the cache table for consistency, a writer lock is held
-+        on the database. The lock blocks other cache clients from writing to
-+        the database. For caches with many file references, the lock may be
-+        held for a long time. For example, local benchmarking shows that a
-+        cache with 1,000 file references takes ~60ms to check.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param bool fix: correct inconsistencies
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: list of warnings
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        warnings = (shard.check(fix, retry) for shard in self._shards)
-+        return reduce(operator.iadd, warnings, [])
-+
-+
-+    def expire(self, retry=False):
-+        """Remove expired items from cache.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: count of items removed
-+
-+        """
-+        return self._remove('expire', args=(time.time(),), retry=retry)
-+
-+
-+    def create_tag_index(self):
-+        """Create tag index on cache database.
-+
-+        Better to initialize cache with `tag_index=True` than use this.
-+
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        for shard in self._shards:
-+            shard.create_tag_index()
-+
-+
-+    def drop_tag_index(self):
-+        """Drop tag index on cache database.
-+
-+        :raises Timeout: if database timeout occurs
-+
-+        """
-+        for shard in self._shards:
-+            shard.drop_tag_index()
-+
-+
-+    def evict(self, tag, retry=False):
-+        """Remove items with matching `tag` from cache.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param str tag: tag identifying items
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: count of items removed
-+
-+        """
-+        return self._remove('evict', args=(tag,), retry=retry)
-+
-+
-+    def cull(self, retry=False):
-+        """Cull items from cache until volume is less than size limit.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: count of items removed
-+
-+        """
-+        return self._remove('cull', retry=retry)
-+
-+
-+    def clear(self, retry=False):
-+        """Remove all items from cache.
-+
-+        If database timeout occurs then fails silently unless `retry` is set to
-+        `True` (default `False`).
-+
-+        :param bool retry: retry if database timeout occurs (default False)
-+        :return: count of items removed
-+
-+        """
-+        return self._remove('clear', retry=retry)
-+
-+
-+    def _remove(self, name, args=(), retry=False):
-+        total = 0
-+        for shard in self._shards:
-+            method = getattr(shard, name)
-+            while True:
-+                try:
-+                    count = method(*args, retry=retry)
-+                    total += count
-+                except Timeout as timeout:
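-+                    # Removal methods raise Timeout carrying the partial
-+                    # count of items already removed; bank it and retry.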
-+                    total += timeout.args[0]
-+                else:
-+                    break
-+        return total
-+
-+
-+    def stats(self, enable=True, reset=False):
-+        """Return cache statistics hits and misses.
-+
-+        :param bool enable: enable collecting statistics (default True)
-+        :param bool reset: reset hits and misses to 0 (default False)
-+        :return: (hits, misses)
-+
-+        """
-+        results = [shard.stats(enable, reset) for shard in self._shards]
-+        total_hits = sum(hits for hits, _ in results)
-+        total_misses = sum(misses for _, misses in results)
-+        return total_hits, total_misses
-+
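-+    # A minimal sketch of deriving a hit rate from `stats` (illustrative;
-+    # guards against a zero total on a cold cache):
-+    #
-+    # >>> cache = FanoutCache()
-+    # >>> hits, misses = cache.stats(enable=True)
-+    # >>> total = hits + misses
-+    # >>> rate = hits / total if total else 0.0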
-+
-+    def volume(self):
-+        """Return estimated total size of cache on disk.
-+
-+        :return: size in bytes
-+
-+        """
-+        return sum(shard.volume() for shard in self._shards)
-+
-+
-+    def close(self):
-+        "Close database connection."
-+        for shard in self._shards:
-+            shard.close()
-+        self._caches.clear()
-+        self._deques.clear()
-+        self._indexes.clear()
-+
-+
-+    def __enter__(self):
-+        return self
-+
-+
-+    def __exit__(self, *exception):
-+        self.close()
-+
-+
-+    def __getstate__(self):
-+        return (self._directory, self._count, self.timeout, type(self.disk))
-+
-+
-+    def __setstate__(self, state):
-+        self.__init__(*state)
-+
-+
-+    def __iter__(self):
-+        "Iterate keys in cache including expired items."
-+        iterators = (iter(shard) for shard in self._shards)
-+        return it.chain.from_iterable(iterators)
-+
-+
-+    def __reversed__(self):
-+        "Reverse iterate keys in cache including expired items."
-+        iterators = (reversed(shard) for shard in reversed(self._shards))
-+        return it.chain.from_iterable(iterators)
-+
-+
-+    def __len__(self):
-+        "Count of items in cache including expired items."
-+        return sum(len(shard) for shard in self._shards)
-+
-+
-+    def reset(self, key, value=ENOVAL):
-+        """Reset `key` and `value` item from Settings table.
-+
-+        If `value` is not given, it is reloaded from the Settings
-+        table. Otherwise, the Settings table is updated.
-+
-+        Settings attributes on cache objects are lazy-loaded and
-+        read-only. Use `reset` to update the value.
-+
-+        Settings with the ``sqlite_`` prefix correspond to SQLite
-+        pragmas. Updating the value will execute the corresponding PRAGMA
-+        statement.
-+
-+        :param str key: Settings key for item
-+        :param value: value for item (optional)
-+        :return: updated value for item
-+
-+        """
-+        for shard in self._shards:
-+            while True:
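-+                # Retry until the shard accepts the update; settings
-+                # changes must land on every shard to stay consistent.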
-+                try:
-+                    result = shard.reset(key, value)
-+                except Timeout:
-+                    pass
-+                else:
-+                    break
-+        return result
-+
-+
-+    def cache(self, name):
-+        """Return Cache with given `name` in subdirectory.
-+
-+        >>> fanout_cache = FanoutCache()
-+        >>> cache = fanout_cache.cache('test')
-+        >>> cache.set('abc', 123)
-+        True
-+        >>> cache.get('abc')
-+        123
-+        >>> len(cache)
-+        1
-+        >>> cache.delete('abc')
-+        True
-+
-+        :param str name: subdirectory name for Cache
-+        :return: Cache with given name
-+
-+        """
-+        _caches = self._caches
-+
-+        try:
-+            return _caches[name]
-+        except KeyError:
-+            parts = name.split('/')
-+            directory = op.join(self._directory, 'cache', *parts)
-+            temp = Cache(directory=directory)
-+            _caches[name] = temp
-+            return temp
-+
-+
-+    def deque(self, name):
-+        """Return Deque with given `name` in subdirectory.
-+
-+        >>> cache = FanoutCache()
-+        >>> deque = cache.deque('test')
-+        >>> deque.extend('abc')
-+        >>> deque.popleft()
-+        'a'
-+        >>> deque.pop()
-+        'c'
-+        >>> len(deque)
-+        1
-+
-+        :param str name: subdirectory name for Deque
-+        :return: Deque with given name
-+
-+        """
-+        _deques = self._deques
-+
-+        try:
-+            return _deques[name]
-+        except KeyError:
-+            parts = name.split('/')
-+            directory = op.join(self._directory, 'deque', *parts)
-+            temp = Deque(directory=directory)
-+            _deques[name] = temp
-+            return temp
-+
-+
-+    def index(self, name):
-+        """Return Index with given `name` in subdirectory.
-+
-+        >>> cache = FanoutCache()
-+        >>> index = cache.index('test')
-+        >>> index['abc'] = 123
-+        >>> index['def'] = 456
-+        >>> index['ghi'] = 789
-+        >>> index.popitem()
-+        ('ghi', 789)
-+        >>> del index['abc']
-+        >>> len(index)
-+        1
-+        >>> index['def']
-+        456
-+
-+        :param str name: subdirectory name for Index
-+        :return: Index with given name
-+
-+        """
-+        _indexes = self._indexes
-+
-+        try:
-+            return _indexes[name]
-+        except KeyError:
-+            parts = name.split('/')
-+            directory = op.join(self._directory, 'index', *parts)
-+            temp = Index(directory)
-+            _indexes[name] = temp
-+            return temp
-+
-+
-+############################################################################
-+# BEGIN Python 2/3 Shims
-+############################################################################
-+
-+if sys.hexversion < 0x03000000:
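-+    # Pulling `memoize` out of Cache.__dict__ yields a plain function under
-+    # Python 2; MethodType rebinds it as an unbound method on FanoutCache.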
-+    import types
-+    memoize_func = Cache.__dict__['memoize']  # pylint: disable=invalid-name
-+    FanoutCache.memoize = types.MethodType(memoize_func, None, FanoutCache)
-+else:
-+    FanoutCache.memoize = Cache.memoize
-+
-+############################################################################
-+# END Python 2/3 Shims
-+############################################################################
-diff --git a/third_party/python/diskcache/diskcache/persistent.py b/third_party/python/diskcache/diskcache/persistent.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/diskcache/diskcache/persistent.py
-@@ -0,0 +1,1403 @@
-+"""Persistent Data Types
-+
-+"""
-+
-+import operator as op
-+import sys
-+
-+from collections import OrderedDict
-+from contextlib import contextmanager
-+from shutil import rmtree
-+
-+from .core import BytesType, Cache, ENOVAL, TextType
-+
-+############################################################################
-+# BEGIN Python 2/3 Shims
-+############################################################################
-+
-+try:
-+    from collections.abc import MutableMapping, Sequence
-+    from collections.abc import KeysView, ValuesView, ItemsView
-+except ImportError:
-+    from collections import MutableMapping, Sequence
-+    from collections import KeysView, ValuesView, ItemsView
-+
-+if sys.hexversion < 0x03000000:
-+    from itertools import izip as zip  # pylint: disable=redefined-builtin,no-name-in-module,ungrouped-imports
-+    range = xrange  # pylint: disable=redefined-builtin,invalid-name,undefined-variable
-+
-+############################################################################
-+# END Python 2/3 Shims
-+############################################################################
-+
-+
-+def _make_compare(seq_op, doc):
-+    "Make compare method with Sequence semantics."
-+    def compare(self, that):
-+        "Compare method for deque and sequence."
-+        if not isinstance(that, Sequence):
-+            return NotImplemented
-+
-+        len_self = len(self)
-+        len_that = len(that)
-+
-+        if len_self != len_that:
-+            if seq_op is op.eq:
-+                return False
-+            if seq_op is op.ne:
-+                return True
-+
-+        for alpha, beta in zip(self, that):
-+            if alpha != beta:
-+                return seq_op(alpha, beta)
-+
-+        return seq_op(len_self, len_that)
-+
-+    compare.__name__ = '__{0}__'.format(seq_op.__name__)
-+    doc_str = 'Return True if and only if deque is {0} `that`.'
-+    compare.__doc__ = doc_str.format(doc)
-+
-+    return compare
-+
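-+# A minimal sketch of what `_make_compare` produces (illustrative only):
-+#
-+# >>> lt = _make_compare(op.lt, 'less than')
-+# >>> lt.__name__
-+# '__lt__'
-+# >>> lt.__doc__
-+# 'Return True if and only if deque is less than `that`.'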
-+
-+class Deque(Sequence):
-+    """Persistent sequence with double-ended queue semantics.
-+
-+    Double-ended queue is an ordered collection with optimized access at its
-+    endpoints.
-+
-+    Items are serialized to disk. Deque may be initialized from directory path
-+    where items are stored.
-+
-+    >>> deque = Deque()
-+    >>> deque += range(5)
-+    >>> list(deque)
-+    [0, 1, 2, 3, 4]
-+    >>> for value in range(5):
-+    ...     deque.appendleft(-value)
-+    >>> len(deque)
-+    10
-+    >>> list(deque)
-+    [-4, -3, -2, -1, 0, 0, 1, 2, 3, 4]
-+    >>> deque.pop()
-+    4
-+    >>> deque.popleft()
-+    -4
-+    >>> deque.reverse()
-+    >>> list(deque)
-+    [3, 2, 1, 0, 0, -1, -2, -3]
-+
-+    """
-+    def __init__(self, iterable=(), directory=None):
-+        """Initialize deque instance.
-+
-+        If directory is None then a temporary directory is created. The
-+        directory will *not* be automatically removed.
-+
-+        :param iterable: iterable of items to append to deque
-+        :param directory: deque directory (default None)
-+
-+        """
-+        self._cache = Cache(directory, eviction_policy='none')
-+        with self.transact():
-+            self.extend(iterable)
-+
-+
-+    @classmethod
-+    def fromcache(cls, cache, iterable=()):
-+        """Initialize deque using `cache`.
-+
-+        >>> cache = Cache()
-+        >>> deque = Deque.fromcache(cache, [5, 6, 7, 8])
-+        >>> deque.cache is cache
-+        True
-+        >>> len(deque)
-+        4
-+        >>> 7 in deque
-+        True
-+        >>> deque.popleft()
-+        5
-+
-+        :param Cache cache: cache to use
-+        :param iterable: iterable of items
-+        :return: initialized Deque
-+
-+        """
-+        # pylint: disable=no-member,protected-access
-+        self = cls.__new__(cls)
-+        self._cache = cache
-+        self.extend(iterable)
-+        return self
-+
-+
-+    @property
-+    def cache(self):
-+        "Cache used by deque."
-+        return self._cache
-+
-+
-+    @property
-+    def directory(self):
-+        "Directory path where deque is stored."
-+        return self._cache.directory
-+
-+
-+    def _index(self, index, func):
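-+        # Indexing is linear: keys are walked from the nearer end, and keys
-+        # deleted concurrently are skipped via the KeyError handlers below.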
-+        len_self = len(self)
-+
-+        if index >= 0:
-+            if index >= len_self:
-+                raise IndexError('deque index out of range')
-+
-+            for key in self._cache.iterkeys():
-+                if index == 0:
-+                    try:
-+                        return func(key)
-+                    except KeyError:
-+                        continue
-+                index -= 1
-+        else:
-+            if index < -len_self:
-+                raise IndexError('deque index out of range')
-+
-+            index += 1
-+
-+            for key in self._cache.iterkeys(reverse=True):
-+                if index == 0:
-+                    try:
-+                        return func(key)
-+                    except KeyError:
-+                        continue
-+                index += 1
-+
-+        raise IndexError('deque index out of range')
-+
-+
-+    def __getitem__(self, index):
-+        """deque.__getitem__(index) <==> deque[index]
-+
-+        Return corresponding item for `index` in deque.
-+
-+        See also `Deque.peekleft` and `Deque.peek` for indexing deque at index
-+        ``0`` or ``-1``.
-+
-+        >>> deque = Deque()
-+        >>> deque.extend('abcde')
-+        >>> deque[1]
-+        'b'
-+        >>> deque[-2]
-+        'd'
-+
-+        :param int index: index of item
-+        :return: corresponding item
-+        :raises IndexError: if index out of range
-+
-+        """
-+        return self._index(index, self._cache.__getitem__)
-+
-+
-+    def __setitem__(self, index, value):
-+        """deque.__setitem__(index, value) <==> deque[index] = value
-+
-+        Store `value` in deque at `index`.
-+
-+        >>> deque = Deque()
-+        >>> deque.extend([None] * 3)
-+        >>> deque[0] = 'a'
-+        >>> deque[1] = 'b'
-+        >>> deque[-1] = 'c'
-+        >>> ''.join(deque)
-+        'abc'
-+
-+        :param int index: index of value
-+        :param value: value to store
-+        :raises IndexError: if index out of range
-+
-+        """
-+        set_value = lambda key: self._cache.__setitem__(key, value)
-+        self._index(index, set_value)
-+
-+
-+    def __delitem__(self, index):
-+        """deque.__delitem__(index) <==> del deque[index]
-+
-+        Delete item in deque at `index`.
-+
-+        >>> deque = Deque()
-+        >>> deque.extend([None] * 3)
-+        >>> del deque[0]
-+        >>> del deque[1]
-+        >>> del deque[-1]
-+        >>> len(deque)
-+        0
-+
-+        :param int index: index of item
-+        :raises IndexError: if index out of range
-+
-+        """
-+        self._index(index, self._cache.__delitem__)
-+
-+
-+    def __repr__(self):
-+        """deque.__repr__() <==> repr(deque)
-+
-+        Return string with printable representation of deque.
-+
-+        """
-+        name = type(self).__name__
-+        return '{0}(directory={1!r})'.format(name, self.directory)
-+
-+
-+    __eq__ = _make_compare(op.eq, 'equal to')
-+    __ne__ = _make_compare(op.ne, 'not equal to')
-+    __lt__ = _make_compare(op.lt, 'less than')
-+    __gt__ = _make_compare(op.gt, 'greater than')
-+    __le__ = _make_compare(op.le, 'less than or equal to')
-+    __ge__ = _make_compare(op.ge, 'greater than or equal to')
-+
-+
-+    def __iadd__(self, iterable):
-+        """deque.__iadd__(iterable) <==> deque += iterable
-+
-+        Extend back side of deque with items from iterable.
-+
-+        :param iterable: iterable of items to append to deque
-+        :return: deque with added items
-+
-+        """
-+        self.extend(iterable)
-+        return self
-+
-+
-+    def __iter__(self):
-+        """deque.__iter__() <==> iter(deque)
-+
-+        Return iterator of deque from front to back.
-+
-+        """
-+        _cache = self._cache
-+
-+        for key in _cache.iterkeys():
-+            try:
-+                yield _cache[key]
-+            except KeyError:
-+                pass
-+
-+
-+    def __len__(self):
-+        """deque.__len__() <==> len(deque)
-+
-+        Return length of deque.
-+
-+        """
-+        return len(self._cache)
-+
-+
-+    def __reversed__(self):
-+        """deque.__reversed__() <==> reversed(deque)
-+
-+        Return iterator of deque from back to front.
-+
-+        >>> deque = Deque()
-+        >>> deque.extend('abcd')
-+        >>> iterator = reversed(deque)
-+        >>> next(iterator)
-+        'd'
-+        >>> list(iterator)
-+        ['c', 'b', 'a']
-+
-+        """
-+        _cache = self._cache
-+
-+        for key in _cache.iterkeys(reverse=True):
-+            try:
-+                yield _cache[key]
-+            except KeyError:
-+                pass
-+
-+
-+    def __getstate__(self):
-+        return self.directory
-+
-+
-+    def __setstate__(self, state):
-+        self.__init__(directory=state)
-+
-+
-+    def append(self, value):
-+        """Add `value` to back of deque.
-+
-+        >>> deque = Deque()
-+        >>> deque.append('a')
-+        >>> deque.append('b')
-+        >>> deque.append('c')
-+        >>> list(deque)
-+        ['a', 'b', 'c']
-+
-+        :param value: value to add to back of deque
-+
-+        """
-+        self._cache.push(value, retry=True)
-+
-+
-+    def appendleft(self, value):
-+        """Add `value` to front of deque.
-+
-+        >>> deque = Deque()
-+        >>> deque.appendleft('a')
-+        >>> deque.appendleft('b')
-+        >>> deque.appendleft('c')
-+        >>> list(deque)
-+        ['c', 'b', 'a']
-+
-+        :param value: value to add to front of deque
-+
-+        """
-+        self._cache.push(value, side='front', retry=True)
-+
-+
-+    def clear(self):
-+        """Remove all elements from deque.
-+
-+        >>> deque = Deque('abc')
-+        >>> len(deque)
-+        3
-+        >>> deque.clear()
-+        >>> list(deque)
-+        []
-+
-+        """
-+        self._cache.clear(retry=True)
-+
-+
-+    def count(self, value):
-+        """Return number of occurrences of `value` in deque.
-+
-+        >>> deque = Deque()
-+        >>> deque += [num for num in range(1, 5) for _ in range(num)]
-+        >>> deque.count(0)
-+        0
-+        >>> deque.count(1)
-+        1
-+        >>> deque.count(4)
-+        4
-+
-+        :param value: value to count in deque
-+        :return: count of items equal to value in deque
-+
-+        """
-+        return sum(1 for item in self if value == item)
-+
-+
-+    def extend(self, iterable):
-+        """Extend back side of deque with values from `iterable`.
-+
-+        :param iterable: iterable of values
-+
-+        """
-+        for value in iterable:
-+            self.append(value)
-+
-+
-+    def extendleft(self, iterable):
-+        """Extend front side of deque with values from `iterable`.
-+
-+        >>> deque = Deque()
-+        >>> deque.extendleft('abc')
-+        >>> list(deque)
-+        ['c', 'b', 'a']
-+
-+        :param iterable: iterable of values
-+
-+        """
-+        for value in iterable:
-+            self.appendleft(value)
-+
-+
-+    def peek(self):
-+        """Peek at value at back of deque.
-+
-+        Faster than indexing deque at -1.
-+
-+        If deque is empty then raise IndexError.
-+
-+        >>> deque = Deque()
-+        >>> deque.peek()
-+        Traceback (most recent call last):
-+            ...
-+        IndexError: peek from an empty deque
-+        >>> deque += 'abc'
-+        >>> deque.peek()
-+        'c'
-+
-+        :return: value at back of deque
-+        :raises IndexError: if deque is empty
-+
-+        """
-+        default = None, ENOVAL
-+        _, value = self._cache.peek(default=default, side='back', retry=True)
-+        if value is ENOVAL:
-+            raise IndexError('peek from an empty deque')
-+        return value
-+
-+
-+    def peekleft(self):
-+        """Peek at value at front of deque.
-+
-+        Faster than indexing deque at 0.
-+
-+        If deque is empty then raise IndexError.
-+
-+        >>> deque = Deque()
-+        >>> deque.peekleft()
-+        Traceback (most recent call last):
-+            ...
-+        IndexError: peek from an empty deque
-+        >>> deque += 'abc'
-+        >>> deque.peekleft()
-+        'a'
-+
-+        :return: value at front of deque
-+        :raises IndexError: if deque is empty
-+
-+        """
-+        default = None, ENOVAL
-+        _, value = self._cache.peek(default=default, side='front', retry=True)
-+        if value is ENOVAL:
-+            raise IndexError('peek from an empty deque')
-+        return value
-+
-+
-+    def pop(self):
-+        """Remove and return value at back of deque.
-+
-+        If deque is empty then raise IndexError.
-+
-+        >>> deque = Deque()
-+        >>> deque += 'ab'
-+        >>> deque.pop()
-+        'b'
-+        >>> deque.pop()
-+        'a'
-+        >>> deque.pop()
-+        Traceback (most recent call last):
-+            ...
-+        IndexError: pop from an empty deque
-+
-+        :return: value at back of deque
-+        :raises IndexError: if deque is empty
-+
-+        """
-+        default = None, ENOVAL
-+        _, value = self._cache.pull(default=default, side='back', retry=True)
-+        if value is ENOVAL:
-+            raise IndexError('pop from an empty deque')
-+        return value
-+
-+
-+    def popleft(self):
-+        """Remove and return value at front of deque.
-+
-+        >>> deque = Deque()
-+        >>> deque += 'ab'
-+        >>> deque.popleft()
-+        'a'
-+        >>> deque.popleft()
-+        'b'
-+        >>> deque.popleft()
-+        Traceback (most recent call last):
-+            ...
-+        IndexError: pop from an empty deque
-+
-+        :return: value at front of deque
-+        :raises IndexError: if deque is empty
-+
-+        """
-+        default = None, ENOVAL
-+        _, value = self._cache.pull(default=default, retry=True)
-+        if value is ENOVAL:
-+            raise IndexError('pop from an empty deque')
-+        return value
-+
-+
-+    def remove(self, value):
-+        """Remove first occurrence of `value` in deque.
-+
-+        >>> deque = Deque()
-+        >>> deque += 'aab'
-+        >>> deque.remove('a')
-+        >>> list(deque)
-+        ['a', 'b']
-+        >>> deque.remove('b')
-+        >>> list(deque)
-+        ['a']
-+        >>> deque.remove('c')
-+        Traceback (most recent call last):
-+            ...
-+        ValueError: deque.remove(value): value not in deque
-+
-+        :param value: value to remove
-+        :raises ValueError: if value not in deque
-+
-+        """
-+        _cache = self._cache
-+
-+        for key in _cache.iterkeys():
-+            try:
-+                item = _cache[key]
-+            except KeyError:
-+                continue
-+            else:
-+                if value == item:
-+                    try:
-+                        del _cache[key]
-+                    except KeyError:
-+                        continue
-+                    return
-+
-+        raise ValueError('deque.remove(value): value not in deque')
-+
-+
-+    def reverse(self):
-+        """Reverse deque in place.
-+
-+        >>> deque = Deque()
-+        >>> deque += 'abc'
-+        >>> deque.reverse()
-+        >>> list(deque)
-+        ['c', 'b', 'a']
-+
-+        """
-+        # GrantJ 2019-03-22 Consider using an algorithm that swaps the values
-+        # at two keys, like self._cache.swap(key1, key2, retry=True). The swap
-+        # method would exchange the values at two given keys. Then, using a
-+        # forward iterator and a reverse iterator, the reverse method could
-+        # avoid making copies of the values.
-+        temp = Deque(iterable=reversed(self))
-+        self.clear()
-+        self.extend(temp)
-+        directory = temp.directory
-+        del temp
-+        rmtree(directory)
-+
-+
-+    def rotate(self, steps=1):
-+        """Rotate deque right by `steps`.
-+
-+        If steps is negative then rotate left.
-+
-+        >>> deque = Deque()
-+        >>> deque += range(5)
-+        >>> deque.rotate(2)
-+        >>> list(deque)
-+        [3, 4, 0, 1, 2]
-+        >>> deque.rotate(-1)
-+        >>> list(deque)
-+        [4, 0, 1, 2, 3]
-+
-+        :param int steps: number of steps to rotate (default 1)
-+
-+        """
-+        if not isinstance(steps, int):
-+            type_name = type(steps).__name__
-+            raise TypeError('integer argument expected, got %s' % type_name)
-+
-+        len_self = len(self)
-+
-+        if not len_self:
-+            return
-+
-+        if steps >= 0:
-+            steps %= len_self
-+
-+            for _ in range(steps):
-+                try:
-+                    value = self.pop()
-+                except IndexError:
-+                    return
-+                else:
-+                    self.appendleft(value)
-+        else:
-+            steps *= -1
-+            steps %= len_self
-+
-+            for _ in range(steps):
-+                try:
-+                    value = self.popleft()
-+                except IndexError:
-+                    return
-+                else:
-+                    self.append(value)
-+
-+
-+    __hash__ = None
-+
-+
-+    @contextmanager
-+    def transact(self):
-+        """Context manager to perform a transaction by locking the deque.
-+
-+        While the deque is locked, no other write operation is permitted.
-+        Transactions should therefore be as short as possible. Read and write
-+        operations performed in a transaction are atomic. Read operations may
-+        occur concurrent to a transaction.
-+        occur concurrently with a transaction.
-+        Transactions may be nested and may not be shared between threads.
-+
-+        >>> from diskcache import Deque
-+        >>> deque = Deque()
-+        >>> deque += range(5)
-+        >>> with deque.transact():  # Atomically rotate elements.
-+        ...     value = deque.pop()
-+        ...     deque.appendleft(value)
-+        >>> list(deque)
-+        [4, 0, 1, 2, 3]
-+
-+        :return: context manager for use in `with` statement
-+
-+        """
-+        with self._cache.transact(retry=True):
-+            yield
-+
-+
-+class Index(MutableMapping):
-+    """Persistent mutable mapping with insertion order iteration.
-+
-+    Items are serialized to disk. Index may be initialized from directory path
-+    where items are stored.
-+
-+    Hashing protocol is not used. Keys are looked up by their serialized
-+    format. See ``diskcache.Disk`` for details.
-+
-+    >>> index = Index()
-+    >>> index.update([('a', 1), ('b', 2), ('c', 3)])
-+    >>> index['a']
-+    1
-+    >>> list(index)
-+    ['a', 'b', 'c']
-+    >>> len(index)
-+    3
-+    >>> del index['b']
-+    >>> index.popitem()
-+    ('c', 3)
-+
-+    """
-+    def __init__(self, *args, **kwargs):
-+        """Initialize index in directory and update items.
-+
-+        Optional first argument may be string specifying directory where items
-+        are stored. When None or not given, temporary directory is created.
-+
-+        >>> index = Index({'a': 1, 'b': 2, 'c': 3})
-+        >>> len(index)
-+        3
-+        >>> directory = index.directory
-+        >>> inventory = Index(directory, d=4)
-+        >>> inventory['b']
-+        2
-+        >>> len(inventory)
-+        4
-+
-+        """
-+        if args and isinstance(args[0], (BytesType, TextType)):
-+            directory = args[0]
-+            args = args[1:]
-+        else:
-+            if args and args[0] is None:
-+                args = args[1:]
-+            directory = None
-+        self._cache = Cache(directory, eviction_policy='none')
-+        self.update(*args, **kwargs)
-+
-+
-+    @classmethod
-+    def fromcache(cls, cache, *args, **kwargs):
-+        """Initialize index using `cache` and update items.
-+
-+        >>> cache = Cache()
-+        >>> index = Index.fromcache(cache, {'a': 1, 'b': 2, 'c': 3})
-+        >>> index.cache is cache
-+        True
-+        >>> len(index)
-+        3
-+        >>> 'b' in index
-+        True
-+        >>> index['c']
-+        3
-+
-+        :param Cache cache: cache to use
-+        :param args: mapping or sequence of items
-+        :param kwargs: mapping of items
-+        :return: initialized Index
-+
-+        """
-+        # pylint: disable=no-member,protected-access
-+        self = cls.__new__(cls)
-+        self._cache = cache
-+        self.update(*args, **kwargs)
-+        return self
-+
-+
-+    @property
-+    def cache(self):
-+        "Cache used by index."
-+        return self._cache
-+
-+
-+    @property
-+    def directory(self):
-+        "Directory path where items are stored."
-+        return self._cache.directory
-+
-+
-+    def __getitem__(self, key):
-+        """index.__getitem__(key) <==> index[key]
-+
-+        Return corresponding value for `key` in index.
-+
-+        >>> index = Index()
-+        >>> index.update({'a': 1, 'b': 2})
-+        >>> index['a']
-+        1
-+        >>> index['b']
-+        2
-+        >>> index['c']
-+        Traceback (most recent call last):
-+            ...
-+        KeyError: 'c'
-+
-+        :param key: key for item
-+        :return: value for item in index with given key
-+        :raises KeyError: if key is not found
-+
-+        """
-+        return self._cache[key]
-+
-+
-+    def __setitem__(self, key, value):
-+        """index.__setitem__(key, value) <==> index[key] = value
-+
-+        Set `key` and `value` item in index.
-+
-+        >>> index = Index()
-+        >>> index['a'] = 1
-+        >>> index[0] = None
-+        >>> len(index)
-+        2
-+
-+        :param key: key for item
-+        :param value: value for item
-+
-+        """
-+        self._cache[key] = value
-+
-+
-+    def __delitem__(self, key):
-+        """index.__delitem__(key) <==> del index[key]
-+
-+        Delete corresponding item for `key` from index.
-+
-+        >>> index = Index()
-+        >>> index.update({'a': 1, 'b': 2})
-+        >>> del index['a']
-+        >>> del index['b']
-+        >>> len(index)
-+        0
-+        >>> del index['c']
-+        Traceback (most recent call last):
-+            ...
-+        KeyError: 'c'
-+
-+        :param key: key for item
-+        :raises KeyError: if key is not found
-+
-+        """
-+        del self._cache[key]
-+
-+
-+    def setdefault(self, key, default=None):
-+        """Set and get value for `key` in index using `default`.
-+
-+        If `key` is not in index then set corresponding value to `default`. If
-+        `key` is in index then ignore `default` and return existing value.
-+
-+        >>> index = Index()
-+        >>> index.setdefault('a', 0)
-+        0
-+        >>> index.setdefault('a', 1)
-+        0
-+
-+        :param key: key for item
-+        :param default: value if key is missing (default None)
-+        :return: value for item in index with given key
-+
-+        """
-+        _cache = self._cache
-+        while True:
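-+            # `add` succeeds only when the key is absent, so concurrent
-+            # callers race safely: a loser loops and returns the winner's
-+            # value.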
-+            try:
-+                return _cache[key]
-+            except KeyError:
-+                _cache.add(key, default, retry=True)
-+
-+
-+    def peekitem(self, last=True):
-+        """Peek at key and value item pair in index based on iteration order.
-+
-+        >>> index = Index()
-+        >>> for num, letter in enumerate('xyz'):
-+        ...     index[letter] = num
-+        >>> index.peekitem()
-+        ('z', 2)
-+        >>> index.peekitem(last=False)
-+        ('x', 0)
-+
-+        :param bool last: last item in iteration order (default True)
-+        :return: key and value item pair
-+        :raises KeyError: if cache is empty
-+
-+        """
-+        return self._cache.peekitem(last, retry=True)
-+
-+
-+    def pop(self, key, default=ENOVAL):
-+        """Remove corresponding item for `key` from index and return value.
-+
-+        If `key` is missing then return `default`. If `default` is `ENOVAL`
-+        then raise KeyError.
-+
-+        >>> index = Index({'a': 1, 'b': 2})
-+        >>> index.pop('a')
-+        1
-+        >>> index.pop('b')
-+        2
-+        >>> index.pop('c', default=3)
-+        3
-+        >>> index.pop('d')
-+        Traceback (most recent call last):
-+            ...
-+        KeyError: 'd'
-+
-+        :param key: key for item
-+        :param default: return value if key is missing (default ENOVAL)
-+        :return: value for item if key is found else default
-+        :raises KeyError: if key is not found and default is ENOVAL
-+
-+        """
-+        _cache = self._cache
-+        value = _cache.pop(key, default=default, retry=True)
-+        if value is ENOVAL:
-+            raise KeyError(key)
-+        return value
-+
-+
-+    def popitem(self, last=True):
-+        """Remove and return item pair.
-+
-+        Item pairs are returned in last-in-first-out (LIFO) order if last is
-+        True else first-in-first-out (FIFO) order. LIFO order imitates a stack
-+        and FIFO order imitates a queue.
-+
-+        >>> index = Index()
-+        >>> index.update([('a', 1), ('b', 2), ('c', 3)])
-+        >>> index.popitem()
-+        ('c', 3)
-+        >>> index.popitem(last=False)
-+        ('a', 1)
-+        >>> index.popitem()
-+        ('b', 2)
-+        >>> index.popitem()
-+        Traceback (most recent call last):
-+          ...
-+        KeyError: 'dictionary is empty'
-+
-+        :param bool last: pop last item pair (default True)
-+        :return: key and value item pair
-+        :raises KeyError: if index is empty
-+
-+        """
-+        # pylint: disable=arguments-differ
-+        _cache = self._cache
-+
-+        with _cache.transact(retry=True):
-+            key, value = _cache.peekitem(last=last)
-+            del _cache[key]
-+
-+        return key, value
-+
-+
-+    def push(self, value, prefix=None, side='back'):
-+        """Push `value` onto `side` of queue in index identified by `prefix`.
-+
-+        When prefix is None, integer keys are used. Otherwise, string keys are
-+        used in the format "prefix-integer". Integer starts at 500 trillion.
-+
-+        Defaults to pushing value on back of queue. Set side to 'front' to push
-+        value on front of queue. Side must be one of 'back' or 'front'.
-+
-+        See also `Index.pull`.
-+
-+        >>> index = Index()
-+        >>> print(index.push('apples'))
-+        500000000000000
-+        >>> print(index.push('beans'))
-+        500000000000001
-+        >>> print(index.push('cherries', side='front'))
-+        499999999999999
-+        >>> index[500000000000001]
-+        'beans'
-+        >>> index.push('dates', prefix='fruit')
-+        'fruit-500000000000000'
-+
-+        :param value: value for item
-+        :param str prefix: key prefix (default None, key is integer)
-+        :param str side: either 'back' or 'front' (default 'back')
-+        :return: key for item in cache
-+
-+        """
-+        return self._cache.push(value, prefix, side, retry=True)
-+
-+
-+    def pull(self, prefix=None, default=(None, None), side='front'):
-+        """Pull key and value item pair from `side` of queue in index.
-+
-+        When prefix is None, integer keys are used. Otherwise, string keys are
-+        used in the format "prefix-integer". Integer starts at 500 trillion.
-+
-+        If queue is empty, return default.
-+
-+        Defaults to pulling key and value item pairs from front of queue. Set
-+        side to 'back' to pull from back of queue. Side must be one of 'front'
-+        or 'back'.
-+
-+        See also `Index.push`.
-+
-+        >>> index = Index()
-+        >>> for letter in 'abc':
-+        ...     print(index.push(letter))
-+        500000000000000
-+        500000000000001
-+        500000000000002
-+        >>> key, value = index.pull()
-+        >>> print(key)
-+        500000000000000
-+        >>> value
-+        'a'
-+        >>> _, value = index.pull(side='back')
-+        >>> value
-+        'c'
-+        >>> index.pull(prefix='fruit')
-+        (None, None)
-+
-+        :param str prefix: key prefix (default None, key is integer)
-+        :param default: value to return if key is missing
-+            (default (None, None))
-+        :param str side: either 'front' or 'back' (default 'front')
-+        :return: key and value item pair or default if queue is empty
-+
-+        """
-+        return self._cache.pull(prefix, default, side, retry=True)
-+
-+
-+    def clear(self):
-+        """Remove all items from index.
-+
-+        >>> index = Index({'a': 0, 'b': 1, 'c': 2})
-+        >>> len(index)
-+        3
-+        >>> index.clear()
-+        >>> dict(index)
-+        {}
-+
-+        """
-+        self._cache.clear(retry=True)
-+
-+
-+    def __iter__(self):
-+        """index.__iter__() <==> iter(index)
-+
-+        Return iterator of index keys in insertion order.
-+
-+        """
-+        return iter(self._cache)
-+
-+
-+    def __reversed__(self):
-+        """index.__reversed__() <==> reversed(index)
-+
-+        Return iterator of index keys in reversed insertion order.
-+
-+        >>> index = Index()
-+        >>> index.update([('a', 1), ('b', 2), ('c', 3)])
-+        >>> iterator = reversed(index)
-+        >>> next(iterator)
-+        'c'
-+        >>> list(iterator)
-+        ['b', 'a']
-+
-+        """
-+        return reversed(self._cache)
-+
-+
-+    def __len__(self):
-+        """index.__len__() <==> len(index)
-+
-+        Return length of index.
-+
-+        """
-+        return len(self._cache)
-+
-+
-+    if sys.hexversion < 0x03000000:
-+        def keys(self):
-+            """List of index keys.
-+
-+            >>> index = Index()
-+            >>> index.update([('a', 1), ('b', 2), ('c', 3)])
-+            >>> index.keys()
-+            ['a', 'b', 'c']
-+
-+            :return: list of keys
-+
-+            """
-+            return list(self._cache)
-+
-+
-+        def values(self):
-+            """List of index values.
-+
-+            >>> index = Index()
-+            >>> index.update([('a', 1), ('b', 2), ('c', 3)])
-+            >>> index.values()
-+            [1, 2, 3]
-+
-+            :return: list of values
-+
-+            """
-+            return list(self.itervalues())
-+
-+
-+        def items(self):
-+            """List of index items.
-+
-+            >>> index = Index()
-+            >>> index.update([('a', 1), ('b', 2), ('c', 3)])
-+            >>> index.items()
-+            [('a', 1), ('b', 2), ('c', 3)]
-+
-+            :return: list of items
-+
-+            """
-+            return list(self.iteritems())
-+
-+
-+        def iterkeys(self):
-+            """Iterator of index keys.
-+
-+            >>> index = Index()
-+            >>> index.update([('a', 1), ('b', 2), ('c', 3)])
-+            >>> list(index.iterkeys())
-+            ['a', 'b', 'c']
-+
-+            :return: iterator of keys
-+
-+            """
-+            return iter(self._cache)
-+
-+
-+        def itervalues(self):
-+            """Iterator of index values.
-+
-+            >>> index = Index()
-+            >>> index.update([('a', 1), ('b', 2), ('c', 3)])
-+            >>> list(index.itervalues())
-+            [1, 2, 3]
-+
-+            :return: iterator of values
-+
-+            """
-+            _cache = self._cache
-+
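-+            # Keys removed by a concurrent writer between iteration and
-+            # lookup are skipped rather than raising KeyError.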
-+            for key in _cache:
-+                while True:
-+                    try:
-+                        yield _cache[key]
-+                    except KeyError:
-+                        pass
-+                    break
-+
-+
-+        def iteritems(self):
-+            """Iterator of index items.
-+
-+            >>> index = Index()
-+            >>> index.update([('a', 1), ('b', 2), ('c', 3)])
-+            >>> list(index.iteritems())
-+            [('a', 1), ('b', 2), ('c', 3)]
-+
-+            :return: iterator of items
-+
-+            """
-+            _cache = self._cache
-+
-+            for key in _cache:
-+                while True:
-+                    try:
-+                        yield key, _cache[key]
-+                    except KeyError:
-+                        pass
-+                    break
-+
-+
-+        def viewkeys(self):
-+            """Set-like object providing a view of index keys.
-+
-+            >>> index = Index()
-+            >>> index.update({'a': 1, 'b': 2, 'c': 3})
-+            >>> keys_view = index.viewkeys()
-+            >>> 'b' in keys_view
-+            True
-+
-+            :return: keys view
-+
-+            """
-+            return KeysView(self)
-+
-+
-+        def viewvalues(self):
-+            """Set-like object providing a view of index values.
-+
-+            >>> index = Index()
-+            >>> index.update({'a': 1, 'b': 2, 'c': 3})
-+            >>> values_view = index.viewvalues()
-+            >>> 2 in values_view
-+            True
-+
-+            :return: values view
-+
-+            """
-+            return ValuesView(self)
-+
-+
-+        def viewitems(self):
-+            """Set-like object providing a view of index items.
-+
-+            >>> index = Index()
-+            >>> index.update({'a': 1, 'b': 2, 'c': 3})
-+            >>> items_view = index.viewitems()
-+            >>> ('b', 2) in items_view
-+            True
-+
-+            :return: items view
-+
-+            """
-+            return ItemsView(self)
-+
-+
-+    else:
-+        def keys(self):
-+            """Set-like object providing a view of index keys.
-+
-+            >>> index = Index()
-+            >>> index.update({'a': 1, 'b': 2, 'c': 3})
-+            >>> keys_view = index.keys()
-+            >>> 'b' in keys_view
-+            True
-+
-+            :return: keys view
-+
-+            """
-+            return KeysView(self)
-+
-+
-+        def values(self):
-+            """Set-like object providing a view of index values.
-+
-+            >>> index = Index()
-+            >>> index.update({'a': 1, 'b': 2, 'c': 3})
-+            >>> values_view = index.values()
-+            >>> 2 in values_view
-+            True
-+
-+            :return: values view
-+
-+            """
-+            return ValuesView(self)
-+
-+
-+        def items(self):
-+            """Set-like object providing a view of index items.
-+
-+            >>> index = Index()
-+            >>> index.update({'a': 1, 'b': 2, 'c': 3})
-+            >>> items_view = index.items()
-+            >>> ('b', 2) in items_view
-+            True
-+
-+            :return: items view
-+
-+            """
-+            return ItemsView(self)
-+
-+
-+    __hash__ = None
-+
-+
-+    def __getstate__(self):
-+        return self.directory
-+
-+
-+    def __setstate__(self, state):
-+        self.__init__(state)
-+
-+
-+    def __eq__(self, other):
-+        """index.__eq__(other) <==> index == other
-+
-+        Compare equality for index and `other`.
-+
-+        Comparison to another index or ordered dictionary is
-+        order-sensitive. Comparison to all other mappings is order-insensitive.
-+
-+        >>> index = Index()
-+        >>> pairs = [('a', 1), ('b', 2), ('c', 3)]
-+        >>> index.update(pairs)
-+        >>> from collections import OrderedDict
-+        >>> od = OrderedDict(pairs)
-+        >>> index == od
-+        True
-+        >>> index == {'c': 3, 'b': 2, 'a': 1}
-+        True
-+
-+        :param other: other mapping in equality comparison
-+        :return: True if index equals other
-+
-+        """
-+        if len(self) != len(other):
-+            return False
-+
-+        if isinstance(other, (Index, OrderedDict)):
-+            alpha = ((key, self[key]) for key in self)
-+            beta = ((key, other[key]) for key in other)
-+            pairs = zip(alpha, beta)
-+            return not any(a != x or b != y for (a, b), (x, y) in pairs)
-+        else:
-+            return all(self[key] == other.get(key, ENOVAL) for key in self)
-+
-+
-+    def __ne__(self, other):
-+        """index.__ne__(other) <==> index != other
-+
-+        Compare inequality for index and `other`.
-+
-+        Comparison to another index or ordered dictionary is
-+        order-sensitive. Comparison to all other mappings is order-insensitive.
-+
-+        >>> index = Index()
-+        >>> index.update([('a', 1), ('b', 2), ('c', 3)])
-+        >>> from collections import OrderedDict
-+        >>> od = OrderedDict([('c', 3), ('b', 2), ('a', 1)])
-+        >>> index != od
-+        True
-+        >>> index != {'a': 1, 'b': 2}
-+        True
-+
-+        :param other: other mapping in inequality comparison
-+        :return: True if index does not equal other
-+
-+        """
-+        return not self == other
-+
-+
-+    def memoize(self, name=None, typed=False):
-+        """Memoizing cache decorator.
-+
-+        Decorator to wrap callable with memoizing function using cache.
-+        Repeated calls with the same arguments will look up the result in the
-+        cache and avoid function evaluation.
-+
-+        If name is set to None (default), the callable name will be determined
-+        automatically.
-+
-+        If typed is set to True, function arguments of different types will be
-+        cached separately. For example, f(3) and f(3.0) will be treated as
-+        distinct calls with distinct results.
-+
-+        The original underlying function is accessible through the __wrapped__
-+        attribute. This is useful for introspection, for bypassing the cache,
-+        or for rewrapping the function with a different cache.
-+
-+        >>> from diskcache import Index
-+        >>> mapping = Index()
-+        >>> @mapping.memoize()
-+        ... def fibonacci(number):
-+        ...     if number == 0:
-+        ...         return 0
-+        ...     elif number == 1:
-+        ...         return 1
-+        ...     else:
-+        ...         return fibonacci(number - 1) + fibonacci(number - 2)
-+        >>> print(fibonacci(100))
-+        354224848179261915075
-+
-+        An additional `__cache_key__` attribute can be used to generate the
-+        cache key used for the given arguments.
-+
-+        >>> key = fibonacci.__cache_key__(100)
-+        >>> print(mapping[key])
-+        354224848179261915075
-+
-+        Remember to call memoize when decorating a callable. If you forget,
-+        then a TypeError will occur. Note the lack of parentheses after
-+        memoize below:
-+
-+        >>> @mapping.memoize
-+        ... def test():
-+        ...     pass
-+        Traceback (most recent call last):
-+            ...
-+        TypeError: name cannot be callable
-+
-+        :param str name: name given for callable (default None, automatic)
-+        :param bool typed: cache different types separately (default False)
-+        :return: callable decorator
-+
-+        """
-+        return self._cache.memoize(name, typed)
-+
-+
-+    @contextmanager
-+    def transact(self):
-+        """Context manager to perform a transaction by locking the index.
-+
-+        While the index is locked, no other write operation is permitted.
-+        Transactions should therefore be as short as possible. Read and write
-+        operations performed in a transaction are atomic. Read operations may
-+        occur concurrently with a transaction.
-+
-+        Transactions may be nested and may not be shared between threads.
-+
-+        >>> from diskcache import Index
-+        >>> mapping = Index()
-+        >>> with mapping.transact():  # Atomically increment two keys.
-+        ...     mapping['total'] = mapping.get('total', 0) + 123.4
-+        ...     mapping['count'] = mapping.get('count', 0) + 1
-+        >>> with mapping.transact():  # Atomically calculate average.
-+        ...     average = mapping['total'] / mapping['count']
-+        >>> average
-+        123.4
-+
-+        :return: context manager for use in `with` statement
-+
-+        """
-+        with self._cache.transact(retry=True):
-+            yield
-+
-+
-+    def __repr__(self):
-+        """index.__repr__() <==> repr(index)
-+
-+        Return string with printable representation of index.
-+
-+        """
-+        name = type(self).__name__
-+        return '{0}({1!r})'.format(name, self.directory)
-diff --git a/third_party/python/diskcache/diskcache/recipes.py b/third_party/python/diskcache/diskcache/recipes.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/diskcache/diskcache/recipes.py
-@@ -0,0 +1,437 @@
-+"""Disk Cache Recipes
-+
-+"""
-+
-+import functools
-+import math
-+import os
-+import random
-+import sys
-+import threading
-+import time
-+
-+from .core import ENOVAL, args_to_key, full_name
-+
-+############################################################################
-+# BEGIN Python 2/3 Shims
-+############################################################################
-+
-+if sys.hexversion < 0x03000000:
-+    from thread import get_ident  # pylint: disable=import-error
-+else:
-+    from threading import get_ident
-+
-+############################################################################
-+# END Python 2/3 Shims
-+############################################################################
-+
-+
-+class Averager(object):
-+    """Recipe for calculating a running average.
-+
-+    Sometimes known as "online statistics," the running average maintains the
-+    total and count. The average can then be calculated at any time.
-+
-+    >>> import diskcache
-+    >>> cache = diskcache.FanoutCache()
-+    >>> ave = Averager(cache, 'latency')
-+    >>> ave.add(0.080)
-+    >>> ave.add(0.120)
-+    >>> ave.get()
-+    0.1
-+    >>> ave.add(0.160)
-+    >>> ave.pop()
-+    0.12
-+    >>> print(ave.get())
-+    None
-+
-+    """
-+    def __init__(self, cache, key, expire=None, tag=None):
-+        self._cache = cache
-+        self._key = key
-+        self._expire = expire
-+        self._tag = tag
-+
-+    def add(self, value):
-+        "Add `value` to average."
-+        with self._cache.transact(retry=True):
-+            total, count = self._cache.get(self._key, default=(0.0, 0))
-+            total += value
-+            count += 1
-+            self._cache.set(
-+                self._key, (total, count), expire=self._expire, tag=self._tag,
-+            )
-+
-+    def get(self):
-+        "Get current average or return `None` if count equals zero."
-+        total, count = self._cache.get(self._key, default=(0.0, 0), retry=True)
-+        return None if count == 0 else total / count
-+
-+    def pop(self):
-+        "Return current average and delete key."
-+        total, count = self._cache.pop(self._key, default=(0.0, 0), retry=True)
-+        return None if count == 0 else total / count
-+
-+
-+class Lock(object):
-+    """Recipe for cross-process and cross-thread lock.
-+
-+    >>> import diskcache
-+    >>> cache = diskcache.Cache()
-+    >>> lock = Lock(cache, 'report-123')
-+    >>> lock.acquire()
-+    >>> lock.release()
-+    >>> with lock:
-+    ...     pass
-+
-+    """
-+    def __init__(self, cache, key, expire=None, tag=None):
-+        self._cache = cache
-+        self._key = key
-+        self._expire = expire
-+        self._tag = tag
-+
-+    def acquire(self):
-+        "Acquire lock using spin-lock algorithm."
-+        while True:
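-+            # Spin-lock: `add` succeeds only for the first caller; losers
-+            # sleep briefly so they do not hammer the database.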
-+            added = self._cache.add(
-+                self._key, None, expire=self._expire, tag=self._tag, retry=True,
-+            )
-+            if added:
-+                break
-+            time.sleep(0.001)
-+
-+    def release(self):
-+        "Release lock by deleting key."
-+        self._cache.delete(self._key, retry=True)
-+
-+    def __enter__(self):
-+        self.acquire()
-+
-+    def __exit__(self, *exc_info):
-+        self.release()
-+
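-+# A sketch of guarding against crashed lock holders (illustrative; relies on
-+# the `expire` parameter above so that an abandoned key simply ages out):
-+#
-+# >>> lock = Lock(cache, 'report-123', expire=60)
-+# >>> with lock:
-+# ...     pass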
-+
-+class RLock(object):
-+    """Recipe for cross-process and cross-thread re-entrant lock.
-+
-+    >>> import diskcache
-+    >>> cache = diskcache.Cache()
-+    >>> rlock = RLock(cache, 'user-123')
-+    >>> rlock.acquire()
-+    >>> rlock.acquire()
-+    >>> rlock.release()
-+    >>> with rlock:
-+    ...     pass
-+    >>> rlock.release()
-+    >>> rlock.release()
-+    Traceback (most recent call last):
-+      ...
-+    AssertionError: cannot release un-acquired lock
-+
-+    """
-+    def __init__(self, cache, key, expire=None, tag=None):
-+        self._cache = cache
-+        self._key = key
-+        self._expire = expire
-+        self._tag = tag
-+
-+    def acquire(self):
-+        "Acquire lock by incrementing count using spin-lock algorithm."
-+        pid = os.getpid()
-+        tid = get_ident()
-+        pid_tid = '{}-{}'.format(pid, tid)
-+
-+        while True:
-+            with self._cache.transact(retry=True):
-+                value, count = self._cache.get(self._key, default=(None, 0))
-+                if pid_tid == value or count == 0:
-+                    self._cache.set(
-+                        self._key, (pid_tid, count + 1),
-+                        expire=self._expire, tag=self._tag,
-+                    )
-+                    return
-+            time.sleep(0.001)
-+
-+    def release(self):
-+        "Release lock by decrementing count."
-+        pid = os.getpid()
-+        tid = get_ident()
-+        pid_tid = '{}-{}'.format(pid, tid)
-+
-+        with self._cache.transact(retry=True):
-+            value, count = self._cache.get(self._key, default=(None, 0))
-+            is_owned = pid_tid == value and count > 0
-+            assert is_owned, 'cannot release un-acquired lock'
-+            self._cache.set(
-+                self._key, (value, count - 1),
-+                expire=self._expire, tag=self._tag,
-+            )
-+
-+    def __enter__(self):
-+        self.acquire()
-+
-+    def __exit__(self, *exc_info):
-+        self.release()
-+
-+
-+class BoundedSemaphore(object):
-+    """Recipe for cross-process and cross-thread bounded semaphore.
-+
-+    >>> import diskcache
-+    >>> cache = diskcache.Cache()
-+    >>> semaphore = BoundedSemaphore(cache, 'max-cons', value=2)
-+    >>> semaphore.acquire()
-+    >>> semaphore.acquire()
-+    >>> semaphore.release()
-+    >>> with semaphore:
-+    ...     pass
-+    >>> semaphore.release()
-+    >>> semaphore.release()
-+    Traceback (most recent call last):
-+      ...
-+    AssertionError: cannot release un-acquired semaphore
-+
-+    """
-+    def __init__(self, cache, key, value=1, expire=None, tag=None):
-+        self._cache = cache
-+        self._key = key
-+        self._value = value
-+        self._expire = expire
-+        self._tag = tag
-+
-+    def acquire(self):
-+        "Acquire semaphore by decrementing value using spin-lock algorithm."
-+        while True:
-+            with self._cache.transact(retry=True):
-+                value = self._cache.get(self._key, default=self._value)
-+                if value > 0:
-+                    self._cache.set(
-+                        self._key, value - 1,
-+                        expire=self._expire, tag=self._tag,
-+                    )
-+                    return
-+            time.sleep(0.001)
-+
-+    def release(self):
-+        "Release semaphore by incrementing value."
-+        with self._cache.transact(retry=True):
-+            value = self._cache.get(self._key, default=self._value)
-+            assert self._value > value, 'cannot release un-acquired semaphore'
-+            value += 1
-+            self._cache.set(
-+                self._key, value, expire=self._expire, tag=self._tag,
-+            )
-+
-+    def __enter__(self):
-+        self.acquire()
-+
-+    def __exit__(self, *exc_info):
-+        self.release()
-+
-+
-+def throttle(cache, count, seconds, name=None, expire=None, tag=None,
-+             time_func=time.time, sleep_func=time.sleep):
-+    """Decorator to throttle calls to function.
-+
-+    >>> import diskcache, time
-+    >>> cache = diskcache.Cache()
-+    >>> count = 0
-+    >>> @throttle(cache, 2, 1)  # 2 calls per 1 second
-+    ... def increment():
-+    ...     global count
-+    ...     count += 1
-+    >>> start = time.time()
-+    >>> while (time.time() - start) <= 2:
-+    ...     increment()
-+    >>> count in (6, 7)  # 6 or 7 calls depending on CPU load
-+    True
-+
-+    """
-+    def decorator(func):
-+        rate = count / float(seconds)
-+        key = full_name(func) if name is None else name
-+        now = time_func()
-+        cache.set(key, (now, count), expire=expire, tag=tag, retry=True)
-+
-+        @functools.wraps(func)
-+        def wrapper(*args, **kwargs):
-+            while True:
-+                with cache.transact(retry=True):
-+                    last, tally = cache.get(key)
-+                    now = time_func()
-+                    tally += (now - last) * rate
-+                    delay = 0
-+
-+                    if tally > count:
-+                        cache.set(key, (now, count - 1), expire)
-+                    elif tally >= 1:
-+                        cache.set(key, (now, tally - 1), expire)
-+                    else:
-+                        delay = (1 - tally) / rate
-+
-+                if delay:
-+                    sleep_func(delay)
-+                else:
-+                    break
-+
-+            return func(*args, **kwargs)
-+
-+        return wrapper
-+
-+    return decorator
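-+
-+# A worked example of the token-bucket arithmetic above (illustrative
-+# numbers): with count=2 and seconds=1 the refill rate is 2 tokens/s. A
-+# call arriving 0.25 s after the previous one refills 0.5 tokens; while the
-+# tally stays below 1 the caller sleeps (1 - tally) / rate seconds, so
-+# calls settle at `count` per `seconds` on average.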
-+
-+
-+def barrier(cache, lock_factory, name=None, expire=None, tag=None):
-+    """Barrier to calling decorated function.
-+
-+    Supports different kinds of locks: Lock, RLock, BoundedSemaphore.
-+
-+    >>> import diskcache, time
-+    >>> cache = diskcache.Cache()
-+    >>> @barrier(cache, Lock)
-+    ... def work(num):
-+    ...     print('worker started')
-+    ...     time.sleep(1)
-+    ...     print('worker finished')
-+    >>> import multiprocessing.pool
-+    >>> pool = multiprocessing.pool.ThreadPool(2)
-+    >>> _ = pool.map(work, range(2))
-+    worker started
-+    worker finished
-+    worker started
-+    worker finished
-+    >>> pool.terminate()
-+
-+    """
-+    def decorator(func):
-+        key = full_name(func) if name is None else name
-+        lock = lock_factory(cache, key, expire=expire, tag=tag)
-+
-+        @functools.wraps(func)
-+        def wrapper(*args, **kwargs):
-+            with lock:
-+                return func(*args, **kwargs)
-+
-+        return wrapper
-+
-+    return decorator
-+
-+
-+def memoize_stampede(cache, expire, name=None, typed=False, tag=None, beta=1):
-+    """Memoizing cache decorator with cache stampede protection.
-+
-+    Cache stampedes are a type of system overload that can occur when parallel
-+    computing systems using memoization come under heavy load. This behaviour
-+    is sometimes also called dog-piling, cache miss storm, cache choking, or
-+    the thundering herd problem.
-+
-+    The memoization decorator implements cache stampede protection through
-+    early recomputation. Early recomputation of function results will occur
-+    probabilistically before expiration in a background thread of
-+    execution. Early probabilistic recomputation is based on research by
-+    Vattani, A.; Chierichetti, F.; Lowenstein, K. (2015), Optimal Probabilistic
-+    Cache Stampede Prevention, VLDB, pp. 886-897, ISSN 2150-8097
-+
-+    If name is set to None (default), the callable name will be determined
-+    automatically.
-+
-+    If typed is set to True, function arguments of different types will be
-+    cached separately. For example, f(3) and f(3.0) will be treated as distinct
-+    calls with distinct results.
-+
-+    The original underlying function is accessible through the `__wrapped__`
-+    attribute. This is useful for introspection, for bypassing the cache, or
-+    for rewrapping the function with a different cache.
-+
-+    >>> from diskcache import Cache
-+    >>> cache = Cache()
-+    >>> @memoize_stampede(cache, expire=1)
-+    ... def fib(number):
-+    ...     if number == 0:
-+    ...         return 0
-+    ...     elif number == 1:
-+    ...         return 1
-+    ...     else:
-+    ...         return fib(number - 1) + fib(number - 2)
-+    >>> print(fib(100))
-+    354224848179261915075
-+
-+    An additional `__cache_key__` attribute can be used to generate the cache
-+    key used for the given arguments.
-+
-+    >>> key = fib.__cache_key__(100)
-+    >>> del cache[key]
-+
-+    Remember to call memoize when decorating a callable. If you forget, then a
-+    TypeError will occur.
-+
-+    :param cache: cache to store callable arguments and return values
-+    :param float expire: seconds until arguments expire
-+    :param str name: name given for callable (default None, automatic)
-+    :param bool typed: cache different types separately (default False)
-+    :param str tag: text to associate with arguments (default None)
-+    :param float beta: scaling factor for early recomputation; higher
-+        values recompute earlier (default 1)
-+    :return: callable decorator
-+
-+    """
-+    # Caution: Nearly identical code exists in Cache.memoize
-+    def decorator(func):
-+        "Decorator created by memoize call for callable."
-+        base = (full_name(func),) if name is None else (name,)
-+
-+        def timer(*args, **kwargs):
-+            "Time execution of `func` and return result and time delta."
-+            start = time.time()
-+            result = func(*args, **kwargs)
-+            delta = time.time() - start
-+            return result, delta
-+
-+        @functools.wraps(func)
-+        def wrapper(*args, **kwargs):
-+            "Wrapper for callable to cache arguments and return values."
-+            key = wrapper.__cache_key__(*args, **kwargs)
-+            pair, expire_time = cache.get(
-+                key, default=ENOVAL, expire_time=True, retry=True,
-+            )
-+
-+            if pair is not ENOVAL:
-+                result, delta = pair
-+                now = time.time()
-+                ttl = expire_time - now
-+
-+                if (-delta * beta * math.log(random.random())) < ttl:
-+                    return result  # Cache hit.
-+
-+                # Check whether a thread has started for early recomputation.
-+
-+                thread_key = key + (ENOVAL,)
-+                thread_added = cache.add(
-+                    thread_key, None, expire=delta, retry=True,
-+                )
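-+                # The sentinel is stored only if absent and expires after
-+                # roughly one recompute time (`delta`), so at most one
-+                # background refresh runs at a time.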
-+
-+                if thread_added:
-+                    # Start thread for early recomputation.
-+                    def recompute():
-+                        with cache:
-+                            pair = timer(*args, **kwargs)
-+                            cache.set(
-+                                key, pair, expire=expire, tag=tag, retry=True,
-+                            )
-+                    thread = threading.Thread(target=recompute)
-+                    thread.daemon = True
-+                    thread.start()
-+
-+                return result
-+
-+            pair = timer(*args, **kwargs)
-+            cache.set(key, pair, expire=expire, tag=tag, retry=True)
-+            return pair[0]
-+
-+        def __cache_key__(*args, **kwargs):
-+            "Make key for cache given function arguments."
-+            return args_to_key(base, args, kwargs, typed)
-+
-+        wrapper.__cache_key__ = __cache_key__
-+        return wrapper
-+
-+    return decorator
-diff --git a/third_party/python/glean_parser/glean_parser/__init__.py b/third_party/python/glean_parser/glean_parser/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/__init__.py
-@@ -0,0 +1,18 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+"""Top-level package for Glean parser."""
-+
-+from pkg_resources import get_distribution, DistributionNotFound
-+
-+try:
-+    __version__ = get_distribution(__name__).version
-+except DistributionNotFound:
-+    # package is not installed
-+    pass
-+
-+__author__ = """Michael Droettboom"""
-+__email__ = "mdroettboom@mozilla.com"
-diff --git a/third_party/python/glean_parser/glean_parser/__main__.py b/third_party/python/glean_parser/glean_parser/__main__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/__main__.py
-@@ -0,0 +1,131 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+"""Console script for glean_parser."""
-+
-+import io
-+from pathlib import Path
-+import sys
-+
-+import click
-+
-+from . import lint
-+from . import translate as mod_translate
-+from . import validate_ping
-+
-+
-+@click.command()
-+@click.argument(
-+    "input",
-+    type=click.Path(exists=False, dir_okay=False, file_okay=True, readable=True),
-+    nargs=-1,
-+)
-+@click.option(
-+    "--output",
-+    "-o",
-+    type=click.Path(dir_okay=True, file_okay=False, writable=True),
-+    nargs=1,
-+    required=True,
-+)
-+@click.option(
-+    "--format", "-f", type=click.Choice(mod_translate.OUTPUTTERS.keys()), required=True
-+)
-+@click.option(
-+    "--option",
-+    "-s",
-+    help="backend-specific option. Must be of the form key=value",
-+    type=str,
-+    multiple=True,
-+    required=False,
-+)
-+@click.option(
-+    "--allow-reserved",
-+    is_flag=True,
-+    help=(
-+        "If provided, allow the use of reserved fields. "
-+        "Should only be set when building the Glean library itself."
-+    ),
-+)
-+def translate(input, format, output, option, allow_reserved):
-+    """
-+    Translate metrics.yaml and pings.yaml files to other formats.
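-+
-+    Example (the file names here are hypothetical)::
-+
-+        glean_parser translate -f kotlin -o generated/ metrics.yaml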
-+    """
-+    option_dict = {}
-+    for opt in option:
-+        key, val = opt.split("=", 1)
-+        option_dict[key] = val
-+
-+    sys.exit(
-+        mod_translate.translate(
-+            [Path(x) for x in input],
-+            format,
-+            Path(output),
-+            option_dict,
-+            {"allow_reserved": allow_reserved},
-+        )
-+    )
-+
-+
-+@click.command()
-+@click.option(
-+    "--schema",
-+    "-s",
-+    type=str,
-+    nargs=1,
-+    required=True,
-+    help=("HTTP url or file path to Glean ping schema. If remote, will cache to disk."),
-+)
-+def check(schema):
-+    """
-+    Validate the contents of a Glean ping.
-+
-+    The ping contents are read from stdin, and the validation errors are
-+    written to stdout.
-+    """
-+    sys.exit(
-+        validate_ping.validate_ping(
-+            io.TextIOWrapper(sys.stdin.buffer, encoding="utf-8"),
-+            io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8"),
-+            schema_url=schema,
-+        )
-+    )
-+
-+
-+@click.command()
-+@click.argument(
-+    "input",
-+    type=click.Path(exists=True, dir_okay=False, file_okay=True, readable=True),
-+    nargs=-1,
-+)
-+@click.option(
-+    "--allow-reserved",
-+    is_flag=True,
-+    help=(
-+        "If provided, allow the use of reserved fields. "
-+        "Should only be set when building the Glean library itself."
-+    ),
-+)
-+def glinter(input, allow_reserved):
-+    """
-+    Runs a linter over the metrics.
-+    """
-+    sys.exit(lint.glinter([Path(x) for x in input], {"allow_reserved": allow_reserved}))
-+
-+
-+@click.group()
-+@click.version_option()
-+def main(args=None):
-+    """Command line utility for glean_parser."""
-+    pass
-+
-+
-+main.add_command(translate)
-+main.add_command(check)
-+main.add_command(glinter)
-+
-+
-+if __name__ == "__main__":
-+    sys.exit(main())  # pragma: no cover
-diff --git a/third_party/python/glean_parser/glean_parser/kotlin.py b/third_party/python/glean_parser/glean_parser/kotlin.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/kotlin.py
-@@ -0,0 +1,262 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+"""
-+Outputter to generate Kotlin code for metrics.
-+"""
-+
-+from collections import OrderedDict
-+import enum
-+import json
-+
-+from . import util
-+
-+
-+def kotlin_datatypes_filter(value):
-+    """
-+    A Jinja2 filter that renders Kotlin literals.
-+
-+    Based on Python's JSONEncoder, but overrides:
-+      - lists to use listOf
-+      - dicts to use mapOf
-+      - sets to use setOf
-+      - enums to use the like-named Kotlin enum
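-+
-+    For example, ["a", "b"] renders as listOf("a", "b") and {"k": 1}
-+    renders as mapOf("k" to 1).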
-+    """
-+
-+    class KotlinEncoder(json.JSONEncoder):
-+        def iterencode(self, value):
-+            if isinstance(value, list):
-+                yield "listOf("
-+                first = True
-+                for subvalue in value:
-+                    if not first:
-+                        yield ", "
-+                    yield from self.iterencode(subvalue)
-+                    first = False
-+                yield ")"
-+            elif isinstance(value, dict):
-+                yield "mapOf("
-+                first = True
-+                for key, subvalue in value.items():
-+                    if not first:
-+                        yield ", "
-+                    yield from self.iterencode(key)
-+                    yield " to "
-+                    yield from self.iterencode(subvalue)
-+                    first = False
-+                yield ")"
-+            elif isinstance(value, enum.Enum):
-+                yield (value.__class__.__name__ + "." + util.Camelize(value.name))
-+            elif isinstance(value, set):
-+                yield "setOf("
-+                first = True
-+                for subvalue in sorted(list(value)):
-+                    if not first:
-+                        yield ", "
-+                    yield from self.iterencode(subvalue)
-+                    first = False
-+                yield ")"
-+            else:
-+                yield from super().iterencode(value)
-+
-+    return "".join(KotlinEncoder().iterencode(value))
-+
-+
-+def type_name(obj):
-+    """
-+    Returns the Kotlin type to use for a given metric or ping object.
-+    """
-+    generate_enums = getattr(obj, "_generate_enums", [])
-+    if len(generate_enums):
-+        template_args = []
-+        for member, suffix in generate_enums:
-+            if len(getattr(obj, member)):
-+                template_args.append(util.camelize(obj.name) + suffix)
-+            else:
-+                if suffix == "Keys":
-+                    template_args.append("NoExtraKeys")
-+                else:
-+                    template_args.append("No" + suffix)
-+
-+        return "{}<{}>".format(class_name(obj.type), ", ".join(template_args))
-+
-+    return class_name(obj.type)
-+
-+
-+def class_name(obj_type):
-+    """
-+    Returns the Kotlin class name for a given metric or ping type.
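-+
-+    For example, "ping" maps to "PingType" and "labeled_counter" maps to
-+    "CounterMetricType" (the "labeled_" prefix is stripped first).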
-+    """
-+    if obj_type == "ping":
-+        return "PingType"
-+    if obj_type.startswith("labeled_"):
-+        obj_type = obj_type[8:]
-+    return util.Camelize(obj_type) + "MetricType"
-+
-+
-+def output_gecko_lookup(objs, output_dir, options={}):
-+    """
-+    Given a tree of objects, generate a Kotlin map between Gecko histograms and
-+    Glean SDK metric types.
-+
-+    :param objs: A tree of objects (metrics and pings) as returned from
-+    `parser.parse_objects`.
-+    :param output_dir: Path to an output directory to write to.
-+    :param options: options dictionary, with the following optional keys:
-+
-+        - `namespace`: The package namespace to declare at the top of the
-+          generated files. Defaults to `GleanMetrics`.
-+        - `glean_namespace`: The package namespace of the glean library itself.
-+          This is where glean objects will be imported from in the generated
-+          code.
-+    """
-+    template = util.get_jinja2_template(
-+        "kotlin.geckoview.jinja2",
-+        filters=(
-+            ("kotlin", kotlin_datatypes_filter),
-+            ("type_name", type_name),
-+            ("class_name", class_name),
-+        ),
-+    )
-+
-+    namespace = options.get("namespace", "GleanMetrics")
-+    glean_namespace = options.get("glean_namespace", "mozilla.components.service.glean")
-+
-+    # Build a dictionary that contains data for metrics that are
-+    # histogram-like/scalar-like and contain a gecko_datapoint, with this format:
-+    #
-+    # {
-+    #   "histograms": {
-+    #     "category": [
-+    #       {"gecko_datapoint": "the-datapoint", "name": "the-metric-name"},
-+    #       ...
-+    #     ],
-+    #     ...
-+    #   },
-+    #   "other-type": {}
-+    # }
-+    gecko_metrics = OrderedDict()
-+
-+    # Define scalar-like types.
-+    SCALAR_LIKE_TYPES = ["boolean", "string", "quantity"]
-+
-+    for category_key, category_val in objs.items():
-+        # Support exfiltration of Gecko metrics from products using both the
-+        # Glean SDK and GeckoView. See bug 1566356 for more context.
-+        for metric in category_val.values():
-+            # This is not a Gecko metric, skip it.
-+            if not getattr(metric, "gecko_datapoint", False):
-+                continue
-+
-+            # Put scalars in their own categories, histogram-like in "histograms" and
-+            # categorical histograms in "categoricals".
-+            type_category = "histograms"
-+            if metric.type in SCALAR_LIKE_TYPES:
-+                type_category = metric.type
-+            elif metric.type == "labeled_counter":
-+                # Labeled counters with a 'gecko_datapoint' property
-+                # are categorical histograms.
-+                type_category = "categoricals"
-+
-+            gecko_metrics.setdefault(type_category, OrderedDict())
-+            gecko_metrics[type_category].setdefault(category_key, [])
-+
-+            gecko_metrics[type_category][category_key].append(
-+                {"gecko_datapoint": metric.gecko_datapoint, "name": metric.name}
-+            )
-+
-+    if not gecko_metrics:
-+        # Bail out and don't create a file if no gecko metrics
-+        # are found.
-+        return
-+
-+    filepath = output_dir / "GleanGeckoMetricsMapping.kt"
-+    with filepath.open("w", encoding="utf-8") as fd:
-+        fd.write(
-+            template.render(
-+                gecko_metrics=gecko_metrics,
-+                namespace=namespace,
-+                glean_namespace=glean_namespace,
-+            )
-+        )
-+        # Jinja2 squashes the final newline, so we explicitly add it
-+        fd.write("\n")
-+
-+
-+def output_kotlin(objs, output_dir, options={}):
-+    """
-+    Given a tree of objects, output Kotlin code to `output_dir`.
-+
-+    :param objs: A tree of objects (metrics and pings) as returned from
-+    `parser.parse_objects`.
-+    :param output_dir: Path to an output directory to write to.
-+    :param options: options dictionary, with the following optional keys:
-+
-+        - `namespace`: The package namespace to declare at the top of the
-+          generated files. Defaults to `GleanMetrics`.
-+        - `glean_namespace`: The package namespace of the glean library itself.
-+          This is where glean objects will be imported from in the generated
-+          code.
-+    """
-+    template = util.get_jinja2_template(
-+        "kotlin.jinja2",
-+        filters=(
-+            ("kotlin", kotlin_datatypes_filter),
-+            ("type_name", type_name),
-+            ("class_name", class_name),
-+        ),
-+    )
-+
-+    # The object parameters to pass to constructors
-+    extra_args = [
-+        "allowed_extra_keys",
-+        "bucket_count",
-+        "category",
-+        "disabled",
-+        "histogram_type",
-+        "include_client_id",
-+        "send_if_empty",
-+        "lifetime",
-+        "memory_unit",
-+        "name",
-+        "range_max",
-+        "range_min",
-+        "reason_codes",
-+        "send_in_pings",
-+        "time_unit",
-+    ]
-+
-+    namespace = options.get("namespace", "GleanMetrics")
-+    glean_namespace = options.get("glean_namespace", "mozilla.components.service.glean")
-+
-+    for category_key, category_val in objs.items():
-+        filename = util.Camelize(category_key) + ".kt"
-+        filepath = output_dir / filename
-+
-+        obj_types = sorted(
-+            list(set(class_name(obj.type) for obj in category_val.values()))
-+        )
-+        has_labeled_metrics = any(
-+            getattr(metric, "labeled", False) for metric in category_val.values()
-+        )
-+
-+        with filepath.open("w", encoding="utf-8") as fd:
-+            fd.write(
-+                template.render(
-+                    category_name=category_key,
-+                    objs=category_val,
-+                    obj_types=obj_types,
-+                    extra_args=extra_args,
-+                    namespace=namespace,
-+                    has_labeled_metrics=has_labeled_metrics,
-+                    glean_namespace=glean_namespace,
-+                )
-+            )
-+            # Jinja2 squashes the final newline, so we explicitly add it
-+            fd.write("\n")
-+
-+    # TODO: Maybe this should just be a separate outputter?
-+    output_gecko_lookup(objs, output_dir, options)
-diff --git a/third_party/python/glean_parser/glean_parser/lint.py b/third_party/python/glean_parser/glean_parser/lint.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/lint.py
-@@ -0,0 +1,302 @@
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+
-+import re
-+import sys
-+
-+
-+from . import parser
-+from . import util
-+
-+from yamllint.config import YamlLintConfig
-+from yamllint import linter
-+
-+
-+def _split_words(name):
-+    """
-+    Helper function to split words on either `.` or `_`.
-+    """
-+    return re.split("[._]", name)
-+
-+
-+def _hamming_distance(str1, str2):
-+    """
-+    Count the number of differing characters between str1 and str2,
-+    padding the shorter string with whitespace
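-+
-+    For example, the distance between "event" and "events" is 1: the
-+    shorter name is padded and the trailing "s" counts as one difference.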
-+    """
-+
-+    diffs = 0
-+    if len(str1) < len(str2):
-+        str1, str2 = str2, str1
-+    len_dist = len(str1) - len(str2)
-+    str2 += " " * len_dist
-+
-+    for ch1, ch2 in zip(str1, str2):
-+        if ch1 != ch2:
-+            diffs += 1
-+    return diffs
-+
-+
-+def check_common_prefix(category_name, metrics):
-+    """
-+    Check if all metrics begin with a common prefix.
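-+
-+    For example, metrics "search_count" and "search_duration" in a
-+    category share the common prefix "search", which this check reports.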
-+    """
-+    metric_words = sorted([_split_words(metric.name) for metric in metrics])
-+
-+    if len(metric_words) < 2:
-+        return
-+
-+    first = metric_words[0]
-+    last = metric_words[-1]
-+
-+    for i in range(min(len(first), len(last))):
-+        if first[i] != last[i]:
-+            break
-+
-+    if i > 0:
-+        common_prefix = "_".join(first[:i])
-+        yield (
-+            "Within category '{}', all metrics begin with prefix "
-+            "'{}'. Remove prefixes and (possibly) rename category."
-+        ).format(category_name, common_prefix)
-+
-+
-+def check_unit_in_name(metric, parser_config={}):
-+    """
-+    The metric name ends in a unit.
-+    """
-+    TIME_UNIT_ABBREV = {
-+        "nanosecond": "ns",
-+        "microsecond": "us",
-+        "millisecond": "ms",
-+        "second": "s",
-+        "minute": "m",
-+        "hour": "h",
-+        "day": "d",
-+    }
-+
-+    MEMORY_UNIT_ABBREV = {
-+        "byte": "b",
-+        "kilobyte": "kb",
-+        "megabyte": "mb",
-+        "gigabyte": "gb",
-+    }
-+
-+    name_words = _split_words(metric.name)
-+    unit_in_name = name_words[-1]
-+
-+    if hasattr(metric, "time_unit"):
-+        if (
-+            unit_in_name == TIME_UNIT_ABBREV.get(metric.time_unit.name)
-+            or unit_in_name == metric.time_unit.name
-+        ):
-+            yield (
-+                "Suffix '{}' is redundant with time_unit. " "Only include time_unit."
-+            ).format(unit_in_name)
-+        elif (
-+            unit_in_name in TIME_UNIT_ABBREV.keys()
-+            or unit_in_name in TIME_UNIT_ABBREV.values()
-+        ):
-+            yield (
-+                "Suffix '{}' doesn't match time_unit. "
-+                "Confirm the unit is correct and only include time_unit."
-+            ).format(unit_in_name)
-+
-+    elif hasattr(metric, "memory_unit"):
-+        if (
-+            unit_in_name == MEMORY_UNIT_ABBREV.get(metric.memory_unit.name)
-+            or unit_in_name == metric.memory_unit.name
-+        ):
-+            yield (
-+                "Suffix '{}' is redundant with memory_unit. "
-+                "Only include memory_unit."
-+            ).format(unit_in_name)
-+        elif (
-+            unit_in_name in MEMORY_UNIT_ABBREV.keys()
-+            or unit_in_name in MEMORY_UNIT_ABBREV.values()
-+        ):
-+            yield (
-+                "Suffix '{}' doesn't match memory_unit. "
-+                "Confirm the unit is correct and only include memory_unit."
-+            ).format(unit_in_name)
-+
-+    elif hasattr(metric, "unit"):
-+        if unit_in_name == metric.unit:
-+            yield (
-+                "Suffix '{}' is redundant with unit param. " "Only include unit."
-+            ).format(unit_in_name)
-+
-+
-+def check_category_generic(category_name, metrics):
-+    """
-+    The category name is too generic.
-+    """
-+    GENERIC_CATEGORIES = ["metrics", "events"]
-+
-+    if category_name in GENERIC_CATEGORIES:
-+        yield "Category '{}' is too generic.".format(category_name)
-+
-+
-+def check_bug_number(metric, parser_config={}):
-+    number_bugs = [str(bug) for bug in metric.bugs if isinstance(bug, int)]
-+
-+    if len(number_bugs):
-+        yield (
-+            "For bugs {}: "
-+            "Bug numbers are deprecated and should be changed to full URLs."
-+        ).format(", ".join(number_bugs))
-+
-+
-+def check_valid_in_baseline(metric, parser_config={}):
-+    allow_reserved = parser_config.get("allow_reserved", False)
-+
-+    if not allow_reserved and "baseline" in metric.send_in_pings:
-+        yield (
-+            "The baseline ping is Glean-internal. "
-+            "User metrics should go into the 'metrics' ping or custom pings."
-+        )
-+
-+
-+def check_misspelled_pings(metric, parser_config={}):
-+    builtin_pings = ["metrics", "events"]
-+
-+    for ping in metric.send_in_pings:
-+        for builtin in builtin_pings:
-+            distance = _hamming_distance(ping, builtin)
-+            if distance == 1:
-+                yield ("Ping '{}' seems misspelled. Did you mean '{}'?").format(
-+                    ping, builtin
-+                )
-+
-+
-+CATEGORY_CHECKS = {
-+    "COMMON_PREFIX": check_common_prefix,
-+    "CATEGORY_GENERIC": check_category_generic,
-+}
-+
-+
-+INDIVIDUAL_CHECKS = {
-+    "UNIT_IN_NAME": check_unit_in_name,
-+    "BUG_NUMBER": check_bug_number,
-+    "BASELINE_PING": check_valid_in_baseline,
-+    "MISSPELLED_PING": check_misspelled_pings,
-+}
-+
-+
-+def lint_metrics(objs, parser_config={}, file=sys.stderr):
-+    """
-+    Performs glinter checks on a set of metrics objects.
-+
-+    :param objs: Tree of metric objects, as returned by `parser.parse_objects`.
-+    :param parser_config: Parser configuration object, passed to the
-+      individual checks.
-+    :param file: The stream to write errors to.
-+    :returns: List of nits.
-+    """
-+    nits = []
-+    for (category_name, metrics) in sorted(list(objs.items())):
-+        if category_name == "pings":
-+            continue
-+
-+        for (check_name, check_func) in CATEGORY_CHECKS.items():
-+            if any(check_name in metric.no_lint for metric in metrics.values()):
-+                continue
-+            nits.extend(
-+                (check_name, category_name, msg)
-+                for msg in check_func(category_name, metrics.values())
-+            )
-+
-+        for (metric_name, metric) in sorted(list(metrics.items())):
-+            for (check_name, check_func) in INDIVIDUAL_CHECKS.items():
-+                new_nits = list(check_func(metric, parser_config))
-+                if len(new_nits):
-+                    if check_name not in metric.no_lint:
-+                        nits.extend(
-+                            (check_name, ".".join([metric.category, metric.name]), msg)
-+                            for msg in new_nits
-+                        )
-+                else:
-+                    if (
-+                        check_name not in CATEGORY_CHECKS
-+                        and check_name in metric.no_lint
-+                    ):
-+                        nits.append(
-+                            (
-+                                "SUPERFLUOUS_NO_LINT",
-+                                ".".join([metric.category, metric.name]),
-+                                (
-+                                    "Superfluous no_lint entry '{}'. "
-+                                    "Please remove it."
-+                                ).format(check_name),
-+                            )
-+                        )
-+
-+    if len(nits):
-+        print("Sorry, Glean found some glinter nits:", file=file)
-+        for check_name, name, msg in nits:
-+            print("{}: {}: {}".format(check_name, name, msg), file=file)
-+        print("", file=file)
-+        print("Please fix the above nits to continue.", file=file)
-+        print(
-+            "To disable a check, add a `no_lint` parameter "
-+            "with a list of check names to disable.\n"
-+            "This parameter can appear with each individual metric, or at the "
-+            "top-level to affect the entire file.",
-+            file=file,
-+        )
-+
-+    return nits
-+
-+
-+def lint_yaml_files(input_filepaths, file=sys.stderr):
-+    """
-+    Performs glinter YAML lint on a set of files.
-+
-+    :param input_filepaths: List of input files to lint.
-+    :param file: The stream to write errors to.
-+    :returns: List of (path, problem) nits.
-+    """
-+
-+    nits = []
-+    for path in input_filepaths:
-+        # yamllint needs both the file content and the path.
-+        file_content = None
-+        with path.open("r") as fd:
-+            file_content = fd.read()
-+
-+        problems = linter.run(file_content, YamlLintConfig("extends: default"), path)
-+        # Keep the path with each problem so errors are attributed to the
-+        # right file when linting multiple files.
-+        nits.extend((path, p) for p in problems)
-+
-+    if len(nits):
-+        print("Sorry, Glean found some glinter nits:", file=file)
-+        for path, p in nits:
-+            print(
-+                "{} ({}:{}) - {}".format(path, p.line, p.column, p.message),
-+                file=file,
-+            )
-+        print("", file=file)
-+        print("Please fix the above nits to continue.", file=file)
-+
-+    return nits
-+
-+
-+def glinter(input_filepaths, parser_config={}, file=sys.stderr):
-+    """
-+    Commandline helper for glinter.
-+
-+    :param input_filepaths: List of Path objects to load metrics from.
-+    :param parser_config: Parser configuration objects, passed to
-+      `parser.parse_objects`.
-+    :param file: The stream to write the errors to.
-+    :return: Non-zero if there were any glinter errors.
-+    """
-+    if lint_yaml_files(input_filepaths, file=file):
-+        return 1
-+
-+    objs = parser.parse_objects(input_filepaths, parser_config)
-+
-+    if util.report_validation_errors(objs):
-+        return 1
-+
-+    if lint_metrics(objs.value, parser_config=parser_config, file=file):
-+        return 1
-+
-+    print("✨ Your metrics are Glean! ✨", file=file)
-+    return 0
-diff --git a/third_party/python/glean_parser/glean_parser/markdown.py b/third_party/python/glean_parser/glean_parser/markdown.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/markdown.py
-@@ -0,0 +1,181 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+"""
-+Outputter to generate Markdown documentation for metrics.
-+"""
-+
-+from . import metrics
-+from . import pings
-+from . import util
-+from collections import defaultdict
-+
-+
-+def extra_info(obj):
-+    """
-+    Returns a list of string to string tuples with extra information for the type
-+    (e.g. extra keys for events) or an empty list if nothing is available.
-+    """
-+    extra_info = []
-+
-+    if isinstance(obj, metrics.Event):
-+        for key in obj.allowed_extra_keys:
-+            extra_info.append((key, obj.extra_keys[key]["description"]))
-+
-+    if isinstance(obj, metrics.Labeled) and obj.ordered_labels is not None:
-+        for label in obj.ordered_labels:
-+            extra_info.append((label, None))
-+
-+    return extra_info
-+
-+
-+def ping_desc(ping_name, custom_pings_cache={}):
-+    """
-+    Return a text description of the ping. If a custom_pings_cache
-+    is available, look in there for non-reserved ping names description.
-+    """
-+    desc = ""
-+
-+    if ping_name in pings.RESERVED_PING_NAMES:
-+        desc = (
-+            "This is a built-in ping that is assembled out of the "
-+            "box by the Glean SDK."
-+        )
-+    elif ping_name == "all-pings":
-+        desc = "These metrics are sent in every ping."
-+    elif ping_name in custom_pings_cache:
-+        desc = custom_pings_cache[ping_name].description
-+
-+    return desc
-+
-+
-+def metrics_docs(obj_name):
-+    """
-+    Return a link to the documentation entry for the Glean SDK metric of the
-+    requested type.
-+    """
-+    base_url = "https://mozilla.github.io/glean/book/user/metrics/{}.html"
-+
-+    # We need to fix up labeled metric types, as the type names are
-+    # singular but the docs refer to them in the plural.
-+    fixedup_name = obj_name
-+    if obj_name.startswith("labeled_"):
-+        fixedup_name += "s"
-+
-+    return base_url.format(fixedup_name)
-+
-+
-+def ping_docs(ping_name):
-+    """
-+    Return a link to the documentation entry for the requested Glean SDK
-+    built-in ping.
-+    """
-+    if ping_name not in pings.RESERVED_PING_NAMES:
-+        return ""
-+
-+    return "https://mozilla.github.io/glean/book/user/pings/{}.html".format(ping_name)
-+
-+
-+def if_empty(ping_name, custom_pings_cache={}):
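-+    """
-+    Return whether the named custom ping is sent even when empty.
-+    """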
-+    return (
-+        custom_pings_cache.get(ping_name)
-+        and custom_pings_cache[ping_name].send_if_empty
-+    )
-+
-+
-+def ping_reasons(ping_name, custom_pings_cache):
-+    """
-+    Returns the reasons dictionary for the ping.
-+    """
-+    if ping_name == "all-pings":
-+        return {}
-+    elif ping_name in custom_pings_cache:
-+        return custom_pings_cache[ping_name].reasons
-+
-+    return {}
-+
-+
-+def output_markdown(objs, output_dir, options={}):
-+    """
-+    Given a tree of objects, output Markdown docs to `output_dir`.
-+
-+    This produces a single `metrics.md`. The file contains a table of
-+    contents and a section for each ping metrics are collected for.
-+
-+    :param objs: A tree of objects (metrics and pings) as returned from
-+    `parser.parse_objects`.
-+    :param output_dir: Path to an output directory to write to.
-+    :param options: options dictionary, with the following optional key:
-+        - `project_title`: The project's title.
-+    """
-+
-+    # Build a dictionary that associates pings with their metrics.
-+    #
-+    # {
-+    #  "baseline": [
-+    #    { ... metric data ... },
-+    #    ...
-+    #  ],
-+    #  "metrics": [
-+    #    { ... metric data ... },
-+    #    ...
-+    #  ],
-+    #  ...
-+    # }
-+    #
-+    # This also builds a dictionary of custom pings, if available.
-+    custom_pings_cache = defaultdict()
-+    metrics_by_pings = defaultdict(list)
-+    for category_key, category_val in objs.items():
-+        for obj in category_val.values():
-+            # Filter out custom pings. We will need them for extracting
-+            # the description
-+            if isinstance(obj, pings.Ping):
-+                custom_pings_cache[obj.name] = obj
-+                if obj.send_if_empty:
-+                    metrics_by_pings[obj.name] = []
-+            elif obj.is_internal_metric():
-+                # This is an internal Glean metric, and we don't
-+                # want docs for it.
-+                continue
-+            else:
-+                # If we get here, obj is definitely a metric we want
-+                # docs for.
-+                for ping_name in obj.send_in_pings:
-+                    metrics_by_pings[ping_name].append(obj)
-+
-+    # Sort the metrics by their identifier, to make them show up nicely
-+    # in the docs and to make generated docs reproducible.
-+    for ping_name in metrics_by_pings:
-+        metrics_by_pings[ping_name] = sorted(
-+            metrics_by_pings[ping_name], key=lambda x: x.identifier()
-+        )
-+
-+    project_title = options.get("project_title", "this project")
-+
-+    template = util.get_jinja2_template(
-+        "markdown.jinja2",
-+        filters=(
-+            ("extra_info", extra_info),
-+            ("metrics_docs", metrics_docs),
-+            ("ping_desc", lambda x: ping_desc(x, custom_pings_cache)),
-+            ("ping_send_if_empty", lambda x: if_empty(x, custom_pings_cache)),
-+            ("ping_docs", ping_docs),
-+            ("ping_reasons", lambda x: ping_reasons(x, custom_pings_cache)),
-+        ),
-+    )
-+
-+    filename = "metrics.md"
-+    filepath = output_dir / filename
-+
-+    with filepath.open("w", encoding="utf-8") as fd:
-+        fd.write(
-+            template.render(
-+                metrics_by_pings=metrics_by_pings, project_title=project_title
-+            )
-+        )
-+        # Jinja2 squashes the final newline, so we explicitly add it
-+        fd.write("\n")
-diff --git a/third_party/python/glean_parser/glean_parser/metrics.py b/third_party/python/glean_parser/glean_parser/metrics.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/metrics.py
-@@ -0,0 +1,311 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+"""
-+Classes for each of the high-level metric types.
-+"""
-+
-+import enum
-+import sys
-+
-+from . import util
-+
-+
-+# Import a backport of PEP487 to support __init_subclass__
-+if sys.version_info < (3, 6):
-+    import pep487
-+
-+    base_object = pep487.PEP487Object
-+else:
-+    base_object = object
-+
-+
-+class Lifetime(enum.Enum):
-+    ping = 0
-+    user = 1
-+    application = 2
-+
-+
-+class Metric(base_object):
-+    glean_internal_metric_cat = "glean.internal.metrics"
-+    metric_types = {}
-+    default_store_names = ["metrics"]
-+
-+    def __init__(
-+        self,
-+        type,
-+        category,
-+        name,
-+        bugs,
-+        description,
-+        notification_emails,
-+        expires,
-+        data_reviews=None,
-+        version=0,
-+        disabled=False,
-+        lifetime="ping",
-+        send_in_pings=None,
-+        unit="",
-+        gecko_datapoint="",
-+        no_lint=None,
-+        _config=None,
-+        _validated=False,
-+    ):
-+        # Avoid cyclical import
-+        from . import parser
-+
-+        self.type = type
-+        self.category = category
-+        self.name = name
-+        self.bugs = bugs
-+        self.description = description
-+        self.notification_emails = notification_emails
-+        self.expires = expires
-+        if data_reviews is None:
-+            data_reviews = []
-+        self.data_reviews = data_reviews
-+        self.version = version
-+        self.disabled = disabled
-+        self.lifetime = getattr(Lifetime, lifetime)
-+        if send_in_pings is None:
-+            send_in_pings = ["default"]
-+        self.send_in_pings = send_in_pings
-+        self.unit = unit
-+        self.gecko_datapoint = gecko_datapoint
-+        if no_lint is None:
-+            no_lint = []
-+        self.no_lint = no_lint
-+
-+        # _validated indicates whether this metric has already been jsonschema
-+        # validated (but not any of the Python-level validation).
-+        if not _validated:
-+            data = {
-+                "$schema": parser.METRICS_ID,
-+                self.category: {self.name: self.serialize()},
-+            }
-+            for error in parser.validate(data):
-+                raise ValueError(error)
-+
-+        # Metrics in the special category "glean.internal.metrics" need to have
-+        # an empty category string when identifying the metrics in the ping.
-+        if self.category == Metric.glean_internal_metric_cat:
-+            self.category = ""
-+
-+    def __init_subclass__(cls, **kwargs):
-+        # Create a mapping of all of the subclasses of this class
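-+        # (e.g. the Boolean subclass below registers itself under the key
-+        # "boolean", which make_metric() then uses for dispatch).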
-+        if cls not in Metric.metric_types and hasattr(cls, "typename"):
-+            Metric.metric_types[cls.typename] = cls
-+        super().__init_subclass__(**kwargs)
-+
-+    @classmethod
-+    def make_metric(cls, category, name, metric_info, config={}, validated=False):
-+        """
-+        Given a metric_info dictionary from metrics.yaml, return a metric
-+        instance.
-+
-+        :param category: The category the metric lives in
-+        :param name: The name of the metric
-+        :param metric_info: A dictionary of the remaining metric parameters
-+        :param config: A dictionary containing commandline configuration
-+            parameters
-+        :param validated: True if the metric has already gone through
-+            jsonschema validation
-+        :return: A new Metric instance.
-+        """
-+        metric_type = metric_info["type"]
-+        return cls.metric_types[metric_type](
-+            category=category,
-+            name=name,
-+            _validated=validated,
-+            _config=config,
-+            **metric_info
-+        )
-+
-+    def serialize(self):
-+        """
-+        Serialize the metric back to JSON object model.
-+        """
-+        d = self.__dict__.copy()
-+        # Convert enum fields back to strings
-+        for key, val in d.items():
-+            if isinstance(val, enum.Enum):
-+                d[key] = d[key].name
-+            if isinstance(val, set):
-+                d[key] = sorted(list(val))
-+        del d["name"]
-+        del d["category"]
-+        return d
-+
-+    def identifier(self):
-+        """
-+        Create an identifier unique for this metric.
-+        Generally, category.name; however, Glean internal
-+        metrics only use name.
-+        """
-+        if not self.category:
-+            return self.name
-+        return ".".join((self.category, self.name))
-+
-+    def is_disabled(self):
-+        return self.disabled or self.is_expired()
-+
-+    def is_expired(self):
-+        return util.is_expired(self.expires)
-+
-+    @staticmethod
-+    def validate_expires(expires):
-+        return util.validate_expires(expires)
-+
-+    def is_internal_metric(self):
-+        return self.category in (Metric.glean_internal_metric_cat, "")
-+
-+
-+class Boolean(Metric):
-+    typename = "boolean"
-+
-+
-+class String(Metric):
-+    typename = "string"
-+
-+
-+class StringList(Metric):
-+    typename = "string_list"
-+
-+
-+class Counter(Metric):
-+    typename = "counter"
-+
-+
-+class Quantity(Metric):
-+    typename = "quantity"
-+
-+
-+class TimeUnit(enum.Enum):
-+    nanosecond = 0
-+    microsecond = 1
-+    millisecond = 2
-+    second = 3
-+    minute = 4
-+    hour = 5
-+    day = 6
-+
-+
-+class TimeBase(Metric):
-+    def __init__(self, *args, **kwargs):
-+        self.time_unit = getattr(TimeUnit, kwargs.pop("time_unit", "millisecond"))
-+        super().__init__(*args, **kwargs)
-+
-+
-+class Timespan(TimeBase):
-+    typename = "timespan"
-+
-+
-+class TimingDistribution(TimeBase):
-+    typename = "timing_distribution"
-+
-+
-+class MemoryUnit(enum.Enum):
-+    byte = 0
-+    kilobyte = 1
-+    megabyte = 2
-+    gigabyte = 3
-+
-+
-+class MemoryDistribution(Metric):
-+    typename = "memory_distribution"
-+
-+    def __init__(self, *args, **kwargs):
-+        self.memory_unit = getattr(MemoryUnit, kwargs.pop("memory_unit", "byte"))
-+        super().__init__(*args, **kwargs)
-+
-+
-+class HistogramType(enum.Enum):
-+    linear = 0
-+    exponential = 1
-+
-+
-+class CustomDistribution(Metric):
-+    typename = "custom_distribution"
-+
-+    def __init__(self, *args, **kwargs):
-+        self.range_min = kwargs.pop("range_min", 1)
-+        self.range_max = kwargs.pop("range_max")
-+        self.bucket_count = kwargs.pop("bucket_count")
-+        self.histogram_type = getattr(
-+            HistogramType, kwargs.pop("histogram_type", "exponential")
-+        )
-+        super().__init__(*args, **kwargs)
-+
-+
-+class Datetime(TimeBase):
-+    typename = "datetime"
-+
-+
-+class Event(Metric):
-+    typename = "event"
-+
-+    default_store_names = ["events"]
-+
-+    _generate_enums = [("extra_keys", "Keys")]
-+
-+    def __init__(self, *args, **kwargs):
-+        self.extra_keys = kwargs.pop("extra_keys", {})
-+        self.validate_extra_keys(self.extra_keys, kwargs.get("_config", {}))
-+        super().__init__(*args, **kwargs)
-+
-+    @property
-+    def allowed_extra_keys(self):
-+        # Sort keys so that output is deterministic
-+        return sorted(list(self.extra_keys.keys()))
-+
-+    @staticmethod
-+    def validate_extra_keys(extra_keys, config):
-+        if not config.get("allow_reserved") and any(
-+            k.startswith("glean.") for k in extra_keys.keys()
-+        ):
-+            raise ValueError(
-+                "Extra keys beginning with 'glean.' are reserved for "
-+                "Glean internal use."
-+            )
-+
-+
-+class Uuid(Metric):
-+    typename = "uuid"
-+
-+
-+class Labeled(Metric):
-+    labeled = True
-+
-+    def __init__(self, *args, **kwargs):
-+        labels = kwargs.pop("labels", None)
-+        if labels is not None:
-+            self.ordered_labels = labels
-+            self.labels = set(labels)
-+        else:
-+            self.ordered_labels = None
-+            self.labels = None
-+        super().__init__(*args, **kwargs)
-+
-+    def serialize(self):
-+        """
-+        Serialize the metric back to JSON object model.
-+        """
-+        d = super().serialize()
-+        d["labels"] = self.ordered_labels
-+        del d["ordered_labels"]
-+        return d
-+
-+
-+class LabeledBoolean(Labeled, Boolean):
-+    typename = "labeled_boolean"
-+
-+
-+class LabeledString(Labeled, String):
-+    typename = "labeled_string"
-+
-+
-+class LabeledCounter(Labeled, Counter):
-+    typename = "labeled_counter"
-diff --git a/third_party/python/glean_parser/glean_parser/parser.py b/third_party/python/glean_parser/glean_parser/parser.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/parser.py
-@@ -0,0 +1,321 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+"""
-+Code for parsing metrics.yaml files.
-+"""
-+
-+from collections import OrderedDict
-+import functools
-+from pathlib import Path
-+import textwrap
-+
-+import jsonschema
-+from jsonschema.exceptions import ValidationError
-+
-+from .metrics import Metric
-+from .pings import Ping, RESERVED_PING_NAMES
-+from . import util
-+
-+
-+ROOT_DIR = Path(__file__).parent
-+SCHEMAS_DIR = ROOT_DIR / "schemas"
-+
-+METRICS_ID = "moz://mozilla.org/schemas/glean/metrics/1-0-0"
-+PINGS_ID = "moz://mozilla.org/schemas/glean/pings/1-0-0"
-+
-+FILE_TYPES = {METRICS_ID: "metrics", PINGS_ID: "pings"}
-+
-+
-+def _update_validator(validator):
-+    """
-+    Adds some custom validators to the jsonschema validator that produce
-+    nicer error messages.
-+    """
-+
-+    def required(validator, required, instance, schema):
-+        if not validator.is_type(instance, "object"):
-+            return
-+        missing_properties = set(
-+            property for property in required if property not in instance
-+        )
-+        if len(missing_properties):
-+            missing_properties = sorted(list(missing_properties))
-+            yield ValidationError(
-+                "Missing required properties: {}".format(", ".join(missing_properties))
-+            )
-+
-+    validator.VALIDATORS["required"] = required
-+
-+
-+def _load_file(filepath):
-+    """
-+    Load a metrics.yaml or pings.yaml format file.
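-+
-+    This is a generator: it yields formatted error strings and returns a
-+    (content, filetype) pair, which callers can capture with `yield from`.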
-+    """
-+    try:
-+        content = util.load_yaml_or_json(filepath, ordered_dict=True)
-+    except Exception as e:
-+        yield util.format_error(filepath, "", textwrap.fill(str(e)))
-+        return {}, None
-+
-+    if content is None:
-+        yield util.format_error(
-+            filepath, "", "'{}' file can not be empty.".format(filepath)
-+        )
-+        return {}, None
-+
-+    if content == {}:
-+        return {}, None
-+
-+    filetype = FILE_TYPES.get(content.get("$schema"))
-+
-+    for error in validate(content, filepath):
-+        content = {}
-+        yield error
-+
-+    return content, filetype
-+
-+
-+@functools.lru_cache(maxsize=1)
-+def _load_schemas():
-+    """
-+    Load all of the known schemas from disk, and put them in a map based on the
-+    schema's $id.
-+    """
-+    schemas = {}
-+    for schema_path in SCHEMAS_DIR.glob("*.yaml"):
-+        schema = util.load_yaml_or_json(schema_path)
-+        resolver = util.get_null_resolver(schema)
-+        validator_class = jsonschema.validators.validator_for(schema)
-+        _update_validator(validator_class)
-+        validator_class.check_schema(schema)
-+        validator = validator_class(schema, resolver=resolver)
-+        schemas[schema["$id"]] = (schema, validator)
-+    return schemas
-+
-+
-+def _get_schema(schema_id, filepath="<input>"):
-+    """
-+    Get the schema for the given schema $id.
-+    """
-+    schemas = _load_schemas()
-+    if schema_id not in schemas:
-+        raise ValueError(
-+            util.format_error(
-+                filepath,
-+                "",
-+                "$schema key must be one of {}".format(", ".join(schemas.keys())),
-+            )
-+        )
-+    return schemas[schema_id]
-+
-+
-+def _get_schema_for_content(content, filepath):
-+    """
-+    Get the appropriate schema for the given JSON content.
-+    """
-+    return _get_schema(content.get("$schema"), filepath)
-+
-+
-+def get_parameter_doc(key):
-+    """
-+    Returns documentation about a specific metric parameter.
-+    """
-+    schema, _ = _get_schema(METRICS_ID)
-+    return schema["definitions"]["metric"]["properties"][key]["description"]
-+
-+
-+def get_ping_parameter_doc(key):
-+    """
-+    Returns documentation about a specific ping parameter.
-+    """
-+    schema, _ = _get_schema(PINGS_ID)
-+    return schema["additionalProperties"]["properties"][key]["description"]
-+
-+
-+def validate(content, filepath="<input>"):
-+    """
-+    Validate the given content against the appropriate schema.
-+    """
-+    try:
-+        schema, validator = _get_schema_for_content(content, filepath)
-+    except ValueError as e:
-+        yield str(e)
-+    else:
-+        yield from (
-+            util.format_error(filepath, "", util.pprint_validation_error(e))
-+            for e in validator.iter_errors(content)
-+        )
-+
-+
-+def _instantiate_metrics(all_objects, sources, content, filepath, config):
-+    """
-+    Load a list of metrics.yaml files, convert the JSON information into Metric
-+    objects, and merge them into a single tree.
-+    """
-+    global_no_lint = content.get("no_lint", [])
-+
-+    for category_key, category_val in content.items():
-+        if category_key.startswith("$"):
-+            continue
-+        if category_key == "no_lint":
-+            continue
-+        if not config.get("allow_reserved") and category_key.split(".")[0] == "glean":
-+            yield util.format_error(
-+                filepath,
-+                "For category '{}'".format(category_key),
-+                "Categories beginning with 'glean' are reserved for "
-+                "Glean internal use.",
-+            )
-+            continue
-+        all_objects.setdefault(category_key, OrderedDict())
-+        for metric_key, metric_val in category_val.items():
-+            try:
-+                metric_obj = Metric.make_metric(
-+                    category_key, metric_key, metric_val, validated=True, config=config
-+                )
-+            except Exception as e:
-+                yield util.format_error(
-+                    filepath,
-+                    "On instance {}.{}".format(category_key, metric_key),
-+                    str(e),
-+                )
-+                metric_obj = None
-+            else:
-+                if (
-+                    not config.get("allow_reserved")
-+                    and "all-pings" in metric_obj.send_in_pings
-+                ):
-+                    yield util.format_error(
-+                        filepath,
-+                        "On instance {}.{}".format(category_key, metric_key),
-+                        'Only internal metrics may specify "all-pings" '
-+                        'in "send_in_pings"',
-+                    )
-+                    metric_obj = None
-+
-+            if metric_obj is not None:
-+                metric_obj.no_lint = list(set(metric_obj.no_lint + global_no_lint))
-+
-+            already_seen = sources.get((category_key, metric_key))
-+            if already_seen is not None:
-+                # We've seen this metric name already
-+                yield util.format_error(
-+                    filepath,
-+                    "",
-+                    ("Duplicate metric name '{}.{}'" "already defined in '{}'").format(
-+                        category_key, metric_key, already_seen
-+                    ),
-+                )
-+            else:
-+                all_objects[category_key][metric_key] = metric_obj
-+                sources[(category_key, metric_key)] = filepath
-+
-+
-+def _instantiate_pings(all_objects, sources, content, filepath, config):
-+    """
-+    Load a list of pings.yaml files, convert the JSON information into Ping
-+    objects.
-+    """
-+    for ping_key, ping_val in content.items():
-+        if ping_key.startswith("$"):
-+            continue
-+        if not config.get("allow_reserved"):
-+            if ping_key in RESERVED_PING_NAMES:
-+                yield util.format_error(
-+                    filepath,
-+                    "For ping '{}'".format(ping_key),
-+                    "Ping uses a reserved name ({})".format(RESERVED_PING_NAMES),
-+                )
-+                continue
-+        ping_val["name"] = ping_key
-+        try:
-+            ping_obj = Ping(**ping_val)
-+        except Exception as e:
-+            yield util.format_error(
-+                filepath, "On instance '{}'".format(ping_key), str(e)
-+            )
-+            ping_obj = None
-+
-+        already_seen = sources.get(ping_key)
-+        if already_seen is not None:
-+            # We've seen this ping name already
-+            yield util.format_error(
-+                filepath,
-+                "",
-+                ("Duplicate ping name '{}'" "already defined in '{}'").format(
-+                    ping_key, already_seen
-+                ),
-+            )
-+        else:
-+            all_objects.setdefault("pings", {})[ping_key] = ping_obj
-+            sources[ping_key] = filepath
-+
-+
-+def _preprocess_objects(objs, config):
-+    """
-+    Preprocess the object tree to better set defaults.
-+    """
-+    for category in objs.values():
-+        for obj in category.values():
-+            if not config.get("do_not_disable_expired", False) and hasattr(
-+                obj, "is_disabled"
-+            ):
-+                obj.disabled = obj.is_disabled()
-+
-+            if hasattr(obj, "send_in_pings"):
-+                if "default" in obj.send_in_pings:
-+                    obj.send_in_pings = obj.default_store_names + [
-+                        x for x in obj.send_in_pings if x != "default"
-+                    ]
-+                obj.send_in_pings = sorted(list(set(obj.send_in_pings)))
-+    return objs
-+
-+
-+@util.keep_value
-+def parse_objects(filepaths, config={}):
-+    """
-+    Parse one or more metrics.yaml and/or pings.yaml files, returning a tree of
-+    `metrics.Metric` and `pings.Ping` instances.
-+
-+    The result is a generator over any errors.  If there are no errors, the
-+    actual metrics can be obtained from `result.value`.  For example::
-+
-+      result = parser.parse_objects(filepaths)
-+      for err in result:
-+          print(err)
-+      all_metrics = result.value
-+
-+    The result value is a dictionary of category names to categories, where
-+    each category is a dictionary from metric name to `metrics.Metric`
-+    instances.  There is also the special category `pings` containing all
-+    of the `pings.Ping` instances.
-+
-+    :param filepaths: list of Path objects to metrics.yaml and/or pings.yaml
-+        files
-+    :param config: A dictionary of options that change parsing behavior.
-+        Supported keys are:
-+        - `allow_reserved`: Allow values reserved for internal Glean use.
-+        - `do_not_disable_expired`: Don't mark expired metrics as disabled.
-+          This is useful when you want to retain the original "disabled"
-+          value from the `metrics.yaml`, rather than having it overridden when
-+          the metric expires.
-+    """
-+    all_objects = OrderedDict()
-+    sources = {}
-+    filepaths = util.ensure_list(filepaths)
-+    for filepath in filepaths:
-+        content, filetype = yield from _load_file(filepath)
-+        if filetype == "metrics":
-+            yield from _instantiate_metrics(
-+                all_objects, sources, content, filepath, config
-+            )
-+        elif filetype == "pings":
-+            yield from _instantiate_pings(
-+                all_objects, sources, content, filepath, config
-+            )
-+
-+    return _preprocess_objects(all_objects, config)
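-+
-+
-+# End-to-end sketch (editor's note; the file paths are hypothetical): drain
-+# the error generator, then read the object tree from `result.value`, which
-+# is provided by the `util.keep_value` decorator above:
-+#
-+#     result = parse_objects([Path("metrics.yaml"), Path("pings.yaml")])
-+#     errors = list(result)
-+#     if not errors:
-+#         all_objects = result.value  # category -> name -> Metric / Ping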
-diff --git a/third_party/python/glean_parser/glean_parser/pings.py b/third_party/python/glean_parser/glean_parser/pings.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/pings.py
-@@ -0,0 +1,78 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+"""
-+Classes for managing the description of pings.
-+"""
-+
-+import sys
-+
-+
-+# Import a backport of PEP487 to support __init_subclass__
-+if sys.version_info < (3, 6):
-+    import pep487
-+
-+    base_object = pep487.PEP487Object
-+else:
-+    base_object = object
-+
-+
-+RESERVED_PING_NAMES = ["baseline", "metrics", "events", "deletion_request"]
-+
-+
-+class Ping(base_object):
-+    def __init__(
-+        self,
-+        name,
-+        description,
-+        bugs,
-+        notification_emails,
-+        data_reviews=None,
-+        include_client_id=False,
-+        send_if_empty=False,
-+        reasons=None,
-+        _validated=False,
-+    ):
-+        # Avoid cyclical import
-+        from . import parser
-+
-+        self.name = name
-+        self.description = description
-+        self.bugs = bugs
-+        self.notification_emails = notification_emails
-+        if data_reviews is None:
-+            data_reviews = []
-+        self.data_reviews = data_reviews
-+        self.include_client_id = include_client_id
-+        self.send_if_empty = send_if_empty
-+        if reasons is None:
-+            reasons = {}
-+        self.reasons = reasons
-+
-+        # _validated indicates whether this metric has already been jsonschema
-+        # validated (but not any of the Python-level validation).
-+        if not _validated:
-+            data = {"$schema": parser.PINGS_ID, self.name: self.serialize()}
-+            for error in parser.validate(data):
-+                raise ValueError(error)
-+
-+    _generate_enums = [("reason_codes", "ReasonCodes")]
-+
-+    @property
-+    def type(self):
-+        return "ping"
-+
-+    @property
-+    def reason_codes(self):
-+        return sorted(list(self.reasons.keys()))
-+
-+    def serialize(self):
-+        """
-+        Serialize the metric back to JSON object model.
-+        """
-+        d = self.__dict__.copy()
-+        del d["name"]
-+        return d
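-+
-+
-+# Construction sketch (editor's note; all values below are illustrative).
-+# Creating a `Ping` directly runs jsonschema validation via `parser.validate`
-+# unless `_validated=True` is passed:
-+#
-+#     ping = Ping(
-+#         name="search",
-+#         description="Sent when a search is performed.",
-+#         bugs=["https://bugzilla.mozilla.org/show_bug.cgi?id=1234567"],
-+#         notification_emails=["nobody@example.com"],
-+#         data_reviews=["https://example.com/data-review"],
-+#     )
-+#     ping.serialize()  # the JSON object model, minus the "name" key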
-diff --git a/third_party/python/glean_parser/glean_parser/schemas/metrics.1-0-0.schema.yaml b/third_party/python/glean_parser/glean_parser/schemas/metrics.1-0-0.schema.yaml
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/schemas/metrics.1-0-0.schema.yaml
-@@ -0,0 +1,520 @@
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+---
-+$schema: http://json-schema.org/draft-07/schema#
-+title: Metrics
-+description: |
-+  Schema for the metrics.yaml files for Mozilla's Glean telemetry SDK.
-+
-+  The top-level of the `metrics.yaml` file has a key defining each category of
-+  metrics. Categories must be snake_case, and they may also have dots `.` to
-+  define subcategories.
-+
-+$id: moz://mozilla.org/schemas/glean/metrics/1-0-0
-+
-+definitions:
-+  token:
-+    type: string
-+    pattern: "^[A-Za-z_][A-Za-z0-9_\\.]*$"
-+
-+  snake_case:
-+    type: string
-+    pattern: "^[a-z_][a-z0-9_]*$"
-+
-+  dotted_snake_case:
-+    type: string
-+    pattern: "^[a-z_][a-z0-9_]{0,29}(\\.[a-z_][a-z0-9_]{0,29})*$"
-+    maxLength: 40
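-+    # For example (editor's illustrative note): `browser.search` and `a11y`
-+    # match; `BrowserSearch` and `browser..search` do not.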
-+
-+  kebab_case:
-+    type: string
-+    # Bug 1601270; we allow a few specific existing snake_cased ping names
-+    # for now, but these special cases can be removed once the number of
-+    # legacy clients sufficiently dwindles, likely in 2020H2.
-+    pattern: "^[a-z][a-z0-9-]{0,29}$\
-+      |^deletion_request$|^bookmarks_sync$|^history_sync$|^session_end$|^all_pings$|^glean_.*$"
-+
-+  long_id:
-+    allOf:
-+      - $ref: "#/definitions/snake_case"
-+      - maxLength: 40
-+
-+  short_id:
-+    allOf:
-+      - $ref: "#/definitions/snake_case"
-+      - maxLength: 30
-+
-+  labeled_metric_id:
-+    type: string
-+    pattern: "^[a-z_][a-z0-9_-]{0,29}(\\.[a-z_][a-z0-9_-]{0,29})*$"
-+    maxLength: 71  # Note: this should be category + metric + 1
-+
-+  metric:
-+    description: |
-+      Describes a single metric.
-+
-+      See https://mozilla.github.io/glean_parser/metrics-yaml.html
-+
-+    type: object
-+
-+    additionalProperties: false
-+
-+    properties:
-+      type:
-+        title: Metric type
-+        description: |
-+          **Required.**
-+
-+          Specifies the type of a metric, like "counter" or "event". This
-+          defines which operations are valid for the metric, how it is stored
-+          and how data analysis tooling displays it.
-+
-+          The supported types are:
-+            - `event`: Record a specific event (with optional metadata).
-+              Additional properties: `extra_keys`.
-+
-+            - `boolean`: A metric storing values of true or false.
-+
-+            - `string`: A metric storing Unicode string values.
-+
-+            - `string_list`: a list of Unicode strings.
-+
-+            - `counter`: A numeric value that can only be incremented.
-+
-+            - `quantity`: A numeric value that is set directly. Only allowed for
-+              metrics coming from GeckoView.
-+
-+            - `timespan`: Represents a time interval. Additional properties:
-+              `time_unit`_.
-+
-+            - `timing_distribution`: Record the distribution of multiple
-+              timings. Additional properties: `time_unit`_.
-+
-+            - `datetime`: A date/time value. Represented as an ISO datetime in
-+              UTC. Additional properties: `time_unit`_.
-+
-+            - `uuid`: Record a UUID v4.
-+
-+            - `memory_distribution`: A histogram for recording memory usage
-+              values. Additional properties: `memory_unit`_.
-+
-+            - `custom_distribution`: A histogram with a custom range and number
-+              of buckets. This metric type is for legacy support only and is
-+              only allowed for metrics coming from GeckoView. Additional
-+              properties: `range_min`_, `range_max`_, `bucket_count`_,
-+              `histogram_type`_.
-+
-+            - Additionally, labeled versions of many metric types are supported.
-+              These support the `labels`_ parameter, allowing multiple instances
-+              of the metric to be stored at a given set of labels. The labeled
-+              metric types include:
-+
-+                `labeled_boolean`, `labeled_string`, `labeled_counter`.
-+
-+        type: string
-+        enum:
-+          - event
-+          - boolean
-+          - string
-+          - string_list
-+          - counter
-+          - quantity
-+          - timespan
-+          - timing_distribution
-+          - custom_distribution
-+          - memory_distribution
-+          - datetime
-+          - uuid
-+          - labeled_boolean
-+          - labeled_string
-+          - labeled_counter
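-+        # Illustrative sketch (editor's note; the category, metric name and
-+        # URLs are hypothetical): a minimal metrics.yaml entry for a counter:
-+        #
-+        #   browser.usage:
-+        #     page_loads:
-+        #       type: counter
-+        #       description: Counts the number of page loads.
-+        #       bugs:
-+        #         - https://bugzilla.mozilla.org/show_bug.cgi?id=1234567
-+        #       data_reviews:
-+        #         - https://example.com/review
-+        #       notification_emails:
-+        #         - nobody@example.com
-+        #       expires: never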
-+
-+      description:
-+        title: Description
-+        description: |
-+          **Required.**
-+
-+          A textual description of what this metric does, what it means, and its
-+          edge cases or any other helpful information.
-+
-+          Descriptions may contain [markdown
-+          syntax](https://www.markdownguide.org/basic-syntax/).
-+        type: string
-+
-+      lifetime:
-+        title: Lifetime
-+        description: |
-+          Defines the lifetime of the metric. It must be one of the following
-+          values:
-+
-+          - `ping` (default): The metric is reset each time it is sent in a
-+            ping.
-+
-+          - `user`: The metric contains a property that is part of the user's
-+            profile and is never reset.
-+
-+          - `application`: The metric contains a property that is related to the
-+            application, and is reset only at application restarts.
-+        enum:
-+          - ping
-+          - user
-+          - application
-+        default: ping
-+
-+      send_in_pings:
-+        title: Send in pings
-+        description: |
-+          Which pings the metric should be sent on. If not specified, the metric
-+          is sent on the "default ping", which is the `events` ping for events,
-+          and the `metrics` ping for everything else. Most metrics don't need to
-+          specify this.
-+
-+          (There is an additional special value of `all-pings` for internal
-+          Glean metrics only that is used to indicate that a metric may appear
-+          in any ping.)
-+        type: array
-+        items:
-+          $ref: "#/definitions/kebab_case"
-+        default:
-+          - default
-+
-+      notification_emails:
-+        title: Notification emails
-+        description: |
-+          **Required.**
-+
-+          A list of email addresses to notify for important events with the
-+          metric or when people with context or ownership for the metric need to
-+          be contacted.
-+        type: array
-+        minItems: 1
-+        items:
-+          type: string
-+          format: email
-+
-+      bugs:
-+        title: Related bugs
-+        description: |
-+          **Required.**
-+
-+          A list of bug URLs (e.g. Bugzilla and Github) that are relevant to
-+          this metric, e.g., tracking its original implementation or later
-+          changes to it.
-+
-+          Using bug numbers alone is deprecated and will be an error in the
-+          future. Each entry should be a full URL to the bug in its tracker.
-+        type: array
-+        minItems: 1
-+        items:
-+          anyOf:
-+            - type: integer
-+            - type: string
-+              format: uri
-+
-+      data_reviews:
-+        title: Review references
-+        description: |
-+          **Required.**
-+
-+          A list of URIs to any data collection reviews relevant to the metric.
-+        type: array
-+        items:
-+          type: string
-+          format: uri
-+
-+      disabled:
-+        title: Disabled
-+        description: |
-+          If `true`, the metric is disabled, and any metric collection on it
-+          will be silently ignored at runtime.
-+        type: boolean
-+        default: false
-+
-+      expires:
-+        title: Expires
-+        description: |
-+          **Required.**
-+
-+          May be one of the following values:
-+            - `<build date>`: An ISO date `yyyy-mm-dd` in UTC on which the
-+              metric expires. For example, `2019-03-13`. This date is checked at
-+              build time. Except in special cases, this form should be used so
-+              that the metric automatically "sunsets" after a period of time.
-+            - `never`: This metric never expires.
-+            - `expired`: This metric is manually expired.
-+        type: string
-+        pattern: "(never)|(expired)|([0-9]{4}-[0-9]{2}-[0-9]{2})"
-+
-+      version:
-+        title: Metric version
-+        description: |
-+          The version of the metric. A monotonically increasing value. If not
-+          provided, defaults to 0.
-+
-+      time_unit:
-+        title: Time unit
-+        description: |
-+          Specifies the unit that the metric will be stored and displayed in. If
-+          not provided, it defaults to milliseconds. Time values are sent to the
-+          backend as integers, so `time_unit`_ determines the maximum resolution
-+          at which timespans are recorded. Times are always truncated, not
-+          rounded, to the nearest time unit. For example, a measurement of 25 ns
-+          will be returned as 0 ms if `time_unit` is `"millisecond"`.
-+
-+          Valid when `type`_ is `timespan`, `timing_distribution` or `datetime`.
-+        enum:
-+          - nanosecond
-+          - microsecond
-+          - millisecond
-+          - second
-+          - minute
-+          - hour
-+          - day
-+
-+      memory_unit:
-+        title: Memory unit
-+        description: |
-+          The unit that the incoming memory size values are recorded in.
-+
-+          These are power-of-2 units, so a "kilobyte" here is, strictly
-+          speaking, a "kibibyte".
-+
-+          - kilobyte == 2^10 ==         1,024 bytes
-+          - megabyte == 2^20 ==     1,048,576 bytes
-+          - gigabyte == 2^30 == 1,073,741,824 bytes
-+
-+          Values are automatically converted to and transmitted as bytes.
-+
-+          Valid when `type`_ is `memory_distribution`.
-+        enum:
-+          - byte
-+          - kilobyte
-+          - megabyte
-+          - gigabyte
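-+        # For example (editor's note): a value of 2 recorded with
-+        # `memory_unit: megabyte` is transmitted as 2 * 2^20 = 2,097,152 bytes.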
-+
-+      labels:
-+        title: Labels
-+        description: |
-+          A list of labels for a labeled metric.  If provided, the labels are
-+          enforced at run time, and recording to an unknown label is recorded
-+          to the special label ``__other__``.  If not provided, the labels
-+          may be anything, but using too many unique labels will put some
-+          labels in the special label ``__other__``.
-+
-+          Valid with any of the labeled metric types.
-+        anyOf:
-+          - type: array
-+            uniqueItems: true
-+            items:
-+              $ref: "#/definitions/labeled_metric_id"
-+            maxItems: 16
-+          - type: "null"
-+
-+      extra_keys:
-+        title: Extra keys
-+        description: |
-+          The acceptable keys on the "extra" object sent with events. This is an
-+          object mapping the key to an object containing metadata about the key.
-+          This metadata object has the following keys:
-+
-+            - `description`: **Required.** A description of the key.
-+
-+          Valid when `type`_ is `event`.
-+        type: object
-+        propertyNames:
-+          $ref: "#/definitions/dotted_snake_case"
-+        additionalProperties:
-+          type: object
-+          properties:
-+            description:
-+              type: string
-+          required:
-+            - description
-+        default: {}
-+
-+      gecko_datapoint:
-+        title: Gecko Datapoint
-+        description: |
-+          This is a Gecko-specific property. It is the name of the Gecko metric
-+          to accumulate the data from, when using the Glean SDK in a product
-+          using GeckoView. See bug 1566356 for more context.
-+
-+        type: string
-+
-+      range_min:
-+        title: Range minimum
-+        description: |
-+          The minimum value of a custom distribution.
-+
-+          Valid when `type`_ is `custom_distribution`.
-+        type: number
-+        default: 1
-+
-+      range_max:
-+        title: Range maximum
-+        description: |
-+          The maximum value of a custom distribution.
-+
-+          Required when `type`_ is `custom_distribution`.
-+        type: number
-+
-+      bucket_count:
-+        title: Bucket count
-+        description: |
-+          The number of buckets to include in a custom distribution.
-+
-+          Required when `type`_ is `custom_distribution`.
-+        type: number
-+        minimum: 1
-+        maximum: 100
-+
-+      histogram_type:
-+        title: Histogram type
-+        description: |
-+          The type of histogram bucketing to use:
-+            - `linear`: The buckets are linearly spaced within the range.
-+            - `exponential`: The buckets are spaced on a natural-logarithmic
-+              scale, so smaller-valued buckets are narrower than higher-valued
-+              buckets.
-+
-+          Required when `type`_ is `custom_distribution`.
-+        enum:
-+          - linear
-+          - exponential
-+
-+      unit:
-+        title: Unit
-+        description: |
-+          The unit of the metric, for metric types that don't already have a
-+          dedicated unit parameter (such as `time_unit`).
-+          This is provided for informational purposes only and doesn't have any
-+          effect on data collection.
-+        type: string
-+
-+      no_lint:
-+        title: Lint checks to skip
-+        description: |
-+          This parameter lists any lint checks to skip for this metric only.
-+        type: array
-+        items:
-+          type: string
-+
-+    required:
-+      - type
-+      - bugs
-+      - description
-+      - notification_emails
-+      - data_reviews
-+      - expires
-+
-+type: object
-+
-+propertyNames:
-+  anyOf:
-+    - allOf:
-+        - $ref: "#/definitions/dotted_snake_case"
-+        - not:
-+            description: "'pings' is reserved as a category name."
-+            const: pings
-+    - enum: ['$schema']
-+
-+properties:
-+  $schema:
-+    type: string
-+    format: url
-+
-+  no_lint:
-+    title: Lint checks to skip globally
-+    description: |
-+      This parameter lists any lint checks to skip for this whole file.
-+    type: array
-+    items:
-+      type: string
-+
-+additionalProperties:
-+  type: object
-+  propertyNames:
-+    anyOf:
-+      - $ref: "#/definitions/short_id"
-+  additionalProperties:
-+    allOf:
-+      - $ref: "#/definitions/metric"
-+      -
-+        if:
-+          properties:
-+            type:
-+              const: event
-+        then:
-+          properties:
-+            lifetime:
-+              description: |
-+                Event metrics must have ping lifetime.
-+              const: ping
-+      - if:
-+          not:
-+            properties:
-+              type:
-+                enum:
-+                  - timing_distribution
-+                  - custom_distribution
-+                  - memory_distribution
-+                  - quantity
-+                  - boolean
-+                  - string
-+                  - labeled_counter
-+        then:
-+          properties:
-+            gecko_datapoint:
-+              description: |
-+                `gecko_datapoint` is only allowed for `timing_distribution`,
-+                `custom_distribution`, `memory_distribution`, `quantity`,
-+                `boolean`, `string` and `labeled_counter`.
-+              maxLength: 0
-+      -
-+        if:
-+          properties:
-+            type:
-+              enum:
-+                - custom_distribution
-+                - quantity
-+        then:
-+          required:
-+            - gecko_datapoint
-+          description: |
-+            `custom_distribution` and `quantity` are only allowed for Gecko
-+            metrics.
-+      -
-+        if:
-+          properties:
-+            type:
-+              const: custom_distribution
-+        then:
-+          required:
-+            - range_max
-+            - bucket_count
-+            - histogram_type
-+          description: |
-+            `custom_distribution` is missing required parameters `range_max`,
-+            `bucket_count` and `histogram_type`.
-+      -
-+        if:
-+          properties:
-+            type:
-+              const: memory_distribution
-+        then:
-+          required:
-+            - memory_unit
-+          description: |
-+            `memory_distribution` is missing required parameter `memory_unit`.
-+      -
-+        if:
-+          properties:
-+            type:
-+              const: quantity
-+        then:
-+          required:
-+            - unit
-+          description: |
-+            `quantity` is missing required parameter `unit`.
-diff --git a/third_party/python/glean_parser/glean_parser/schemas/pings.1-0-0.schema.yaml b/third_party/python/glean_parser/glean_parser/schemas/pings.1-0-0.schema.yaml
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/schemas/pings.1-0-0.schema.yaml
-@@ -0,0 +1,141 @@
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+---
-+$schema: http://json-schema.org/draft-07/schema#
-+title: Pings
-+description: |
-+  Schema for the pings.yaml files for Mozilla's Glean telemetry SDK.
-+
-+  The top-level of the `pings.yaml` file has a key defining the name of each
-+  ping. The values contain metadata about that ping. Ping names must be
-+  kebab-case per https://docs.telemetry.mozilla.org/cookbooks/new_ping.html
-+
-+$id: moz://mozilla.org/schemas/glean/pings/1-0-0
-+
-+definitions:
-+  dotted_snake_case:
-+    type: string
-+    pattern: "^[a-z_][a-z0-9_]{0,29}(\\.[a-z_][a-z0-9_]{0,29})*$"
-+    maxLength: 40
-+  kebab_case:
-+    type: string
-+    # Bug 1601270; we allow a few specific existing snake_cased ping names
-+    # for now, but these special cases can be removed once the number of
-+    # legacy clients sufficiently dwindles, likely in 2020H2.
-+    pattern: "^[a-z][a-z0-9-]{0,29}$\
-+      |^deletion_request$|^bookmarks_sync$|^history_sync$|^session_end$|^all_pings$|^glean_.*$"
-+
-+type: object
-+
-+propertyNames:
-+  allOf:
-+    - anyOf:
-+        - $ref: "#/definitions/kebab_case"
-+        - enum: ['$schema']
-+    - not:
-+        enum: ['all-pings']
-+
-+properties:
-+  $schema:
-+    type: string
-+    format: url
-+
-+additionalProperties:
-+  type: object
-+  properties:
-+    description:
-+      title: Description
-+      description: |
-+        **Required.**
-+
-+        A textual description of the purpose of this ping and what it contains.
-+
-+        Descriptions may contain [markdown
-+        syntax](https://www.markdownguide.org/basic-syntax/).
-+      type: string
-+
-+    include_client_id:
-+      title: Include client id
-+      description: |
-+        **Required.**
-+
-+        When `true`, include the `client_id` value in the ping.
-+      type: boolean
-+
-+    send_if_empty:
-+      title: Send if empty
-+      description: |
-+        When `false` a ping is sent only if it contains data (the default).
-+        When `true` a ping is sent even if it contains no data.
-+      type: boolean
-+
-+    notification_emails:
-+      title: Notification emails
-+      description: |
-+        **Required.**
-+
-+        A list of email addresses to notify for important events with the
-+        ping or when people with context or ownership for the ping need to
-+        be contacted.
-+      type: array
-+      minItems: 1
-+      items:
-+        type: string
-+        format: email
-+
-+    bugs:
-+      title: Related bugs
-+      description: |
-+        **Required.**
-+
-+        A list of bugs (e.g. Bugzilla and Github) that are relevant to this
-+        ping, e.g., tracking its original implementation or later changes to
-+        it.
-+
-+        If a number, it is an ID to an issue in the default tracker (e.g.
-+        Mozilla's Bugzilla instance). If a string, it must be a URI to a bug
-+        page in a tracker.
-+      type: array
-+      minItems: 1
-+      items:
-+        anyOf:
-+          - type: integer
-+          - type: string
-+            format: uri
-+
-+    data_reviews:
-+      title: Review references
-+      description: |
-+        **Required.**
-+
-+        A list of URIs to any data collection reviews relevant to the ping.
-+      type: array
-+      items:
-+        type: string
-+        format: uri
-+
-+    reasons:
-+      title: The reasons this ping can be sent.
-+      description: |
-+        A list of reasons that the ping might be triggered. Sent in the ping's
-+        `ping_info.reason` field.
-+
-+        Specified as a mapping from reason codes (which are short strings), to
-+        a textual description of the reason.
-+      type: object
-+      propertyNames:
-+        type: string
-+        maxLength: 30
-+      additionalProperties:
-+        type: string
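-+      # Illustrative sketch (editor's note; the reason names are
-+      # hypothetical):
-+      #
-+      #   reasons:
-+      #     dirty_startup: The ping was submitted after a dirty startup.
-+      #     inactive: The ping was submitted when the app became inactive.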
-+
-+  required:
-+    - description
-+    - include_client_id
-+    - bugs
-+    - notification_emails
-+    - data_reviews
-+
-+  additionalProperties: false
-diff --git a/third_party/python/glean_parser/glean_parser/swift.py b/third_party/python/glean_parser/glean_parser/swift.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/swift.py
-@@ -0,0 +1,174 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+"""
-+Outputter to generate Swift code for metrics.
-+"""
-+
-+import enum
-+import json
-+
-+from . import pings
-+from . import util
-+
-+# An (incomplete) list of reserved keywords in Swift.
-+# These will be replaced in generated code by their escaped form.
-+SWIFT_RESERVED_NAMES = ["internal", "typealias"]
-+
-+
-+def swift_datatypes_filter(value):
-+    """
-+    A Jinja2 filter that renders Swift literals.
-+
-+    Based on Python's JSONEncoder, but overrides:
-+      - dicts to use `[key: value]`
-+      - sets to use `[...]`
-+      - enums to use the like-named Swift enum
-+    """
-+
-+    class SwiftEncoder(json.JSONEncoder):
-+        def iterencode(self, value):
-+            if isinstance(value, dict):
-+                yield "["
-+                first = True
-+                for key, subvalue in value.items():
-+                    if not first:
-+                        yield ", "
-+                    yield from self.iterencode(key)
-+                    yield ": "
-+                    yield from self.iterencode(subvalue)
-+                    first = False
-+                yield "]"
-+            elif isinstance(value, enum.Enum):
-+                yield ("." + util.camelize(value.name))
-+            elif isinstance(value, set):
-+                yield "["
-+                first = True
-+                for subvalue in sorted(list(value)):
-+                    if not first:
-+                        yield ", "
-+                    yield from self.iterencode(subvalue)
-+                    first = False
-+                yield "]"
-+            elif value is None:
-+                yield "nil"
-+            else:
-+                yield from super().iterencode(value)
-+
-+    return "".join(SwiftEncoder().iterencode(value))
-+
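-+# For example (editor's note): `swift_datatypes_filter({"a": None})` renders
-+# as `["a": nil]`, and `swift_datatypes_filter({1, 2})` renders as `[1, 2]`.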
-+
-+def type_name(obj):
-+    """
-+    Returns the Swift type to use for a given metric or ping object.
-+    """
-+    generate_enums = getattr(obj, "_generate_enums", [])
-+    if len(generate_enums):
-+        template_args = []
-+        for member, suffix in generate_enums:
-+            if len(getattr(obj, member)):
-+                template_args.append(util.Camelize(obj.name) + suffix)
-+            else:
-+                if suffix == "Keys":
-+                    template_args.append("NoExtraKeys")
-+                else:
-+                    template_args.append("No" + suffix)
-+
-+        return "{}<{}>".format(class_name(obj.type), ", ".join(template_args))
-+
-+    return class_name(obj.type)
-+
-+
-+def class_name(obj_type):
-+    """
-+    Returns the Swift class name for a given metric or ping type.
-+    """
-+    if obj_type == "ping":
-+        return "Ping"
-+    if obj_type.startswith("labeled_"):
-+        obj_type = obj_type[8:]
-+    return util.Camelize(obj_type) + "MetricType"
-+
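-+# For example (editor's note): `class_name("timing_distribution")` returns
-+# "TimingDistributionMetricType"; the "labeled_" prefix is stripped, so
-+# `class_name("labeled_counter")` returns "CounterMetricType".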
-+
-+def variable_name(var):
-+    """
-+    Returns a valid Swift variable name, escaping keywords if necessary.
-+    """
-+    if var in SWIFT_RESERVED_NAMES:
-+        return "`" + var + "`"
-+    else:
-+        return var
-+
-+
-+def output_swift(objs, output_dir, options={}):
-+    """
-+    Given a tree of objects, output Swift code to `output_dir`.
-+
-+    :param objects: A tree of objects (metrics and pings) as returned from
-+    `parser.parse_objects`.
-+    :param output_dir: Path to an output directory to write to.
-+    :param options: options dictionary, with the following optional keys:
-+        - namespace: The namespace to generate metrics in
-+        - glean_namespace: The namespace to import Glean from
-+        - allow_reserved: When True, this is a Glean-internal build
-+    """
-+    template = util.get_jinja2_template(
-+        "swift.jinja2",
-+        filters=(
-+            ("swift", swift_datatypes_filter),
-+            ("type_name", type_name),
-+            ("class_name", class_name),
-+            ("variable_name", variable_name),
-+        ),
-+    )
-+
-+    # The object parameters to pass to constructors.
-+    # **CAUTION**: This list needs to be in the order the type constructor expects them.
-+    # The `test_order_of_fields` test checks that the generated code is valid.
-+    # **DO NOT CHANGE THE ORDER OR ADD NEW FIELDS IN THE MIDDLE**
-+    extra_args = [
-+        "category",
-+        "name",
-+        "send_in_pings",
-+        "lifetime",
-+        "disabled",
-+        "time_unit",
-+        "allowed_extra_keys",
-+        "reason_codes",
-+    ]
-+
-+    namespace = options.get("namespace", "GleanMetrics")
-+    glean_namespace = options.get("glean_namespace", "Glean")
-+
-+    for category_key, category_val in objs.items():
-+        filename = util.Camelize(category_key) + ".swift"
-+        filepath = output_dir / filename
-+
-+        custom_pings = {}
-+        for obj in category_val.values():
-+            if isinstance(obj, pings.Ping):
-+                custom_pings[obj.name] = obj
-+
-+        has_labeled_metrics = any(
-+            getattr(metric, "labeled", False) for metric in category_val.values()
-+        )
-+
-+        with filepath.open("w", encoding="utf-8") as fd:
-+            fd.write(
-+                template.render(
-+                    category_name=category_key,
-+                    objs=category_val,
-+                    extra_args=extra_args,
-+                    namespace=namespace,
-+                    glean_namespace=glean_namespace,
-+                    has_labeled_metrics=has_labeled_metrics,
-+                    is_ping_type=len(custom_pings) > 0,
-+                    allow_reserved=options.get("allow_reserved", False)
-+                )
-+            )
-+            # Jinja2 squashes the final newline, so we explicitly add it
-+            fd.write("\n")
-diff --git a/third_party/python/glean_parser/glean_parser/templates/kotlin.geckoview.jinja2 b/third_party/python/glean_parser/glean_parser/templates/kotlin.geckoview.jinja2
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/templates/kotlin.geckoview.jinja2
-@@ -0,0 +1,124 @@
-+// -*- mode: kotlin -*-
-+
-+/*
-+ * AUTOGENERATED BY glean_parser.  DO NOT EDIT.
-+ */
-+{# The rendered Kotlin code is autogenerated, but this
-+Jinja2 template is not. Please file bugs! #}
-+
-+/* This Source Code Form is subject to the terms of the Mozilla Public
-+ * License, v. 2.0. If a copy of the MPL was not distributed with this
-+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-+
-+@file:Suppress("PackageNaming", "MaxLineLength")
-+package {{ namespace }}
-+
-+import {{ glean_namespace }}.private.BooleanMetricType // ktlint-disable import-ordering no-unused-imports
-+import {{ glean_namespace }}.private.CounterMetricType // ktlint-disable import-ordering no-unused-imports
-+import {{ glean_namespace }}.private.HistogramMetricBase // ktlint-disable import-ordering no-unused-imports
-+import {{ glean_namespace }}.private.LabeledMetricType // ktlint-disable import-ordering no-unused-imports
-+import {{ glean_namespace }}.private.QuantityMetricType // ktlint-disable import-ordering no-unused-imports
-+import {{ glean_namespace }}.private.StringMetricType // ktlint-disable import-ordering no-unused-imports
-+
-+/*
-+ * This class performs the mapping between Gecko metrics and Glean SDK
-+ * metric types.
-+ */
-+internal object GleanGeckoMetricsMapping {
-+    // Support exfiltration of Gecko histograms from products using both the
-+    // Glean SDK and GeckoView. See bug 1566356 for more context.
-+    @Suppress("UNUSED_PARAMETER")
-+    fun getHistogram(geckoMetricName: String): HistogramMetricBase? {
-+    {% if 'histograms' in gecko_metrics %}
-+        return when (geckoMetricName) {
-+        {% for category in gecko_metrics['histograms'].keys()|sort %}
-+            // From {{ category|Camelize }}.kt
-+            {% for metric in gecko_metrics['histograms'][category] %}
-+            "{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
-+            {% endfor %}
-+        {%- endfor %}
-+            else -> null
-+        }
-+    {% else %}
-+        return null
-+    {% endif %}
-+    }
-+
-+    // Support exfiltration of Gecko categorical histograms from products using
-+    // both the Glean SDK and GeckoView. See bug 1571740 for more context.
-+    @Suppress("UNUSED_PARAMETER")
-+    fun getCategoricalMetric(
-+        geckoMetricName: String
-+    ): LabeledMetricType<CounterMetricType>? {
-+    {% if 'categoricals' in gecko_metrics %}
-+        return when (geckoMetricName) {
-+        {% for category in gecko_metrics['categoricals'].keys()|sort %}
-+            // From {{ category|Camelize }}.kt
-+            {% for metric in gecko_metrics['categoricals'][category] %}
-+            "{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
-+            {% endfor %}
-+        {%- endfor %}
-+            else -> null
-+        }
-+    {% else %}
-+        return null
-+    {% endif %}
-+    }
-+
-+    // Support exfiltration of Gecko boolean scalars from products using both the
-+    // Glean SDK and GeckoView. See bug 1579365 for more context.
-+    @Suppress("UNUSED_PARAMETER")
-+    fun getBooleanScalar(geckoMetricName: String): BooleanMetricType? {
-+    {% if 'boolean' in gecko_metrics %}
-+        return when (geckoMetricName) {
-+        {% for category in gecko_metrics['boolean'].keys()|sort %}
-+            // From {{ category|Camelize }}.kt
-+            {% for metric in gecko_metrics['boolean'][category] %}
-+            "{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
-+            {% endfor %}
-+        {%- endfor %}
-+            else -> null
-+        }
-+    {% else %}
-+        return null
-+    {% endif %}
-+    }
-+
-+    // Support exfiltration of Gecko string scalars from products using both the
-+    // Glean SDK and GeckoView. See bug 1579365 for more context.
-+    @Suppress("UNUSED_PARAMETER")
-+    fun getStringScalar(geckoMetricName: String): StringMetricType? {
-+    {% if 'string' in gecko_metrics %}
-+        return when (geckoMetricName) {
-+        {% for category in gecko_metrics['string'].keys()|sort %}
-+            // From {{ category|Camelize }}.kt
-+            {% for metric in gecko_metrics['string'][category] %}
-+            "{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
-+            {% endfor %}
-+        {%- endfor %}
-+            else -> null
-+        }
-+    {% else %}
-+        return null
-+    {% endif %}
-+    }
-+
-+    // Support exfiltration of Gecko quantity scalars from products using both the
-+    // Glean SDK and GeckoView. See bug 1579365 for more context.
-+    @Suppress("UNUSED_PARAMETER")
-+    fun getQuantityScalar(geckoMetricName: String): QuantityMetricType? {
-+    {% if 'quantity' in gecko_metrics %}
-+        return when (geckoMetricName) {
-+        {% for category in gecko_metrics['quantity'].keys()|sort %}
-+            // From {{ category|Camelize }}.kt
-+            {% for metric in gecko_metrics['quantity'][category] %}
-+            "{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
-+            {% endfor %}
-+        {%- endfor %}
-+            else -> null
-+        }
-+    {% else %}
-+        return null
-+    {% endif %}
-+    }
-+}
-diff --git a/third_party/python/glean_parser/glean_parser/templates/kotlin.jinja2 b/third_party/python/glean_parser/glean_parser/templates/kotlin.jinja2
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/templates/kotlin.jinja2
-@@ -0,0 +1,81 @@
-+// -*- mode: kotlin -*-
-+
-+/*
-+ * AUTOGENERATED BY glean_parser.  DO NOT EDIT.
-+ */
-+{# The rendered Kotlin code is autogenerated, but this
-+Jinja2 template is not. Please file bugs! #}
-+
-+/* This Source Code Form is subject to the terms of the Mozilla Public
-+ * License, v. 2.0. If a copy of the MPL was not distributed with this
-+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-+{% macro obj_declaration(obj, suffix='', access='', lazy=False) %}
-+{% if (access != "private ") -%}
-+@get:JvmName("{{ obj.name|camelize }}{{ suffix }}")
-+{% endif -%}
-+{{ access }}val {{ obj.name|camelize }}{{ suffix }}: {{ obj|type_name }}{% if lazy %} by lazy { {%- else %} ={% endif %}
-+
-+        {{ obj|type_name }}(
-+            {% for arg_name in extra_args if obj[arg_name] is defined %}
-+            {{ arg_name|camelize }} = {{ obj[arg_name]|kotlin }}{{ "," if not loop.last }}
-+            {% endfor %}
-+        )
-+{% if lazy %}    }{% endif %}{% endmacro %}
-+
-+/* ktlint-disable no-blank-line-before-rbrace */
-+@file:Suppress("PackageNaming", "MaxLineLength")
-+package {{ namespace }}
-+
-+import {{ glean_namespace }}.private.HistogramType // ktlint-disable import-ordering no-unused-imports
-+import {{ glean_namespace }}.private.Lifetime // ktlint-disable import-ordering no-unused-imports
-+import {{ glean_namespace }}.private.MemoryUnit // ktlint-disable import-ordering no-unused-imports
-+import {{ glean_namespace }}.private.NoExtraKeys // ktlint-disable import-ordering no-unused-imports
-+import {{ glean_namespace }}.private.NoReasonCodes // ktlint-disable import-ordering no-unused-imports
-+import {{ glean_namespace }}.private.TimeUnit // ktlint-disable import-ordering no-unused-imports
-+{% for obj_type in obj_types %}
-+import {{ glean_namespace }}.private.{{ obj_type }} // ktlint-disable import-ordering
-+{% endfor %}
-+{% if has_labeled_metrics %}
-+import {{ glean_namespace }}.private.LabeledMetricType // ktlint-disable import-ordering
-+{% endif %}
-+
-+internal object {{ category_name|Camelize }} {
-+{% for obj in objs.values() %}
-+    {% if obj|attr("_generate_enums") %}
-+    {% for name, suffix in obj["_generate_enums"] %}
-+    {% if obj|attr(name)|length %}
-+    @Suppress("ClassNaming", "EnumNaming")
-+    enum class {{ obj.name|camelize }}{{ suffix }} {
-+    {% for key in obj|attr(name) %}
-+        {{ key|camelize }}{{ "," if not loop.last }}
-+    {% endfor %}
-+    }
-+    {% endif %}
-+    {% endfor %}
-+    {% endif %}
-+{% endfor %}
-+{% for obj in objs.values() %}
-+    {% if obj.labeled %}
-+    {{ obj_declaration(obj, 'Label', 'private ') }}
-+    /**
-+     * {{ obj.description|wordwrap() | replace('\n', '\n        * ') }}
-+     */
-+    val {{ obj.name|camelize }}: LabeledMetricType<{{ obj|type_name }}> by lazy {
-+        LabeledMetricType(
-+            category = {{ obj.category|kotlin }},
-+            name = {{ obj.name|kotlin }},
-+            subMetric = {{ obj.name|camelize }}Label,
-+            disabled = {{ obj.is_disabled()|kotlin }},
-+            lifetime = {{ obj.lifetime|kotlin }},
-+            sendInPings = {{ obj.send_in_pings|kotlin }},
-+            labels = {{ obj.labels|kotlin }}
-+        )
-+    }
-+    {% else %}
-+    /**
-+     * {{ obj.description|wordwrap() | replace('\n', '\n     * ') }}
-+     */
-+    {{ obj_declaration(obj, lazy=obj.type != 'ping') }}
-+    {% endif %}
-+{%- endfor %}
-+}
-diff --git a/third_party/python/glean_parser/glean_parser/templates/markdown.jinja2 b/third_party/python/glean_parser/glean_parser/templates/markdown.jinja2
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/templates/markdown.jinja2
-@@ -0,0 +1,69 @@
-+<!-- AUTOGENERATED BY glean_parser.  DO NOT EDIT. -->
-+{# The rendered markdown is autogenerated, but this
-+Jinja2 template is not. Please file bugs! #}
-+
-+# Metrics
-+This document enumerates the metrics collected by {{ project_title }}.
-+This project may depend on other projects which also collect metrics.
-+This means you might have to go searching through the dependency tree to get a full picture of everything collected by this project.
-+
-+# Pings
-+
-+{% for ping_name in metrics_by_pings.keys()|sort %}
-+ - [{{ ping_name }}]({{ '#' }}{{ ping_name|replace(" ","-") }})
-+{% endfor %}
-+
-+
-+{% for ping_name in metrics_by_pings.keys()|sort %}
-+{% raw %}##{% endraw %} {{ ping_name }}
-+
-+{% if ping_name|ping_desc and ping_name|ping_desc|length > 0 %}
-+{{ ping_name|ping_desc }}
-+
-+{% if ping_name|ping_docs|length > 0 %}
-+See the Glean SDK documentation for the [`{{ ping_name }}` ping]({{ ping_name|ping_docs }}).
-+
-+{% endif %}
-+{% endif %}
-+{% if ping_name|ping_send_if_empty %}
-+This ping is sent if empty.
-+
-+{% endif %}
-+{% if ping_name|ping_reasons %}
-+Reasons this ping may be sent:
-+
-+{% for (reason, desc) in ping_name|ping_reasons|dictsort %}
-+    - `{{ reason }}`: {{ desc|indent(6, indentfirst=False) }}
-+{% endfor %}
-+
-+{% endif %}
-+{% if metrics_by_pings[ping_name] %}
-+The following metrics are added to the ping:
-+
-+| Name | Type | Description | Data reviews | Extras | Expiration |
-+| --- | --- | --- | --- | --- | --- |
-+{% for metric in metrics_by_pings[ping_name] %}
-+| {{ metric.identifier() }} |
-+{{- '['}}{{ metric.type }}]({{ metric.type|metrics_docs }}) |
-+{{- metric.description|replace("\n", " ") }} |
-+{%- for data_review in metric.data_reviews %}
-+[{{ loop.index }}]({{ data_review }}){{ ", " if not loop.last }}
-+{%- endfor -%} |
-+{%- if metric|extra_info -%}
-+<ul>
-+{%- for property, desc in metric|extra_info %}
-+<li>{{ property }}{%- if desc is not none -%}: {{ desc|replace("\n", " ") }}{%- endif -%}</li>
-+{%- endfor -%}
-+</ul>
-+{%- endif -%} |
-+{{- metric.expires }} |
-+{% endfor %}
-+{% else %}
-+This ping contains no metrics.
-+{% endif %}
-+
-+{% endfor %}
-+
-+<!-- AUTOGENERATED BY glean_parser.  DO NOT EDIT. -->
-+{# The rendered markdown is autogenerated, but this
-+Jinja2 template is not. Please file bugs! #}
-diff --git a/third_party/python/glean_parser/glean_parser/templates/swift.jinja2 b/third_party/python/glean_parser/glean_parser/templates/swift.jinja2
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/templates/swift.jinja2
-@@ -0,0 +1,105 @@
-+// -*- mode: Swift -*-
-+
-+// AUTOGENERATED BY glean_parser.  DO NOT EDIT.
-+{# The rendered Swift code is autogenerated, but this
-+Jinja2 template is not. Please file bugs! #}
-+
-+/* This Source Code Form is subject to the terms of the Mozilla Public
-+ * License, v. 2.0. If a copy of the MPL was not distributed with this
-+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-+{% macro obj_declaration(obj, suffix='', access='') %}
-+{{ access }}static let {{ obj.name|camelize|variable_name }}{{ suffix }} = {{ obj|type_name }}(
-+            {% for arg_name in extra_args if obj[arg_name] is defined %}
-+            {{ arg_name|camelize }}: {{ obj[arg_name]|swift }}{{ "," if not loop.last }}
-+            {% endfor %}
-+        )
-+{% endmacro %}
-+
-+{% if not allow_reserved %}
-+import {{ glean_namespace }}
-+
-+{% endif %}
-+// swiftlint:disable superfluous_disable_command
-+// swiftlint:disable nesting
-+// swiftlint:disable line_length
-+// swiftlint:disable identifier_name
-+// swiftlint:disable force_try
-+
-+extension {{ namespace }} {
-+    {% if is_ping_type %}
-+    class {{ category_name|Camelize }} {
-+        public static let shared = {{ category_name|Camelize }}()
-+        private init() {
-+            // Intentionally left private, no external user can instantiate a new global object.
-+        }
-+
-+        {% for obj in objs.values() %}
-+        {% if obj|attr("_generate_enums") %}
-+        {% for name, suffix in obj["_generate_enums"] %}
-+        {% if obj|attr(name)|length %}
-+        enum {{ obj.name|Camelize }}{{ suffix }}: Int, ReasonCodes {
-+            {% for key in obj|attr(name) %}
-+            case {{ key|camelize|variable_name }} = {{ loop.index-1 }}
-+            {% endfor %}
-+
-+            public func index() -> Int {
-+                return self.rawValue
-+            }
-+        }
-+
-+        {% endif %}
-+        {% endfor %}
-+        {% endif %}
-+
-+        /// {{ obj.description|wordwrap() | replace('\n', '\n        /// ') }}
-+        let {{ obj.name|camelize|variable_name }} = {{obj|type_name}}(
-+            name: {{ obj.name|swift }},
-+            includeClientId: {{obj.include_client_id|swift}},
-+            sendIfEmpty: {{obj.send_if_empty|swift}},
-+            reasonCodes: {{obj.reason_codes|swift}}
-+        )
-+
-+      {% endfor %}
-+    }
-+    {% else %}
-+    enum {{ category_name|Camelize }} {
-+    {% for obj in objs.values() %}
-+        {% if obj|attr("_generate_enums") %}
-+        {% for name, suffix in obj["_generate_enums"] %}
-+        {% if obj|attr(name)|length %}
-+        enum {{ obj.name|Camelize }}{{ suffix }}: Int32, ExtraKeys {
-+        {% for key in obj|attr(name) %}
-+            case {{ key|camelize|variable_name }} = {{ loop.index-1 }}
-+        {% endfor %}
-+
-+            public func index() -> Int32 {
-+                return self.rawValue
-+            }
-+        }
-+
-+        {% endif %}
-+        {% endfor %}
-+        {% endif %}
-+    {% endfor %}
-+    {% for obj in objs.values() %}
-+        {% if obj.labeled %}
-+        {{ obj_declaration(obj, 'Label', 'private ') }}
-+        /// {{ obj.description|wordwrap() | replace('\n', '\n        /// ') }}
-+        static let {{ obj.name|camelize|variable_name }} = try! LabeledMetricType<{{ obj|type_name }}>(
-+            category: {{ obj.category|swift }},
-+            name: {{ obj.name|swift }},
-+            sendInPings: {{ obj.send_in_pings|swift }},
-+            lifetime: {{ obj.lifetime|swift }},
-+            disabled: {{ obj.is_disabled()|swift }},
-+            subMetric: {{ obj.name|camelize }}Label,
-+            labels: {{ obj.labels|swift }}
-+        )
-+
-+        {% else %}
-+        /// {{ obj.description|wordwrap() | replace('\n', '\n        /// ') }}
-+        {{ obj_declaration(obj) }}
-+        {% endif %}
-+    {% endfor %}
-+    }
-+    {% endif %}
-+}
-diff --git a/third_party/python/glean_parser/glean_parser/translate.py b/third_party/python/glean_parser/glean_parser/translate.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/translate.py
-@@ -0,0 +1,101 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+"""
-+High-level interface for translating `metrics.yaml` into other formats.
-+"""
-+
-+from pathlib import Path
-+import os
-+import shutil
-+import sys
-+import tempfile
-+
-+from . import lint
-+from . import parser
-+from . import kotlin
-+from . import markdown
-+from . import swift
-+from . import util
-+
-+
-+# Each outputter in the table has the following keys:
-+# - "output_func": the main function of the outputter, the one which
-+#   does the actual translation.
-+# - "clear_output_dir": a flag to clear the target directory before moving there
-+#   the generated files.
-+OUTPUTTERS = {
-+    "kotlin": {
-+        "output_func": kotlin.output_kotlin,
-+        "clear_output_dir": True,
-+        "extensions": ["*.kt"],
-+    },
-+    "markdown": {"output_func": markdown.output_markdown, "clear_output_dir": False},
-+    "swift": {
-+        "output_func": swift.output_swift,
-+        "clear_output_dir": True,
-+        "extensions": ["*.swift"],
-+    },
-+}
-+
-+
-+def translate(input_filepaths, output_format, output_dir, options={}, parser_config={}):
-+    """
-+    Translate the files in `input_filepaths` to the given `output_format` and
-+    put the results in `output_dir`.
-+
-+    :param input_filepaths: list of paths to input metrics.yaml files
-+    :param output_format: the name of the output formats
-+    :param output_dir: the path to the output directory
-+    :param options: dictionary of options. The available options are backend
-+        format specific.
-+    :param parser_config: A dictionary of options that change parsing behavior.
-+        See `parser.parse_metrics` for more info.
-+    """
-+    if output_format not in OUTPUTTERS:
-+        raise ValueError("Unknown output format '{}'".format(output_format))
-+
-+    all_objects = parser.parse_objects(input_filepaths, parser_config)
-+
-+    if util.report_validation_errors(all_objects):
-+        return 1
-+
-+    if lint.lint_metrics(all_objects.value, parser_config):
-+        print(
-+            "NOTE: These warnings will become errors in a future release of Glean.",
-+            file=sys.stderr,
-+        )
-+
-+    # allow_reserved is also relevant to the translators, so copy it there
-+    if parser_config.get("allow_reserved"):
-+        options["allow_reserved"] = True
-+
-+    # Write everything out to a temporary directory, and then move it to the
-+    # real directory, for transactional integrity.
-+    with tempfile.TemporaryDirectory() as tempdir:
-+        tempdir_path = Path(tempdir)
-+        OUTPUTTERS[output_format]["output_func"](
-+            all_objects.value, tempdir_path, options
-+        )
-+
-+        if OUTPUTTERS[output_format]["clear_output_dir"]:
-+            if output_dir.is_file():
-+                output_dir.unlink()
-+            elif output_dir.is_dir():
-+                for extensions in OUTPUTTERS[output_format]["extensions"]:
-+                    for filepath in output_dir.glob(extensions):
-+                        filepath.unlink()
-+                if len(list(output_dir.iterdir())):
-+                    print("Extra contents found in '{}'.".format(output_dir))
-+
-+        # We can't use shutil.copytree alone if the directory already exists.
-+        # However, if it doesn't exist, make sure to create one otherwise
-+        # shutil.copy will fail.
-+        os.makedirs(str(output_dir), exist_ok=True)
-+        for filename in tempdir_path.glob("*"):
-+            shutil.copy(str(filename), str(output_dir))
-+
-+    return 0
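-+
-+
-+# Usage sketch (editor's note; the paths and namespace are hypothetical):
-+#
-+#     translate([Path("metrics.yaml")], "kotlin", Path("generated"),
-+#               options={"namespace": "org.example.GleanMetrics"})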
-diff --git a/third_party/python/glean_parser/glean_parser/util.py b/third_party/python/glean_parser/glean_parser/util.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/util.py
-@@ -0,0 +1,376 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+from collections import OrderedDict
-+import datetime
-+import functools
-+import json
-+from pathlib import Path
-+import sys
-+import textwrap
-+import urllib.request
-+
-+import appdirs
-+import diskcache
-+import jinja2
-+import jsonschema
-+from jsonschema import _utils
-+import yaml
-+
-+if sys.version_info < (3, 7):
-+    import iso8601
-+
-+
-+TESTING_MODE = "pytest" in sys.modules
-+
-+
-+# Adapted from
-+# https://stackoverflow.com/questions/34667108/ignore-dates-and-times-while-parsing-yaml
-+class _NoDatesSafeLoader(yaml.SafeLoader):
-+    @classmethod
-+    def remove_implicit_resolver(cls, tag_to_remove):
-+        """
-+        Remove implicit resolvers for a particular tag
-+
-+        Takes care not to modify resolvers in super classes.
-+
-+        We want to load datetimes as strings, not dates, because we
-+        go on to serialise as json which doesn't have the advanced types
-+        of yaml, and leads to incompatibilities down the track.
-+        """
-+        if "yaml_implicit_resolvers" not in cls.__dict__:
-+            cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
-+
-+        for first_letter, mappings in cls.yaml_implicit_resolvers.items():
-+            cls.yaml_implicit_resolvers[first_letter] = [
-+                (tag, regexp) for tag, regexp in mappings if tag != tag_to_remove
-+            ]
-+
-+
-+# Since we use JSON schema to validate, and JSON schema doesn't support
-+# datetimes, we don't want the YAML loader to give us datetimes -- just
-+# strings.
-+_NoDatesSafeLoader.remove_implicit_resolver("tag:yaml.org,2002:timestamp")
-+
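For context, here is a minimal standalone sketch (not part of the vendored file) of what removing the implicit timestamp resolver achieves: the same YAML scalar loads as a plain string rather than a datetime.date, so it serialises to JSON untouched.

    import yaml

    class NoDatesLoader(yaml.SafeLoader):
        # Copy the resolver table, minus timestamps, so SafeLoader itself
        # is left unmodified.
        yaml_implicit_resolvers = {
            first: [(tag, regexp) for tag, regexp in mappings
                    if tag != "tag:yaml.org,2002:timestamp"]
            for first, mappings in yaml.SafeLoader.yaml_implicit_resolvers.items()
        }

    doc = "expires: 2020-01-01"
    print(type(yaml.load(doc, Loader=yaml.SafeLoader)["expires"]))  # datetime.date
    print(type(yaml.load(doc, Loader=NoDatesLoader)["expires"]))    # str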
-+
-+if sys.version_info < (3, 7):
-+    # In Python prior to 3.7, dictionary order is not preserved. However, we
-+    # want the metrics to appear in the output in the same order as they are in
-+    # the metrics.yaml file, so on earlier versions of Python we must use an
-+    # OrderedDict object.
-+    def ordered_yaml_load(stream):
-+        class OrderedLoader(_NoDatesSafeLoader):
-+            pass
-+
-+        def construct_mapping(loader, node):
-+            loader.flatten_mapping(node)
-+            return OrderedDict(loader.construct_pairs(node))
-+
-+        OrderedLoader.add_constructor(
-+            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping
-+        )
-+        return yaml.load(stream, OrderedLoader)
-+
-+    def ordered_yaml_dump(data, **kwargs):
-+        class OrderedDumper(yaml.Dumper):
-+            pass
-+
-+        def _dict_representer(dumper, data):
-+            return dumper.represent_mapping(
-+                yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()
-+            )
-+
-+        OrderedDumper.add_representer(OrderedDict, _dict_representer)
-+        return yaml.dump(data, Dumper=OrderedDumper, **kwargs)
-+
-+
-+else:
-+
-+    def ordered_yaml_load(stream):
-+        return yaml.load(stream, Loader=_NoDatesSafeLoader)
-+
-+    def ordered_yaml_dump(data, **kwargs):
-+        return yaml.dump(data, **kwargs)
-+
-+
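A brief usage sketch (assuming glean_parser is importable): on Python 3.6 it is ordered_yaml_load that guarantees keys come back in file order rather than arbitrary dict order.

    from glean_parser import util

    src = "first: 1\nsecond: 2\nthird: 3\n"
    print(list(util.ordered_yaml_load(src)))  # ['first', 'second', 'third']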
-+def load_yaml_or_json(path, ordered_dict=False):
-+    """
-+    Load the content from either a .json or .yaml file, based on the filename
-+    extension.
-+
-+    :param path: `pathlib.Path` object
-+    :return: The tree of objects resulting from parsing the file.
-+    :raises ValueError: The file is none of .json, .yml, .yaml or .yamlx.
-+    """
-+    # If in py.test, support bits of literal JSON/YAML content
-+    if TESTING_MODE and isinstance(path, dict):
-+        return path
-+
-+    if not path.is_file():
-+        return {}
-+
-+    if path.suffix == ".json":
-+        with path.open("r") as fd:
-+            return json.load(fd)
-+    elif path.suffix in (".yml", ".yaml", ".yamlx"):
-+        with path.open("r") as fd:
-+            if ordered_dict:
-+                return ordered_yaml_load(fd)
-+            else:
-+                return yaml.load(fd, Loader=_NoDatesSafeLoader)
-+    else:
-+        raise ValueError("Unknown file extension {}".format(path.suffix))
-+
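Illustrative usage; the metrics.yaml path below is hypothetical.

    from pathlib import Path
    from glean_parser import util

    tree = util.load_yaml_or_json(Path("metrics.yaml"), ordered_dict=True)
    print(tree)  # {} when the file is missing, otherwise the parsed tree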
-+
-+def ensure_list(value):
-+    """
-+    Ensures that the value is a list. If it is anything but a list or tuple,
-+    returns a single-element list containing the value.
-+    """
-+    if not isinstance(value, (list, tuple)):
-+        return [value]
-+    return value
-+
-+
-+def to_camel_case(input, capitalize_first_letter):
-+    """
-+    Convert the value to camelCase.
-+
-+    This additionally replaces any '.' or '-' with '_'. The first letter is capitalized
-+    depending on `capitalize_first_letter`.
-+    """
-+    sanitized_input = input.replace(".", "_").replace("-", "_")
-+    # Filter out any empty token. This could happen due to leading '_' or
-+    # consecutive '__'.
-+    tokens = [s.capitalize() for s in sanitized_input.split("_") if len(s) != 0]
-+    # If we're not meant to capitalize the first letter, then lowercase it.
-+    if not capitalize_first_letter:
-+        tokens[0] = tokens[0].lower()
-+    # Finally, join the tokens.
-+    return "".join(tokens)
-+
-+
-+def camelize(value):
-+    """
-+    Convert the value to camelCase (with a lower case first letter).
-+
-+    This is a thin wrapper around to_camel_case that handles dots and
-+    dashes in addition to underscores.
-+    """
-+    return to_camel_case(value, False)
-+
-+
-+def Camelize(value):
-+    """
-+    Convert the value to CamelCase (with an upper case first letter).
-+
-+    This is a thin wrapper around to_camel_case that handles dots and
-+    dashes in addition to underscores.
-+    """
-+    return to_camel_case(value, True)
-+
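For example (assuming glean_parser is importable):

    from glean_parser import util

    print(util.camelize("metrics.first_run"))    # metricsFirstRun
    print(util.Camelize("glean.baseline-ping"))  # GleanBaselinePing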
-+
-+@functools.lru_cache()
-+def get_jinja2_template(template_name, filters=()):
-+    """
-+    Get a Jinja2 template that ships with glean_parser.
-+
-+    The template has extra filters for camel-casing identifiers.
-+
-+    :param template_name: Name of a file in ``glean_parser/templates``
-+    :param filters: tuple of 2-tuple. A tuple of (name, func) pairs defining
-+        additional filters.
-+    """
-+    env = jinja2.Environment(
-+        loader=jinja2.PackageLoader("glean_parser", "templates"),
-+        trim_blocks=True,
-+        lstrip_blocks=True,
-+    )
-+
-+    env.filters["camelize"] = camelize
-+    env.filters["Camelize"] = Camelize
-+    for filter_name, filter_func in filters:
-+        env.filters[filter_name] = filter_func
-+
-+    return env.get_template(template_name)
-+
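A sketch of supplying an extra filter; the template name and the "shout" filter here are illustrative.

    from glean_parser import util

    # The extra "shout" filter is hypothetical, added only for illustration.
    template = util.get_jinja2_template(
        "kotlin.jinja2", filters=(("shout", lambda s: s.upper()),))
    # `template` is a jinja2.Template; render() it with an outputter's context.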
-+
-+def keep_value(f):
-+    """
-+    Wrap a generator so the value it returns (rather than yields) will be
-+    accessible on the .value attribute when the generator is exhausted.
-+    """
-+
-+    class ValueKeepingGenerator(object):
-+        def __init__(self, g):
-+            self.g = g
-+            self.value = None
-+
-+        def __iter__(self):
-+            self.value = yield from self.g
-+
-+    @functools.wraps(f)
-+    def g(*args, **kwargs):
-+        return ValueKeepingGenerator(f(*args, **kwargs))
-+
-+    return g
-+
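This is the pattern translate.py relies on above (note all_objects.value there); a self-contained sketch:

    from glean_parser.util import keep_value

    @keep_value
    def parse():
        yield "warning: first"
        yield "warning: second"
        return {"category": {"metric": "..."}}  # the real payload

    result = parse()
    for warning in result:  # drain the generator...
        print(warning)
    print(result.value)     # ...then the returned payload is available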
-+
-+def get_null_resolver(schema):
-+    """
-+    Returns a JSON Pointer resolver that does nothing.
-+
-+    This lets us handle the moz: URLs in our schemas.
-+    """
-+
-+    class NullResolver(jsonschema.RefResolver):
-+        def resolve_remote(self, uri):
-+            if uri in self.store:
-+                return self.store[uri]
-+            if uri == "":
-+                return self.referrer
-+
-+    return NullResolver.from_schema(schema)
-+
-+
-+def fetch_remote_url(url, cache=True):
-+    """
-+    Fetches the contents from an HTTP url or local file path, and optionally
-+    caches it to disk.
-+    """
-+    is_http = url.startswith("http")
-+
-+    if not is_http:
-+        with open(url, "r", encoding="utf-8") as fd:
-+            contents = fd.read()
-+        return contents
-+
-+    if cache:
-+        cache_dir = appdirs.user_cache_dir("glean_parser", "mozilla")
-+        with diskcache.Cache(cache_dir) as dc:
-+            if url in dc:
-+                return dc[url]
-+
-+    contents = urllib.request.urlopen(url).read()
-+
-+    # On Python 3.5, urlopen does not handle the unicode decoding for us. This
-+    # is ok because we control these files and we know they are in UTF-8,
-+    # however, this wouldn't be correct in general.
-+    if sys.version_info < (3, 6):
-+        contents = contents.decode("utf8")
-+
-+    if cache:
-+        with diskcache.Cache(cache_dir) as dc:
-+            dc[url] = contents
-+
-+    return contents
-+
-+
-+_unset = _utils.Unset()
-+
-+
-+def pprint_validation_error(error):
-+    """
-+    A version of jsonschema's ValidationError __str__ method that doesn't
-+    include the schema fragment that failed.  This makes the error messages
-+    much more succinct.
-+
-+    It also shows any subschemas of anyOf/allOf that failed, if any (what
-+    jsonschema calls "context").
-+    """
-+    essential_for_verbose = (
-+        error.validator,
-+        error.validator_value,
-+        error.instance,
-+        error.schema,
-+    )
-+    if any(m is _unset for m in essential_for_verbose):
-+        return textwrap.fill(error.message)
-+
-+    instance = error.instance
-+    for path in list(error.relative_path)[::-1]:
-+        if isinstance(path, str):
-+            instance = {path: instance}
-+        else:
-+            instance = [instance]
-+
-+    yaml_instance = ordered_yaml_dump(instance, width=72, default_flow_style=False)
-+
-+    parts = ["```", yaml_instance.rstrip(), "```", "", textwrap.fill(error.message)]
-+    if error.context:
-+        parts.extend(
-+            textwrap.fill(x.message, initial_indent="    ", subsequent_indent="    ")
-+            for x in error.context
-+        )
-+
-+    description = error.schema.get("description")
-+    if description:
-+        parts.extend(["", "Documentation for this node:", _utils.indent(description)])
-+
-+    return "\n".join(parts)
-+
-+
-+def format_error(filepath, header, content):
-+    """
-+    Format a jsonschema validation error.
-+    """
-+    if isinstance(filepath, Path):
-+        filepath = filepath.resolve()
-+    else:
-+        filepath = "<string>"
-+    if header:
-+        return "{}: {}\n{}".format(filepath, header, _utils.indent(content))
-+    else:
-+        return "{}:\n{}".format(filepath, _utils.indent(content))
-+
-+
-+def is_expired(expires):
-+    """
-+    Parses the `expires` field in a metric or ping and returns whether
-+    the object should be considered expired.
-+    """
-+    if expires == "never":
-+        return False
-+    elif expires == "expired":
-+        return True
-+    else:
-+        try:
-+            if sys.version_info < (3, 7):
-+                date = iso8601.parse_date(expires).date()
-+            else:
-+                date = datetime.date.fromisoformat(expires)
-+        except ValueError:
-+            raise ValueError(
-+                (
-+                    "Invalid expiration date '{}'. "
-+                    "Must be of the form yyyy-mm-dd in UTC."
-+                ).format(expires)
-+            )
-+        return date <= datetime.datetime.utcnow().date()
-+
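For example (assuming Python 3.7+, or iso8601 installed on older versions):

    from glean_parser import util

    print(util.is_expired("never"))       # False
    print(util.is_expired("expired"))     # True
    print(util.is_expired("2000-01-01"))  # True -- the date has passed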
-+
-+def validate_expires(expires):
-+    """
-+    Raises ValueError if `expires` is not valid.
-+    """
-+    if expires in ("never", "expired"):
-+        return
-+    if sys.version_info < (3, 7):
-+        iso8601.parse_date(expires)
-+    else:
-+        datetime.date.fromisoformat(expires)
-+
-+
-+def report_validation_errors(all_objects):
-+    """
-+    Report any validation errors found, printing them to stderr.
-+    Returns True if any errors were reported.
-+    """
-+    found_error = False
-+    for error in all_objects:
-+        found_error = True
-+        print("=" * 78, file=sys.stderr)
-+        print(error, file=sys.stderr)
-+    return found_error
-diff --git a/third_party/python/glean_parser/glean_parser/validate_ping.py b/third_party/python/glean_parser/glean_parser/validate_ping.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/glean_parser/glean_parser/validate_ping.py
-@@ -0,0 +1,74 @@
-+# -*- coding: utf-8 -*-
-+
-+# This Source Code Form is subject to the terms of the Mozilla Public
-+# License, v. 2.0. If a copy of the MPL was not distributed with this
-+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+
-+"""
-+Validates the contents of a Glean ping against the schema.
-+"""
-+
-+import functools
-+import io
-+import json
-+from pathlib import Path
-+import sys
-+
-+import jsonschema
-+
-+from . import util
-+
-+
-+ROOT_DIR = Path(__file__).parent
-+SCHEMAS_DIR = ROOT_DIR / "schemas"
-+
-+
-+@functools.lru_cache(maxsize=1)
-+def _get_ping_schema(schema_url):
-+    contents = util.fetch_remote_url(schema_url)
-+    return json.loads(contents)
-+
-+
-+def _validate_ping(ins, outs, schema_url):
-+    schema = _get_ping_schema(schema_url)
-+
-+    resolver = util.get_null_resolver(schema)
-+
-+    document = json.load(ins)
-+
-+    validator_class = jsonschema.validators.validator_for(schema)
-+    validator = validator_class(schema, resolver=resolver)
-+
-+    has_error = 0
-+    for error in validator.iter_errors(document):
-+        outs.write("=" * 76)
-+        outs.write("\n")
-+        outs.write(util.format_error("", "", util.pprint_validation_error(error)))
-+        outs.write("\n")
-+        has_error = 1
-+
-+    return has_error
-+
-+
-+def validate_ping(ins, outs=None, schema_url=None):
-+    """
-+    Validates the contents of a Glean ping.
-+
-+    :param ins: Input stream or file path to the ping contents to validate
-+    :param outs: Output stream to write errors to. (Defaults to stdout)
-+    :param schema_url: HTTP URL or local filesystem path to Glean ping schema.
-+        Defaults to the current version of the schema in
-+        mozilla-pipeline-schemas.
-+    :return: 1 if any errors occurred, otherwise 0.
-+    """
-+    if schema_url is None:
-+        raise TypeError("Missing required argument 'schema_url'")
-+
-+    if outs is None:
-+        outs = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
-+
-+    if isinstance(ins, (str, bytes, Path)):
-+        with open(ins, "r") as fd:
-+            return _validate_ping(fd, outs, schema_url=schema_url)
-+    else:
-+        return _validate_ping(ins, outs, schema_url=schema_url)
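A hypothetical invocation; the schema path below is illustrative (in practice, point schema_url at the Glean ping schema in mozilla-pipeline-schemas, via HTTP or a local checkout).

    import io
    from glean_parser import validate_ping

    ping = io.StringIO('{"ping_info": {"seq": 0}}')
    status = validate_ping.validate_ping(ping, schema_url="glean.1.schema.json")
    print("invalid" if status else "valid")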
-diff --git a/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/LICENSE b/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/LICENSE
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/LICENSE
-@@ -0,0 +1,13 @@
-+Copyright 2017-2019 Jason R. Coombs, Barry Warsaw
-+
-+Licensed under the Apache License, Version 2.0 (the "License");
-+you may not use this file except in compliance with the License.
-+You may obtain a copy of the License at
-+
-+http://www.apache.org/licenses/LICENSE-2.0
-+
-+Unless required by applicable law or agreed to in writing, software
-+distributed under the License is distributed on an "AS IS" BASIS,
-+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+See the License for the specific language governing permissions and
-+limitations under the License.
-diff --git a/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/METADATA b/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/METADATA
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/METADATA
-@@ -0,0 +1,65 @@
-+Metadata-Version: 2.1
-+Name: importlib-metadata
-+Version: 1.5.0
-+Summary: Read metadata from Python packages
-+Home-page: http://importlib-metadata.readthedocs.io/
-+Author: Barry Warsaw
-+Author-email: barry@python.org
-+License: Apache Software License
-+Platform: UNKNOWN
-+Classifier: Development Status :: 3 - Alpha
-+Classifier: Intended Audience :: Developers
-+Classifier: License :: OSI Approved :: Apache Software License
-+Classifier: Topic :: Software Development :: Libraries
-+Classifier: Programming Language :: Python :: 3
-+Classifier: Programming Language :: Python :: 2
-+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7
-+Requires-Dist: zipp (>=0.5)
-+Requires-Dist: pathlib2 ; python_version < "3"
-+Requires-Dist: contextlib2 ; python_version < "3"
-+Requires-Dist: configparser (>=3.5) ; python_version < "3"
-+Provides-Extra: docs
-+Requires-Dist: sphinx ; extra == 'docs'
-+Requires-Dist: rst.linker ; extra == 'docs'
-+Provides-Extra: testing
-+Requires-Dist: packaging ; extra == 'testing'
-+Requires-Dist: importlib-resources ; (python_version < "3.7") and extra == 'testing'
-+
-+=========================
-+ ``importlib_metadata``
-+=========================
-+
-+``importlib_metadata`` is a library to access the metadata for a Python
-+package.  It is intended to be ported to Python 3.8.
-+
-+
-+Usage
-+=====
-+
-+See the `online documentation <https://importlib_metadata.readthedocs.io/>`_
-+for usage details.
-+
-+`Finder authors
-+<https://docs.python.org/3/reference/import.html#finders-and-loaders>`_ can
-+also add support for custom package installers.  See the above documentation
-+for details.
-+
-+
-+Caveats
-+=======
-+
-+This project primarily supports third-party packages installed by PyPA
-+tools (or other conforming packages). It does not support:
-+
-+- Packages in the stdlib.
-+- Packages installed without metadata.
-+
-+Project details
-+===============
-+
-+ * Project home: https://gitlab.com/python-devs/importlib_metadata
-+ * Report bugs at: https://gitlab.com/python-devs/importlib_metadata/issues
-+ * Code hosting: https://gitlab.com/python-devs/importlib_metadata.git
-+ * Documentation: http://importlib_metadata.readthedocs.io/
-+
-+
-diff --git a/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/RECORD b/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/RECORD
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/RECORD
-@@ -0,0 +1,21 @@
-+importlib_metadata/__init__.py,sha256=09MTlbzRH9XUpar5uODOPdevOQ0HgR5DJsapV32I-DY,18117
-+importlib_metadata/_compat.py,sha256=wnOChfVj2Vx9gSQNe8BF5Tddy1VDxzDfsC5iyV4I8_0,3884
-+importlib_metadata/docs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-+importlib_metadata/docs/changelog.rst,sha256=Ikau3lhoqOcqeMEnRmtrufO61ZTsyDtVYdfWRggonwA,7755
-+importlib_metadata/docs/conf.py,sha256=m-b6Mju5gFkpSHh-lyJ4iwqf_8t4LjYYFRumtutQSZc,5578
-+importlib_metadata/docs/index.rst,sha256=rbXrDkLAKLIDccqME5u9CCMEfMKprqzQOkIOuwOnfz4,1907
-+importlib_metadata/docs/using.rst,sha256=tlh7M8y0hIRB0cYIflhVFQtdQSfm-Q4GE1luXCU4lIY,9286
-+importlib_metadata/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-+importlib_metadata/tests/fixtures.py,sha256=XescaYiWeK6sOwoP8DckmRjeGKg-eaISRY49gpyWxGY,5329
-+importlib_metadata/tests/test_api.py,sha256=YMAGTsRENrtvpw2CSLmRndJMBeT4q_M0GSe-QsnnMZ4,5544
-+importlib_metadata/tests/test_integration.py,sha256=vhbAi9zmaOa8b0OWSZGETWpn5542wXabfDVPd2lcIaY,1275
-+importlib_metadata/tests/test_main.py,sha256=n1gzecrIaoHb5mup8pY-YT74LwwEy5LeNWkb7CZVxjY,7983
-+importlib_metadata/tests/test_zip.py,sha256=qG3IquiTFLSrUtpxEJblqiUtgEcOTfjU2yM35REk0fo,2372
-+importlib_metadata/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-+importlib_metadata/tests/data/example-21.12-py3-none-any.whl,sha256=I-kYufETid-tDYyR8f1OFJ3t5u_Io23k0cbQxJTUN4I,1455
-+importlib_metadata/tests/data/example-21.12-py3.6.egg,sha256=-EeugFAijkdUO9xyQHTZkQwZoFXK0_QxICBj6R5AAJo,1497
-+importlib_metadata-1.5.0.dist-info/LICENSE,sha256=wNe6dAchmJ1VvVB8D9oTc-gHHadCuaSBAev36sYEM6U,571
-+importlib_metadata-1.5.0.dist-info/METADATA,sha256=puK9j4_6OcxogVH7AvbjHxugq8e51AtHQb-UKkW-6cM,2093
-+importlib_metadata-1.5.0.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
-+importlib_metadata-1.5.0.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19
-+importlib_metadata-1.5.0.dist-info/RECORD,,
-diff --git a/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/WHEEL b/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/WHEEL
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/WHEEL
-@@ -0,0 +1,6 @@
-+Wheel-Version: 1.0
-+Generator: bdist_wheel (0.33.6)
-+Root-Is-Purelib: true
-+Tag: py2-none-any
-+Tag: py3-none-any
-+
-diff --git a/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/top_level.txt b/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/top_level.txt
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata-1.5.0.dist-info/top_level.txt
-@@ -0,0 +1,1 @@
-+importlib_metadata
-diff --git a/third_party/python/importlib_metadata/importlib_metadata/__init__.py b/third_party/python/importlib_metadata/importlib_metadata/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata/__init__.py
-@@ -0,0 +1,591 @@
-+from __future__ import unicode_literals, absolute_import
-+
-+import io
-+import os
-+import re
-+import abc
-+import csv
-+import sys
-+import zipp
-+import operator
-+import functools
-+import itertools
-+import posixpath
-+import collections
-+
-+from ._compat import (
-+    install,
-+    NullFinder,
-+    ConfigParser,
-+    suppress,
-+    map,
-+    FileNotFoundError,
-+    IsADirectoryError,
-+    NotADirectoryError,
-+    PermissionError,
-+    pathlib,
-+    ModuleNotFoundError,
-+    MetaPathFinder,
-+    email_message_from_string,
-+    PyPy_repr,
-+    )
-+from importlib import import_module
-+from itertools import starmap
-+
-+
-+__metaclass__ = type
-+
-+
-+__all__ = [
-+    'Distribution',
-+    'DistributionFinder',
-+    'PackageNotFoundError',
-+    'distribution',
-+    'distributions',
-+    'entry_points',
-+    'files',
-+    'metadata',
-+    'requires',
-+    'version',
-+    ]
-+
-+
-+class PackageNotFoundError(ModuleNotFoundError):
-+    """The package was not found."""
-+
-+
-+class EntryPoint(
-+        PyPy_repr,
-+        collections.namedtuple('EntryPointBase', 'name value group')):
-+    """An entry point as defined by Python packaging conventions.
-+
-+    See `the packaging docs on entry points
-+    <https://packaging.python.org/specifications/entry-points/>`_
-+    for more information.
-+    """
-+
-+    pattern = re.compile(
-+        r'(?P<module>[\w.]+)\s*'
-+        r'(:\s*(?P<attr>[\w.]+))?\s*'
-+        r'(?P<extras>\[.*\])?\s*$'
-+        )
-+    """
-+    A regular expression describing the syntax for an entry point,
-+    which might look like:
-+
-+        - module
-+        - package.module
-+        - package.module:attribute
-+        - package.module:object.attribute
-+        - package.module:attr [extra1, extra2]
-+
-+    Other combinations are possible as well.
-+
-+    The expression is lenient about whitespace around the ':',
-+    following the attr, and following any extras.
-+    """
-+
-+    def load(self):
-+        """Load the entry point from its definition. If only a module
-+        is indicated by the value, return that module. Otherwise,
-+        return the named object.
-+        """
-+        match = self.pattern.match(self.value)
-+        module = import_module(match.group('module'))
-+        attrs = filter(None, (match.group('attr') or '').split('.'))
-+        return functools.reduce(getattr, attrs, module)
-+
-+    @property
-+    def extras(self):
-+        match = self.pattern.match(self.value)
-+        return list(re.finditer(r'\w+', match.group('extras') or ''))
-+
-+    @classmethod
-+    def _from_config(cls, config):
-+        return [
-+            cls(name, value, group)
-+            for group in config.sections()
-+            for name, value in config.items(group)
-+            ]
-+
-+    @classmethod
-+    def _from_text(cls, text):
-+        config = ConfigParser(delimiters='=')
-+        # case sensitive: https://stackoverflow.com/q/1611799/812183
-+        config.optionxform = str
-+        try:
-+            config.read_string(text)
-+        except AttributeError:  # pragma: nocover
-+            # Python 2 has no read_string
-+            config.readfp(io.StringIO(text))
-+        return EntryPoint._from_config(config)
-+
-+    def __iter__(self):
-+        """
-+        Supply iter so one may construct dicts of EntryPoints easily.
-+        """
-+        return iter((self.name, self))
-+
-+    def __reduce__(self):
-+        return (
-+            self.__class__,
-+            (self.name, self.value, self.group),
-+            )
-+
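A sketch of how the pattern decomposes a value (pkg.cli is a hypothetical module, and .load() is deliberately not called here):

    from importlib_metadata import EntryPoint

    ep = EntryPoint("main", "pkg.cli:main_func [extra1]", "console_scripts")
    match = EntryPoint.pattern.match(ep.value)
    print(match.group("module"), match.group("attr"), match.group("extras"))
    # pkg.cli main_func [extra1]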
-+
-+class PackagePath(pathlib.PurePosixPath):
-+    """A reference to a path in a package"""
-+
-+    def read_text(self, encoding='utf-8'):
-+        with self.locate().open(encoding=encoding) as stream:
-+            return stream.read()
-+
-+    def read_binary(self):
-+        with self.locate().open('rb') as stream:
-+            return stream.read()
-+
-+    def locate(self):
-+        """Return a path-like object for this path"""
-+        return self.dist.locate_file(self)
-+
-+
-+class FileHash:
-+    def __init__(self, spec):
-+        self.mode, _, self.value = spec.partition('=')
-+
-+    def __repr__(self):
-+        return '<FileHash mode: {} value: {}>'.format(self.mode, self.value)
-+
-+
-+class Distribution:
-+    """A Python distribution package."""
-+
-+    @abc.abstractmethod
-+    def read_text(self, filename):
-+        """Attempt to load metadata file given by the name.
-+
-+        :param filename: The name of the file in the distribution info.
-+        :return: The text if found, otherwise None.
-+        """
-+
-+    @abc.abstractmethod
-+    def locate_file(self, path):
-+        """
-+        Given a path to a file in this distribution, return a path
-+        to it.
-+        """
-+
-+    @classmethod
-+    def from_name(cls, name):
-+        """Return the Distribution for the given package name.
-+
-+        :param name: The name of the distribution package to search for.
-+        :return: The Distribution instance (or subclass thereof) for the named
-+            package, if found.
-+        :raises PackageNotFoundError: When the named package's distribution
-+            metadata cannot be found.
-+        """
-+        for resolver in cls._discover_resolvers():
-+            dists = resolver(DistributionFinder.Context(name=name))
-+            dist = next(dists, None)
-+            if dist is not None:
-+                return dist
-+        else:
-+            raise PackageNotFoundError(name)
-+
-+    @classmethod
-+    def discover(cls, **kwargs):
-+        """Return an iterable of Distribution objects for all packages.
-+
-+        Pass a ``context`` or pass keyword arguments for constructing
-+        a context.
-+
-+        :context: A ``DistributionFinder.Context`` object.
-+        :return: Iterable of Distribution objects for all packages.
-+        """
-+        context = kwargs.pop('context', None)
-+        if context and kwargs:
-+            raise ValueError("cannot accept context and kwargs")
-+        context = context or DistributionFinder.Context(**kwargs)
-+        return itertools.chain.from_iterable(
-+            resolver(context)
-+            for resolver in cls._discover_resolvers()
-+            )
-+
-+    @staticmethod
-+    def at(path):
-+        """Return a Distribution for the indicated metadata path
-+
-+        :param path: a string or path-like object
-+        :return: a concrete Distribution instance for the path
-+        """
-+        return PathDistribution(pathlib.Path(path))
-+
-+    @staticmethod
-+    def _discover_resolvers():
-+        """Search the meta_path for resolvers."""
-+        declared = (
-+            getattr(finder, 'find_distributions', None)
-+            for finder in sys.meta_path
-+            )
-+        return filter(None, declared)
-+
-+    @property
-+    def metadata(self):
-+        """Return the parsed metadata for this Distribution.
-+
-+        The returned object will have keys that name the various bits of
-+        metadata.  See PEP 566 for details.
-+        """
-+        text = (
-+            self.read_text('METADATA')
-+            or self.read_text('PKG-INFO')
-+            # This last clause is here to support old egg-info files.  Its
-+            # effect is to just end up using the PathDistribution's self._path
-+            # (which points to the egg-info file) attribute unchanged.
-+            or self.read_text('')
-+            )
-+        return email_message_from_string(text)
-+
-+    @property
-+    def version(self):
-+        """Return the 'Version' metadata for the distribution package."""
-+        return self.metadata['Version']
-+
-+    @property
-+    def entry_points(self):
-+        return EntryPoint._from_text(self.read_text('entry_points.txt'))
-+
-+    @property
-+    def files(self):
-+        """Files in this distribution.
-+
-+        :return: List of PackagePath for this distribution or None
-+
-+        Result is `None` if the metadata file that enumerates files
-+        (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
-+        missing.
-+        Result may be empty if the metadata exists but is empty.
-+        """
-+        file_lines = self._read_files_distinfo() or self._read_files_egginfo()
-+
-+        def make_file(name, hash=None, size_str=None):
-+            result = PackagePath(name)
-+            result.hash = FileHash(hash) if hash else None
-+            result.size = int(size_str) if size_str else None
-+            result.dist = self
-+            return result
-+
-+        return file_lines and list(starmap(make_file, csv.reader(file_lines)))
-+
-+    def _read_files_distinfo(self):
-+        """
-+        Read the lines of RECORD
-+        """
-+        text = self.read_text('RECORD')
-+        return text and text.splitlines()
-+
-+    def _read_files_egginfo(self):
-+        """
-+        SOURCES.txt might contain literal commas, so wrap each line
-+        in quotes.
-+        """
-+        text = self.read_text('SOURCES.txt')
-+        return text and map('"{}"'.format, text.splitlines())
-+
-+    @property
-+    def requires(self):
-+        """Generated requirements specified for this Distribution"""
-+        reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
-+        return reqs and list(reqs)
-+
-+    def _read_dist_info_reqs(self):
-+        return self.metadata.get_all('Requires-Dist')
-+
-+    def _read_egg_info_reqs(self):
-+        source = self.read_text('requires.txt')
-+        return source and self._deps_from_requires_text(source)
-+
-+    @classmethod
-+    def _deps_from_requires_text(cls, source):
-+        section_pairs = cls._read_sections(source.splitlines())
-+        sections = {
-+            section: list(map(operator.itemgetter('line'), results))
-+            for section, results in
-+            itertools.groupby(section_pairs, operator.itemgetter('section'))
-+            }
-+        return cls._convert_egg_info_reqs_to_simple_reqs(sections)
-+
-+    @staticmethod
-+    def _read_sections(lines):
-+        section = None
-+        for line in filter(None, lines):
-+            section_match = re.match(r'\[(.*)\]$', line)
-+            if section_match:
-+                section = section_match.group(1)
-+                continue
-+            yield locals()
-+
-+    @staticmethod
-+    def _convert_egg_info_reqs_to_simple_reqs(sections):
-+        """
-+        Historically, setuptools would solicit and store 'extra'
-+        requirements, including those with environment markers,
-+        in separate sections. More modern tools expect each
-+        dependency to be defined separately, with any relevant
-+        extras and environment markers attached directly to that
-+        requirement. This method converts the former to the
-+        latter. See _test_deps_from_requires_text for an example.
-+        """
-+        def make_condition(name):
-+            return name and 'extra == "{name}"'.format(name=name)
-+
-+        def parse_condition(section):
-+            section = section or ''
-+            extra, sep, markers = section.partition(':')
-+            if extra and markers:
-+                markers = '({markers})'.format(markers=markers)
-+            conditions = list(filter(None, [markers, make_condition(extra)]))
-+            return '; ' + ' and '.join(conditions) if conditions else ''
-+
-+        for section, deps in sections.items():
-+            for dep in deps:
-+                yield dep + parse_condition(section)
-+
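A sketch exercising this private helper directly (internal API, shown only to illustrate the conversion of egg-info sections to PEP 508-style markers):

    from importlib_metadata import Distribution

    requires_txt = "\n".join([
        "pytest",
        "[docs]",
        "sphinx",
        '[testing:python_version < "3"]',
        "mock",
    ])
    print(list(Distribution._deps_from_requires_text(requires_txt)))
    # ['pytest', 'sphinx; extra == "docs"',
    #  'mock; (python_version < "3") and extra == "testing"']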
-+
-+class DistributionFinder(MetaPathFinder):
-+    """
-+    A MetaPathFinder capable of discovering installed distributions.
-+    """
-+
-+    class Context:
-+        """
-+        Keyword arguments presented by the caller to
-+        ``distributions()`` or ``Distribution.discover()``
-+        to narrow the scope of a search for distributions
-+        in all DistributionFinders.
-+
-+        Each DistributionFinder may expect any parameters
-+        and should attempt to honor the canonical
-+        parameters defined below when appropriate.
-+        """
-+
-+        name = None
-+        """
-+        Specific name for which a distribution finder should match.
-+        A name of ``None`` matches all distributions.
-+        """
-+
-+        def __init__(self, **kwargs):
-+            vars(self).update(kwargs)
-+
-+        @property
-+        def path(self):
-+            """
-+            The path that a distribution finder should search.
-+
-+            Typically refers to Python package paths and defaults
-+            to ``sys.path``.
-+            """
-+            return vars(self).get('path', sys.path)
-+
-+    @abc.abstractmethod
-+    def find_distributions(self, context=Context()):
-+        """
-+        Find distributions.
-+
-+        Return an iterable of all Distribution instances capable of
-+        loading the metadata for packages matching the ``context``,
-+        a DistributionFinder.Context instance.
-+        """
-+
-+
-+class FastPath:
-+    """
-+    Micro-optimized class for searching a path for
-+    children.
-+    """
-+
-+    def __init__(self, root):
-+        self.root = root
-+        self.base = os.path.basename(root).lower()
-+
-+    def joinpath(self, child):
-+        return pathlib.Path(self.root, child)
-+
-+    def children(self):
-+        with suppress(Exception):
-+            return os.listdir(self.root or '')
-+        with suppress(Exception):
-+            return self.zip_children()
-+        return []
-+
-+    def zip_children(self):
-+        zip_path = zipp.Path(self.root)
-+        names = zip_path.root.namelist()
-+        self.joinpath = zip_path.joinpath
-+
-+        return (
-+            posixpath.split(child)[0]
-+            for child in names
-+            )
-+
-+    def is_egg(self, search):
-+        base = self.base
-+        return (
-+            base == search.versionless_egg_name
-+            or base.startswith(search.prefix)
-+            and base.endswith('.egg'))
-+
-+    def search(self, name):
-+        for child in self.children():
-+            n_low = child.lower()
-+            if (n_low in name.exact_matches
-+                    or n_low.startswith(name.prefix)
-+                    and n_low.endswith(name.suffixes)
-+                    # legacy case:
-+                    or self.is_egg(name) and n_low == 'egg-info'):
-+                yield self.joinpath(child)
-+
-+
-+class Prepared:
-+    """
-+    A prepared search for metadata on a possibly-named package.
-+    """
-+    normalized = ''
-+    prefix = ''
-+    suffixes = '.dist-info', '.egg-info'
-+    exact_matches = [''][:0]
-+    versionless_egg_name = ''
-+
-+    def __init__(self, name):
-+        self.name = name
-+        if name is None:
-+            return
-+        self.normalized = name.lower().replace('-', '_')
-+        self.prefix = self.normalized + '-'
-+        self.exact_matches = [
-+            self.normalized + suffix for suffix in self.suffixes]
-+        self.versionless_egg_name = self.normalized + '.egg'
-+
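For illustration (Prepared is an internal class, imported here only to show the name normalization):

    from importlib_metadata import Prepared

    p = Prepared("Glean-Parser")
    print(p.normalized)     # glean_parser
    print(p.prefix)         # glean_parser-
    print(p.exact_matches)  # ['glean_parser.dist-info', 'glean_parser.egg-info']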
-+
-+@install
-+class MetadataPathFinder(NullFinder, DistributionFinder):
-+    """A degenerate finder for distribution packages on the file system.
-+
-+    This finder supplies only a find_distributions() method for versions
-+    of Python that do not have a PathFinder find_distributions().
-+    """
-+
-+    def find_distributions(self, context=DistributionFinder.Context()):
-+        """
-+        Find distributions.
-+
-+        Return an iterable of all Distribution instances capable of
-+        loading the metadata for packages matching ``context.name``
-+        (or all names if ``None`` indicated) along the paths in the list
-+        of directories ``context.path``.
-+        """
-+        found = self._search_paths(context.name, context.path)
-+        return map(PathDistribution, found)
-+
-+    @classmethod
-+    def _search_paths(cls, name, paths):
-+        """Find metadata directories in paths heuristically."""
-+        return itertools.chain.from_iterable(
-+            path.search(Prepared(name))
-+            for path in map(FastPath, paths)
-+            )
-+
-+
-+class PathDistribution(Distribution):
-+    def __init__(self, path):
-+        """Construct a distribution from a path to the metadata directory.
-+
-+        :param path: A pathlib.Path or similar object supporting
-+                     .joinpath(), __div__, .parent, and .read_text().
-+        """
-+        self._path = path
-+
-+    def read_text(self, filename):
-+        with suppress(FileNotFoundError, IsADirectoryError, KeyError,
-+                      NotADirectoryError, PermissionError):
-+            return self._path.joinpath(filename).read_text(encoding='utf-8')
-+    read_text.__doc__ = Distribution.read_text.__doc__
-+
-+    def locate_file(self, path):
-+        return self._path.parent / path
-+
-+
-+def distribution(distribution_name):
-+    """Get the ``Distribution`` instance for the named package.
-+
-+    :param distribution_name: The name of the distribution package as a string.
-+    :return: A ``Distribution`` instance (or subclass thereof).
-+    """
-+    return Distribution.from_name(distribution_name)
-+
-+
-+def distributions(**kwargs):
-+    """Get all ``Distribution`` instances in the current environment.
-+
-+    :return: An iterable of ``Distribution`` instances.
-+    """
-+    return Distribution.discover(**kwargs)
-+
-+
-+def metadata(distribution_name):
-+    """Get the metadata for the named package.
-+
-+    :param distribution_name: The name of the distribution package to query.
-+    :return: An email.Message containing the parsed metadata.
-+    """
-+    return Distribution.from_name(distribution_name).metadata
-+
-+
-+def version(distribution_name):
-+    """Get the version string for the named package.
-+
-+    :param distribution_name: The name of the distribution package to query.
-+    :return: The version string for the package as defined in the package's
-+        "Version" metadata key.
-+    """
-+    return distribution(distribution_name).version
-+
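For example, querying a package that is certainly installed whenever this library is importable:

    import importlib_metadata

    print(importlib_metadata.version("importlib_metadata"))  # e.g. '1.5.0'
    print(importlib_metadata.metadata("importlib_metadata")["Requires-Python"])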
-+
-+def entry_points():
-+    """Return EntryPoint objects for all installed packages.
-+
-+    :return: EntryPoint objects for all installed packages.
-+    """
-+    eps = itertools.chain.from_iterable(
-+        dist.entry_points for dist in distributions())
-+    by_group = operator.attrgetter('group')
-+    ordered = sorted(eps, key=by_group)
-+    grouped = itertools.groupby(ordered, by_group)
-+    return {
-+        group: tuple(eps)
-+        for group, eps in grouped
-+        }
-+
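For example, listing every console script visible in the environment (output depends on what is installed):

    import importlib_metadata

    for ep in importlib_metadata.entry_points().get("console_scripts", ()):
        print(ep.name, "->", ep.value)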
-+
-+def files(distribution_name):
-+    """Return a list of files for the named package.
-+
-+    :param distribution_name: The name of the distribution package to query.
-+    :return: List of files composing the distribution.
-+    """
-+    return distribution(distribution_name).files
-+
-+
-+def requires(distribution_name):
-+    """
-+    Return a list of requirements for the named package.
-+
-+    :return: A list of requirements, suitable for
-+        packaging.requirements.Requirement.
-+    """
-+    return distribution(distribution_name).requires
-+
-+
-+__version__ = version(__name__)
-diff --git a/third_party/python/importlib_metadata/importlib_metadata/_compat.py b/third_party/python/importlib_metadata/importlib_metadata/_compat.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata/_compat.py
-@@ -0,0 +1,131 @@
-+from __future__ import absolute_import
-+
-+import io
-+import abc
-+import sys
-+import email
-+
-+
-+if sys.version_info > (3,):  # pragma: nocover
-+    import builtins
-+    from configparser import ConfigParser
-+    from contextlib import suppress
-+    FileNotFoundError = builtins.FileNotFoundError
-+    IsADirectoryError = builtins.IsADirectoryError
-+    NotADirectoryError = builtins.NotADirectoryError
-+    PermissionError = builtins.PermissionError
-+    map = builtins.map
-+else:  # pragma: nocover
-+    from backports.configparser import ConfigParser
-+    from itertools import imap as map  # type: ignore
-+    from contextlib2 import suppress  # noqa
-+    FileNotFoundError = IOError, OSError
-+    IsADirectoryError = IOError, OSError
-+    NotADirectoryError = IOError, OSError
-+    PermissionError = IOError, OSError
-+
-+if sys.version_info > (3, 5):  # pragma: nocover
-+    import pathlib
-+else:  # pragma: nocover
-+    import pathlib2 as pathlib
-+
-+try:
-+    ModuleNotFoundError = builtins.FileNotFoundError
-+except (NameError, AttributeError):  # pragma: nocover
-+    ModuleNotFoundError = ImportError  # type: ignore
-+
-+
-+if sys.version_info >= (3,):  # pragma: nocover
-+    from importlib.abc import MetaPathFinder
-+else:  # pragma: nocover
-+    class MetaPathFinder(object):
-+        __metaclass__ = abc.ABCMeta
-+
-+
-+__metaclass__ = type
-+__all__ = [
-+    'install', 'NullFinder', 'MetaPathFinder', 'ModuleNotFoundError',
-+    'pathlib', 'ConfigParser', 'map', 'suppress', 'FileNotFoundError',
-+    'NotADirectoryError', 'email_message_from_string',
-+    ]
-+
-+
-+def install(cls):
-+    """
-+    Class decorator for installation on sys.meta_path.
-+
-+    Adds the backport DistributionFinder to sys.meta_path and
-+    attempts to disable the finder functionality of the stdlib
-+    DistributionFinder.
-+    """
-+    sys.meta_path.append(cls())
-+    disable_stdlib_finder()
-+    return cls
-+
-+
-+def disable_stdlib_finder():
-+    """
-+    Give the backport primacy for discovering path-based distributions
-+    by monkey-patching the stdlib O_O.
-+
-+    See #91 for more background and the rationale for this sketchy
-+    behavior.
-+    """
-+    def matches(finder):
-+        return (
-+            getattr(finder, '__module__', None) == '_frozen_importlib_external'
-+            and hasattr(finder, 'find_distributions')
-+            )
-+    for finder in filter(matches, sys.meta_path):  # pragma: nocover
-+        del finder.find_distributions
-+
-+
-+class NullFinder:
-+    """
-+    A "Finder" (aka "MetaClassFinder") that never finds any modules,
-+    but may find distributions.
-+    """
-+    @staticmethod
-+    def find_spec(*args, **kwargs):
-+        return None
-+
-+    # In Python 2, the import system requires finders
-+    # to have a find_module() method, but this usage
-+    # is deprecated in Python 3 in favor of find_spec().
-+    # For the purposes of this finder (i.e. being present
-+    # on sys.meta_path but having no other import
-+    # system functionality), the two methods are identical.
-+    find_module = find_spec
-+
-+
-+def py2_message_from_string(text):  # nocoverpy3
-+    # Work around https://bugs.python.org/issue25545 where
-+    # email.message_from_string cannot handle Unicode on Python 2.
-+    io_buffer = io.StringIO(text)
-+    return email.message_from_file(io_buffer)
-+
-+
-+email_message_from_string = (
-+    py2_message_from_string
-+    if sys.version_info < (3,) else
-+    email.message_from_string
-+    )
-+
-+
-+class PyPy_repr:
-+    """
-+    Override repr for EntryPoint objects on PyPy to avoid __iter__ access.
-+    Ref #97, #102.
-+    """
-+    affected = hasattr(sys, 'pypy_version_info')
-+
-+    def __compat_repr__(self):  # pragma: nocover
-+        def make_param(name):
-+            value = getattr(self, name)
-+            return '{name}={value!r}'.format(**locals())
-+        params = ', '.join(map(make_param, self._fields))
-+        return 'EntryPoint({params})'.format(**locals())
-+
-+    if affected:  # pragma: nocover
-+        __repr__ = __compat_repr__
-+    del affected
-diff --git a/third_party/python/importlib_metadata/importlib_metadata/docs/__init__.py b/third_party/python/importlib_metadata/importlib_metadata/docs/__init__.py
-new file mode 100644
-diff --git a/third_party/python/importlib_metadata/importlib_metadata/docs/changelog.rst b/third_party/python/importlib_metadata/importlib_metadata/docs/changelog.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata/docs/changelog.rst
-@@ -0,0 +1,257 @@
-+=========================
-+ importlib_metadata NEWS
-+=========================
-+
-+v1.5.0
-+======
-+
-+* Additional performance optimizations in FastPath now
-+  save a further 20% on a typical call.
-+* Correct for issue where PyOxidizer finder has no
-+  ``__module__`` attribute. Closes #110.
-+
-+v1.4.0
-+======
-+
-+* Through careful optimization, ``distribution()`` is
-+  3-4x faster. Thanks to Antony Lee for the
-+  contribution. Closes #95.
-+
-+* When searching through ``sys.path``, if any error
-+  occurs attempting to list a path entry, that entry
-+  is skipped, making the system much more lenient
-+  to errors. Closes #94.
-+
-+v1.3.0
-+======
-+
-+* Improve custom finders documentation. Closes #105.
-+
-+v1.2.0
-+======
-+
-+* Once again, drop support for Python 3.4. Ref #104.
-+
-+v1.1.3
-+======
-+
-+* Restored support for Python 3.4 due to improper version
-+  compatibility declarations in the v1.1.0 and v1.1.1
-+  releases. Closes #104.
-+
-+v1.1.2
-+======
-+
-+* Repaired project metadata to correctly declare the
-+  ``python_requires`` directive. Closes #103.
-+
-+v1.1.1
-+======
-+
-+* Fixed ``repr(EntryPoint)`` on PyPy 3 also. Closes #102.
-+
-+v1.1.0
-+======
-+
-+* Dropped support for Python 3.4.
-+* EntryPoints are now pickleable. Closes #96.
-+* Fixed ``repr(EntryPoint)`` on PyPy 2. Closes #97.
-+
-+v1.0.0
-+======
-+
-+* Project adopts semver for versioning.
-+
-+* Removed compatibility shim introduced in 0.23.
-+
-+* For better compatibility with the stdlib implementation and to
-+  avoid the same distributions being discovered by the stdlib and
-+  backport implementations, the backport now disables the
-+  stdlib DistributionFinder during initialization (import time).
-+  Closes #91 and closes #100.
-+
-+0.23
-+====
-+* Added a compatibility shim to prevent failures on beta releases
-+  of Python before the signature changed to accept the
-+  "context" parameter on find_distributions. This workaround
-+  will have a limited lifespan, not to extend beyond release of
-+  Python 3.8 final.
-+
-+0.22
-+====
-+* Renamed ``package`` parameter to ``distribution_name``
-+  as `recommended <https://bugs.python.org/issue34632#msg349423>`_
-+  in the following functions: ``distribution``, ``metadata``,
-+  ``version``, ``files``, and ``requires``. This
-+  backward-incompatible change is expected to have little impact
-+  as these functions are assumed to be primarily used with
-+  positional parameters.
-+
-+0.21
-+====
-+* ``importlib.metadata`` now exposes the ``DistributionFinder``
-+  metaclass and references it in the docs for extending the
-+  search algorithm.
-+* Add ``Distribution.at`` for constructing a Distribution object
-+  from a known metadata directory on the file system. Closes #80.
-+* Distribution finders now receive a context object that
-+  supplies ``.path`` and ``.name`` properties. This change
-+  introduces a fundamental backward incompatibility for
-+  any projects implementing a ``find_distributions`` method
-+  on a ``MetaPathFinder``. This new layer of abstraction
-+  allows this context to be supplied directly or constructed
-+  on demand and opens the opportunity for a
-+  ``find_distributions`` method to solicit additional
-+  context from the caller. Closes #85.
-+
-+0.20
-+====
-+* Clarify in the docs that calls to ``.files`` could return
-+  ``None`` when the metadata is not present. Closes #69.
-+* Return all requirements and not just the first for dist-info
-+  packages. Closes #67.
-+
-+0.19
-+====
-+* Restrain over-eager egg metadata resolution.
-+* Add support for entry points with colons in the name. Closes #75.
-+
-+0.18
-+====
-+* Parse entry points case sensitively.  Closes #68
-+* Add a version constraint on the backport configparser package.  Closes #66
-+
-+0.17
-+====
-+* Fix a permission problem in the tests on Windows.
-+
-+0.16
-+====
-+* Don't crash if there exists an EGG-INFO directory on sys.path.
-+
-+0.15
-+====
-+* Fix documentation.
-+
-+0.14
-+====
-+* Removed ``local_distribution`` function from the API.
-+  **This backward-incompatible change removes this
-+  behavior summarily**. Projects should remove their
-+  reliance on this behavior. A replacement behavior is
-+  under review in the `pep517 project
-+  <https://github.com/pypa/pep517>`_. Closes #42.
-+
-+0.13
-+====
-+* Update docstrings to match PEP 8. Closes #63.
-+* Merged modules into one module. Closes #62.
-+
-+0.12
-+====
-+* Add support for eggs.  !65; Closes #19.
-+
-+0.11
-+====
-+* Support generic zip files (not just wheels).  Closes #59
-+* Support zip files with multiple distributions in them.  Closes #60
-+* Fully expose the public API in ``importlib_metadata.__all__``.
-+
-+0.10
-+====
-+* The ``Distribution`` ABC is now officially part of the public API.
-+  Closes #37.
-+* Fixed support for older single file egg-info formats.  Closes #43.
-+* Fixed a testing bug when ``$CWD`` has spaces in the path.  Closes #50.
-+* Add Python 3.8 to the ``tox`` testing matrix.
-+
-+0.9
-+===
-+* Fixed issue where entry points without an attribute would raise an
-+  Exception.  Closes #40.
-+* Removed unused ``name`` parameter from ``entry_points()``. Closes #44.
-+* ``DistributionFinder`` classes must now be instantiated before
-+  being placed on ``sys.meta_path``.
-+
-+0.8
-+===
-+* This library can now discover/enumerate all installed packages. **This
-+  backward-incompatible change alters the protocol finders must
-+  implement to support distribution package discovery.** Closes #24.
-+* The signature of ``find_distributions()`` on custom installer finders
-+  should now accept two parameters, ``name`` and ``path`` and
-+  these parameters must supply defaults.
-+* The ``entry_points()`` method no longer accepts a package name
-+  but instead returns all entry points in a dictionary keyed by the
-+  ``EntryPoint.group``. The ``resolve`` method has been removed. Instead,
-+  call ``EntryPoint.load()``, which has the same semantics as
-+  ``pkg_resources`` and ``entrypoints``.  **This is a backward incompatible
-+  change.**
-+* Metadata is now always returned as Unicode text regardless of
-+  Python version. Closes #29.
-+* This library can now discover metadata for a 'local' package (found
-+  in the current-working directory). Closes #27.
-+* Added ``files()`` function for resolving files from a distribution.
-+* Added a new ``requires()`` function, which returns the requirements
-+  for a package suitable for parsing by
-+  ``packaging.requirements.Requirement``. Closes #18.
-+* The top-level ``read_text()`` function has been removed.  Use
-+  ``PackagePath.read_text()`` on instances returned by the ``files()``
-+  function.  **This is a backward incompatible change.**
-+* Release dates are now automatically injected into the changelog
-+  based on SCM tags.
-+
-+0.7
-+===
-+* Fixed issue where packages with dashes in their names would
-+  not be discovered. Closes #21.
-+* Distribution lookup is now case-insensitive. Closes #20.
-+* Wheel distributions can no longer be discovered by their module
-+  name. Like Path distributions, they must be indicated by their
-+  distribution package name.
-+
-+0.6
-+===
-+* Removed ``importlib_metadata.distribution`` function. Now
-+  the public interface is primarily the utility functions exposed
-+  in ``importlib_metadata.__all__``. Closes #14.
-+* Added two new utility functions ``read_text`` and
-+  ``metadata``.
-+
-+0.5
-+===
-+* Updated README and removed details about Distribution
-+  class, now considered private. Closes #15.
-+* Added test suite support for Python 3.4+.
-+* Fixed SyntaxErrors on Python 3.4 and 3.5. !12
-+* Fixed errors on Windows joining Path elements. !15
-+
-+0.4
-+===
-+* Housekeeping.
-+
-+0.3
-+===
-+* Added usage documentation.  Closes #8
-+* Add support for getting metadata from wheels on ``sys.path``.  Closes #9
-+
-+0.2
-+===
-+* Added ``importlib_metadata.entry_points()``.  Closes #1
-+* Added ``importlib_metadata.resolve()``.  Closes #12
-+* Add support for Python 2.7.  Closes #4
-+
-+0.1
-+===
-+* Initial release.
-+
-+
-+..
-+   Local Variables:
-+   mode: change-log-mode
-+   indent-tabs-mode: nil
-+   sentence-end-double-space: t
-+   fill-column: 78
-+   coding: utf-8
-+   End:
-diff --git a/third_party/python/importlib_metadata/importlib_metadata/docs/conf.py b/third_party/python/importlib_metadata/importlib_metadata/docs/conf.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata/docs/conf.py
-@@ -0,0 +1,185 @@
-+#!/usr/bin/env python3
-+# -*- coding: utf-8 -*-
-+#
-+# importlib_metadata documentation build configuration file, created by
-+# sphinx-quickstart on Thu Nov 30 10:21:00 2017.
-+#
-+# This file is execfile()d with the current directory set to its
-+# containing dir.
-+#
-+# Note that not all possible configuration values are present in this
-+# autogenerated file.
-+#
-+# All configuration values have a default; values that are commented out
-+# serve to show the default.
-+
-+# If extensions (or modules to document with autodoc) are in another directory,
-+# add these directories to sys.path here. If the directory is relative to the
-+# documentation root, use os.path.abspath to make it absolute, like shown here.
-+#
-+# import os
-+# import sys
-+# sys.path.insert(0, os.path.abspath('.'))
-+
-+
-+# -- General configuration ------------------------------------------------
-+
-+# If your documentation needs a minimal Sphinx version, state it here.
-+#
-+# needs_sphinx = '1.0'
-+
-+# Add any Sphinx extension module names here, as strings. They can be
-+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-+# ones.
-+extensions = [
-+    'rst.linker',
-+    'sphinx.ext.autodoc',
-+    'sphinx.ext.coverage',
-+    'sphinx.ext.doctest',
-+    'sphinx.ext.intersphinx',
-+    'sphinx.ext.viewcode',
-+    ]
-+
-+# Add any paths that contain templates here, relative to this directory.
-+templates_path = ['_templates']
-+
-+# The suffix(es) of source filenames.
-+# You can specify multiple suffix as a list of string:
-+#
-+# source_suffix = ['.rst', '.md']
-+source_suffix = '.rst'
-+
-+# The master toctree document.
-+master_doc = 'index'
-+
-+# General information about the project.
-+project = 'importlib_metadata'
-+copyright = '2017-2019, Jason R. Coombs, Barry Warsaw'
-+author = 'Jason R. Coombs, Barry Warsaw'
-+
-+# The version info for the project you're documenting, acts as replacement for
-+# |version| and |release|, also used in various other places throughout the
-+# built documents.
-+#
-+# The short X.Y version.
-+version = '0.1'
-+# The full version, including alpha/beta/rc tags.
-+release = '0.1'
-+
-+# The language for content autogenerated by Sphinx. Refer to documentation
-+# for a list of supported languages.
-+#
-+# This is also used if you do content translation via gettext catalogs.
-+# Usually you set "language" from the command line for these cases.
-+language = None
-+
-+# List of patterns, relative to source directory, that match files and
-+# directories to ignore when looking for source files.
-+# This patterns also effect to html_static_path and html_extra_path
-+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
-+
-+# The name of the Pygments (syntax highlighting) style to use.
-+pygments_style = 'sphinx'
-+
-+# If true, `todo` and `todoList` produce output, else they produce nothing.
-+todo_include_todos = False
-+
-+
-+# -- Options for HTML output ----------------------------------------------
-+
-+# The theme to use for HTML and HTML Help pages.  See the documentation for
-+# a list of builtin themes.
-+#
-+html_theme = 'default'
-+
-+# Custom sidebar templates, must be a dictionary that maps document names
-+# to template names.
-+#
-+# This is required for the alabaster theme
-+# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
-+html_sidebars = {
-+    '**': [
-+        'relations.html',  # needs 'show_related': True theme option to display
-+        'searchbox.html',
-+        ]
-+    }
-+
-+
-+# -- Options for HTMLHelp output ------------------------------------------
-+
-+# Output file base name for HTML help builder.
-+htmlhelp_basename = 'importlib_metadatadoc'
-+
-+
-+# -- Options for LaTeX output ---------------------------------------------
-+
-+latex_elements = {
-+    # The paper size ('letterpaper' or 'a4paper').
-+    #
-+    # 'papersize': 'letterpaper',
-+
-+    # The font size ('10pt', '11pt' or '12pt').
-+    #
-+    # 'pointsize': '10pt',
-+
-+    # Additional stuff for the LaTeX preamble.
-+    #
-+    # 'preamble': '',
-+
-+    # Latex figure (float) alignment
-+    #
-+    # 'figure_align': 'htbp',
-+    }
-+
-+# Grouping the document tree into LaTeX files. List of tuples
-+# (source start file, target name, title,
-+#  author, documentclass [howto, manual, or own class]).
-+latex_documents = [
-+    (master_doc, 'importlib_metadata.tex',
-+     'importlib\\_metadata Documentation',
-+     'Brett Cannon, Barry Warsaw', 'manual'),
-+    ]
-+
-+
-+# -- Options for manual page output ---------------------------------------
-+
-+# One entry per manual page. List of tuples
-+# (source start file, name, description, authors, manual section).
-+man_pages = [
-+    (master_doc, 'importlib_metadata', 'importlib_metadata Documentation',
-+     [author], 1)
-+    ]
-+
-+
-+# -- Options for Texinfo output -------------------------------------------
-+
-+# Grouping the document tree into Texinfo files. List of tuples
-+# (source start file, target name, title, author,
-+#  dir menu entry, description, category)
-+texinfo_documents = [
-+    (master_doc, 'importlib_metadata', 'importlib_metadata Documentation',
-+     author, 'importlib_metadata', 'One line description of project.',
-+     'Miscellaneous'),
-+    ]
-+
-+
-+# Example configuration for intersphinx: refer to the Python standard library.
-+intersphinx_mapping = {
-+    'python': ('https://docs.python.org/3', None),
-+    'importlib_resources': (
-+        'https://importlib-resources.readthedocs.io/en/latest/', None
-+        ),
-+    }
-+
-+
-+# For rst.linker, inject release dates into changelog.rst
-+link_files = {
-+    'changelog.rst': dict(
-+        replace=[
-+            dict(
-+                pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
-+                with_scm='{text}\n{rev[timestamp]:%Y-%m-%d}\n\n',
-+                ),
-+            ],
-+        ),
-+    }
-diff --git a/third_party/python/importlib_metadata/importlib_metadata/docs/index.rst b/third_party/python/importlib_metadata/importlib_metadata/docs/index.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata/docs/index.rst
-@@ -0,0 +1,50 @@
-+===============================
-+ Welcome to importlib_metadata
-+===============================
-+
-+``importlib_metadata`` is a library which provides an API for accessing an
-+installed package's metadata (see :pep:`566`), such as its entry points or its top-level
-+name.  This functionality intends to replace most uses of ``pkg_resources``
-+`entry point API`_ and `metadata API`_.  Along with :mod:`importlib.resources` in
-+Python 3.7 and newer (backported as :doc:`importlib_resources <importlib_resources:index>` for older
-+versions of Python), this can eliminate the need to use the older and less
-+efficient ``pkg_resources`` package.
-+
-+``importlib_metadata`` is a backport of Python 3.8's standard library
-+:doc:`importlib.metadata <library/importlib.metadata>` module for Python 2.7 and 3.4 through 3.7.  Users of
-+Python 3.8 and beyond are encouraged to use the standard library module.
-+When imported on Python 3.8 and later, ``importlib_metadata`` replaces the
-+DistributionFinder behavior from the stdlib, but leaves the API intact.
-+Developers looking for detailed API descriptions should refer to the Python
-+3.8 standard library documentation.
-+
-+The documentation here includes a general :ref:`usage <using>` guide.
-+
-+
-+.. toctree::
-+   :maxdepth: 2
-+   :caption: Contents:
-+
-+   using.rst
-+   changelog (links).rst
-+
-+
-+Project details
-+===============
-+
-+ * Project home: https://gitlab.com/python-devs/importlib_metadata
-+ * Report bugs at: https://gitlab.com/python-devs/importlib_metadata/issues
-+ * Code hosting: https://gitlab.com/python-devs/importlib_metadata.git
-+ * Documentation: http://importlib_metadata.readthedocs.io/
-+
-+
-+Indices and tables
-+==================
-+
-+* :ref:`genindex`
-+* :ref:`modindex`
-+* :ref:`search`
-+
-+
-+.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points
-+.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api
-diff --git a/third_party/python/importlib_metadata/importlib_metadata/docs/using.rst b/third_party/python/importlib_metadata/importlib_metadata/docs/using.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/importlib_metadata/importlib_metadata/docs/using.rst
-@@ -0,0 +1,252 @@
-+.. _using:
-+
-+=================================
-+ Using :mod:`!importlib_metadata`
-+=================================
-+
-+``importlib_metadata`` is a library that provides access to installed
-+package metadata.  Built in part on Python's import system, this library
-+intends to replace similar functionality in the `entry point
-+API`_ and `metadata API`_ of ``pkg_resources``.  Along with
-+:mod:`importlib.resources` in Python 3.7
-+and newer (backported as :doc:`importlib_resources <importlib_resources:index>` for older versions of
-+Python), this can eliminate the need to use the older and less efficient
-+``pkg_resources`` package.
-+
-+By "installed package" we generally mean a third-party package installed into
-+Python's ``site-packages`` directory via tools such as `pip
-+<https://pypi.org/project/pip/>`_.  Specifically,
-+it means a package with either a discoverable ``dist-info`` or ``egg-info``
-+directory, and metadata defined by :pep:`566` or its older specifications.
-+By default, package metadata can live on the file system or in zip archives on
-+:data:`sys.path`.  Through an extension mechanism, the metadata can live almost
-+anywhere.
-+
-+
-+Overview
-+========
-+
-+Let's say you wanted to get the version string for a package you've installed
-+using ``pip``.  We start by creating a virtual environment and installing
-+something into it::
-+
-+    $ python3 -m venv example
-+    $ source example/bin/activate
-+    (example) $ pip install importlib_metadata
-+    (example) $ pip install wheel
-+
-+You can get the version string for ``wheel`` by running the following::
-+
-+    (example) $ python
-+    >>> from importlib_metadata import version
-+    >>> version('wheel')
-+    '0.32.3'
-+
-+You can also get the set of entry points keyed by group, such as
-+``console_scripts``, ``distutils.commands`` and others.  Each group contains a
-+sequence of :ref:`EntryPoint <entry-points>` objects.
-+
-+You can get the :ref:`metadata for a distribution <metadata>`::
-+
-+    >>> list(metadata('wheel'))
-+    ['Metadata-Version', 'Name', 'Version', 'Summary', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Project-URL', 'Project-URL', 'Project-URL', 'Keywords', 'Platform', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Classifier', 'Requires-Python', 'Provides-Extra', 'Requires-Dist', 'Requires-Dist']
-+
-+You can also get a :ref:`distribution's version number <version>`, list its
-+:ref:`constituent files <files>`, and get a list of the distribution's
-+:ref:`requirements`.
-+
-+
-+Functional API
-+==============
-+
-+This package provides the following functionality via its public API.
-+
-+
-+.. _entry-points:
-+
-+Entry points
-+------------
-+
-+The ``entry_points()`` function returns a dictionary of all entry points,
-+keyed by group.  Entry points are represented by ``EntryPoint`` instances;
-+each ``EntryPoint`` has ``.name``, ``.group``, and ``.value`` attributes and
-+a ``.load()`` method to resolve the value::
-+
-+    >>> eps = entry_points()
-+    >>> list(eps)
-+    ['console_scripts', 'distutils.commands', 'distutils.setup_keywords', 'egg_info.writers', 'setuptools.installation']
-+    >>> scripts = eps['console_scripts']
-+    >>> wheel = [ep for ep in scripts if ep.name == 'wheel'][0]
-+    >>> wheel
-+    EntryPoint(name='wheel', value='wheel.cli:main', group='console_scripts')
-+    >>> main = wheel.load()
-+    >>> main
-+    <function main at 0x103528488>
-+
-+The ``group`` and ``name`` are arbitrary values defined by the package author;
-+usually a client will wish to resolve all entry points for a particular
-+group.  Read `the setuptools docs
-+<https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
-+for more information on entry points, their definition, and usage.
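-+
-+For the producing side, a package author might declare an entry point
-+roughly like the hypothetical ``setup.py`` below (the ``example`` names
-+are illustrative, not taken from a real package)::
-+
-+    from setuptools import setup
-+
-+    setup(
-+        name="example-tool",
-+        version="1.0",
-+        py_modules=["example_tool"],
-+        entry_points={
-+            # group: "console_scripts", name: "example-tool",
-+            # value: "example_tool:main"
-+            "console_scripts": ["example-tool = example_tool:main"],
-+        },
-+    )
-+
-+Once such a package is installed, ``entry_points()['console_scripts']``
-+contains a matching ``EntryPoint`` whose ``.load()`` returns
-+``example_tool.main``.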
-+
-+
-+.. _metadata:
-+
-+Distribution metadata
-+---------------------
-+
-+Every distribution includes some metadata, which you can extract using the
-+``metadata()`` function::
-+
-+    >>> wheel_metadata = metadata('wheel')
-+
-+The keys of the returned data structure [#f1]_ name the metadata keywords, and
-+their values are returned unparsed from the distribution metadata::
-+
-+    >>> wheel_metadata['Requires-Python']
-+    '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*'
-+
-+
-+.. _version:
-+
-+Distribution versions
-+---------------------
-+
-+The ``version()`` function is the quickest way to get a distribution's version
-+number, as a string::
-+
-+    >>> version('wheel')
-+    '0.32.3'
-+
-+
-+.. _files:
-+
-+Distribution files
-+------------------
-+
-+You can also get the full set of files contained within a distribution.  The
-+``files()`` function takes a distribution package name and returns all of the
-+files installed by this distribution.  Each file object returned is a
-+``PackagePath``, a :class:`pathlib.Path` derived object with additional ``dist``,
-+``size``, and ``hash`` properties as indicated by the metadata.  For example::
-+
-+    >>> util = [p for p in files('wheel') if 'util.py' in str(p)][0]
-+    >>> util
-+    PackagePath('wheel/util.py')
-+    >>> util.size
-+    859
-+    >>> util.dist
-+    <importlib_metadata._hooks.PathDistribution object at 0x101e0cef0>
-+    >>> util.hash
-+    <FileHash mode: sha256 value: bYkw5oMccfazVCoYQwKkkemoVyMAFoR34mmKBx8R1NI>
-+
-+Once you have the file, you can also read its contents::
-+
-+    >>> print(util.read_text())
-+    import base64
-+    import sys
-+    ...
-+    def as_bytes(s):
-+        if isinstance(s, text_type):
-+            return s.encode('utf-8')
-+        return s
-+
-+When the metadata file that lists installed files
-+(``RECORD`` or ``SOURCES.txt``) is missing, ``files()``
-+returns ``None``. The caller may wish to wrap calls to
-+``files()`` in `always_iterable
-+<https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.always_iterable>`_
-+or otherwise guard against this condition if the target
-+distribution is not known to ship that metadata.
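-+
-+For example, a minimal guard built on ``always_iterable`` (assuming
-+``more_itertools`` is installed) could look like::
-+
-+    from importlib_metadata import files
-+    from more_itertools import always_iterable
-+
-+    # files() returns None when RECORD/SOURCES.txt is absent;
-+    # always_iterable(None) yields nothing, so the loop simply skips.
-+    for path in always_iterable(files('wheel')):
-+        print(path)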
-+
-+.. _requirements:
-+
-+Distribution requirements
-+-------------------------
-+
-+To get the full set of requirements for a distribution, use the ``requires()``
-+function::
-+
-+    >>> requires('wheel')
-+    ["pytest (>=3.0.0) ; extra == 'test'", "pytest-cov ; extra == 'test'"]
-+
-+
-+Distributions
-+=============
-+
-+While the above API is the most common and convenient usage, you can get all
-+of that information from the ``Distribution`` class.  A ``Distribution`` is an
-+abstract object that represents the metadata for a Python package.  You can
-+get the ``Distribution`` instance::
-+
-+    >>> from importlib_metadata import distribution
-+    >>> dist = distribution('wheel')
-+
-+Thus, an alternative way to get the version number is through the
-+``Distribution`` instance::
-+
-+    >>> dist.version
-+    '0.32.3'
-+
-+There are all kinds of additional metadata available on the ``Distribution``
-+instance::
-+
-+    >>> dist.metadata['Requires-Python']
-+    '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*'
-+    >>> dist.metadata['License']
-+    'MIT'
-+
-+The full set of available metadata is not described here.  See :pep:`566`
-+for additional details.
-+
-+
-+Extending the search algorithm
-+==============================
-+
-+Because package metadata is not available through :data:`sys.path` searches or
-+from package loaders directly, the metadata for a package is found through import
-+system `finders`_.  To find a distribution package's metadata,
-+``importlib.metadata`` queries the list of :term:`meta path finders <meta path finder>` on
-+:data:`sys.meta_path`.
-+
-+By default, ``importlib_metadata`` installs a finder for distribution packages
-+found on the file system.  This finder doesn't actually find any *packages*,
-+but it can find the packages' metadata.
-+
-+The abstract class :py:class:`importlib.abc.MetaPathFinder` defines the
-+interface expected of finders by Python's import system.
-+``importlib_metadata`` extends this protocol by looking for an optional
-+``find_distributions`` callable on the finders from
-+:data:`sys.meta_path` and presents this extended interface as the
-+``DistributionFinder`` abstract base class, which defines this abstract
-+method::
-+
-+    @abc.abstractmethod
-+    def find_distributions(context=DistributionFinder.Context()):
-+        """Return an iterable of all Distribution instances capable of
-+        loading the metadata for packages for the indicated ``context``.
-+        """
-+
-+The ``DistributionFinder.Context`` object provides ``.path`` and ``.name``
-+properties indicating the paths to search and the name to match, and may
-+supply other relevant context.
-+
-+In practice, this means that to support finding distribution package
-+metadata in locations other than the file system, you subclass
-+``Distribution`` and implement its abstract methods, then from
-+a custom finder return instances of that derived ``Distribution`` in the
-+``find_distributions()`` method, as in the sketch below.
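-+
-+A minimal sketch (the ``InMemoryDistribution`` and ``InMemoryFinder``
-+names and their in-memory data source are illustrative assumptions, not
-+part of the API)::
-+
-+    import sys
-+
-+    from importlib_metadata import Distribution, DistributionFinder
-+
-+    class InMemoryDistribution(Distribution):
-+        """Serve metadata from a mapping of file names to text."""
-+
-+        def __init__(self, files):
-+            self._files = files
-+
-+        def read_text(self, filename):
-+            # Per the Distribution contract, return None for missing files.
-+            return self._files.get(filename)
-+
-+        def locate_file(self, path):
-+            # Nothing for this distribution exists on the file system.
-+            return None
-+
-+    class InMemoryFinder:
-+        """A finder that only implements the optional metadata hook."""
-+
-+        def find_distributions(self, context=DistributionFinder.Context()):
-+            if context.name in (None, 'example-dist'):
-+                yield InMemoryDistribution(
-+                    {'METADATA': 'Name: example-dist\nVersion: 1.0\n'})
-+
-+    sys.meta_path.append(InMemoryFinder())
-+
-+With the finder registered, ``version('example-dist')`` would then report
-+``'1.0'``.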
-+
-+
-+.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points
-+.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api
-+.. _`finders`: https://docs.python.org/3/reference/import.html#finders-and-loaders
-+
-+
-+.. rubric:: Footnotes
-+
-+.. [#f1] Technically, the returned distribution metadata object is an
-+         :class:`email.message.EmailMessage`
-+         instance, but this is an implementation detail, and not part of the
-+         stable API.  You should only use dictionary-like methods and syntax
-+         to access the metadata contents.
-diff --git a/third_party/python/iso8601/iso8601/__init__.py b/third_party/python/iso8601/iso8601/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/iso8601/iso8601/__init__.py
-@@ -0,0 +1,1 @@
-+from .iso8601 import *
-diff --git a/third_party/python/iso8601/iso8601/iso8601.py b/third_party/python/iso8601/iso8601/iso8601.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/iso8601/iso8601/iso8601.py
-@@ -0,0 +1,214 @@
-+"""ISO 8601 date time string parsing
-+
-+Basic usage:
-+>>> import iso8601
-+>>> iso8601.parse_date("2007-01-25T12:00:00Z")
-+datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.Utc ...>)
-+>>>
-+
-+"""
-+
-+import datetime
-+from decimal import Decimal
-+import sys
-+import re
-+
-+__all__ = ["parse_date", "ParseError", "UTC",
-+           "FixedOffset"]
-+
-+if sys.version_info >= (3, 0, 0):
-+    _basestring = str
-+else:
-+    _basestring = basestring
-+
-+
-+# Adapted from http://delete.me.uk/2005/03/iso8601.html
-+ISO8601_REGEX = re.compile(
-+    r"""
-+    (?P<year>[0-9]{4})
-+    (
-+        (
-+            (-(?P<monthdash>[0-9]{1,2}))
-+            |
-+            (?P<month>[0-9]{2})
-+            (?!$)  # Don't allow YYYYMM
-+        )
-+        (
-+            (
-+                (-(?P<daydash>[0-9]{1,2}))
-+                |
-+                (?P<day>[0-9]{2})
-+            )
-+            (
-+                (
-+                    (?P<separator>[ T])
-+                    (?P<hour>[0-9]{2})
-+                    (:{0,1}(?P<minute>[0-9]{2})){0,1}
-+                    (
-+                        :{0,1}(?P<second>[0-9]{1,2})
-+                        ([.,](?P<second_fraction>[0-9]+)){0,1}
-+                    ){0,1}
-+                    (?P<timezone>
-+                        Z
-+                        |
-+                        (
-+                            (?P<tz_sign>[-+])
-+                            (?P<tz_hour>[0-9]{2})
-+                            :{0,1}
-+                            (?P<tz_minute>[0-9]{2}){0,1}
-+                        )
-+                    ){0,1}
-+                ){0,1}
-+            )
-+        ){0,1}  # YYYY-MM
-+    ){0,1}  # YYYY only
-+    $
-+    """,
-+    re.VERBOSE
-+)
-+
-+class ParseError(Exception):
-+    """Raised when there is a problem parsing a date string"""
-+
-+if sys.version_info >= (3, 2, 0):
-+    UTC = datetime.timezone.utc
-+    def FixedOffset(offset_hours, offset_minutes, name):
-+        return datetime.timezone(
-+            datetime.timedelta(
-+                hours=offset_hours, minutes=offset_minutes),
-+            name)
-+else:
-+    # Yoinked from python docs
-+    ZERO = datetime.timedelta(0)
-+    class Utc(datetime.tzinfo):
-+        """UTC Timezone
-+
-+        """
-+        def utcoffset(self, dt):
-+            return ZERO
-+
-+        def tzname(self, dt):
-+            return "UTC"
-+
-+        def dst(self, dt):
-+            return ZERO
-+
-+        def __repr__(self):
-+            return "<iso8601.Utc>"
-+
-+    UTC = Utc()
-+
-+    class FixedOffset(datetime.tzinfo):
-+        """Fixed offset in hours and minutes from UTC
-+
-+        """
-+        def __init__(self, offset_hours, offset_minutes, name):
-+            self.__offset_hours = offset_hours  # Keep for later __getinitargs__
-+            self.__offset_minutes = offset_minutes  # Keep for later __getinitargs__
-+            self.__offset = datetime.timedelta(
-+                hours=offset_hours, minutes=offset_minutes)
-+            self.__name = name
-+
-+        def __eq__(self, other):
-+            if isinstance(other, FixedOffset):
-+                return (
-+                    (other.__offset == self.__offset)
-+                    and
-+                    (other.__name == self.__name)
-+                )
-+            return NotImplemented
-+
-+        def __getinitargs__(self):
-+            return (self.__offset_hours, self.__offset_minutes, self.__name)
-+
-+        def utcoffset(self, dt):
-+            return self.__offset
-+
-+        def tzname(self, dt):
-+            return self.__name
-+
-+        def dst(self, dt):
-+            return ZERO
-+
-+        def __repr__(self):
-+            return "<FixedOffset %r %r>" % (self.__name, self.__offset)
-+
-+
-+def to_int(d, key, default_to_zero=False, default=None, required=True):
-+    """Pull a value from the dict and convert to int
-+
-+    :param default_to_zero: If the value is None or empty, treat it as zero
-+    :param default: If the value is missing in the dict use this default
-+
-+    """
-+    value = d.get(key) or default
-+    if (value in ["", None]) and default_to_zero:
-+        return 0
-+    if value is None:
-+        if required:
-+            raise ParseError("Unable to read %s from %s" % (key, d))
-+    else:
-+        return int(value)
-+
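-+# Illustrative usage notes for to_int (a sketch of the behaviour above):
-+#   to_int({"tz_minute": None}, "tz_minute", default_to_zero=True)  # -> 0
-+#   to_int({}, "monthdash", required=False, default=1)              # -> 1
-+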
-+def parse_timezone(matches, default_timezone=UTC):
-+    """Parses ISO 8601 time zone specs into tzinfo offsets
-+
-+    """
-+
-+    if matches["timezone"] == "Z":
-+        return UTC
-+    # This isn't strictly correct, but it's common to encounter dates without
-+    # timezones so I'll assume the default (which defaults to UTC).
-+    # Addresses issue 4.
-+    if matches["timezone"] is None:
-+        return default_timezone
-+    sign = matches["tz_sign"]
-+    hours = to_int(matches, "tz_hour")
-+    minutes = to_int(matches, "tz_minute", default_to_zero=True)
-+    description = "%s%02d:%02d" % (sign, hours, minutes)
-+    if sign == "-":
-+        hours = -hours
-+        minutes = -minutes
-+    return FixedOffset(hours, minutes, description)
-+
-+def parse_date(datestring, default_timezone=UTC):
-+    """Parses ISO 8601 dates into datetime objects
-+
-+    The timezone is parsed from the date string. However it is quite common to
-+    have dates without a timezone (not strictly correct). In this case the
-+    default timezone specified in default_timezone is used. This is UTC by
-+    default.
-+
-+    :param datestring: The date to parse as a string
-+    :param default_timezone: A datetime tzinfo instance to use when no timezone
-+                             is specified in the datestring. If this is set to
-+                             None then a naive datetime object is returned.
-+    :returns: A datetime.datetime instance
-+    :raises: ParseError when there is a problem parsing the date or
-+             constructing the datetime instance.
-+
-+    """
-+    if not isinstance(datestring, _basestring):
-+        raise ParseError("Expecting a string %r" % datestring)
-+    m = ISO8601_REGEX.match(datestring)
-+    if not m:
-+        raise ParseError("Unable to parse date string %r" % datestring)
-+    groups = m.groupdict()
-+
-+    tz = parse_timezone(groups, default_timezone=default_timezone)
-+
-+    groups["second_fraction"] = int(Decimal("0.%s" % (groups["second_fraction"] or 0)) * Decimal("1000000.0"))
-+
-+    try:
-+        return datetime.datetime(
-+            year=to_int(groups, "year"),
-+            month=to_int(groups, "month", default=to_int(groups, "monthdash", required=False, default=1)),
-+            day=to_int(groups, "day", default=to_int(groups, "daydash", required=False, default=1)),
-+            hour=to_int(groups, "hour", default_to_zero=True),
-+            minute=to_int(groups, "minute", default_to_zero=True),
-+            second=to_int(groups, "second", default_to_zero=True),
-+            microsecond=groups["second_fraction"],
-+            tzinfo=tz,
-+        )
-+    except Exception as e:
-+        raise ParseError(e)
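-+
-+# Illustrative usage (a sketch; the tzinfo repr varies by Python version):
-+#
-+#     >>> parse_date("2013-10-15T18:30:00-05:00")   # fixed -05:00 offset
-+#     datetime.datetime(2013, 10, 15, 18, 30, tzinfo=...)
-+#     >>> parse_date("2013-10-15", default_timezone=None)  # naive result
-+#     datetime.datetime(2013, 10, 15, 0, 0)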
-diff --git a/third_party/python/iso8601/iso8601/test_iso8601.py b/third_party/python/iso8601/iso8601/test_iso8601.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/iso8601/iso8601/test_iso8601.py
-@@ -0,0 +1,102 @@
-+# coding=UTF-8
-+from __future__ import absolute_import
-+
-+import copy
-+import datetime
-+import pickle
-+
-+import pytest
-+
-+from iso8601 import iso8601
-+
-+def test_iso8601_regex():
-+    assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z")
-+
-+def test_fixedoffset_eq():
-+    # See https://bitbucket.org/micktwomey/pyiso8601/issues/19
-+    datetime.tzinfo() == iso8601.FixedOffset(2, 0, '+2:00')
-+
-+def test_parse_no_timezone_different_default():
-+    tz = iso8601.FixedOffset(2, 0, "test offset")
-+    d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz)
-+    assert d == datetime.datetime(2007, 1, 1, 8, 0, 0, 0, tz)
-+    assert d.tzinfo == tz
-+
-+def test_parse_utc_different_default():
-+    """Z should mean 'UTC', not 'default'.
-+
-+    """
-+    tz = iso8601.FixedOffset(2, 0, "test offset")
-+    d = iso8601.parse_date("2007-01-01T08:00:00Z", default_timezone=tz)
-+    assert d == datetime.datetime(2007, 1, 1, 8, 0, 0, 0, iso8601.UTC)
-+
-+@pytest.mark.parametrize("invalid_date, error_string", [
-+    ("2013-10-", "Unable to parse date string"),
-+    ("2013-", "Unable to parse date string"),
-+    ("", "Unable to parse date string"),
-+    (None, "Expecting a string"),
-+    ("wibble", "Unable to parse date string"),
-+    ("23", "Unable to parse date string"),
-+    ("131015T142533Z", "Unable to parse date string"),
-+    ("131015", "Unable to parse date string"),
-+    ("20141", "Unable to parse date string"),
-+    ("201402", "Unable to parse date string"),
-+    ("2007-06-23X06:40:34.00Z", "Unable to parse date string"),  # https://code.google.com/p/pyiso8601/issues/detail?id=14
-+    ("2007-06-23 06:40:34.00Zrubbish", "Unable to parse date string"),  # https://code.google.com/p/pyiso8601/issues/detail?id=14
-+    ("20114-01-03T01:45:49", "Unable to parse date string"),
-+])
-+def test_parse_invalid_date(invalid_date, error_string):
-+    assert isinstance(invalid_date, str) or invalid_date is None  # Why? 'cos I've screwed up the parametrize before :)
-+    with pytest.raises(iso8601.ParseError) as exc:
-+        iso8601.parse_date(invalid_date)
-+    assert exc.errisinstance(iso8601.ParseError)
-+    assert str(exc.value).startswith(error_string)
-+
-+@pytest.mark.parametrize("valid_date,expected_datetime,isoformat", [
-+    ("2007-06-23 06:40:34.00Z", datetime.datetime(2007, 6, 23, 6, 40, 34, 0, iso8601.UTC), "2007-06-23T06:40:34+00:00"),  # Handle a separator other than T
-+    ("1997-07-16T19:20+01:00", datetime.datetime(1997, 7, 16, 19, 20, 0, 0, iso8601.FixedOffset(1, 0, "+01:00")), "1997-07-16T19:20:00+01:00"),  # Parse with no seconds
-+    ("2007-01-01T08:00:00", datetime.datetime(2007, 1, 1, 8, 0, 0, 0, iso8601.UTC), "2007-01-01T08:00:00+00:00"),  # Handle timezone-less dates. Assumes UTC. http://code.google.com/p/pyiso8601/issues/detail?id=4
-+    ("2006-10-20T15:34:56.123+02:30", datetime.datetime(2006, 10, 20, 15, 34, 56, 123000, iso8601.FixedOffset(2, 30, "+02:30")), None),
-+    ("2006-10-20T15:34:56Z", datetime.datetime(2006, 10, 20, 15, 34, 56, 0, iso8601.UTC), "2006-10-20T15:34:56+00:00"),
-+    ("2007-5-7T11:43:55.328Z", datetime.datetime(2007, 5, 7, 11, 43, 55, 328000, iso8601.UTC), "2007-05-07T11:43:55.328000+00:00"),  # http://code.google.com/p/pyiso8601/issues/detail?id=6
-+    ("2006-10-20T15:34:56.123Z", datetime.datetime(2006, 10, 20, 15, 34, 56, 123000, iso8601.UTC), "2006-10-20T15:34:56.123000+00:00"),
-+    ("2013-10-15T18:30Z", datetime.datetime(2013, 10, 15, 18, 30, 0, 0, iso8601.UTC), "2013-10-15T18:30:00+00:00"),
-+    ("2013-10-15T22:30+04", datetime.datetime(2013, 10, 15, 22, 30, 0, 0, iso8601.FixedOffset(4, 0, "+04:00")), "2013-10-15T22:30:00+04:00"),  # <time>±hh:mm
-+    ("2013-10-15T1130-0700", datetime.datetime(2013, 10, 15, 11, 30, 0, 0, iso8601.FixedOffset(-7, 0, "-07:00")), "2013-10-15T11:30:00-07:00"),  # <time>±hhmm
-+    ("2013-10-15T1130+0700", datetime.datetime(2013, 10, 15, 11, 30, 0, 0, iso8601.FixedOffset(+7, 0, "+07:00")), "2013-10-15T11:30:00+07:00"),  # <time>±hhmm
-+    ("2013-10-15T1130+07", datetime.datetime(2013, 10, 15, 11, 30, 0, 0, iso8601.FixedOffset(+7, 0, "+07:00")), "2013-10-15T11:30:00+07:00"),  # <time>±hh
-+    ("2013-10-15T1130-07", datetime.datetime(2013, 10, 15, 11, 30, 0, 0, iso8601.FixedOffset(-7, 0, "-07:00")), "2013-10-15T11:30:00-07:00"),  # <time>±hh
-+    ("2013-10-15T15:00-03:30", datetime.datetime(2013, 10, 15, 15, 0, 0, 0, iso8601.FixedOffset(-3, -30, "-03:30")), "2013-10-15T15:00:00-03:30"),
-+    ("2013-10-15T183123Z", datetime.datetime(2013, 10, 15, 18, 31, 23, 0, iso8601.UTC), "2013-10-15T18:31:23+00:00"),  # hhmmss
-+    ("2013-10-15T1831Z", datetime.datetime(2013, 10, 15, 18, 31, 0, 0, iso8601.UTC), "2013-10-15T18:31:00+00:00"),  # hhmm
-+    ("2013-10-15T18Z", datetime.datetime(2013, 10, 15, 18, 0, 0, 0, iso8601.UTC), "2013-10-15T18:00:00+00:00"),  # hh
-+    ("2013-10-15", datetime.datetime(2013, 10, 15, 0, 0, 0, 0, iso8601.UTC), "2013-10-15T00:00:00+00:00"),  # YYYY-MM-DD
-+    ("20131015T18:30Z", datetime.datetime(2013, 10, 15, 18, 30, 0, 0, iso8601.UTC), "2013-10-15T18:30:00+00:00"),  # YYYYMMDD
-+    ("2012-12-19T23:21:28.512400+00:00", datetime.datetime(2012, 12, 19, 23, 21, 28, 512400, iso8601.FixedOffset(0, 0, "+00:00")), "2012-12-19T23:21:28.512400+00:00"),  # https://code.google.com/p/pyiso8601/issues/detail?id=21
-+    ("2006-10-20T15:34:56.123+0230", datetime.datetime(2006, 10, 20, 15, 34, 56, 123000, iso8601.FixedOffset(2, 30, "+02:30")), "2006-10-20T15:34:56.123000+02:30"),  # https://code.google.com/p/pyiso8601/issues/detail?id=18
-+    ("19950204", datetime.datetime(1995, 2, 4, tzinfo=iso8601.UTC), "1995-02-04T00:00:00+00:00"),  # https://code.google.com/p/pyiso8601/issues/detail?id=1
-+    ("2010-07-20 15:25:52.520701+00:00", datetime.datetime(2010, 7, 20, 15, 25, 52, 520701, iso8601.FixedOffset(0, 0, "+00:00")), "2010-07-20T15:25:52.520701+00:00"),  # https://code.google.com/p/pyiso8601/issues/detail?id=17
-+    ("2010-06-12", datetime.datetime(2010, 6, 12, tzinfo=iso8601.UTC), "2010-06-12T00:00:00+00:00"),  # https://code.google.com/p/pyiso8601/issues/detail?id=16
-+    ("1985-04-12T23:20:50.52-05:30", datetime.datetime(1985, 4, 12, 23, 20, 50, 520000, iso8601.FixedOffset(-5, -30, "-05:30")), "1985-04-12T23:20:50.520000-05:30"),  # https://bitbucket.org/micktwomey/pyiso8601/issue/8/015-parses-negative-timezones-incorrectly
-+    ("1997-08-29T06:14:00.000123Z", datetime.datetime(1997, 8, 29, 6, 14, 0, 123, iso8601.UTC), "1997-08-29T06:14:00.000123+00:00"),  # https://bitbucket.org/micktwomey/pyiso8601/issue/9/regression-parsing-microseconds
-+    ("2014-02", datetime.datetime(2014, 2, 1, 0, 0, 0, 0, iso8601.UTC), "2014-02-01T00:00:00+00:00"),  # https://bitbucket.org/micktwomey/pyiso8601/issue/14/regression-yyyy-mm-no-longer-parses
-+    ("2014", datetime.datetime(2014, 1, 1, 0, 0, 0, 0, iso8601.UTC), "2014-01-01T00:00:00+00:00"),  # YYYY
-+    ("1997-08-29T06:14:00,000123Z", datetime.datetime(1997, 8, 29, 6, 14, 0, 123, iso8601.UTC), "1997-08-29T06:14:00.000123+00:00"),  # Use , as decimal separator
-+])
-+def test_parse_valid_date(valid_date, expected_datetime, isoformat):
-+    parsed = iso8601.parse_date(valid_date)
-+    assert parsed.year == expected_datetime.year
-+    assert parsed.month == expected_datetime.month
-+    assert parsed.day == expected_datetime.day
-+    assert parsed.hour == expected_datetime.hour
-+    assert parsed.minute == expected_datetime.minute
-+    assert parsed.second == expected_datetime.second
-+    assert parsed.microsecond == expected_datetime.microsecond
-+    assert parsed.tzinfo == expected_datetime.tzinfo
-+    assert parsed == expected_datetime
-+    assert parsed.isoformat() == expected_datetime.isoformat()
-+    copy.deepcopy(parsed)  # ensure it's deep copy-able
-+    pickle.dumps(parsed)  # ensure it pickles
-+    if isoformat:
-+        assert parsed.isoformat() == isoformat
-+    assert iso8601.parse_date(parsed.isoformat()) == parsed  # Test round trip
-diff --git a/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/COPYING b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/COPYING
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/COPYING
-@@ -0,0 +1,19 @@
-+Copyright (c) 2013 Julian Berman
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to deal
-+in the Software without restriction, including without limitation the rights
-+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
-+
-+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-+THE SOFTWARE.
-diff --git a/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/METADATA b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/METADATA
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/METADATA
-@@ -0,0 +1,224 @@
-+Metadata-Version: 2.1
-+Name: jsonschema
-+Version: 3.2.0
-+Summary: An implementation of JSON Schema validation for Python
-+Home-page: https://github.com/Julian/jsonschema
-+Author: Julian Berman
-+Author-email: Julian@GrayVines.com
-+License: UNKNOWN
-+Project-URL: Docs, https://python-jsonschema.readthedocs.io/en/latest/
-+Platform: UNKNOWN
-+Classifier: Development Status :: 5 - Production/Stable
-+Classifier: Intended Audience :: Developers
-+Classifier: License :: OSI Approved :: MIT License
-+Classifier: Operating System :: OS Independent
-+Classifier: Programming Language :: Python
-+Classifier: Programming Language :: Python :: 2
-+Classifier: Programming Language :: Python :: 2.7
-+Classifier: Programming Language :: Python :: 3
-+Classifier: Programming Language :: Python :: 3.5
-+Classifier: Programming Language :: Python :: 3.6
-+Classifier: Programming Language :: Python :: 3.7
-+Classifier: Programming Language :: Python :: 3.8
-+Classifier: Programming Language :: Python :: Implementation :: CPython
-+Classifier: Programming Language :: Python :: Implementation :: PyPy
-+Requires-Dist: attrs (>=17.4.0)
-+Requires-Dist: pyrsistent (>=0.14.0)
-+Requires-Dist: setuptools
-+Requires-Dist: six (>=1.11.0)
-+Requires-Dist: functools32 ; python_version < "3"
-+Requires-Dist: importlib-metadata ; python_version < "3.8"
-+Provides-Extra: format
-+Requires-Dist: idna ; extra == 'format'
-+Requires-Dist: jsonpointer (>1.13) ; extra == 'format'
-+Requires-Dist: rfc3987 ; extra == 'format'
-+Requires-Dist: strict-rfc3339 ; extra == 'format'
-+Requires-Dist: webcolors ; extra == 'format'
-+Provides-Extra: format_nongpl
-+Requires-Dist: idna ; extra == 'format_nongpl'
-+Requires-Dist: jsonpointer (>1.13) ; extra == 'format_nongpl'
-+Requires-Dist: webcolors ; extra == 'format_nongpl'
-+Requires-Dist: rfc3986-validator (>0.1.0) ; extra == 'format_nongpl'
-+Requires-Dist: rfc3339-validator ; extra == 'format_nongpl'
-+
-+==========
-+jsonschema
-+==========
-+
-+|PyPI| |Pythons| |Travis| |AppVeyor| |Codecov| |ReadTheDocs|
-+
-+.. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema.svg
-+   :alt: PyPI version
-+   :target: https://pypi.org/project/jsonschema/
-+
-+.. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema.svg
-+   :alt: Supported Python versions
-+   :target: https://pypi.org/project/jsonschema/
-+
-+.. |Travis| image:: https://travis-ci.com/Julian/jsonschema.svg?branch=master
-+   :alt: Travis build status
-+   :target: https://travis-ci.com/Julian/jsonschema
-+
-+.. |AppVeyor| image:: https://ci.appveyor.com/api/projects/status/adtt0aiaihy6muyn/branch/master?svg=true
-+   :alt: AppVeyor build status
-+   :target: https://ci.appveyor.com/project/Julian/jsonschema
-+
-+.. |Codecov| image:: https://codecov.io/gh/Julian/jsonschema/branch/master/graph/badge.svg
-+   :alt: Codecov Code coverage
-+   :target: https://codecov.io/gh/Julian/jsonschema
-+
-+.. |ReadTheDocs| image:: https://readthedocs.org/projects/python-jsonschema/badge/?version=stable&style=flat
-+   :alt: ReadTheDocs status
-+   :target: https://python-jsonschema.readthedocs.io/en/stable/
-+
-+
-+``jsonschema`` is an implementation of `JSON Schema <https://json-schema.org>`_
-+for Python (supporting 2.7+ including Python 3).
-+
-+.. code-block:: python
-+
-+    >>> from jsonschema import validate
-+
-+    >>> # A sample schema, like what we'd get from json.load()
-+    >>> schema = {
-+    ...     "type" : "object",
-+    ...     "properties" : {
-+    ...         "price" : {"type" : "number"},
-+    ...         "name" : {"type" : "string"},
-+    ...     },
-+    ... }
-+
-+    >>> # If no exception is raised by validate(), the instance is valid.
-+    >>> validate(instance={"name" : "Eggs", "price" : 34.99}, schema=schema)
-+
-+    >>> validate(
-+    ...     instance={"name" : "Eggs", "price" : "Invalid"}, schema=schema,
-+    ... )                                   # doctest: +IGNORE_EXCEPTION_DETAIL
-+    Traceback (most recent call last):
-+        ...
-+    ValidationError: 'Invalid' is not of type 'number'
-+
-+It can also be used from the console:
-+
-+.. code-block:: bash
-+
-+    $ jsonschema -i sample.json sample.schema
-+
-+Features
-+--------
-+
-+* Full support for
-+  `Draft 7 <https://python-jsonschema.readthedocs.io/en/latest/validate/#jsonschema.Draft7Validator>`_,
-+  `Draft 6 <https://python-jsonschema.readthedocs.io/en/latest/validate/#jsonschema.Draft6Validator>`_,
-+  `Draft 4 <https://python-jsonschema.readthedocs.io/en/latest/validate/#jsonschema.Draft4Validator>`_
-+  and
-+  `Draft 3 <https://python-jsonschema.readthedocs.io/en/latest/validate/#jsonschema.Draft3Validator>`_
-+
-+* `Lazy validation <https://python-jsonschema.readthedocs.io/en/latest/validate/#jsonschema.IValidator.iter_errors>`_
-+  that can iteratively report *all* validation errors.
-+
-+* `Programmatic querying <https://python-jsonschema.readthedocs.io/en/latest/errors/>`_
-+  of which properties or items failed validation.
-+
-+
-+Installation
-+------------
-+
-+``jsonschema`` is available on `PyPI <https://pypi.org/project/jsonschema/>`_. You can install using `pip <https://pip.pypa.io/en/stable/>`_:
-+
-+.. code-block:: bash
-+
-+    $ pip install jsonschema
-+
-+
-+Demo
-+----
-+
-+Try ``jsonschema`` interactively in this online demo:
-+
-+.. image:: https://user-images.githubusercontent.com/1155573/56745335-8b158a00-6750-11e9-8776-83fa675939c4.png
-+    :target: https://notebooks.ai/demo/gh/Julian/jsonschema
-+    :alt: Open Live Demo
-+
-+
-+The online demo notebook will look similar to this:
-+
-+
-+.. image:: https://user-images.githubusercontent.com/1155573/56820861-5c1c1880-6823-11e9-802a-ce01c5ec574f.gif
-+    :alt: Open Live Demo
-+    :width: 480 px
-+
-+
-+Release Notes
-+-------------
-+
-+v3.1 brings support for ECMA 262 dialect regular expressions
-+throughout schemas, as recommended by the specification. Big
-+thanks to @Zac-HD for authoring support in a new `js-regex
-+<https://pypi.org/project/js-regex/>`_ library.
-+
-+
-+Running the Test Suite
-+----------------------
-+
-+If you have ``tox`` installed (perhaps via ``pip install tox`` or your
-+package manager), running ``tox`` in the directory of your source
-+checkout will run ``jsonschema``'s test suite on all of the versions
-+of Python ``jsonschema`` supports. If you don't have all of the
-+versions that ``jsonschema`` is tested under, you'll likely want to run
-+using ``tox``'s ``--skip-missing-interpreters`` option.
-+
-+Of course you're also free to just run the tests on a single version with your
-+favorite test runner. The tests live in the ``jsonschema.tests`` package.
-+
-+
-+Benchmarks
-+----------
-+
-+``jsonschema``'s benchmarks make use of `pyperf
-+<https://pyperf.readthedocs.io>`_.
-+
-+Running them can be done via ``tox -e perf``, or by invoking the ``pyperf``
-+commands externally (after ensuring that both it and ``jsonschema`` itself are
-+installed)::
-+
-+    $ python -m pyperf jsonschema/benchmarks/test_suite.py --hist --output results.json
-+
-+To compare to a previous run, use::
-+
-+    $ python -m pyperf compare_to --table reference.json results.json
-+
-+See the ``pyperf`` documentation for more details.
-+
-+
-+Community
-+---------
-+
-+There's a `mailing list <https://groups.google.com/forum/#!forum/jsonschema>`_
-+for this implementation on Google Groups.
-+
-+Please join, and feel free to send questions there.
-+
-+
-+Contributing
-+------------
-+
-+I'm Julian Berman.
-+
-+``jsonschema`` is on `GitHub <https://github.com/Julian/jsonschema>`_.
-+
-+Get in touch, via GitHub or otherwise, if you've got something to contribute;
-+it'd be most welcome!
-+
-+You can also generally find me on Freenode (nick: ``tos9``) in various
-+channels, including ``#python``.
-+
-+If you feel overwhelmingly grateful, you can also woo me with beer money
-+via Google Pay with the email in my GitHub profile.
-+
-+And for companies who appreciate ``jsonschema`` and its continued support
-+and growth, ``jsonschema`` is also now supportable via `TideLift
-+<https://tidelift.com/subscription/pkg/pypi-jsonschema?utm_source=pypi-jsonschema&utm_medium=referral&utm_campaign=readme>`_.
-+
-+
-diff --git a/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/RECORD b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/RECORD
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/RECORD
-@@ -0,0 +1,34 @@
-+jsonschema/__init__.py,sha256=dHAr_pQLbbDFoRnbVMrQVztVUvnBFgFlm7bU82pMvOk,934
-+jsonschema/__main__.py,sha256=in4bbzfixCAyGe3RhBwhQVZnGkruszNedcbmwxGyJgc,39
-+jsonschema/_format.py,sha256=vwD1v7S8BmJvSF5y0o6dbPgjAyzt07PZpyO3pvNVVgQ,11691
-+jsonschema/_legacy_validators.py,sha256=kYcYiHfRV-aQtIQv2qe_71L3QFs3LiJ3v69ifteAN4E,4584
-+jsonschema/_reflect.py,sha256=gggQrcrf5FRoyhgdE6ggJ4n2FQHEzWS4CS-cm9bYcqI,5023
-+jsonschema/_types.py,sha256=t2naRRhuTKIUIB0GMR9kOp2la2aVqeT2tFlVAobndmg,4490
-+jsonschema/_utils.py,sha256=ezZJMQ0eU4oPvkTmZi6g5vsCtIFRhb8vN4Y9S4uQwW8,5168
-+jsonschema/_validators.py,sha256=UDYawpxK8f_rIeEBXZtwr0tlxi3OH1Zt2ca0zAxjNdk,11703
-+jsonschema/cli.py,sha256=3Vc8ptc2GD7zDxK2F-kamqmrE9f35a2KVDGR1p1acUU,2310
-+jsonschema/compat.py,sha256=37gSA8MmAR65zlqzsSEB-0ObZk_I2TF7z1kp9zmkskg,1353
-+jsonschema/exceptions.py,sha256=ukWIE7aEES8Kh0UaUP9turpUkV2ZzXEN8CwfRObzlMA,10450
-+jsonschema/validators.py,sha256=RIZTQyZxhWwsyIIRFQGEjLzq38LlyzzzdYUl9jxzV0M,29400
-+jsonschema/benchmarks/__init__.py,sha256=A0sQrxDBVHSyQ-8ru3L11hMXf3q9gVuB9x_YgHb4R9M,70
-+jsonschema/benchmarks/issue232.py,sha256=-azAUmrP75f0uj0x2zEdBc3-DhQw3XX9UQVDCyhBKRk,541
-+jsonschema/benchmarks/json_schema_test_suite.py,sha256=okRE6ACue2C0Hd1dMhnpZ0bc3AoZdDd8cw2lwTnbzwU,343
-+jsonschema/schemas/draft3.json,sha256=PdtCu2s06Va3hV9cX5A5-rvye50SVF__NrvxG0vuzz0,4564
-+jsonschema/schemas/draft4.json,sha256=ODL-0W3kv7MmqL3tm3GJguuVxN1QNO1GtBcpWE3ok98,5399
-+jsonschema/schemas/draft6.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437
-+jsonschema/schemas/draft7.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819
-+jsonschema/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-+jsonschema/tests/_helpers.py,sha256=3c-b9CK0cdGfhtuUhzM1AjtqPtR2VFvfcKC6G2g0a-0,157
-+jsonschema/tests/_suite.py,sha256=6lxDHOyjJfCjdn9vfOLcUpXtNl0vLIljrinSFi1tRhc,6728
-+jsonschema/tests/test_cli.py,sha256=djw7ZD6zm5_8FgsAr9XyYk4zErIEoPRs8SzBe5nYcWY,4727
-+jsonschema/tests/test_exceptions.py,sha256=zw9bd_al5zOzAm8nJ0IqeymiweH6i8k1AN3CB7t618A,15348
-+jsonschema/tests/test_format.py,sha256=ob0QDop_nwRwiLs1P6sGsf6ZITik00CWhe1pL8JRiA0,2982
-+jsonschema/tests/test_jsonschema_test_suite.py,sha256=8uiplgvQq5yFvtvWxbyqyr7HMYRCx6jNE3OiU-u8AEk,8464
-+jsonschema/tests/test_types.py,sha256=lntWPZ86fwo_aNKbfCueX5R2xdwrYYN7Zo5C0-ppk-0,5902
-+jsonschema/tests/test_validators.py,sha256=R_zhsDKG5r66LE1OVlzdcPyKRWKgc07e6NVWxQkrRiQ,60394
-+jsonschema-3.2.0.dist-info/COPYING,sha256=T5KgFaE8TRoEC-8BiqE0MLTxvHO0Gxa7hGw0Z2bedDk,1057
-+jsonschema-3.2.0.dist-info/METADATA,sha256=os_TL7tiSfPYDMKYoAqoNsw_yMkDJmCL2bqhp-csNR0,7760
-+jsonschema-3.2.0.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
-+jsonschema-3.2.0.dist-info/entry_points.txt,sha256=KaVUBBSLyzi5naUkVg-r3q6T_igdLgaHY6Mm3oLX73s,52
-+jsonschema-3.2.0.dist-info/top_level.txt,sha256=jGoNS61vDONU8U7p0Taf-y_8JVG1Z2CJ5Eif6zMN_cw,11
-+jsonschema-3.2.0.dist-info/RECORD,,
-diff --git a/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/WHEEL b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/WHEEL
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/WHEEL
-@@ -0,0 +1,6 @@
-+Wheel-Version: 1.0
-+Generator: bdist_wheel (0.33.6)
-+Root-Is-Purelib: true
-+Tag: py2-none-any
-+Tag: py3-none-any
-+
-diff --git a/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/entry_points.txt b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/entry_points.txt
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/entry_points.txt
-@@ -0,0 +1,3 @@
-+[console_scripts]
-+jsonschema = jsonschema.cli:main
-+
-diff --git a/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/top_level.txt b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/top_level.txt
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema-3.2.0.dist-info/top_level.txt
-@@ -0,0 +1,1 @@
-+jsonschema
-diff --git a/third_party/python/jsonschema/jsonschema/__init__.py b/third_party/python/jsonschema/jsonschema/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/__init__.py
-@@ -0,0 +1,34 @@
-+"""
-+An implementation of JSON Schema for Python
-+
-+The main functionality is provided by the validator classes for each of the
-+supported JSON Schema versions.
-+
-+Most commonly, `validate` is the quickest way to simply validate a given
-+instance under a schema, and will create a validator for you.
-+"""
-+
-+from jsonschema.exceptions import (
-+    ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError
-+)
-+from jsonschema._format import (
-+    FormatChecker,
-+    draft3_format_checker,
-+    draft4_format_checker,
-+    draft6_format_checker,
-+    draft7_format_checker,
-+)
-+from jsonschema._types import TypeChecker
-+from jsonschema.validators import (
-+    Draft3Validator,
-+    Draft4Validator,
-+    Draft6Validator,
-+    Draft7Validator,
-+    RefResolver,
-+    validate,
-+)
-+try:
-+    from importlib import metadata
-+except ImportError: # for Python<3.8
-+    import importlib_metadata as metadata
-+__version__ = metadata.version("jsonschema")
-diff --git a/third_party/python/jsonschema/jsonschema/__main__.py b/third_party/python/jsonschema/jsonschema/__main__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/__main__.py
-@@ -0,0 +1,2 @@
-+from jsonschema.cli import main
-+main()
-diff --git a/third_party/python/jsonschema/jsonschema/_format.py b/third_party/python/jsonschema/jsonschema/_format.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/_format.py
-@@ -0,0 +1,425 @@
-+import datetime
-+import re
-+import socket
-+import struct
-+
-+from jsonschema.compat import str_types
-+from jsonschema.exceptions import FormatError
-+
-+
-+class FormatChecker(object):
-+    """
-+    A ``format`` property checker.
-+
-+    JSON Schema does not mandate that the ``format`` property actually do any
-+    validation. If validation is desired however, instances of this class can
-+    be hooked into validators to enable format validation.
-+
-+    `FormatChecker` objects always return ``True`` when asked about
-+    formats that they do not know how to validate.
-+
-+    To check a custom format using a function that takes an instance and
-+    returns a ``bool``, use the `FormatChecker.checks` or
-+    `FormatChecker.cls_checks` decorators.
-+
-+    Arguments:
-+
-+        formats (~collections.Iterable):
-+
-+            The known formats to validate. This argument can be used to
-+            limit which formats will be used during validation.
-+    """
-+
-+    checkers = {}
-+
-+    def __init__(self, formats=None):
-+        if formats is None:
-+            self.checkers = self.checkers.copy()
-+        else:
-+            self.checkers = dict((k, self.checkers[k]) for k in formats)
-+
-+    def __repr__(self):
-+        return "<FormatChecker checkers={}>".format(sorted(self.checkers))
-+
-+    def checks(self, format, raises=()):
-+        """
-+        Register a decorated function as validating a new format.
-+
-+        Arguments:
-+
-+            format (str):
-+
-+                The format that the decorated function will check.
-+
-+            raises (Exception):
-+
-+                The exception(s) raised by the decorated function when an
-+                invalid instance is found.
-+
-+                The exception object will be accessible as the
-+                `jsonschema.exceptions.ValidationError.cause` attribute of the
-+                resulting validation error.
-+        """
-+
-+        def _checks(func):
-+            self.checkers[format] = (func, raises)
-+            return func
-+        return _checks
-+
-+    cls_checks = classmethod(checks)
-+
-+    def check(self, instance, format):
-+        """
-+        Check whether the instance conforms to the given format.
-+
-+        Arguments:
-+
-+            instance (*any primitive type*, i.e. str, number, bool):
-+
-+                The instance to check
-+
-+            format (str):
-+
-+                The format that instance should conform to
-+
-+
-+        Raises:
-+
-+            FormatError: if the instance does not conform to ``format``
-+        """
-+
-+        if format not in self.checkers:
-+            return
-+
-+        func, raises = self.checkers[format]
-+        result, cause = None, None
-+        try:
-+            result = func(instance)
-+        except raises as e:
-+            cause = e
-+        if not result:
-+            raise FormatError(
-+                "%r is not a %r" % (instance, format), cause=cause,
-+            )
-+
-+    def conforms(self, instance, format):
-+        """
-+        Check whether the instance conforms to the given format.
-+
-+        Arguments:
-+
-+            instance (*any primitive type*, i.e. str, number, bool):
-+
-+                The instance to check
-+
-+            format (str):
-+
-+                The format that instance should conform to
-+
-+        Returns:
-+
-+            bool: whether it conformed
-+        """
-+
-+        try:
-+            self.check(instance, format)
-+        except FormatError:
-+            return False
-+        else:
-+            return True
-+
-+
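-+# Example usage (a sketch; ``even`` is a made-up format name):
-+#
-+#     checker = FormatChecker()
-+#
-+#     @checker.checks("even", raises=ValueError)
-+#     def is_even(value):
-+#         return int(value) % 2 == 0
-+#
-+#     checker.conforms(2, "even")  # True
-+#     checker.conforms(3, "even")  # False
-+
-+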
-+draft3_format_checker = FormatChecker()
-+draft4_format_checker = FormatChecker()
-+draft6_format_checker = FormatChecker()
-+draft7_format_checker = FormatChecker()
-+
-+
-+_draft_checkers = dict(
-+    draft3=draft3_format_checker,
-+    draft4=draft4_format_checker,
-+    draft6=draft6_format_checker,
-+    draft7=draft7_format_checker,
-+)
-+
-+
-+def _checks_drafts(
-+    name=None,
-+    draft3=None,
-+    draft4=None,
-+    draft6=None,
-+    draft7=None,
-+    raises=(),
-+):
-+    draft3 = draft3 or name
-+    draft4 = draft4 or name
-+    draft6 = draft6 or name
-+    draft7 = draft7 or name
-+
-+    def wrap(func):
-+        if draft3:
-+            func = _draft_checkers["draft3"].checks(draft3, raises)(func)
-+        if draft4:
-+            func = _draft_checkers["draft4"].checks(draft4, raises)(func)
-+        if draft6:
-+            func = _draft_checkers["draft6"].checks(draft6, raises)(func)
-+        if draft7:
-+            func = _draft_checkers["draft7"].checks(draft7, raises)(func)
-+
-+        # Oy. This is bad global state, but relied upon for now, until
-+        # deprecation. See https://github.com/Julian/jsonschema/issues/519
-+        # and test_format_checkers_come_with_defaults
-+        FormatChecker.cls_checks(draft7 or draft6 or draft4 or draft3, raises)(
-+            func,
-+        )
-+        return func
-+    return wrap
-+
-+
-+@_checks_drafts(name="idn-email")
-+@_checks_drafts(name="email")
-+def is_email(instance):
-+    if not isinstance(instance, str_types):
-+        return True
-+    return "@" in instance
-+
-+
-+_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
-+
-+
-+@_checks_drafts(
-+    draft3="ip-address", draft4="ipv4", draft6="ipv4", draft7="ipv4",
-+)
-+def is_ipv4(instance):
-+    if not isinstance(instance, str_types):
-+        return True
-+    if not _ipv4_re.match(instance):
-+        return False
-+    return all(0 <= int(component) <= 255 for component in instance.split("."))
-+
-+
-+if hasattr(socket, "inet_pton"):
-+    # FIXME: Really this only should raise struct.error, but see the sadness
-+    #        that is https://twistedmatrix.com/trac/ticket/9409
-+    @_checks_drafts(
-+        name="ipv6", raises=(socket.error, struct.error, ValueError),
-+    )
-+    def is_ipv6(instance):
-+        if not isinstance(instance, str_types):
-+            return True
-+        return socket.inet_pton(socket.AF_INET6, instance)
-+
-+
-+_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$")
-+
-+
-+@_checks_drafts(
-+    draft3="host-name",
-+    draft4="hostname",
-+    draft6="hostname",
-+    draft7="hostname",
-+)
-+def is_host_name(instance):
-+    if not isinstance(instance, str_types):
-+        return True
-+    if not _host_name_re.match(instance):
-+        return False
-+    components = instance.split(".")
-+    for component in components:
-+        if len(component) > 63:
-+            return False
-+    return True
-+
-+
-+try:
-+    # The built-in `idna` codec only implements RFC 3490, so we go elsewhere.
-+    import idna
-+except ImportError:
-+    pass
-+else:
-+    @_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError)
-+    def is_idn_host_name(instance):
-+        if not isinstance(instance, str_types):
-+            return True
-+        idna.encode(instance)
-+        return True
-+
-+
-+try:
-+    import rfc3987
-+except ImportError:
-+    try:
-+        from rfc3986_validator import validate_rfc3986
-+    except ImportError:
-+        pass
-+    else:
-+        @_checks_drafts(name="uri")
-+        def is_uri(instance):
-+            if not isinstance(instance, str_types):
-+                return True
-+            return validate_rfc3986(instance, rule="URI")
-+
-+        @_checks_drafts(
-+            draft6="uri-reference",
-+            draft7="uri-reference",
-+            raises=ValueError,
-+        )
-+        def is_uri_reference(instance):
-+            if not isinstance(instance, str_types):
-+                return True
-+            return validate_rfc3986(instance, rule="URI_reference")
-+
-+else:
-+    @_checks_drafts(draft7="iri", raises=ValueError)
-+    def is_iri(instance):
-+        if not isinstance(instance, str_types):
-+            return True
-+        return rfc3987.parse(instance, rule="IRI")
-+
-+    @_checks_drafts(draft7="iri-reference", raises=ValueError)
-+    def is_iri_reference(instance):
-+        if not isinstance(instance, str_types):
-+            return True
-+        return rfc3987.parse(instance, rule="IRI_reference")
-+
-+    @_checks_drafts(name="uri", raises=ValueError)
-+    def is_uri(instance):
-+        if not isinstance(instance, str_types):
-+            return True
-+        return rfc3987.parse(instance, rule="URI")
-+
-+    @_checks_drafts(
-+        draft6="uri-reference",
-+        draft7="uri-reference",
-+        raises=ValueError,
-+    )
-+    def is_uri_reference(instance):
-+        if not isinstance(instance, str_types):
-+            return True
-+        return rfc3987.parse(instance, rule="URI_reference")
-+
-+
-+try:
-+    from strict_rfc3339 import validate_rfc3339
-+except ImportError:
-+    try:
-+        from rfc3339_validator import validate_rfc3339
-+    except ImportError:
-+        validate_rfc3339 = None
-+
-+if validate_rfc3339:
-+    @_checks_drafts(name="date-time")
-+    def is_datetime(instance):
-+        if not isinstance(instance, str_types):
-+            return True
-+        return validate_rfc3339(instance)
-+
-+    @_checks_drafts(draft7="time")
-+    def is_time(instance):
-+        if not isinstance(instance, str_types):
-+            return True
-+        return is_datetime("1970-01-01T" + instance)
-+
-+
-+@_checks_drafts(name="regex", raises=re.error)
-+def is_regex(instance):
-+    if not isinstance(instance, str_types):
-+        return True
-+    return re.compile(instance)
-+
-+
-+@_checks_drafts(draft3="date", draft7="date", raises=ValueError)
-+def is_date(instance):
-+    if not isinstance(instance, str_types):
-+        return True
-+    return datetime.datetime.strptime(instance, "%Y-%m-%d")
-+
-+
-+@_checks_drafts(draft3="time", raises=ValueError)
-+def is_draft3_time(instance):
-+    if not isinstance(instance, str_types):
-+        return True
-+    return datetime.datetime.strptime(instance, "%H:%M:%S")
-+
-+
-+try:
-+    import webcolors
-+except ImportError:
-+    pass
-+else:
-+    def is_css_color_code(instance):
-+        return webcolors.normalize_hex(instance)
-+
-+    @_checks_drafts(draft3="color", raises=(ValueError, TypeError))
-+    def is_css21_color(instance):
-+        if (
-+            not isinstance(instance, str_types) or
-+            instance.lower() in webcolors.css21_names_to_hex
-+        ):
-+            return True
-+        return is_css_color_code(instance)
-+
-+    def is_css3_color(instance):
-+        if instance.lower() in webcolors.css3_names_to_hex:
-+            return True
-+        return is_css_color_code(instance)
-+
-+
-+try:
-+    import jsonpointer
-+except ImportError:
-+    pass
-+else:
-+    @_checks_drafts(
-+        draft6="json-pointer",
-+        draft7="json-pointer",
-+        raises=jsonpointer.JsonPointerException,
-+    )
-+    def is_json_pointer(instance):
-+        if not isinstance(instance, str_types):
-+            return True
-+        return jsonpointer.JsonPointer(instance)
-+
-+    # TODO: I don't want to maintain this, so it
-+    #       needs to go either into jsonpointer (pending
-+    #       https://github.com/stefankoegl/python-json-pointer/issues/34) or
-+    #       into a new external library.
-+    @_checks_drafts(
-+        draft7="relative-json-pointer",
-+        raises=jsonpointer.JsonPointerException,
-+    )
-+    def is_relative_json_pointer(instance):
-+        # Definition taken from:
-+        # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
-+        if not isinstance(instance, str_types):
-+            return True
-+        non_negative_integer, rest = [], ""
-+        for i, character in enumerate(instance):
-+            if character.isdigit():
-+                non_negative_integer.append(character)
-+                continue
-+
-+            if not non_negative_integer:
-+                return False
-+
-+            rest = instance[i:]
-+            break
-+        return (rest == "#") or jsonpointer.JsonPointer(rest)
-+
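-+    # Examples accepted by the grammar above (per the linked draft;
-+    # values are illustrative):
-+    #   "0"      -> the current value itself
-+    #   "1/foo"  -> member "foo" of the parent object
-+    #   "0#"     -> the key or index of the current value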
-+
-+try:
-+    import uritemplate.exceptions
-+except ImportError:
-+    pass
-+else:
-+    @_checks_drafts(
-+        draft6="uri-template",
-+        draft7="uri-template",
-+        raises=uritemplate.exceptions.InvalidTemplate,
-+    )
-+    def is_uri_template(
-+        instance,
-+        template_validator=uritemplate.Validator().force_balanced_braces(),
-+    ):
-+        template = uritemplate.URITemplate(instance)
-+        return template_validator.validate(template)
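-+
-+
-+# Usage sketch (assuming the public ``jsonschema`` entry points): the checks
-+# above only run when a ``FormatChecker`` is passed to a validator.
-+#
-+#     from jsonschema import FormatChecker, validate
-+#
-+#     # Raises ValidationError, but only because a format checker is
-+#     # supplied; without one, "format" is not asserted.
-+#     validate("not-a-date", {"format": "date"},
-+#              format_checker=FormatChecker())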
-diff --git a/third_party/python/jsonschema/jsonschema/_legacy_validators.py b/third_party/python/jsonschema/jsonschema/_legacy_validators.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/_legacy_validators.py
-@@ -0,0 +1,141 @@
-+from jsonschema import _utils
-+from jsonschema.compat import iteritems
-+from jsonschema.exceptions import ValidationError
-+
-+
-+def dependencies_draft3(validator, dependencies, instance, schema):
-+    if not validator.is_type(instance, "object"):
-+        return
-+
-+    for property, dependency in iteritems(dependencies):
-+        if property not in instance:
-+            continue
-+
-+        if validator.is_type(dependency, "object"):
-+            for error in validator.descend(
-+                instance, dependency, schema_path=property,
-+            ):
-+                yield error
-+        elif validator.is_type(dependency, "string"):
-+            if dependency not in instance:
-+                yield ValidationError(
-+                    "%r is a dependency of %r" % (dependency, property)
-+                )
-+        else:
-+            for each in dependency:
-+                if each not in instance:
-+                    message = "%r is a dependency of %r"
-+                    yield ValidationError(message % (each, property))
-+
-+
-+def disallow_draft3(validator, disallow, instance, schema):
-+    for disallowed in _utils.ensure_list(disallow):
-+        if validator.is_valid(instance, {"type": [disallowed]}):
-+            yield ValidationError(
-+                "%r is disallowed for %r" % (disallowed, instance)
-+            )
-+
-+
-+def extends_draft3(validator, extends, instance, schema):
-+    if validator.is_type(extends, "object"):
-+        for error in validator.descend(instance, extends):
-+            yield error
-+        return
-+    for index, subschema in enumerate(extends):
-+        for error in validator.descend(instance, subschema, schema_path=index):
-+            yield error
-+
-+
-+def items_draft3_draft4(validator, items, instance, schema):
-+    if not validator.is_type(instance, "array"):
-+        return
-+
-+    if validator.is_type(items, "object"):
-+        for index, item in enumerate(instance):
-+            for error in validator.descend(item, items, path=index):
-+                yield error
-+    else:
-+        for (index, item), subschema in zip(enumerate(instance), items):
-+            for error in validator.descend(
-+                item, subschema, path=index, schema_path=index,
-+            ):
-+                yield error
-+
-+
-+def minimum_draft3_draft4(validator, minimum, instance, schema):
-+    if not validator.is_type(instance, "number"):
-+        return
-+
-+    if schema.get("exclusiveMinimum", False):
-+        failed = instance <= minimum
-+        cmp = "less than or equal to"
-+    else:
-+        failed = instance < minimum
-+        cmp = "less than"
-+
-+    if failed:
-+        yield ValidationError(
-+            "%r is %s the minimum of %r" % (instance, cmp, minimum)
-+        )
-+
-+
-+def maximum_draft3_draft4(validator, maximum, instance, schema):
-+    if not validator.is_type(instance, "number"):
-+        return
-+
-+    if schema.get("exclusiveMaximum", False):
-+        failed = instance >= maximum
-+        cmp = "greater than or equal to"
-+    else:
-+        failed = instance > maximum
-+        cmp = "greater than"
-+
-+    if failed:
-+        yield ValidationError(
-+            "%r is %s the maximum of %r" % (instance, cmp, maximum)
-+        )
-+
-+
-+def properties_draft3(validator, properties, instance, schema):
-+    if not validator.is_type(instance, "object"):
-+        return
-+
-+    for property, subschema in iteritems(properties):
-+        if property in instance:
-+            for error in validator.descend(
-+                instance[property],
-+                subschema,
-+                path=property,
-+                schema_path=property,
-+            ):
-+                yield error
-+        elif subschema.get("required", False):
-+            error = ValidationError("%r is a required property" % property)
-+            error._set(
-+                validator="required",
-+                validator_value=subschema["required"],
-+                instance=instance,
-+                schema=schema,
-+            )
-+            error.path.appendleft(property)
-+            error.schema_path.extend([property, "required"])
-+            yield error
-+
-+
-+def type_draft3(validator, types, instance, schema):
-+    types = _utils.ensure_list(types)
-+
-+    all_errors = []
-+    for index, type in enumerate(types):
-+        if validator.is_type(type, "object"):
-+            errors = list(validator.descend(instance, type, schema_path=index))
-+            if not errors:
-+                return
-+            all_errors.extend(errors)
-+        else:
-+            if validator.is_type(instance, type):
-+                return
-+    else:
-+        yield ValidationError(
-+            _utils.types_msg(instance, types), context=all_errors,
-+        )
-diff --git a/third_party/python/jsonschema/jsonschema/_reflect.py b/third_party/python/jsonschema/jsonschema/_reflect.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/_reflect.py
-@@ -0,0 +1,155 @@
-+# -*- test-case-name: twisted.test.test_reflect -*-
-+# Copyright (c) Twisted Matrix Laboratories.
-+# See LICENSE for details.
-+
-+"""
-+Standardized versions of various cool and/or strange things that you can do
-+with Python's reflection capabilities.
-+"""
-+
-+import sys
-+
-+from jsonschema.compat import PY3
-+
-+
-+class _NoModuleFound(Exception):
-+    """
-+    No module was found because none exists.
-+    """
-+
-+
-+
-+class InvalidName(ValueError):
-+    """
-+    The given name is not a dot-separated list of Python objects.
-+    """
-+
-+
-+
-+class ModuleNotFound(InvalidName):
-+    """
-+    The module associated with the given name doesn't exist and it can't be
-+    imported.
-+    """
-+
-+
-+
-+class ObjectNotFound(InvalidName):
-+    """
-+    The object associated with the given name doesn't exist and it can't be
-+    imported.
-+    """
-+
-+
-+
-+if PY3:
-+    def reraise(exception, traceback):
-+        raise exception.with_traceback(traceback)
-+else:
-+    exec("""def reraise(exception, traceback):
-+        raise exception.__class__, exception, traceback""")
-+
-+reraise.__doc__ = """
-+Re-raise an exception, with an optional traceback, in a way that is compatible
-+with both Python 2 and Python 3.
-+
-+Note that on Python 3, re-raised exceptions will be mutated, with their
-+C{__traceback__} attribute being set.
-+
-+@param exception: The exception instance.
-+@param traceback: The traceback to use, or C{None} indicating a new traceback.
-+"""
-+
-+
-+def _importAndCheckStack(importName):
-+    """
-+    Import the given name as a module, then walk the stack to determine whether
-+    the failure was the module not existing, or some code in the module (for
-+    example a dependent import) failing.  This can be helpful to determine
-+    whether any actual application code was run.  For example, to distiguish
-+    administrative error (entering the wrong module name), from programmer
-+    error (writing buggy code in a module that fails to import).
-+
-+    @param importName: The name of the module to import.
-+    @type importName: C{str}
-+    @raise Exception: if something bad happens.  This can be any type of
-+        exception, since nobody knows what loading some arbitrary code might
-+        do.
-+    @raise _NoModuleFound: if no module was found.
-+    """
-+    try:
-+        return __import__(importName)
-+    except ImportError:
-+        excType, excValue, excTraceback = sys.exc_info()
-+        while excTraceback:
-+            execName = excTraceback.tb_frame.f_globals["__name__"]
-+            # in Python 2 execName is None when an ImportError is encountered,
-+            # whereas in Python 3 execName is equal to the importName.
-+            if execName is None or execName == importName:
-+                reraise(excValue, excTraceback)
-+            excTraceback = excTraceback.tb_next
-+        raise _NoModuleFound()
-+
-+
-+
-+def namedAny(name):
-+    """
-+    Retrieve a Python object by its fully qualified name from the global Python
-+    module namespace.  The first part of the name, that describes a module,
-+    will be discovered and imported.  Each subsequent part of the name is
-+    treated as the name of an attribute of the object specified by all of the
-+    name which came before it.  For example, the fully-qualified name of this
-+    object is 'twisted.python.reflect.namedAny'.
-+
-+    @type name: L{str}
-+    @param name: The name of the object to return.
-+
-+    @raise InvalidName: If the name is an empty string, starts or ends with
-+        a '.', or is otherwise syntactically incorrect.
-+
-+    @raise ModuleNotFound: If the name is syntactically correct but the
-+        module it specifies cannot be imported because it does not appear to
-+        exist.
-+
-+    @raise ObjectNotFound: If the name is syntactically correct, includes at
-+        least one '.', but the module it specifies cannot be imported because
-+        it does not appear to exist.
-+
-+    @raise AttributeError: If an attribute of an object along the way cannot be
-+        accessed, or a module along the way is not found.
-+
-+    @return: the Python object identified by 'name'.
-+    """
-+    if not name:
-+        raise InvalidName('Empty module name')
-+
-+    names = name.split('.')
-+
-+    # if the name starts or ends with a '.' or contains '..', the __import__
-+    # will raise an 'Empty module name' error. This will provide a better error
-+    # message.
-+    if '' in names:
-+        raise InvalidName(
-+            "name must be a string giving a '.'-separated list of Python "
-+            "identifiers, not %r" % (name,))
-+
-+    topLevelPackage = None
-+    moduleNames = names[:]
-+    while not topLevelPackage:
-+        if moduleNames:
-+            trialname = '.'.join(moduleNames)
-+            try:
-+                topLevelPackage = _importAndCheckStack(trialname)
-+            except _NoModuleFound:
-+                moduleNames.pop()
-+        else:
-+            if len(names) == 1:
-+                raise ModuleNotFound("No module named %r" % (name,))
-+            else:
-+                raise ObjectNotFound('%r does not name an object' % (name,))
-+
-+    obj = topLevelPackage
-+    for n in names[1:]:
-+        obj = getattr(obj, n)
-+
-+    return obj
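-+
-+
-+# Usage sketch (names are illustrative):
-+#
-+#     namedAny("os.path.join")   # -> <function join>
-+#     namedAny("")               # raises InvalidName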
-diff --git a/third_party/python/jsonschema/jsonschema/_types.py b/third_party/python/jsonschema/jsonschema/_types.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/_types.py
-@@ -0,0 +1,188 @@
-+import numbers
-+
-+from pyrsistent import pmap
-+import attr
-+
-+from jsonschema.compat import int_types, str_types
-+from jsonschema.exceptions import UndefinedTypeCheck
-+
-+
-+def is_array(checker, instance):
-+    return isinstance(instance, list)
-+
-+
-+def is_bool(checker, instance):
-+    return isinstance(instance, bool)
-+
-+
-+def is_integer(checker, instance):
-+    # bool inherits from int, so ensure bools aren't reported as ints
-+    if isinstance(instance, bool):
-+        return False
-+    return isinstance(instance, int_types)
-+
-+
-+def is_null(checker, instance):
-+    return instance is None
-+
-+
-+def is_number(checker, instance):
-+    # bool inherits from int, so ensure bools aren't reported as ints
-+    if isinstance(instance, bool):
-+        return False
-+    return isinstance(instance, numbers.Number)
-+
-+
-+def is_object(checker, instance):
-+    return isinstance(instance, dict)
-+
-+
-+def is_string(checker, instance):
-+    return isinstance(instance, str_types)
-+
-+
-+def is_any(checker, instance):
-+    return True
-+
-+
-+@attr.s(frozen=True)
-+class TypeChecker(object):
-+    """
-+    A ``type`` property checker.
-+
-+    A `TypeChecker` performs type checking for an `IValidator`. Type
-+    checks to perform are updated using `TypeChecker.redefine` or
-+    `TypeChecker.redefine_many` and removed via `TypeChecker.remove`.
-+    Each of these return a new `TypeChecker` object.
-+
-+    Arguments:
-+
-+        type_checkers (dict):
-+
-+            The initial mapping of types to their checking functions.
-+    """
-+    _type_checkers = attr.ib(default=pmap(), converter=pmap)
-+
-+    def is_type(self, instance, type):
-+        """
-+        Check if the instance is of the appropriate type.
-+
-+        Arguments:
-+
-+            instance (object):
-+
-+                The instance to check
-+
-+            type (str):
-+
-+                The name of the type that is expected.
-+
-+        Returns:
-+
-+            bool: Whether it conformed.
-+
-+
-+        Raises:
-+
-+            `jsonschema.exceptions.UndefinedTypeCheck`:
-+                if type is unknown to this object.
-+        """
-+        try:
-+            fn = self._type_checkers[type]
-+        except KeyError:
-+            raise UndefinedTypeCheck(type)
-+
-+        return fn(self, instance)
-+
-+    def redefine(self, type, fn):
-+        """
-+        Produce a new checker with the given type redefined.
-+
-+        Arguments:
-+
-+            type (str):
-+
-+                The name of the type to check.
-+
-+            fn (collections.Callable):
-+
-+                A function taking exactly two parameters - the type
-+                checker calling the function and the instance to check.
-+                The function should return true if instance is of this
-+                type and false otherwise.
-+
-+        Returns:
-+
-+            A new `TypeChecker` instance.
-+        """
-+        return self.redefine_many({type: fn})
-+
-+    def redefine_many(self, definitions=()):
-+        """
-+        Produce a new checker with the given types redefined.
-+
-+        Arguments:
-+
-+            definitions (dict):
-+
-+                A dictionary mapping types to their checking functions.
-+
-+        Returns:
-+
-+            A new `TypeChecker` instance.
-+        """
-+        return attr.evolve(
-+            self, type_checkers=self._type_checkers.update(definitions),
-+        )
-+
-+    def remove(self, *types):
-+        """
-+        Produce a new checker with the given types forgotten.
-+
-+        Arguments:
-+
-+            types (~collections.Iterable):
-+
-+                the names of the types to remove.
-+
-+        Returns:
-+
-+            A new `TypeChecker` instance
-+
-+        Raises:
-+
-+            `jsonschema.exceptions.UndefinedTypeCheck`:
-+
-+                if any given type is unknown to this object
-+        """
-+
-+        checkers = self._type_checkers
-+        for each in types:
-+            try:
-+                checkers = checkers.remove(each)
-+            except KeyError:
-+                raise UndefinedTypeCheck(each)
-+        return attr.evolve(self, type_checkers=checkers)
-+
-+
-+draft3_type_checker = TypeChecker(
-+    {
-+        u"any": is_any,
-+        u"array": is_array,
-+        u"boolean": is_bool,
-+        u"integer": is_integer,
-+        u"object": is_object,
-+        u"null": is_null,
-+        u"number": is_number,
-+        u"string": is_string,
-+    },
-+)
-+draft4_type_checker = draft3_type_checker.remove(u"any")
-+draft6_type_checker = draft4_type_checker.redefine(
-+    u"integer",
-+    lambda checker, instance: (
-+        is_integer(checker, instance) or
-+        isinstance(instance, float) and instance.is_integer()
-+    ),
-+)
-+draft7_type_checker = draft6_type_checker
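-+
-+
-+# Usage sketch: deriving a checker that also treats tuples as JSON arrays
-+# (a hypothetical customization, shown only to illustrate ``redefine``).
-+if __name__ == "__main__":
-+    sequence_checker = draft7_type_checker.redefine(
-+        u"array",
-+        lambda checker, instance: (
-+            is_array(checker, instance) or isinstance(instance, tuple)
-+        ),
-+    )
-+    assert sequence_checker.is_type((1, 2), u"array")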
-diff --git a/third_party/python/jsonschema/jsonschema/_utils.py b/third_party/python/jsonschema/jsonschema/_utils.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/_utils.py
-@@ -0,0 +1,212 @@
-+import itertools
-+import json
-+import pkgutil
-+import re
-+
-+from jsonschema.compat import MutableMapping, str_types, urlsplit
-+
-+
-+class URIDict(MutableMapping):
-+    """
-+    Dictionary which uses normalized URIs as keys.
-+    """
-+
-+    def normalize(self, uri):
-+        return urlsplit(uri).geturl()
-+
-+    def __init__(self, *args, **kwargs):
-+        self.store = dict()
-+        self.store.update(*args, **kwargs)
-+
-+    def __getitem__(self, uri):
-+        return self.store[self.normalize(uri)]
-+
-+    def __setitem__(self, uri, value):
-+        self.store[self.normalize(uri)] = value
-+
-+    def __delitem__(self, uri):
-+        del self.store[self.normalize(uri)]
-+
-+    def __iter__(self):
-+        return iter(self.store)
-+
-+    def __len__(self):
-+        return len(self.store)
-+
-+    def __repr__(self):
-+        return repr(self.store)
-+
-+
-+class Unset(object):
-+    """
-+    An as-of-yet unset attribute or unprovided default parameter.
-+    """
-+
-+    def __repr__(self):
-+        return "<unset>"
-+
-+
-+def load_schema(name):
-+    """
-+    Load a schema from ./schemas/``name``.json and return it.
-+    """
-+
-+    data = pkgutil.get_data("jsonschema", "schemas/{0}.json".format(name))
-+    return json.loads(data.decode("utf-8"))
-+
-+
-+def indent(string, times=1):
-+    """
-+    A dumb version of `textwrap.indent` from Python 3.3.
-+    """
-+
-+    return "\n".join(" " * (4 * times) + line for line in string.splitlines())
-+
-+
-+def format_as_index(indices):
-+    """
-+    Construct a single string containing indexing operations for the indices.
-+
-+    For example, [1, 2, "foo"] -> [1][2]["foo"]
-+
-+    Arguments:
-+
-+        indices (sequence):
-+
-+            The indices to format.
-+    """
-+
-+    if not indices:
-+        return ""
-+    return "[%s]" % "][".join(repr(index) for index in indices)
-+
-+
-+def find_additional_properties(instance, schema):
-+    """
-+    Return the set of additional properties for the given ``instance``.
-+
-+    Weeds out properties that should have been validated by ``properties`` and
-+    / or ``patternProperties``.
-+
-+    Assumes ``instance`` is dict-like already.
-+    """
-+
-+    properties = schema.get("properties", {})
-+    patterns = "|".join(schema.get("patternProperties", {}))
-+    for property in instance:
-+        if property not in properties:
-+            if patterns and re.search(patterns, property):
-+                continue
-+            yield property
-+
-+
-+def extras_msg(extras):
-+    """
-+    Create an error message for extra items or properties.
-+    """
-+
-+    if len(extras) == 1:
-+        verb = "was"
-+    else:
-+        verb = "were"
-+    return ", ".join(repr(extra) for extra in extras), verb
-+
-+
-+def types_msg(instance, types):
-+    """
-+    Create an error message for a failure to match the given types.
-+
-+    If any of the given ``types`` is a schema object with a ``name`` property,
-+    that name is used in the message in place of the schema itself.
-+
-+    Otherwise the message is simply the reprs of the given ``types``.
-+    """
-+
-+    reprs = []
-+    for type in types:
-+        try:
-+            reprs.append(repr(type["name"]))
-+        except Exception:
-+            reprs.append(repr(type))
-+    return "%r is not of type %s" % (instance, ", ".join(reprs))
-+
-+
-+def flatten(suitable_for_isinstance):
-+    """
-+    isinstance() can accept a bunch of really annoying different types:
-+        * a single type
-+        * a tuple of types
-+        * an arbitrary nested tree of tuples
-+
-+    Return a flattened tuple of the given argument.
-+    """
-+
-+    types = set()
-+
-+    if not isinstance(suitable_for_isinstance, tuple):
-+        suitable_for_isinstance = (suitable_for_isinstance,)
-+    for thing in suitable_for_isinstance:
-+        if isinstance(thing, tuple):
-+            types.update(flatten(thing))
-+        else:
-+            types.add(thing)
-+    return tuple(types)
-+
-+
-+def ensure_list(thing):
-+    """
-+    Wrap ``thing`` in a list if it's a single str.
-+
-+    Otherwise, return it unchanged.
-+    """
-+
-+    if isinstance(thing, str_types):
-+        return [thing]
-+    return thing
-+
-+
-+def equal(one, two):
-+    """
-+    Check if two things are equal, but evade booleans and ints being equal.
-+    """
-+    return unbool(one) == unbool(two)
-+
-+
-+def unbool(element, true=object(), false=object()):
-+    """
-+    A hack to make True and 1 and False and 0 unique for ``uniq``.
-+    """
-+
-+    if element is True:
-+        return true
-+    elif element is False:
-+        return false
-+    return element
-+
-+
-+def uniq(container):
-+    """
-+    Check if all of a container's elements are unique.
-+
-+    Successively tries first to rely on the elements being hashable, then
-+    falls back on them being sortable, and finally falls back on brute
-+    force.
-+    """
-+
-+    try:
-+        return len(set(unbool(i) for i in container)) == len(container)
-+    except TypeError:
-+        try:
-+            sort = sorted(unbool(i) for i in container)
-+            sliced = itertools.islice(sort, 1, None)
-+            for i, j in zip(sort, sliced):
-+                if i == j:
-+                    return False
-+        except (NotImplementedError, TypeError):
-+            seen = []
-+            for e in container:
-+                e = unbool(e)
-+                if e in seen:
-+                    return False
-+                seen.append(e)
-+    return True
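-+
-+
-+# Behaviour sketch: ``unbool`` keeps True/1 (and False/0) distinct, so
-+# lists mixing them still count as unique.
-+if __name__ == "__main__":
-+    assert uniq([True, 1])
-+    assert not uniq([1, 1.0])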
-diff --git a/third_party/python/jsonschema/jsonschema/_validators.py b/third_party/python/jsonschema/jsonschema/_validators.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/_validators.py
-@@ -0,0 +1,373 @@
-+import re
-+
-+from jsonschema._utils import (
-+    ensure_list,
-+    equal,
-+    extras_msg,
-+    find_additional_properties,
-+    types_msg,
-+    unbool,
-+    uniq,
-+)
-+from jsonschema.exceptions import FormatError, ValidationError
-+from jsonschema.compat import iteritems
-+
-+
-+def patternProperties(validator, patternProperties, instance, schema):
-+    if not validator.is_type(instance, "object"):
-+        return
-+
-+    for pattern, subschema in iteritems(patternProperties):
-+        for k, v in iteritems(instance):
-+            if re.search(pattern, k):
-+                for error in validator.descend(
-+                    v, subschema, path=k, schema_path=pattern,
-+                ):
-+                    yield error
-+
-+
-+def propertyNames(validator, propertyNames, instance, schema):
-+    if not validator.is_type(instance, "object"):
-+        return
-+
-+    for property in instance:
-+        for error in validator.descend(
-+            instance=property,
-+            schema=propertyNames,
-+        ):
-+            yield error
-+
-+
-+def additionalProperties(validator, aP, instance, schema):
-+    if not validator.is_type(instance, "object"):
-+        return
-+
-+    extras = set(find_additional_properties(instance, schema))
-+
-+    if validator.is_type(aP, "object"):
-+        for extra in extras:
-+            for error in validator.descend(instance[extra], aP, path=extra):
-+                yield error
-+    elif not aP and extras:
-+        if "patternProperties" in schema:
-+            patterns = sorted(schema["patternProperties"])
-+            if len(extras) == 1:
-+                verb = "does"
-+            else:
-+                verb = "do"
-+            error = "%s %s not match any of the regexes: %s" % (
-+                ", ".join(map(repr, sorted(extras))),
-+                verb,
-+                ", ".join(map(repr, patterns)),
-+            )
-+            yield ValidationError(error)
-+        else:
-+            error = "Additional properties are not allowed (%s %s unexpected)"
-+            yield ValidationError(error % extras_msg(extras))
-+
-+
-+def items(validator, items, instance, schema):
-+    if not validator.is_type(instance, "array"):
-+        return
-+
-+    if validator.is_type(items, "array"):
-+        for (index, item), subschema in zip(enumerate(instance), items):
-+            for error in validator.descend(
-+                item, subschema, path=index, schema_path=index,
-+            ):
-+                yield error
-+    else:
-+        for index, item in enumerate(instance):
-+            for error in validator.descend(item, items, path=index):
-+                yield error
-+
-+
-+def additionalItems(validator, aI, instance, schema):
-+    if (
-+        not validator.is_type(instance, "array") or
-+        validator.is_type(schema.get("items", {}), "object")
-+    ):
-+        return
-+
-+    len_items = len(schema.get("items", []))
-+    if validator.is_type(aI, "object"):
-+        for index, item in enumerate(instance[len_items:], start=len_items):
-+            for error in validator.descend(item, aI, path=index):
-+                yield error
-+    elif not aI and len(instance) > len(schema.get("items", [])):
-+        error = "Additional items are not allowed (%s %s unexpected)"
-+        yield ValidationError(
-+            error %
-+            extras_msg(instance[len(schema.get("items", [])):])
-+        )
-+
-+
-+def const(validator, const, instance, schema):
-+    if not equal(instance, const):
-+        yield ValidationError("%r was expected" % (const,))
-+
-+
-+def contains(validator, contains, instance, schema):
-+    if not validator.is_type(instance, "array"):
-+        return
-+
-+    if not any(validator.is_valid(element, contains) for element in instance):
-+        yield ValidationError(
-+            "None of %r are valid under the given schema" % (instance,)
-+        )
-+
-+
-+def exclusiveMinimum(validator, minimum, instance, schema):
-+    if not validator.is_type(instance, "number"):
-+        return
-+
-+    if instance <= minimum:
-+        yield ValidationError(
-+            "%r is less than or equal to the minimum of %r" % (
-+                instance, minimum,
-+            ),
-+        )
-+
-+
-+def exclusiveMaximum(validator, maximum, instance, schema):
-+    if not validator.is_type(instance, "number"):
-+        return
-+
-+    if instance >= maximum:
-+        yield ValidationError(
-+            "%r is greater than or equal to the maximum of %r" % (
-+                instance, maximum,
-+            ),
-+        )
-+
-+
-+def minimum(validator, minimum, instance, schema):
-+    if not validator.is_type(instance, "number"):
-+        return
-+
-+    if instance < minimum:
-+        yield ValidationError(
-+            "%r is less than the minimum of %r" % (instance, minimum)
-+        )
-+
-+
-+def maximum(validator, maximum, instance, schema):
-+    if not validator.is_type(instance, "number"):
-+        return
-+
-+    if instance > maximum:
-+        yield ValidationError(
-+            "%r is greater than the maximum of %r" % (instance, maximum)
-+        )
-+
-+
-+def multipleOf(validator, dB, instance, schema):
-+    if not validator.is_type(instance, "number"):
-+        return
-+
-+    if isinstance(dB, float):
-+        quotient = instance / dB
-+        failed = int(quotient) != quotient
-+    else:
-+        failed = instance % dB
-+
-+    if failed:
-+        yield ValidationError("%r is not a multiple of %r" % (instance, dB))
-+
-+
-+def minItems(validator, mI, instance, schema):
-+    if validator.is_type(instance, "array") and len(instance) < mI:
-+        yield ValidationError("%r is too short" % (instance,))
-+
-+
-+def maxItems(validator, mI, instance, schema):
-+    if validator.is_type(instance, "array") and len(instance) > mI:
-+        yield ValidationError("%r is too long" % (instance,))
-+
-+
-+def uniqueItems(validator, uI, instance, schema):
-+    if (
-+        uI and
-+        validator.is_type(instance, "array") and
-+        not uniq(instance)
-+    ):
-+        yield ValidationError("%r has non-unique elements" % (instance,))
-+
-+
-+def pattern(validator, patrn, instance, schema):
-+    if (
-+        validator.is_type(instance, "string") and
-+        not re.search(patrn, instance)
-+    ):
-+        yield ValidationError("%r does not match %r" % (instance, patrn))
-+
-+
-+def format(validator, format, instance, schema):
-+    if validator.format_checker is not None:
-+        try:
-+            validator.format_checker.check(instance, format)
-+        except FormatError as error:
-+            yield ValidationError(error.message, cause=error.cause)
-+
-+
-+def minLength(validator, mL, instance, schema):
-+    if validator.is_type(instance, "string") and len(instance) < mL:
-+        yield ValidationError("%r is too short" % (instance,))
-+
-+
-+def maxLength(validator, mL, instance, schema):
-+    if validator.is_type(instance, "string") and len(instance) > mL:
-+        yield ValidationError("%r is too long" % (instance,))
-+
-+
-+def dependencies(validator, dependencies, instance, schema):
-+    if not validator.is_type(instance, "object"):
-+        return
-+
-+    for property, dependency in iteritems(dependencies):
-+        if property not in instance:
-+            continue
-+
-+        if validator.is_type(dependency, "array"):
-+            for each in dependency:
-+                if each not in instance:
-+                    message = "%r is a dependency of %r"
-+                    yield ValidationError(message % (each, property))
-+        else:
-+            for error in validator.descend(
-+                instance, dependency, schema_path=property,
-+            ):
-+                yield error
-+
-+
-+def enum(validator, enums, instance, schema):
-+    if instance == 0 or instance == 1:
-+        unbooled = unbool(instance)
-+        if all(unbooled != unbool(each) for each in enums):
-+            yield ValidationError("%r is not one of %r" % (instance, enums))
-+    elif instance not in enums:
-+        yield ValidationError("%r is not one of %r" % (instance, enums))
-+
-+
-+def ref(validator, ref, instance, schema):
-+    resolve = getattr(validator.resolver, "resolve", None)
-+    if resolve is None:
-+        with validator.resolver.resolving(ref) as resolved:
-+            for error in validator.descend(instance, resolved):
-+                yield error
-+    else:
-+        scope, resolved = validator.resolver.resolve(ref)
-+        validator.resolver.push_scope(scope)
-+
-+        try:
-+            for error in validator.descend(instance, resolved):
-+                yield error
-+        finally:
-+            validator.resolver.pop_scope()
-+
-+
-+def type(validator, types, instance, schema):
-+    types = ensure_list(types)
-+
-+    if not any(validator.is_type(instance, type) for type in types):
-+        yield ValidationError(types_msg(instance, types))
-+
-+
-+def properties(validator, properties, instance, schema):
-+    if not validator.is_type(instance, "object"):
-+        return
-+
-+    for property, subschema in iteritems(properties):
-+        if property in instance:
-+            for error in validator.descend(
-+                instance[property],
-+                subschema,
-+                path=property,
-+                schema_path=property,
-+            ):
-+                yield error
-+
-+
-+def required(validator, required, instance, schema):
-+    if not validator.is_type(instance, "object"):
-+        return
-+    for property in required:
-+        if property not in instance:
-+            yield ValidationError("%r is a required property" % property)
-+
-+
-+def minProperties(validator, mP, instance, schema):
-+    if validator.is_type(instance, "object") and len(instance) < mP:
-+        yield ValidationError(
-+            "%r does not have enough properties" % (instance,)
-+        )
-+
-+
-+def maxProperties(validator, mP, instance, schema):
-+    if not validator.is_type(instance, "object"):
-+        return
-+    if len(instance) > mP:
-+        yield ValidationError("%r has too many properties" % (instance,))
-+
-+
-+def allOf(validator, allOf, instance, schema):
-+    for index, subschema in enumerate(allOf):
-+        for error in validator.descend(instance, subschema, schema_path=index):
-+            yield error
-+
-+
-+def anyOf(validator, anyOf, instance, schema):
-+    all_errors = []
-+    for index, subschema in enumerate(anyOf):
-+        errs = list(validator.descend(instance, subschema, schema_path=index))
-+        if not errs:
-+            break
-+        all_errors.extend(errs)
-+    else:
-+        yield ValidationError(
-+            "%r is not valid under any of the given schemas" % (instance,),
-+            context=all_errors,
-+        )
-+
-+
-+def oneOf(validator, oneOf, instance, schema):
-+    subschemas = enumerate(oneOf)
-+    all_errors = []
-+    for index, subschema in subschemas:
-+        errs = list(validator.descend(instance, subschema, schema_path=index))
-+        if not errs:
-+            first_valid = subschema
-+            break
-+        all_errors.extend(errs)
-+    else:
-+        yield ValidationError(
-+            "%r is not valid under any of the given schemas" % (instance,),
-+            context=all_errors,
-+        )
-+
-+    more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
-+    if more_valid:
-+        more_valid.append(first_valid)
-+        reprs = ", ".join(repr(schema) for schema in more_valid)
-+        yield ValidationError(
-+            "%r is valid under each of %s" % (instance, reprs)
-+        )
-+
-+
-+def not_(validator, not_schema, instance, schema):
-+    if validator.is_valid(instance, not_schema):
-+        yield ValidationError(
-+            "%r is not allowed for %r" % (not_schema, instance)
-+        )
-+
-+
-+def if_(validator, if_schema, instance, schema):
-+    if validator.is_valid(instance, if_schema):
-+        if u"then" in schema:
-+            then = schema[u"then"]
-+            for error in validator.descend(instance, then, schema_path="then"):
-+                yield error
-+    elif u"else" in schema:
-+        else_ = schema[u"else"]
-+        for error in validator.descend(instance, else_, schema_path="else"):
-+            yield error
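-+
-+
-+# Note on the shape of these callables: each validator above is a generator
-+# taking (validator, property_value, instance, schema) and yielding
-+# ValidationError objects; the validator machinery drives them, e.g.:
-+#
-+#     for error in Draft7Validator(schema).iter_errors(instance):
-+#         print(error.message)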
-diff --git a/third_party/python/jsonschema/jsonschema/benchmarks/__init__.py b/third_party/python/jsonschema/jsonschema/benchmarks/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/benchmarks/__init__.py
-@@ -0,0 +1,5 @@
-+"""
-+Benchmarks for validation.
-+
-+This package is *not* public API.
-+"""
-diff --git a/third_party/python/jsonschema/jsonschema/benchmarks/issue232.py b/third_party/python/jsonschema/jsonschema/benchmarks/issue232.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/benchmarks/issue232.py
-@@ -0,0 +1,26 @@
-+#!/usr/bin/env python
-+"""
-+A performance benchmark using the example from issue #232.
-+
-+See https://github.com/Julian/jsonschema/pull/232.
-+"""
-+from twisted.python.filepath import FilePath
-+from pyperf import Runner
-+from pyrsistent import m
-+
-+from jsonschema.tests._suite import Version
-+import jsonschema
-+
-+
-+issue232 = Version(
-+    path=FilePath(__file__).sibling("issue232"),
-+    remotes=m(),
-+    name="issue232",
-+)
-+
-+
-+if __name__ == "__main__":
-+    issue232.benchmark(
-+        runner=Runner(),
-+        Validator=jsonschema.Draft4Validator,
-+    )
-diff --git a/third_party/python/jsonschema/jsonschema/benchmarks/json_schema_test_suite.py b/third_party/python/jsonschema/jsonschema/benchmarks/json_schema_test_suite.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/benchmarks/json_schema_test_suite.py
-@@ -0,0 +1,14 @@
-+#!/usr/bin/env python
-+"""
-+A performance benchmark using the official test suite.
-+
-+This benchmarks jsonschema using every valid example in the
-+JSON-Schema-Test-Suite. It will take some time to complete.
-+"""
-+from pyperf import Runner
-+
-+from jsonschema.tests._suite import Suite
-+
-+
-+if __name__ == "__main__":
-+    Suite().benchmark(runner=Runner())
-diff --git a/third_party/python/jsonschema/jsonschema/cli.py b/third_party/python/jsonschema/jsonschema/cli.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/cli.py
-@@ -0,0 +1,90 @@
-+"""
-+The ``jsonschema`` command line.
-+"""
-+from __future__ import absolute_import
-+import argparse
-+import json
-+import sys
-+
-+from jsonschema import __version__
-+from jsonschema._reflect import namedAny
-+from jsonschema.validators import validator_for
-+
-+
-+def _namedAnyWithDefault(name):
-+    if "." not in name:
-+        name = "jsonschema." + name
-+    return namedAny(name)
-+
-+
-+def _json_file(path):
-+    with open(path) as file:
-+        return json.load(file)
-+
-+
-+parser = argparse.ArgumentParser(
-+    description="JSON Schema Validation CLI",
-+)
-+parser.add_argument(
-+    "-i", "--instance",
-+    action="append",
-+    dest="instances",
-+    type=_json_file,
-+    help=(
-+        "a path to a JSON instance (i.e. filename.json) "
-+        "to validate (may be specified multiple times)"
-+    ),
-+)
-+parser.add_argument(
-+    "-F", "--error-format",
-+    default="{error.instance}: {error.message}\n",
-+    help=(
-+        "the format to use for each error output message, specified in "
-+        "a form suitable for passing to str.format, which will be called "
-+        "with 'error' for each error"
-+    ),
-+)
-+parser.add_argument(
-+    "-V", "--validator",
-+    type=_namedAnyWithDefault,
-+    help=(
-+        "the fully qualified object name of a validator to use, or, for "
-+        "validators that are registered with jsonschema, simply the name "
-+        "of the class."
-+    ),
-+)
-+parser.add_argument(
-+    "--version",
-+    action="version",
-+    version=__version__,
-+)
-+parser.add_argument(
-+    "schema",
-+    help="the JSON Schema to validate with (i.e. schema.json)",
-+    type=_json_file,
-+)
-+
-+
-+def parse_args(args):
-+    arguments = vars(parser.parse_args(args=args or ["--help"]))
-+    if arguments["validator"] is None:
-+        arguments["validator"] = validator_for(arguments["schema"])
-+    return arguments
-+
-+
-+def main(args=sys.argv[1:]):
-+    sys.exit(run(arguments=parse_args(args=args)))
-+
-+
-+def run(arguments, stdout=sys.stdout, stderr=sys.stderr):
-+    error_format = arguments["error_format"]
-+    validator = arguments["validator"](schema=arguments["schema"])
-+
-+    validator.check_schema(arguments["schema"])
-+
-+    errored = False
-+    for instance in arguments["instances"] or ():
-+        for error in validator.iter_errors(instance):
-+            stderr.write(error_format.format(error=error))
-+            errored = True
-+    return errored
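-+
-+
-+# Usage sketch (file names are illustrative); equivalent to the shell
-+# invocation ``jsonschema -i instance.json schema.json``:
-+#
-+#     exit_code = run(arguments=parse_args(["-i", "instance.json",
-+#                                           "schema.json"]))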
-diff --git a/third_party/python/jsonschema/jsonschema/compat.py b/third_party/python/jsonschema/jsonschema/compat.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/compat.py
-@@ -0,0 +1,55 @@
-+"""
-+Python 2/3 compatibility helpers.
-+
-+Note: This module is *not* public API.
-+"""
-+import contextlib
-+import operator
-+import sys
-+
-+
-+try:
-+    from collections.abc import MutableMapping, Sequence  # noqa
-+except ImportError:
-+    from collections import MutableMapping, Sequence  # noqa
-+
-+PY3 = sys.version_info[0] >= 3
-+
-+if PY3:
-+    zip = zip
-+    from functools import lru_cache
-+    from io import StringIO as NativeIO
-+    from urllib.parse import (
-+        unquote, urljoin, urlunsplit, SplitResult, urlsplit
-+    )
-+    from urllib.request import pathname2url, urlopen
-+    str_types = str,
-+    int_types = int,
-+    iteritems = operator.methodcaller("items")
-+else:
-+    from itertools import izip as zip  # noqa
-+    from io import BytesIO as NativeIO
-+    from urlparse import urljoin, urlunsplit, SplitResult, urlsplit
-+    from urllib import pathname2url, unquote  # noqa
-+    import urllib2  # noqa
-+    def urlopen(*args, **kwargs):
-+        return contextlib.closing(urllib2.urlopen(*args, **kwargs))
-+
-+    str_types = basestring
-+    int_types = int, long
-+    iteritems = operator.methodcaller("iteritems")
-+
-+    from functools32 import lru_cache
-+
-+
-+def urldefrag(url):
-+    if "#" in url:
-+        s, n, p, q, frag = urlsplit(url)
-+        defrag = urlunsplit((s, n, p, q, ""))
-+    else:
-+        defrag = url
-+        frag = ""
-+    return defrag, frag
-+
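-+# Behaviour sketch for ``urldefrag``:
-+#
-+#     urldefrag("http://example.com/a#b")  # -> ("http://example.com/a", "b")
-+#     urldefrag("http://example.com/a")    # -> ("http://example.com/a", "")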
-+
-+# flake8: noqa
-diff --git a/third_party/python/jsonschema/jsonschema/exceptions.py b/third_party/python/jsonschema/jsonschema/exceptions.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/exceptions.py
-@@ -0,0 +1,374 @@
-+"""
-+Validation errors, and some surrounding helpers.
-+"""
-+from collections import defaultdict, deque
-+import itertools
-+import pprint
-+import textwrap
-+
-+import attr
-+
-+from jsonschema import _utils
-+from jsonschema.compat import PY3, iteritems
-+
-+
-+WEAK_MATCHES = frozenset(["anyOf", "oneOf"])
-+STRONG_MATCHES = frozenset()
-+
-+_unset = _utils.Unset()
-+
-+
-+class _Error(Exception):
-+    def __init__(
-+        self,
-+        message,
-+        validator=_unset,
-+        path=(),
-+        cause=None,
-+        context=(),
-+        validator_value=_unset,
-+        instance=_unset,
-+        schema=_unset,
-+        schema_path=(),
-+        parent=None,
-+    ):
-+        super(_Error, self).__init__(
-+            message,
-+            validator,
-+            path,
-+            cause,
-+            context,
-+            validator_value,
-+            instance,
-+            schema,
-+            schema_path,
-+            parent,
-+        )
-+        self.message = message
-+        self.path = self.relative_path = deque(path)
-+        self.schema_path = self.relative_schema_path = deque(schema_path)
-+        self.context = list(context)
-+        self.cause = self.__cause__ = cause
-+        self.validator = validator
-+        self.validator_value = validator_value
-+        self.instance = instance
-+        self.schema = schema
-+        self.parent = parent
-+
-+        for error in context:
-+            error.parent = self
-+
-+    def __repr__(self):
-+        return "<%s: %r>" % (self.__class__.__name__, self.message)
-+
-+    def __unicode__(self):
-+        essential_for_verbose = (
-+            self.validator, self.validator_value, self.instance, self.schema,
-+        )
-+        if any(m is _unset for m in essential_for_verbose):
-+            return self.message
-+
-+        pschema = pprint.pformat(self.schema, width=72)
-+        pinstance = pprint.pformat(self.instance, width=72)
-+        return self.message + textwrap.dedent("""
-+
-+            Failed validating %r in %s%s:
-+            %s
-+
-+            On %s%s:
-+            %s
-+            """.rstrip()
-+        ) % (
-+            self.validator,
-+            self._word_for_schema_in_error_message,
-+            _utils.format_as_index(list(self.relative_schema_path)[:-1]),
-+            _utils.indent(pschema),
-+            self._word_for_instance_in_error_message,
-+            _utils.format_as_index(self.relative_path),
-+            _utils.indent(pinstance),
-+        )
-+
-+    if PY3:
-+        __str__ = __unicode__
-+    else:
-+        def __str__(self):
-+            return unicode(self).encode("utf-8")
-+
-+    @classmethod
-+    def create_from(cls, other):
-+        return cls(**other._contents())
-+
-+    @property
-+    def absolute_path(self):
-+        parent = self.parent
-+        if parent is None:
-+            return self.relative_path
-+
-+        path = deque(self.relative_path)
-+        path.extendleft(reversed(parent.absolute_path))
-+        return path
-+
-+    @property
-+    def absolute_schema_path(self):
-+        parent = self.parent
-+        if parent is None:
-+            return self.relative_schema_path
-+
-+        path = deque(self.relative_schema_path)
-+        path.extendleft(reversed(parent.absolute_schema_path))
-+        return path
-+
-+    def _set(self, **kwargs):
-+        for k, v in iteritems(kwargs):
-+            if getattr(self, k) is _unset:
-+                setattr(self, k, v)
-+
-+    def _contents(self):
-+        attrs = (
-+            "message", "cause", "context", "validator", "validator_value",
-+            "path", "schema_path", "instance", "schema", "parent",
-+        )
-+        return dict((attr, getattr(self, attr)) for attr in attrs)
-+
-+
-+class ValidationError(_Error):
-+    """
-+    An instance was invalid under a provided schema.
-+    """
-+
-+    _word_for_schema_in_error_message = "schema"
-+    _word_for_instance_in_error_message = "instance"
-+
-+
-+class SchemaError(_Error):
-+    """
-+    A schema was invalid under its corresponding metaschema.
-+    """
-+
-+    _word_for_schema_in_error_message = "metaschema"
-+    _word_for_instance_in_error_message = "schema"
-+
-+
-+@attr.s(hash=True)
-+class RefResolutionError(Exception):
-+    """
-+    A ref could not be resolved.
-+    """
-+
-+    _cause = attr.ib()
-+
-+    def __str__(self):
-+        return str(self._cause)
-+
-+
-+class UndefinedTypeCheck(Exception):
-+    """
-+    A type checker was asked to check a type it did not have registered.
-+    """
-+
-+    def __init__(self, type):
-+        self.type = type
-+
-+    def __unicode__(self):
-+        return "Type %r is unknown to this type checker" % self.type
-+
-+    if PY3:
-+        __str__ = __unicode__
-+    else:
-+        def __str__(self):
-+            return unicode(self).encode("utf-8")
-+
-+
-+class UnknownType(Exception):
-+    """
-+    A validator was asked to validate an instance against an unknown type.
-+    """
-+
-+    def __init__(self, type, instance, schema):
-+        self.type = type
-+        self.instance = instance
-+        self.schema = schema
-+
-+    def __unicode__(self):
-+        pschema = pprint.pformat(self.schema, width=72)
-+        pinstance = pprint.pformat(self.instance, width=72)
-+        return textwrap.dedent("""
-+            Unknown type %r for validator with schema:
-+            %s
-+
-+            While checking instance:
-+            %s
-+            """.rstrip()
-+        ) % (self.type, _utils.indent(pschema), _utils.indent(pinstance))
-+
-+    if PY3:
-+        __str__ = __unicode__
-+    else:
-+        def __str__(self):
-+            return unicode(self).encode("utf-8")
-+
-+
-+class FormatError(Exception):
-+    """
-+    Validating a format failed.
-+    """
-+
-+    def __init__(self, message, cause=None):
-+        super(FormatError, self).__init__(message, cause)
-+        self.message = message
-+        self.cause = self.__cause__ = cause
-+
-+    def __unicode__(self):
-+        return self.message
-+
-+    if PY3:
-+        __str__ = __unicode__
-+    else:
-+        def __str__(self):
-+            return self.message.encode("utf-8")
-+
-+
-+class ErrorTree(object):
-+    """
-+    ErrorTrees make it easier to check which validations failed.
-+    """
-+
-+    _instance = _unset
-+
-+    def __init__(self, errors=()):
-+        self.errors = {}
-+        self._contents = defaultdict(self.__class__)
-+
-+        for error in errors:
-+            container = self
-+            for element in error.path:
-+                container = container[element]
-+            container.errors[error.validator] = error
-+
-+            container._instance = error.instance
-+
-+    def __contains__(self, index):
-+        """
-+        Check whether ``instance[index]`` has any errors.
-+        """
-+
-+        return index in self._contents
-+
-+    def __getitem__(self, index):
-+        """
-+        Retrieve the child tree one level down at the given ``index``.
-+
-+        If the index is not in the instance that this tree corresponds to and
-+        is not known by this tree, whatever error would be raised by
-+        ``instance.__getitem__`` will be propagated (usually this is some
-+        subclass of `exceptions.LookupError`).
-+        """
-+
-+        if self._instance is not _unset and index not in self:
-+            self._instance[index]
-+        return self._contents[index]
-+
-+    def __setitem__(self, index, value):
-+        """
-+        Add an error to the tree at the given ``index``.
-+        """
-+        self._contents[index] = value
-+
-+    def __iter__(self):
-+        """
-+        Iterate (non-recursively) over the indices in the instance with errors.
-+        """
-+
-+        return iter(self._contents)
-+
-+    def __len__(self):
-+        """
-+        Return the `total_errors`.
-+        """
-+        return self.total_errors
-+
-+    def __repr__(self):
-+        return "<%s (%s total errors)>" % (self.__class__.__name__, len(self))
-+
-+    @property
-+    def total_errors(self):
-+        """
-+        The total number of errors in the entire tree, including children.
-+        """
-+
-+        child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
-+        return len(self.errors) + child_errors
-+
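-+
-+# Usage sketch (assuming the public validator API elsewhere in the
-+# package; schema and instance are illustrative):
-+#
-+#     v = Draft7Validator({"properties": {"x": {"type": "integer"}}})
-+#     tree = ErrorTree(v.iter_errors({"x": "not-an-int"}))
-+#     "type" in tree["x"].errors   # -> True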
-+
-+def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
-+    """
-+    Create a key function that can be used to sort errors by relevance.
-+
-+    Arguments:
-+        weak (set):
-+            a collection of validator names to consider to be "weak".
-+            If there are two errors at the same level of the instance
-+            and one is in the set of weak validator names, the other
-+            error will take priority. By default, :validator:`anyOf` and
-+            :validator:`oneOf` are considered weak validators and will
-+            be superseded by other same-level validation errors.
-+
-+        strong (set):
-+            a collection of validator names to consider to be "strong"
-+    """
-+    def relevance(error):
-+        validator = error.validator
-+        return -len(error.path), validator not in weak, validator in strong
-+    return relevance
-+
-+
-+relevance = by_relevance()
-+
-+
-+def best_match(errors, key=relevance):
-+    """
-+    Try to find an error that appears to be the best match among given errors.
-+
-+    In general, errors that are higher up in the instance (i.e. for which
-+    `ValidationError.path` is shorter) are considered better matches,
-+    since they indicate "more" is wrong with the instance.
-+
-+    If the resulting match is either :validator:`oneOf` or :validator:`anyOf`,
-+    the *opposite* assumption is made -- i.e. the deepest error is picked,
-+    since these validators only need to match once, and any other errors may
-+    not be relevant.
-+
-+    Arguments:
-+        errors (collections.Iterable):
-+
-+            the errors to select from. Do not provide a mixture of
-+            errors from different validation attempts (i.e. from
-+            different instances or schemas), since it won't produce
-+            sensible output.
-+
-+        key (collections.Callable):
-+
-+            the key to use when sorting errors. See `relevance` and
-+            transitively `by_relevance` for more details (the default is
-+            to sort with the defaults of that function). Changing the
-+            default is only useful if you want to change the function
-+            that rates errors but still want the error context descent
-+            done by this function.
-+
-+    Returns:
-+        the best matching error, or ``None`` if the iterable was empty
-+
-+    .. note::
-+
-+        This function is a heuristic. Its return value may change for a given
-+        set of inputs from version to version if better heuristics are added.
-+    """
-+    errors = iter(errors)
-+    best = next(errors, None)
-+    if best is None:
-+        return
-+    best = max(itertools.chain([best], errors), key=key)
-+
-+    while best.context:
-+        best = min(best.context, key=key)
-+    return best
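-+
-+
-+# Usage sketch (assuming the public validator API):
-+#
-+#     from jsonschema import Draft7Validator
-+#     from jsonschema.exceptions import best_match
-+#
-+#     error = best_match(Draft7Validator(schema).iter_errors(instance))
-+#     if error is not None:
-+#         raise error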
-diff --git a/third_party/python/jsonschema/jsonschema/schemas/draft3.json b/third_party/python/jsonschema/jsonschema/schemas/draft3.json
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/schemas/draft3.json
-@@ -0,0 +1,199 @@
-+{
-+    "$schema": "http://json-schema.org/draft-03/schema#",
-+    "dependencies": {
-+        "exclusiveMaximum": "maximum",
-+        "exclusiveMinimum": "minimum"
-+    },
-+    "id": "http://json-schema.org/draft-03/schema#",
-+    "properties": {
-+        "$ref": {
-+            "format": "uri",
-+            "type": "string"
-+        },
-+        "$schema": {
-+            "format": "uri",
-+            "type": "string"
-+        },
-+        "additionalItems": {
-+            "default": {},
-+            "type": [
-+                {
-+                    "$ref": "#"
-+                },
-+                "boolean"
-+            ]
-+        },
-+        "additionalProperties": {
-+            "default": {},
-+            "type": [
-+                {
-+                    "$ref": "#"
-+                },
-+                "boolean"
-+            ]
-+        },
-+        "default": {
-+            "type": "any"
-+        },
-+        "dependencies": {
-+            "additionalProperties": {
-+                "items": {
-+                    "type": "string"
-+                },
-+                "type": [
-+                    "string",
-+                    "array",
-+                    {
-+                        "$ref": "#"
-+                    }
-+                ]
-+            },
-+            "default": {},
-+            "type": [
-+                "string",
-+                "array",
-+                "object"
-+            ]
-+        },
-+        "description": {
-+            "type": "string"
-+        },
-+        "disallow": {
-+            "items": {
-+                "type": [
-+                    "string",
-+                    {
-+                        "$ref": "#"
-+                    }
-+                ]
-+            },
-+            "type": [
-+                "string",
-+                "array"
-+            ],
-+            "uniqueItems": true
-+        },
-+        "divisibleBy": {
-+            "default": 1,
-+            "exclusiveMinimum": true,
-+            "minimum": 0,
-+            "type": "number"
-+        },
-+        "enum": {
-+            "type": "array"
-+        },
-+        "exclusiveMaximum": {
-+            "default": false,
-+            "type": "boolean"
-+        },
-+        "exclusiveMinimum": {
-+            "default": false,
-+            "type": "boolean"
-+        },
-+        "extends": {
-+            "default": {},
-+            "items": {
-+                "$ref": "#"
-+            },
-+            "type": [
-+                {
-+                    "$ref": "#"
-+                },
-+                "array"
-+            ]
-+        },
-+        "format": {
-+            "type": "string"
-+        },
-+        "id": {
-+            "format": "uri",
-+            "type": "string"
-+        },
-+        "items": {
-+            "default": {},
-+            "items": {
-+                "$ref": "#"
-+            },
-+            "type": [
-+                {
-+                    "$ref": "#"
-+                },
-+                "array"
-+            ]
-+        },
-+        "maxDecimal": {
-+            "minimum": 0,
-+            "type": "number"
-+        },
-+        "maxItems": {
-+            "minimum": 0,
-+            "type": "integer"
-+        },
-+        "maxLength": {
-+            "type": "integer"
-+        },
-+        "maximum": {
-+            "type": "number"
-+        },
-+        "minItems": {
-+            "default": 0,
-+            "minimum": 0,
-+            "type": "integer"
-+        },
-+        "minLength": {
-+            "default": 0,
-+            "minimum": 0,
-+            "type": "integer"
-+        },
-+        "minimum": {
-+            "type": "number"
-+        },
-+        "pattern": {
-+            "format": "regex",
-+            "type": "string"
-+        },
-+        "patternProperties": {
-+            "additionalProperties": {
-+                "$ref": "#"
-+            },
-+            "default": {},
-+            "type": "object"
-+        },
-+        "properties": {
-+            "additionalProperties": {
-+                "$ref": "#",
-+                "type": "object"
-+            },
-+            "default": {},
-+            "type": "object"
-+        },
-+        "required": {
-+            "default": false,
-+            "type": "boolean"
-+        },
-+        "title": {
-+            "type": "string"
-+        },
-+        "type": {
-+            "default": "any",
-+            "items": {
-+                "type": [
-+                    "string",
-+                    {
-+                        "$ref": "#"
-+                    }
-+                ]
-+            },
-+            "type": [
-+                "string",
-+                "array"
-+            ],
-+            "uniqueItems": true
-+        },
-+        "uniqueItems": {
-+            "default": false,
-+            "type": "boolean"
-+        }
-+    },
-+    "type": "object"
-+}
-diff --git a/third_party/python/jsonschema/jsonschema/schemas/draft4.json b/third_party/python/jsonschema/jsonschema/schemas/draft4.json
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/schemas/draft4.json
-@@ -0,0 +1,222 @@
-+{
-+    "$schema": "http://json-schema.org/draft-04/schema#",
-+    "default": {},
-+    "definitions": {
-+        "positiveInteger": {
-+            "minimum": 0,
-+            "type": "integer"
-+        },
-+        "positiveIntegerDefault0": {
-+            "allOf": [
-+                {
-+                    "$ref": "#/definitions/positiveInteger"
-+                },
-+                {
-+                    "default": 0
-+                }
-+            ]
-+        },
-+        "schemaArray": {
-+            "items": {
-+                "$ref": "#"
-+            },
-+            "minItems": 1,
-+            "type": "array"
-+        },
-+        "simpleTypes": {
-+            "enum": [
-+                "array",
-+                "boolean",
-+                "integer",
-+                "null",
-+                "number",
-+                "object",
-+                "string"
-+            ]
-+        },
-+        "stringArray": {
-+            "items": {
-+                "type": "string"
-+            },
-+            "minItems": 1,
-+            "type": "array",
-+            "uniqueItems": true
-+        }
-+    },
-+    "dependencies": {
-+        "exclusiveMaximum": [
-+            "maximum"
-+        ],
-+        "exclusiveMinimum": [
-+            "minimum"
-+        ]
-+    },
-+    "description": "Core schema meta-schema",
-+    "id": "http://json-schema.org/draft-04/schema#",
-+    "properties": {
-+        "$schema": {
-+            "format": "uri",
-+            "type": "string"
-+        },
-+        "additionalItems": {
-+            "anyOf": [
-+                {
-+                    "type": "boolean"
-+                },
-+                {
-+                    "$ref": "#"
-+                }
-+            ],
-+            "default": {}
-+        },
-+        "additionalProperties": {
-+            "anyOf": [
-+                {
-+                    "type": "boolean"
-+                },
-+                {
-+                    "$ref": "#"
-+                }
-+            ],
-+            "default": {}
-+        },
-+        "allOf": {
-+            "$ref": "#/definitions/schemaArray"
-+        },
-+        "anyOf": {
-+            "$ref": "#/definitions/schemaArray"
-+        },
-+        "default": {},
-+        "definitions": {
-+            "additionalProperties": {
-+                "$ref": "#"
-+            },
-+            "default": {},
-+            "type": "object"
-+        },
-+        "dependencies": {
-+            "additionalProperties": {
-+                "anyOf": [
-+                    {
-+                        "$ref": "#"
-+                    },
-+                    {
-+                        "$ref": "#/definitions/stringArray"
-+                    }
-+                ]
-+            },
-+            "type": "object"
-+        },
-+        "description": {
-+            "type": "string"
-+        },
-+        "enum": {
-+            "type": "array"
-+        },
-+        "exclusiveMaximum": {
-+            "default": false,
-+            "type": "boolean"
-+        },
-+        "exclusiveMinimum": {
-+            "default": false,
-+            "type": "boolean"
-+        },
-+        "format": {
-+            "type": "string"
-+        },
-+        "id": {
-+            "format": "uri",
-+            "type": "string"
-+        },
-+        "items": {
-+            "anyOf": [
-+                {
-+                    "$ref": "#"
-+                },
-+                {
-+                    "$ref": "#/definitions/schemaArray"
-+                }
-+            ],
-+            "default": {}
-+        },
-+        "maxItems": {
-+            "$ref": "#/definitions/positiveInteger"
-+        },
-+        "maxLength": {
-+            "$ref": "#/definitions/positiveInteger"
-+        },
-+        "maxProperties": {
-+            "$ref": "#/definitions/positiveInteger"
-+        },
-+        "maximum": {
-+            "type": "number"
-+        },
-+        "minItems": {
-+            "$ref": "#/definitions/positiveIntegerDefault0"
-+        },
-+        "minLength": {
-+            "$ref": "#/definitions/positiveIntegerDefault0"
-+        },
-+        "minProperties": {
-+            "$ref": "#/definitions/positiveIntegerDefault0"
-+        },
-+        "minimum": {
-+            "type": "number"
-+        },
-+        "multipleOf": {
-+            "exclusiveMinimum": true,
-+            "minimum": 0,
-+            "type": "number"
-+        },
-+        "not": {
-+            "$ref": "#"
-+        },
-+        "oneOf": {
-+            "$ref": "#/definitions/schemaArray"
-+        },
-+        "pattern": {
-+            "format": "regex",
-+            "type": "string"
-+        },
-+        "patternProperties": {
-+            "additionalProperties": {
-+                "$ref": "#"
-+            },
-+            "default": {},
-+            "type": "object"
-+        },
-+        "properties": {
-+            "additionalProperties": {
-+                "$ref": "#"
-+            },
-+            "default": {},
-+            "type": "object"
-+        },
-+        "required": {
-+            "$ref": "#/definitions/stringArray"
-+        },
-+        "title": {
-+            "type": "string"
-+        },
-+        "type": {
-+            "anyOf": [
-+                {
-+                    "$ref": "#/definitions/simpleTypes"
-+                },
-+                {
-+                    "items": {
-+                        "$ref": "#/definitions/simpleTypes"
-+                    },
-+                    "minItems": 1,
-+                    "type": "array",
-+                    "uniqueItems": true
-+                }
-+            ]
-+        },
-+        "uniqueItems": {
-+            "default": false,
-+            "type": "boolean"
-+        }
-+    },
-+    "type": "object"
-+}
-diff --git a/third_party/python/jsonschema/jsonschema/schemas/draft6.json b/third_party/python/jsonschema/jsonschema/schemas/draft6.json
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/schemas/draft6.json
-@@ -0,0 +1,153 @@
-+{
-+    "$schema": "http://json-schema.org/draft-06/schema#",
-+    "$id": "http://json-schema.org/draft-06/schema#",
-+    "title": "Core schema meta-schema",
-+    "definitions": {
-+        "schemaArray": {
-+            "type": "array",
-+            "minItems": 1,
-+            "items": { "$ref": "#" }
-+        },
-+        "nonNegativeInteger": {
-+            "type": "integer",
-+            "minimum": 0
-+        },
-+        "nonNegativeIntegerDefault0": {
-+            "allOf": [
-+                { "$ref": "#/definitions/nonNegativeInteger" },
-+                { "default": 0 }
-+            ]
-+        },
-+        "simpleTypes": {
-+            "enum": [
-+                "array",
-+                "boolean",
-+                "integer",
-+                "null",
-+                "number",
-+                "object",
-+                "string"
-+            ]
-+        },
-+        "stringArray": {
-+            "type": "array",
-+            "items": { "type": "string" },
-+            "uniqueItems": true,
-+            "default": []
-+        }
-+    },
-+    "type": ["object", "boolean"],
-+    "properties": {
-+        "$id": {
-+            "type": "string",
-+            "format": "uri-reference"
-+        },
-+        "$schema": {
-+            "type": "string",
-+            "format": "uri"
-+        },
-+        "$ref": {
-+            "type": "string",
-+            "format": "uri-reference"
-+        },
-+        "title": {
-+            "type": "string"
-+        },
-+        "description": {
-+            "type": "string"
-+        },
-+        "default": {},
-+        "examples": {
-+            "type": "array",
-+            "items": {}
-+        },
-+        "multipleOf": {
-+            "type": "number",
-+            "exclusiveMinimum": 0
-+        },
-+        "maximum": {
-+            "type": "number"
-+        },
-+        "exclusiveMaximum": {
-+            "type": "number"
-+        },
-+        "minimum": {
-+            "type": "number"
-+        },
-+        "exclusiveMinimum": {
-+            "type": "number"
-+        },
-+        "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
-+        "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
-+        "pattern": {
-+            "type": "string",
-+            "format": "regex"
-+        },
-+        "additionalItems": { "$ref": "#" },
-+        "items": {
-+            "anyOf": [
-+                { "$ref": "#" },
-+                { "$ref": "#/definitions/schemaArray" }
-+            ],
-+            "default": {}
-+        },
-+        "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
-+        "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
-+        "uniqueItems": {
-+            "type": "boolean",
-+            "default": false
-+        },
-+        "contains": { "$ref": "#" },
-+        "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
-+        "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
-+        "required": { "$ref": "#/definitions/stringArray" },
-+        "additionalProperties": { "$ref": "#" },
-+        "definitions": {
-+            "type": "object",
-+            "additionalProperties": { "$ref": "#" },
-+            "default": {}
-+        },
-+        "properties": {
-+            "type": "object",
-+            "additionalProperties": { "$ref": "#" },
-+            "default": {}
-+        },
-+        "patternProperties": {
-+            "type": "object",
-+            "additionalProperties": { "$ref": "#" },
-+            "propertyNames": { "format": "regex" },
-+            "default": {}
-+        },
-+        "dependencies": {
-+            "type": "object",
-+            "additionalProperties": {
-+                "anyOf": [
-+                    { "$ref": "#" },
-+                    { "$ref": "#/definitions/stringArray" }
-+                ]
-+            }
-+        },
-+        "propertyNames": { "$ref": "#" },
-+        "const": {},
-+        "enum": {
-+            "type": "array"
-+        },
-+        "type": {
-+            "anyOf": [
-+                { "$ref": "#/definitions/simpleTypes" },
-+                {
-+                    "type": "array",
-+                    "items": { "$ref": "#/definitions/simpleTypes" },
-+                    "minItems": 1,
-+                    "uniqueItems": true
-+                }
-+            ]
-+        },
-+        "format": { "type": "string" },
-+        "allOf": { "$ref": "#/definitions/schemaArray" },
-+        "anyOf": { "$ref": "#/definitions/schemaArray" },
-+        "oneOf": { "$ref": "#/definitions/schemaArray" },
-+        "not": { "$ref": "#" }
-+    },
-+    "default": {}
-+}
-diff --git a/third_party/python/jsonschema/jsonschema/schemas/draft7.json b/third_party/python/jsonschema/jsonschema/schemas/draft7.json
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/schemas/draft7.json
-@@ -0,0 +1,166 @@
-+{
-+    "$schema": "http://json-schema.org/draft-07/schema#",
-+    "$id": "http://json-schema.org/draft-07/schema#",
-+    "title": "Core schema meta-schema",
-+    "definitions": {
-+        "schemaArray": {
-+            "type": "array",
-+            "minItems": 1,
-+            "items": { "$ref": "#" }
-+        },
-+        "nonNegativeInteger": {
-+            "type": "integer",
-+            "minimum": 0
-+        },
-+        "nonNegativeIntegerDefault0": {
-+            "allOf": [
-+                { "$ref": "#/definitions/nonNegativeInteger" },
-+                { "default": 0 }
-+            ]
-+        },
-+        "simpleTypes": {
-+            "enum": [
-+                "array",
-+                "boolean",
-+                "integer",
-+                "null",
-+                "number",
-+                "object",
-+                "string"
-+            ]
-+        },
-+        "stringArray": {
-+            "type": "array",
-+            "items": { "type": "string" },
-+            "uniqueItems": true,
-+            "default": []
-+        }
-+    },
-+    "type": ["object", "boolean"],
-+    "properties": {
-+        "$id": {
-+            "type": "string",
-+            "format": "uri-reference"
-+        },
-+        "$schema": {
-+            "type": "string",
-+            "format": "uri"
-+        },
-+        "$ref": {
-+            "type": "string",
-+            "format": "uri-reference"
-+        },
-+        "$comment": {
-+            "type": "string"
-+        },
-+        "title": {
-+            "type": "string"
-+        },
-+        "description": {
-+            "type": "string"
-+        },
-+        "default": true,
-+        "readOnly": {
-+            "type": "boolean",
-+            "default": false
-+        },
-+        "examples": {
-+            "type": "array",
-+            "items": true
-+        },
-+        "multipleOf": {
-+            "type": "number",
-+            "exclusiveMinimum": 0
-+        },
-+        "maximum": {
-+            "type": "number"
-+        },
-+        "exclusiveMaximum": {
-+            "type": "number"
-+        },
-+        "minimum": {
-+            "type": "number"
-+        },
-+        "exclusiveMinimum": {
-+            "type": "number"
-+        },
-+        "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
-+        "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
-+        "pattern": {
-+            "type": "string",
-+            "format": "regex"
-+        },
-+        "additionalItems": { "$ref": "#" },
-+        "items": {
-+            "anyOf": [
-+                { "$ref": "#" },
-+                { "$ref": "#/definitions/schemaArray" }
-+            ],
-+            "default": true
-+        },
-+        "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
-+        "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
-+        "uniqueItems": {
-+            "type": "boolean",
-+            "default": false
-+        },
-+        "contains": { "$ref": "#" },
-+        "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
-+        "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
-+        "required": { "$ref": "#/definitions/stringArray" },
-+        "additionalProperties": { "$ref": "#" },
-+        "definitions": {
-+            "type": "object",
-+            "additionalProperties": { "$ref": "#" },
-+            "default": {}
-+        },
-+        "properties": {
-+            "type": "object",
-+            "additionalProperties": { "$ref": "#" },
-+            "default": {}
-+        },
-+        "patternProperties": {
-+            "type": "object",
-+            "additionalProperties": { "$ref": "#" },
-+            "propertyNames": { "format": "regex" },
-+            "default": {}
-+        },
-+        "dependencies": {
-+            "type": "object",
-+            "additionalProperties": {
-+                "anyOf": [
-+                    { "$ref": "#" },
-+                    { "$ref": "#/definitions/stringArray" }
-+                ]
-+            }
-+        },
-+        "propertyNames": { "$ref": "#" },
-+        "const": true,
-+        "enum": {
-+            "type": "array",
-+            "items": true
-+        },
-+        "type": {
-+            "anyOf": [
-+                { "$ref": "#/definitions/simpleTypes" },
-+                {
-+                    "type": "array",
-+                    "items": { "$ref": "#/definitions/simpleTypes" },
-+                    "minItems": 1,
-+                    "uniqueItems": true
-+                }
-+            ]
-+        },
-+        "format": { "type": "string" },
-+        "contentMediaType": { "type": "string" },
-+        "contentEncoding": { "type": "string" },
-+        "if": {"$ref": "#"},
-+        "then": {"$ref": "#"},
-+        "else": {"$ref": "#"},
-+        "allOf": { "$ref": "#/definitions/schemaArray" },
-+        "anyOf": { "$ref": "#/definitions/schemaArray" },
-+        "oneOf": { "$ref": "#/definitions/schemaArray" },
-+        "not": { "$ref": "#" }
-+    },
-+    "default": true
-+}
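
These vendored draft-3/4/6/7 meta-schemas are the documents the validator classes in the following file check schemas against. For orientation, a minimal sketch (assuming the vendored jsonschema is importable): a schema that declares its draft via "$schema" is validated with the matching meta-schema automatically.

    import jsonschema

    schema = {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "type": "object",
        "properties": {"name": {"type": "string"}},
        "required": ["name"],
    }

    jsonschema.validate({"name": "glean"}, schema)  # passes silently
    # jsonschema.validate({}, schema)  # raises ValidationError: 'name' is a required property
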
-diff --git a/third_party/python/jsonschema/jsonschema/validators.py b/third_party/python/jsonschema/jsonschema/validators.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/jsonschema/jsonschema/validators.py
-@@ -0,0 +1,970 @@
-+"""
-+Creation and extension of validators, with implementations for existing drafts.
-+"""
-+from __future__ import division
-+
-+from warnings import warn
-+import contextlib
-+import json
-+import numbers
-+
-+from six import add_metaclass
-+
-+from jsonschema import (
-+    _legacy_validators,
-+    _types,
-+    _utils,
-+    _validators,
-+    exceptions,
-+)
-+from jsonschema.compat import (
-+    Sequence,
-+    int_types,
-+    iteritems,
-+    lru_cache,
-+    str_types,
-+    unquote,
-+    urldefrag,
-+    urljoin,
-+    urlopen,
-+    urlsplit,
-+)
-+
-+# Sigh. https://gitlab.com/pycqa/flake8/issues/280
-+#       https://github.com/pyga/ebb-lint/issues/7
-+# Imported for backwards compatibility.
-+from jsonschema.exceptions import ErrorTree
-+ErrorTree
-+
-+
-+class _DontDoThat(Exception):
-+    """
-+    Raised when a Validator with a non-default type checker is misused.
-+
-+    Asking one for DEFAULT_TYPES doesn't make sense, since type checkers
-+    exist for the unrepresentable cases where DEFAULT_TYPES can't
-+    represent the type relationship.
-+    """
-+
-+    def __str__(self):
-+        return "DEFAULT_TYPES cannot be used on Validators using TypeCheckers"
-+
-+
-+validators = {}
-+meta_schemas = _utils.URIDict()
-+
-+
-+def _generate_legacy_type_checks(types=()):
-+    """
-+    Generate newer-style type checks out of JSON-type-name-to-type mappings.
-+
-+    Arguments:
-+
-+        types (dict):
-+
-+            A mapping of type names to their Python types
-+
-+    Returns:
-+
-+        A dictionary of definitions to pass to `TypeChecker`
-+    """
-+    types = dict(types)
-+
-+    def gen_type_check(pytypes):
-+        pytypes = _utils.flatten(pytypes)
-+
-+        def type_check(checker, instance):
-+            if isinstance(instance, bool):
-+                if bool not in pytypes:
-+                    return False
-+            return isinstance(instance, pytypes)
-+
-+        return type_check
-+
-+    definitions = {}
-+    for typename, pytypes in iteritems(types):
-+        definitions[typename] = gen_type_check(pytypes)
-+
-+    return definitions
-+
-+
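
A hedged sketch of what the private helper above produces (internal API, shown only for orientation): a legacy name-to-Python-type mapping becomes check callables suitable for a `TypeChecker`.

    from jsonschema import _types
    from jsonschema.validators import _generate_legacy_type_checks

    # {"string": str} becomes {"string": <type_check(checker, instance)>}
    checks = _generate_legacy_type_checks({u"string": str})
    checker = _types.TypeChecker(type_checkers=checks)
    assert checker.is_type(u"hi", u"string")
    assert not checker.is_type(42, u"string")
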
-+_DEPRECATED_DEFAULT_TYPES = {
-+    u"array": list,
-+    u"boolean": bool,
-+    u"integer": int_types,
-+    u"null": type(None),
-+    u"number": numbers.Number,
-+    u"object": dict,
-+    u"string": str_types,
-+}
-+_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES = _types.TypeChecker(
-+    type_checkers=_generate_legacy_type_checks(_DEPRECATED_DEFAULT_TYPES),
-+)
-+
-+
-+def validates(version):
-+    """
-+    Register the decorated validator for a ``version`` of the specification.
-+
-+    Registered validators and their meta schemas will be considered when
-+    parsing ``$schema`` properties' URIs.
-+
-+    Arguments:
-+
-+        version (str):
-+
-+            An identifier to use as the version's name
-+
-+    Returns:
-+
-+        collections.Callable:
-+
-+            a class decorator to decorate the validator with the version
-+    """
-+
-+    def _validates(cls):
-+        validators[version] = cls
-+        meta_schema_id = cls.ID_OF(cls.META_SCHEMA)
-+        if meta_schema_id:
-+            meta_schemas[meta_schema_id] = cls
-+        return cls
-+    return _validates
-+
-+
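
For illustration (the version name and URI here are invented): `create`, defined below, runs this decorator automatically when given `version=`, registering the new class under its meta-schema's `$id`.

    from jsonschema import validators

    MyValidator = validators.create(
        meta_schema={u"$id": u"https://example.invalid/draft-x"},
        validators={},
        version="draft-x",  # implicitly applies validates("draft-x")
    )
    assert validators.meta_schemas[u"https://example.invalid/draft-x"] is MyValidator
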
-+def _DEFAULT_TYPES(self):
-+    if self._CREATED_WITH_DEFAULT_TYPES is None:
-+        raise _DontDoThat()
-+
-+    warn(
-+        (
-+            "The DEFAULT_TYPES attribute is deprecated. "
-+            "See the type checker attached to this validator instead."
-+        ),
-+        DeprecationWarning,
-+        stacklevel=2,
-+    )
-+    return self._DEFAULT_TYPES
-+
-+
-+class _DefaultTypesDeprecatingMetaClass(type):
-+    DEFAULT_TYPES = property(_DEFAULT_TYPES)
-+
-+
-+def _id_of(schema):
-+    if schema is True or schema is False:
-+        return u""
-+    return schema.get(u"$id", u"")
-+
-+
-+def create(
-+    meta_schema,
-+    validators=(),
-+    version=None,
-+    default_types=None,
-+    type_checker=None,
-+    id_of=_id_of,
-+):
-+    """
-+    Create a new validator class.
-+
-+    Arguments:
-+
-+        meta_schema (collections.Mapping):
-+
-+            the meta schema for the new validator class
-+
-+        validators (collections.Mapping):
-+
-+            a mapping from names to callables, where each callable will
-+            validate the schema property with the given name.
-+
-+            Each callable should take 4 arguments:
-+
-+                1. a validator instance,
-+                2. the value of the property being validated within the
-+                   instance
-+                3. the instance
-+                4. the schema
-+
-+        version (str):
-+
-+            an identifier for the version that this validator class will
-+            validate. If provided, the returned validator class will
-+            have its ``__name__`` set to include the version, and also
-+            will have `jsonschema.validators.validates` automatically
-+            called for the given version.
-+
-+        type_checker (jsonschema.TypeChecker):
-+
-+            a type checker, used when applying the :validator:`type` validator.
-+
-+            If unprovided, a `jsonschema.TypeChecker` will be created
-+            with a set of default types typical of JSON Schema drafts.
-+
-+        default_types (collections.Mapping):
-+
-+            .. deprecated:: 3.0.0
-+
-+                Please use the type_checker argument instead.
-+
-+            If set, it provides mappings of JSON types to Python types
-+            that will be converted to functions and redefined in this
-+            object's `jsonschema.TypeChecker`.
-+
-+        id_of (collections.Callable):
-+
-+            A function that given a schema, returns its ID.
-+
-+    Returns:
-+
-+        a new `jsonschema.IValidator` class
-+    """
-+
-+    if default_types is not None:
-+        if type_checker is not None:
-+            raise TypeError(
-+                "Do not specify default_types when providing a type checker.",
-+            )
-+        _created_with_default_types = True
-+        warn(
-+            (
-+                "The default_types argument is deprecated. "
-+                "Use the type_checker argument instead."
-+            ),
-+            DeprecationWarning,
-+            stacklevel=2,
-+        )
-+        type_checker = _types.TypeChecker(
-+            type_checkers=_generate_legacy_type_checks(default_types),
-+        )
-+    else:
-+        default_types = _DEPRECATED_DEFAULT_TYPES
-+        if type_checker is None:
-+            _created_with_default_types = False
-+            type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES
-+        elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES:
-+            _created_with_default_types = False
-+        else:
-+            _created_with_default_types = None
-+
-+    @add_metaclass(_DefaultTypesDeprecatingMetaClass)
-+    class Validator(object):
-+
-+        VALIDATORS = dict(validators)
-+        META_SCHEMA = dict(meta_schema)
-+        TYPE_CHECKER = type_checker
-+        ID_OF = staticmethod(id_of)
-+
-+        DEFAULT_TYPES = property(_DEFAULT_TYPES)
-+        _DEFAULT_TYPES = dict(default_types)
-+        _CREATED_WITH_DEFAULT_TYPES = _created_with_default_types
-+
-+        def __init__(
-+            self,
-+            schema,
-+            types=(),
-+            resolver=None,
-+            format_checker=None,
-+        ):
-+            if types:
-+                warn(
-+                    (
-+                        "The types argument is deprecated. Provide "
-+                        "a type_checker to jsonschema.validators.extend "
-+                        "instead."
-+                    ),
-+                    DeprecationWarning,
-+                    stacklevel=2,
-+                )
-+
-+                self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many(
-+                    _generate_legacy_type_checks(types),
-+                )
-+
-+            if resolver is None:
-+                resolver = RefResolver.from_schema(schema, id_of=id_of)
-+
-+            self.resolver = resolver
-+            self.format_checker = format_checker
-+            self.schema = schema
-+
-+        @classmethod
-+        def check_schema(cls, schema):
-+            for error in cls(cls.META_SCHEMA).iter_errors(schema):
-+                raise exceptions.SchemaError.create_from(error)
-+
-+        def iter_errors(self, instance, _schema=None):
-+            if _schema is None:
-+                _schema = self.schema
-+
-+            if _schema is True:
-+                return
-+            elif _schema is False:
-+                yield exceptions.ValidationError(
-+                    "False schema does not allow %r" % (instance,),
-+                    validator=None,
-+                    validator_value=None,
-+                    instance=instance,
-+                    schema=_schema,
-+                )
-+                return
-+
-+            scope = id_of(_schema)
-+            if scope:
-+                self.resolver.push_scope(scope)
-+            try:
-+                ref = _schema.get(u"$ref")
-+                if ref is not None:
-+                    validators = [(u"$ref", ref)]
-+                else:
-+                    validators = iteritems(_schema)
-+
-+                for k, v in validators:
-+                    validator = self.VALIDATORS.get(k)
-+                    if validator is None:
-+                        continue
-+
-+                    errors = validator(self, v, instance, _schema) or ()
-+                    for error in errors:
-+                        # set details if not already set by the called fn
-+                        error._set(
-+                            validator=k,
-+                            validator_value=v,
-+                            instance=instance,
-+                            schema=_schema,
-+                        )
-+                        if k != u"$ref":
-+                            error.schema_path.appendleft(k)
-+                        yield error
-+            finally:
-+                if scope:
-+                    self.resolver.pop_scope()
-+
-+        def descend(self, instance, schema, path=None, schema_path=None):
-+            for error in self.iter_errors(instance, schema):
-+                if path is not None:
-+                    error.path.appendleft(path)
-+                if schema_path is not None:
-+                    error.schema_path.appendleft(schema_path)
-+                yield error
-+
-+        def validate(self, *args, **kwargs):
-+            for error in self.iter_errors(*args, **kwargs):
-+                raise error
-+
-+        def is_type(self, instance, type):
-+            try:
-+                return self.TYPE_CHECKER.is_type(instance, type)
-+            except exceptions.UndefinedTypeCheck:
-+                raise exceptions.UnknownType(type, instance, self.schema)
-+
-+        def is_valid(self, instance, _schema=None):
-+            error = next(self.iter_errors(instance, _schema), None)
-+            return error is None
-+
-+    if version is not None:
-+        Validator = validates(version)(Validator)
-+        Validator.__name__ = version.title().replace(" ", "") + "Validator"
-+
-+    return Validator
-+
-+
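
A minimal sketch of `create` in use (the keyword name `atLeast` is made up): each validator callable receives the validator instance, the keyword's value, the instance, and the schema, and yields errors.

    from jsonschema import exceptions, validators

    def at_least(validator, limit, instance, schema):
        if validator.is_type(instance, u"number") and instance < limit:
            yield exceptions.ValidationError(
                "%r is less than %r" % (instance, limit))

    AtLeastValidator = validators.create(
        meta_schema={}, validators={u"atLeast": at_least})
    AtLeastValidator({u"atLeast": 10}).validate(12)   # fine
    # AtLeastValidator({u"atLeast": 10}).validate(3)  # raises ValidationError
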
-+def extend(validator, validators=(), version=None, type_checker=None):
-+    """
-+    Create a new validator class by extending an existing one.
-+
-+    Arguments:
-+
-+        validator (jsonschema.IValidator):
-+
-+            an existing validator class
-+
-+        validators (collections.Mapping):
-+
-+            a mapping of new validator callables to extend with, whose
-+            structure is as in `create`.
-+
-+            .. note::
-+
-+                Any validator callables with the same name as an
-+                existing one will (silently) replace the old validator
-+                callable entirely, effectively overriding any validation
-+                done in the "parent" validator class.
-+
-+                If you wish to instead extend the behavior of a parent's
-+                validator callable, delegate and call it directly in
-+                the new validator function by retrieving it using
-+                ``OldValidator.VALIDATORS["validator_name"]``.
-+
-+        version (str):
-+
-+            a version for the new validator class
-+
-+        type_checker (jsonschema.TypeChecker):
-+
-+            a type checker, used when applying the :validator:`type` validator.
-+
-+            If unprovided, the type checker of the extended
-+            `jsonschema.IValidator` will be carried along.
-+
-+    Returns:
-+
-+        a new `jsonschema.IValidator` class extending the one provided
-+
-+    .. note:: Meta Schemas
-+
-+        The new validator class will have its parent's meta schema.
-+
-+        If you wish to change or extend the meta schema in the new
-+        validator class, modify ``META_SCHEMA`` directly on the returned
-+        class. Note that no implicit copying is done, so a copy should
-+        likely be made before modifying it, in order to not affect the
-+        old validator.
-+    """
-+
-+    all_validators = dict(validator.VALIDATORS)
-+    all_validators.update(validators)
-+
-+    if type_checker is None:
-+        type_checker = validator.TYPE_CHECKER
-+    elif validator._CREATED_WITH_DEFAULT_TYPES:
-+        raise TypeError(
-+            "Cannot extend a validator created with default_types "
-+            "with a type_checker. Update the validator to use a "
-+            "type_checker when created."
-+        )
-+    return create(
-+        meta_schema=validator.META_SCHEMA,
-+        validators=all_validators,
-+        version=version,
-+        type_checker=type_checker,
-+        id_of=validator.ID_OF,
-+    )
-+
-+
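
A sketch of the delegation pattern the note above describes (the wrapper name is invented): fetch the parent's callable from `VALIDATORS` and defer to it.

    from jsonschema import Draft7Validator, validators

    parent_minimum = Draft7Validator.VALIDATORS[u"minimum"]

    def logging_minimum(validator, minimum, instance, schema):
        print("checking minimum=%r against %r" % (minimum, instance))
        for error in parent_minimum(validator, minimum, instance, schema) or ():
            yield error

    LoggingValidator = validators.extend(
        Draft7Validator, validators={u"minimum": logging_minimum})
    LoggingValidator({u"minimum": 5}).validate(7)
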
-+Draft3Validator = create(
-+    meta_schema=_utils.load_schema("draft3"),
-+    validators={
-+        u"$ref": _validators.ref,
-+        u"additionalItems": _validators.additionalItems,
-+        u"additionalProperties": _validators.additionalProperties,
-+        u"dependencies": _legacy_validators.dependencies_draft3,
-+        u"disallow": _legacy_validators.disallow_draft3,
-+        u"divisibleBy": _validators.multipleOf,
-+        u"enum": _validators.enum,
-+        u"extends": _legacy_validators.extends_draft3,
-+        u"format": _validators.format,
-+        u"items": _legacy_validators.items_draft3_draft4,
-+        u"maxItems": _validators.maxItems,
-+        u"maxLength": _validators.maxLength,
-+        u"maximum": _legacy_validators.maximum_draft3_draft4,
-+        u"minItems": _validators.minItems,
-+        u"minLength": _validators.minLength,
-+        u"minimum": _legacy_validators.minimum_draft3_draft4,
-+        u"pattern": _validators.pattern,
-+        u"patternProperties": _validators.patternProperties,
-+        u"properties": _legacy_validators.properties_draft3,
-+        u"type": _legacy_validators.type_draft3,
-+        u"uniqueItems": _validators.uniqueItems,
-+    },
-+    type_checker=_types.draft3_type_checker,
-+    version="draft3",
-+    id_of=lambda schema: schema.get(u"id", ""),
-+)
-+
-+Draft4Validator = create(
-+    meta_schema=_utils.load_schema("draft4"),
-+    validators={
-+        u"$ref": _validators.ref,
-+        u"additionalItems": _validators.additionalItems,
-+        u"additionalProperties": _validators.additionalProperties,
-+        u"allOf": _validators.allOf,
-+        u"anyOf": _validators.anyOf,
-+        u"dependencies": _validators.dependencies,
-+        u"enum": _validators.enum,
-+        u"format": _validators.format,
-+        u"items": _legacy_validators.items_draft3_draft4,
-+        u"maxItems": _validators.maxItems,
-+        u"maxLength": _validators.maxLength,
-+        u"maxProperties": _validators.maxProperties,
-+        u"maximum": _legacy_validators.maximum_draft3_draft4,
-+        u"minItems": _validators.minItems,
-+        u"minLength": _validators.minLength,
-+        u"minProperties": _validators.minProperties,
-+        u"minimum": _legacy_validators.minimum_draft3_draft4,
-+        u"multipleOf": _validators.multipleOf,
-+        u"not": _validators.not_,
-+        u"oneOf": _validators.oneOf,
-+        u"pattern": _validators.pattern,
-+        u"patternProperties": _validators.patternProperties,
-+        u"properties": _validators.properties,
-+        u"required": _validators.required,
-+        u"type": _validators.type,
-+        u"uniqueItems": _validators.uniqueItems,
-+    },
-+    type_checker=_types.draft4_type_checker,
-+    version="draft4",
-+    id_of=lambda schema: schema.get(u"id", ""),
-+)
-+
-+Draft6Validator = create(
-+    meta_schema=_utils.load_schema("draft6"),
-+    validators={
-+        u"$ref": _validators.ref,
-+        u"additionalItems": _validators.additionalItems,
-+        u"additionalProperties": _validators.additionalProperties,
-+        u"allOf": _validators.allOf,
-+        u"anyOf": _validators.anyOf,
-+        u"const": _validators.const,
-+        u"contains": _validators.contains,
-+        u"dependencies": _validators.dependencies,
-+        u"enum": _validators.enum,
-+        u"exclusiveMaximum": _validators.exclusiveMaximum,
-+        u"exclusiveMinimum": _validators.exclusiveMinimum,
-+        u"format": _validators.format,
-+        u"items": _validators.items,
-+        u"maxItems": _validators.maxItems,
-+        u"maxLength": _validators.maxLength,
-+        u"maxProperties": _validators.maxProperties,
-+        u"maximum": _validators.maximum,
-+        u"minItems": _validators.minItems,
-+        u"minLength": _validators.minLength,
-+        u"minProperties": _validators.minProperties,
-+        u"minimum": _validators.minimum,
-+        u"multipleOf": _validators.multipleOf,
-+        u"not": _validators.not_,
-+        u"oneOf": _validators.oneOf,
-+        u"pattern": _validators.pattern,
-+        u"patternProperties": _validators.patternProperties,
-+        u"properties": _validators.properties,
-+        u"propertyNames": _validators.propertyNames,
-+        u"required": _validators.required,
-+        u"type": _validators.type,
-+        u"uniqueItems": _validators.uniqueItems,
-+    },
-+    type_checker=_types.draft6_type_checker,
-+    version="draft6",
-+)
-+
-+Draft7Validator = create(
-+    meta_schema=_utils.load_schema("draft7"),
-+    validators={
-+        u"$ref": _validators.ref,
-+        u"additionalItems": _validators.additionalItems,
-+        u"additionalProperties": _validators.additionalProperties,
-+        u"allOf": _validators.allOf,
-+        u"anyOf": _validators.anyOf,
-+        u"const": _validators.const,
-+        u"contains": _validators.contains,
-+        u"dependencies": _validators.dependencies,
-+        u"enum": _validators.enum,
-+        u"exclusiveMaximum": _validators.exclusiveMaximum,
-+        u"exclusiveMinimum": _validators.exclusiveMinimum,
-+        u"format": _validators.format,
-+        u"if": _validators.if_,
-+        u"items": _validators.items,
-+        u"maxItems": _validators.maxItems,
-+        u"maxLength": _validators.maxLength,
-+        u"maxProperties": _validators.maxProperties,
-+        u"maximum": _validators.maximum,
-+        u"minItems": _validators.minItems,
-+        u"minLength": _validators.minLength,
-+        u"minProperties": _validators.minProperties,
-+        u"minimum": _validators.minimum,
-+        u"multipleOf": _validators.multipleOf,
-+        u"oneOf": _validators.oneOf,
-+        u"not": _validators.not_,
-+        u"pattern": _validators.pattern,
-+        u"patternProperties": _validators.patternProperties,
-+        u"properties": _validators.properties,
-+        u"propertyNames": _validators.propertyNames,
-+        u"required": _validators.required,
-+        u"type": _validators.type,
-+        u"uniqueItems": _validators.uniqueItems,
-+    },
-+    type_checker=_types.draft7_type_checker,
-+    version="draft7",
-+)
-+
-+_LATEST_VERSION = Draft7Validator
-+
-+
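
The per-draft classes above share the same interface; a small sketch of collecting every error rather than raising on the first:

    from jsonschema import Draft7Validator

    schema = {
        "type": "object",
        "required": ["a"],
        "properties": {"b": {"type": "integer"}},
    }
    for error in sorted(Draft7Validator(schema).iter_errors({"b": "x"}), key=str):
        print(error.message)
    # 'a' is a required property
    # 'x' is not of type 'integer'
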
-+class RefResolver(object):
-+    """
-+    Resolve JSON References.
-+
-+    Arguments:
-+
-+        base_uri (str):
-+
-+            The URI of the referring document
-+
-+        referrer:
-+
-+            The actual referring document
-+
-+        store (dict):
-+
-+            A mapping from URIs to documents to cache
-+
-+        cache_remote (bool):
-+
-+            Whether remote refs should be cached after first resolution
-+
-+        handlers (dict):
-+
-+            A mapping from URI schemes to functions that should be used
-+            to retrieve them
-+
-+        urljoin_cache (:func:`functools.lru_cache`):
-+
-+            A cache that will be used for caching the results of joining
-+            the resolution scope to subscopes.
-+
-+        remote_cache (:func:`functools.lru_cache`):
-+
-+            A cache that will be used for caching the results of
-+            resolved remote URLs.
-+
-+    Attributes:
-+
-+        cache_remote (bool):
-+
-+            Whether remote refs should be cached after first resolution
-+    """
-+
-+    def __init__(
-+        self,
-+        base_uri,
-+        referrer,
-+        store=(),
-+        cache_remote=True,
-+        handlers=(),
-+        urljoin_cache=None,
-+        remote_cache=None,
-+    ):
-+        if urljoin_cache is None:
-+            urljoin_cache = lru_cache(1024)(urljoin)
-+        if remote_cache is None:
-+            remote_cache = lru_cache(1024)(self.resolve_from_url)
-+
-+        self.referrer = referrer
-+        self.cache_remote = cache_remote
-+        self.handlers = dict(handlers)
-+
-+        self._scopes_stack = [base_uri]
-+        self.store = _utils.URIDict(
-+            (id, validator.META_SCHEMA)
-+            for id, validator in iteritems(meta_schemas)
-+        )
-+        self.store.update(store)
-+        self.store[base_uri] = referrer
-+
-+        self._urljoin_cache = urljoin_cache
-+        self._remote_cache = remote_cache
-+
-+    @classmethod
-+    def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
-+        """
-+        Construct a resolver from a JSON schema object.
-+
-+        Arguments:
-+
-+            schema:
-+
-+                the referring schema
-+
-+        Returns:
-+
-+            `RefResolver`
-+        """
-+
-+        return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs)
-+
-+    def push_scope(self, scope):
-+        """
-+        Enter a given sub-scope.
-+
-+        Treats further dereferences as being performed underneath the
-+        given scope.
-+        """
-+        self._scopes_stack.append(
-+            self._urljoin_cache(self.resolution_scope, scope),
-+        )
-+
-+    def pop_scope(self):
-+        """
-+        Exit the most recently entered scope.
-+
-+        Treats further dereferences as being performed underneath the
-+        original scope.
-+
-+        Don't call this method more times than `push_scope` has been
-+        called.
-+        """
-+        try:
-+            self._scopes_stack.pop()
-+        except IndexError:
-+            raise exceptions.RefResolutionError(
-+                "Failed to pop the scope from an empty stack. "
-+                "`pop_scope()` should only be called once for every "
-+                "`push_scope()`"
-+            )
-+
-+    @property
-+    def resolution_scope(self):
-+        """
-+        Retrieve the current resolution scope.
-+        """
-+        return self._scopes_stack[-1]
-+
-+    @property
-+    def base_uri(self):
-+        """
-+        Retrieve the current base URI, not including any fragment.
-+        """
-+        uri, _ = urldefrag(self.resolution_scope)
-+        return uri
-+
-+    @contextlib.contextmanager
-+    def in_scope(self, scope):
-+        """
-+        Temporarily enter the given scope for the duration of the context.
-+        """
-+        self.push_scope(scope)
-+        try:
-+            yield
-+        finally:
-+            self.pop_scope()
-+
-+    @contextlib.contextmanager
-+    def resolving(self, ref):
-+        """
-+        Resolve the given ``ref`` and enter its resolution scope.
-+
-+        Exits the scope on exit of this context manager.
-+
-+        Arguments:
-+
-+            ref (str):
-+
-+                The reference to resolve
-+        """
-+
-+        url, resolved = self.resolve(ref)
-+        self.push_scope(url)
-+        try:
-+            yield resolved
-+        finally:
-+            self.pop_scope()
-+
-+    def resolve(self, ref):
-+        """
-+        Resolve the given reference.
-+        """
-+        url = self._urljoin_cache(self.resolution_scope, ref)
-+        return url, self._remote_cache(url)
-+
-+    def resolve_from_url(self, url):
-+        """
-+        Resolve the given remote URL.
-+        """
-+        url, fragment = urldefrag(url)
-+        try:
-+            document = self.store[url]
-+        except KeyError:
-+            try:
-+                document = self.resolve_remote(url)
-+            except Exception as exc:
-+                raise exceptions.RefResolutionError(exc)
-+
-+        return self.resolve_fragment(document, fragment)
-+
-+    def resolve_fragment(self, document, fragment):
-+        """
-+        Resolve a ``fragment`` within the referenced ``document``.
-+
-+        Arguments:
-+
-+            document:
-+
-+                The referent document
-+
-+            fragment (str):
-+
-+                a URI fragment to resolve within it
-+        """
-+
-+        fragment = fragment.lstrip(u"/")
-+        parts = unquote(fragment).split(u"/") if fragment else []
-+
-+        for part in parts:
-+            part = part.replace(u"~1", u"/").replace(u"~0", u"~")
-+
-+            if isinstance(document, Sequence):
-+                # Array indexes should be turned into integers
-+                try:
-+                    part = int(part)
-+                except ValueError:
-+                    pass
-+            try:
-+                document = document[part]
-+            except (TypeError, LookupError):
-+                raise exceptions.RefResolutionError(
-+                    "Unresolvable JSON pointer: %r" % fragment
-+                )
-+
-+        return document
-+
-+    def resolve_remote(self, uri):
-+        """
-+        Resolve a remote ``uri``.
-+
-+        If called directly, does not check the store first, but after
-+        retrieving the document at the specified URI it will be saved in
-+        the store if :attr:`cache_remote` is True.
-+
-+        .. note::
-+
-+            If the requests_ library is present, ``jsonschema`` will use it to
-+            request the remote ``uri``, so that the correct encoding is
-+            detected and used.
-+
-+            If it isn't, or if the scheme of the ``uri`` is not ``http`` or
-+            ``https``, UTF-8 is assumed.
-+
-+        Arguments:
-+
-+            uri (str):
-+
-+                The URI to resolve
-+
-+        Returns:
-+
-+            The retrieved document
-+
-+        .. _requests: https://pypi.org/project/requests/
-+        """
-+        try:
-+            import requests
-+        except ImportError:
-+            requests = None
-+
-+        scheme = urlsplit(uri).scheme
-+
-+        if scheme in self.handlers:
-+            result = self.handlers[scheme](uri)
-+        elif scheme in [u"http", u"https"] and requests:
-+            # Requests has support for detecting the correct encoding of
-+            # json over http
-+            result = requests.get(uri).json()
-+        else:
-+            # Otherwise, pass off to urllib and assume utf-8
-+            with urlopen(uri) as url:
-+                result = json.loads(url.read().decode("utf-8"))
-+
-+        if self.cache_remote:
-+            self.store[uri] = result
-+        return result
-+
-+
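
A hedged usage sketch for the resolver (the URIs are invented): pre-seeding `store` lets `$ref` resolve from memory instead of the network.

    from jsonschema import Draft7Validator, RefResolver

    address = {"type": "object", "required": ["city"]}
    schema = {"$ref": "https://example.invalid/address.json"}
    resolver = RefResolver.from_schema(
        schema, store={"https://example.invalid/address.json": address})
    Draft7Validator(schema, resolver=resolver).validate({"city": "Oslo"})
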
-+def validate(instance, schema, cls=None, *args, **kwargs):
-+    """
-+    Validate an instance under the given schema.
-+
-+        >>> validate([2, 3, 4], {"maxItems": 2})
-+        Traceback (most recent call last):
-+            ...
-+        ValidationError: [2, 3, 4] is too long
-+
-+    :func:`validate` will first verify that the provided schema is
-+    itself valid; skipping that check can lead to less obvious error
-+    messages and failures in less obvious or consistent ways.
-+
-+    If you know you have a valid schema already, especially if you
-+    intend to validate multiple instances with the same schema, you
-+    likely would prefer using the `IValidator.validate` method directly
-+    on a specific validator (e.g. ``Draft7Validator.validate``).
-+
-+
-+    Arguments:
-+
-+        instance:
-+
-+            The instance to validate
-+
-+        schema:
-+
-+            The schema to validate with
-+
-+        cls (IValidator):
-+
-+            The class that will be used to validate the instance.
-+
-+    If the ``cls`` argument is not provided, two things will happen
-+    in accordance with the specification. First, if the schema has a
-+    :validator:`$schema` property containing a known meta-schema [#]_
-+    then the proper validator will be used. The specification recommends
-+    that all schemas contain :validator:`$schema` properties for this
-+    reason. If no :validator:`$schema` property is found, the default
-+    validator class is the latest released draft.
-+
-+    Any other provided positional and keyword arguments will be passed
-+    on when instantiating the ``cls``.
-+
-+    Raises:
-+
-+        `jsonschema.exceptions.ValidationError` if the instance
-+            is invalid
-+
-+        `jsonschema.exceptions.SchemaError` if the schema itself
-+            is invalid
-+
-+    .. rubric:: Footnotes
-+    .. [#] known by a validator registered with
-+        `jsonschema.validators.validates`
-+    """
-+    if cls is None:
-+        cls = validator_for(schema)
-+
-+    cls.check_schema(schema)
-+    validator = cls(schema, *args, **kwargs)
-+    error = exceptions.best_match(validator.iter_errors(instance))
-+    if error is not None:
-+        raise error
-+
-+
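
As the docstring advises, when one schema guards many instances it is cheaper to check the schema once and reuse a validator directly (a small sketch):

    from jsonschema import Draft7Validator

    schema = {"type": "number", "minimum": 0}
    Draft7Validator.check_schema(schema)  # verify the schema itself, once
    validator = Draft7Validator(schema)
    for value in (0, 1.5, 42):
        validator.validate(value)
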
-+def validator_for(schema, default=_LATEST_VERSION):
-+    """
-+    Retrieve the validator class appropriate for validating the given schema.
-+
-+    Uses the :validator:`$schema` property that should be present in the
-+    given schema to look up the appropriate validator class.
-+
-+    Arguments:
-+
-+        schema (collections.Mapping or bool):
-+
-+            the schema to look at
-+
-+        default:
-+
-+            the default to return if the appropriate validator class
-+            cannot be determined.
-+
-+            If unprovided, the default is to return the latest supported
-+            draft.
-+    """
-+    if schema is True or schema is False or u"$schema" not in schema:
-+        return default
-+    if schema[u"$schema"] not in meta_schemas:
-+        warn(
-+            (
-+                "The metaschema specified by $schema was not found. "
-+                "Using the latest draft to validate, but this will raise "
-+                "an error in the future."
-+            ),
-+            DeprecationWarning,
-+            stacklevel=2,
-+        )
-+    return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION)
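
A quick sketch of the lookup: a declared `$schema` selects the matching class, and a bare schema falls back to the latest supported draft.

    from jsonschema import Draft4Validator, Draft7Validator, validators

    draft4 = {"$schema": "http://json-schema.org/draft-04/schema#"}
    assert validators.validator_for(draft4) is Draft4Validator
    assert validators.validator_for({}) is Draft7Validator
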
-diff --git a/third_party/python/pathspec/pathspec/__init__.py b/third_party/python/pathspec/pathspec/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pathspec/pathspec/__init__.py
-@@ -0,0 +1,66 @@
-+# encoding: utf-8
-+"""
-+The *pathspec* package provides pattern matching for file paths. So far
-+this only includes Git's wildmatch pattern matching (the style used for
-+".gitignore" files).
-+
-+The following classes are imported and made available from the root of
-+the `pathspec` package:
-+
-+- :class:`pathspec.pathspec.PathSpec`
-+
-+- :class:`pathspec.pattern.Pattern`
-+
-+- :class:`pathspec.pattern.RegexPattern`
-+
-+- :class:`pathspec.util.RecursionError`
-+
-+The following functions are also imported:
-+
-+- :func:`pathspec.util.iter_tree`
-+- :func:`pathspec.util.lookup_pattern`
-+- :func:`pathspec.util.match_files`
-+"""
-+from __future__ import unicode_literals
-+
-+__author__ = "Caleb P. Burns"
-+__copyright__ = "Copyright © 2013-2018 Caleb P. Burns"
-+__created__ = "2013-10-12"
-+__credits__ = [
-+	"dahlia <https://github.com/dahlia>",
-+	"highb <https://github.com/highb>",
-+	"029xue <https://github.com/029xue>",
-+	"mikexstudios <https://github.com/mikexstudios>",
-+	"nhumrich <https://github.com/nhumrich>",
-+	"davidfraser <https://github.com/davidfraser>",
-+	"demurgos <https://github.com/demurgos>",
-+	"ghickman <https://github.com/ghickman>",
-+	"nvie <https://github.com/nvie>",
-+	"adrienverge <https://github.com/adrienverge>",
-+	"AndersBlomdell <https://github.com/AndersBlomdell>",
-+	"thmxv <https://github.com/thmxv>",
-+	"wimglenn <https://github.com/wimglenn>",
-+	"hugovk <https://github.com/hugovk>",
-+	"dcecile <https://github.com/dcecile>",
-+	"mroutis <https://github.com/mroutis>",
-+	"jdufresne <https://github.com/jdufresne>",
-+	"groodt <https://github.com/groodt>",
-+]
-+__email__ = "cpburnz@gmail.com"
-+__license__ = "MPL 2.0"
-+__project__ = "pathspec"
-+__status__ = "Development"
-+__updated__ = "2019-12-27"
-+__version__ = "0.7.0"
-+
-+from .pathspec import PathSpec
-+from .pattern import Pattern, RegexPattern
-+from .util import iter_tree, lookup_pattern, match_files, RecursionError
-+
-+# Load pattern implementations.
-+from . import patterns
-+
-+# Expose `GitIgnorePattern` class in the root module for backward
-+# compatibility with v0.4.
-+from .patterns.gitwildmatch import GitIgnorePattern
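
A minimal usage sketch for the package's main entry point, using the "gitwildmatch" pattern style that `pathspec.patterns` registers:

    import pathspec

    spec = pathspec.PathSpec.from_lines("gitwildmatch", ["*.pyc", "build/"])
    print(list(spec.match_files(["a.py", "a.pyc", "build/out.txt"])))
    # ['a.pyc', 'build/out.txt']
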
-diff --git a/third_party/python/pathspec/pathspec/compat.py b/third_party/python/pathspec/pathspec/compat.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pathspec/pathspec/compat.py
-@@ -0,0 +1,36 @@
-+# encoding: utf-8
-+"""
-+This module provides compatibility between Python 2 and 3. This project
-+uses too little of `six`_ to justify adding it as a dependency.
-+
-+.. _`six`: http://pythonhosted.org/six
-+"""
-+
-+import sys
-+
-+if sys.version_info[0] < 3:
-+	# Python 2.
-+	unicode = unicode
-+	string_types = (basestring,)
-+
-+	from itertools import izip_longest
-+
-+	def iterkeys(mapping):
-+		return mapping.iterkeys()
-+
-+else:
-+	# Python 3.
-+	unicode = str
-+	string_types = (unicode,)
-+
-+	from itertools import zip_longest as izip_longest
-+
-+	def iterkeys(mapping):
-+		return mapping.keys()
-+
-+try:
-+	# Python 3.6+.
-+	from collections.abc import Collection as collection_type
-+except ImportError:
-+	# Python 2.7 - 3.5.
-+	from collections import Container as collection_type
-diff --git a/third_party/python/pathspec/pathspec/pathspec.py b/third_party/python/pathspec/pathspec/pathspec.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pathspec/pathspec/pathspec.py
-@@ -0,0 +1,137 @@
-+# encoding: utf-8
-+"""
-+This module provides an object-oriented interface for pattern matching
-+of files.
-+"""
-+
-+from . import util
-+from .compat import collection_type, iterkeys, izip_longest, string_types, unicode
-+
-+
-+class PathSpec(object):
-+	"""
-+	The :class:`PathSpec` class is a wrapper around a list of compiled
-+	:class:`.Pattern` instances.
-+	"""
-+
-+	def __init__(self, patterns):
-+		"""
-+		Initializes the :class:`PathSpec` instance.
-+
-+		*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)
-+		yields each compiled pattern (:class:`.Pattern`).
-+		"""
-+
-+		self.patterns = patterns if isinstance(patterns, collection_type) else list(patterns)
-+		"""
-+		*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)
-+		contains the compiled patterns.
-+		"""
-+
-+	def __eq__(self, other):
-+		"""
-+		Tests the equality of this path-spec with *other* (:class:`PathSpec`)
-+		by comparing their :attr:`~PathSpec.patterns` attributes.
-+		"""
-+		if isinstance(other, PathSpec):
-+			paired_patterns = izip_longest(self.patterns, other.patterns)
-+			return all(a == b for a, b in paired_patterns)
-+		else:
-+			return NotImplemented
-+
-+	def __len__(self):
-+		"""
-+		Returns the number of compiled patterns this path-spec contains
-+		(:class:`int`).
-+		"""
-+		return len(self.patterns)
-+
-+	@classmethod
-+	def from_lines(cls, pattern_factory, lines):
-+		"""
-+		Compiles the pattern lines.
-+
-+		*pattern_factory* can be either the name of a registered pattern
-+		factory (:class:`str`), or a :class:`~collections.abc.Callable` used
-+		to compile patterns. It must accept an uncompiled pattern (:class:`str`)
-+		and return the compiled pattern (:class:`.Pattern`).
-+
-+		*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
-+		pattern (:class:`str`). This simply has to yield each line so it can
-+		be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
-+		or the result from :meth:`str.splitlines`.
-+
-+		Returns the :class:`PathSpec` instance.
-+		"""
-+		if isinstance(pattern_factory, string_types):
-+			pattern_factory = util.lookup_pattern(pattern_factory)
-+		if not callable(pattern_factory):
-+			raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
-+
-+		if isinstance(lines, (bytes, unicode)):
-+			raise TypeError("lines:{!r} is not an iterable.".format(lines))
-+
-+		lines = [pattern_factory(line) for line in lines if line]
-+		return cls(lines)
-+
-+	def match_file(self, file, separators=None):
-+		"""
-+		Matches the file to this path-spec.
-+
-+		*file* (:class:`str`) is the file path to be matched against
-+		:attr:`self.patterns <PathSpec.patterns>`.
-+
-+		*separators* (:class:`~collections.abc.Collection` of :class:`str`)
-+		optionally contains the path separators to normalize. See
-+		:func:`~pathspec.util.normalize_file` for more information.
-+
-+		Returns :data:`True` if *file* matched; otherwise, :data:`False`.
-+		"""
-+		norm_file = util.normalize_file(file, separators=separators)
-+		return util.match_file(self.patterns, norm_file)
-+
-+	def match_files(self, files, separators=None):
-+		"""
-+		Matches the files to this path-spec.
-+
-+		*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
-+		the file paths to be matched against :attr:`self.patterns
-+		<PathSpec.patterns>`.
-+
-+		*separators* (:class:`~collections.abc.Collection` of :class:`str`;
-+		or :data:`None`) optionally contains the path separators to
-+		normalize. See :func:`~pathspec.util.normalize_file` for more
-+		information.
-+
-+		Returns the matched files (:class:`~collections.abc.Iterable` of
-+		:class:`str`).
-+		"""
-+		if isinstance(files, (bytes, unicode)):
-+			raise TypeError("files:{!r} is not an iterable.".format(files))
-+
-+		file_map = util.normalize_files(files, separators=separators)
-+		matched_files = util.match_files(self.patterns, iterkeys(file_map))
-+		for path in matched_files:
-+			yield file_map[path]
-+
-+	def match_tree(self, root, on_error=None, follow_links=None):
-+		"""
-+		Walks the specified root path for all files and matches them to this
-+		path-spec.
-+
-+		*root* (:class:`str`) is the root directory to search for files.
-+
-+		*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
-+		optionally is the error handler for file-system exceptions. See
-+		:func:`~pathspec.util.iter_tree` for more information.
-+
-+		*follow_links* (:class:`bool` or :data:`None`) optionally is whether
-+		to walk symbolic links that resolve to directories. See
-+		:func:`~pathspec.util.iter_tree` for more information.
-+
-+		Returns the matched files (:class:`~collections.abc.Iterable` of
-+		:class:`str`).
-+		"""
-+		files = util.iter_tree(root, on_error=on_error, follow_links=follow_links)
-+		return self.match_files(files)
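
Taken together, the methods above form the public matching API: compile a spec once with `from_lines()`, then query it with `match_file()`, `match_files()`, or `match_tree()`. A minimal usage sketch (the pattern lines and file paths here are made-up examples, not taken from this patch):

```python
# Minimal sketch of the PathSpec API above; patterns and paths are
# illustrative examples only.
import pathspec

# Compile gitignore-style lines with the registered 'gitwildmatch' factory.
spec = pathspec.PathSpec.from_lines('gitwildmatch', [
    '*.pyc',      # match compiled files anywhere
    '!keep.pyc',  # ...except this one (negation)
    'build/',     # match the build directory's contents
])

print(spec.match_file('pkg/mod.pyc'))  # True
print(spec.match_file('keep.pyc'))     # False: the negation wins
print(spec.match_file('build/out'))    # True

# match_files() yields only the inputs that matched (order not guaranteed).
print(sorted(spec.match_files(['a.py', 'a.pyc', 'build/x'])))
# ['a.pyc', 'build/x']
```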
-diff --git a/third_party/python/pathspec/pathspec/pattern.py b/third_party/python/pathspec/pathspec/pattern.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pathspec/pathspec/pattern.py
-@@ -0,0 +1,146 @@
-+# encoding: utf-8
-+"""
-+This module provides the base definition for patterns.
-+"""
-+
-+import re
-+
-+from .compat import unicode
-+
-+
-+class Pattern(object):
-+	"""
-+	The :class:`Pattern` class is the abstract definition of a pattern.
-+	"""
-+
-+	# Make the class dict-less.
-+	__slots__ = ('include',)
-+
-+	def __init__(self, include):
-+		"""
-+		Initializes the :class:`Pattern` instance.
-+
-+		*include* (:class:`bool` or :data:`None`) is whether the matched
-+		files should be included (:data:`True`), excluded (:data:`False`),
-+		or is a null-operation (:data:`None`).
-+		"""
-+
-+		self.include = include
-+		"""
-+		*include* (:class:`bool` or :data:`None`) is whether the matched
-+		files should be included (:data:`True`), excluded (:data:`False`),
-+		or is a null-operation (:data:`None`).
-+		"""
-+
-+	def match(self, files):
-+		"""
-+		Matches this pattern against the specified files.
-+
-+		*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
-+		each file relative to the root directory (e.g., ``"relative/path/to/file"``).
-+
-+		Returns an :class:`~collections.abc.Iterable` yielding each matched
-+		file path (:class:`str`).
-+		"""
-+		raise NotImplementedError("{}.{} must override match().".format(self.__class__.__module__, self.__class__.__name__))
-+
-+
-+class RegexPattern(Pattern):
-+	"""
-+	The :class:`RegexPattern` class is an implementation of a pattern
-+	using regular expressions.
-+	"""
-+
-+	# Make the class dict-less.
-+	__slots__ = ('regex',)
-+
-+	def __init__(self, pattern, include=None):
-+		"""
-+		Initializes the :class:`RegexPattern` instance.
-+
-+		*pattern* (:class:`unicode`, :class:`bytes`, :class:`re.RegexObject`,
-+		or :data:`None`) is the pattern to compile into a regular
-+		expression.
-+
-+		*include* (:class:`bool` or :data:`None`) must be :data:`None`
-+		unless *pattern* is a precompiled regular expression (:class:`re.RegexObject`)
-+		in which case it is whether matched files should be included
-+		(:data:`True`), excluded (:data:`False`), or is a null operation
-+		(:data:`None`).
-+
-+			.. NOTE:: Subclasses do not need to support the *include*
-+			   parameter.
-+		"""
-+
-+		self.regex = None
-+		"""
-+		*regex* (:class:`re.RegexObject`) is the regular expression for the
-+		pattern.
-+		"""
-+
-+		if isinstance(pattern, (unicode, bytes)):
-+			assert include is None, "include:{!r} must be null when pattern:{!r} is a string.".format(include, pattern)
-+			regex, include = self.pattern_to_regex(pattern)
-+			# NOTE: Make sure to allow a null regular expression to be
-+			# returned for a null-operation.
-+			if include is not None:
-+				regex = re.compile(regex)
-+
-+		elif pattern is not None and hasattr(pattern, 'match'):
-+			# Assume pattern is a precompiled regular expression.
-+			# - NOTE: Use the specified *include*.
-+			regex = pattern
-+
-+		elif pattern is None:
-+			# NOTE: Make sure to allow a null pattern to be passed for a
-+			# null-operation.
-+			assert include is None, "include:{!r} must be null when pattern:{!r} is null.".format(include, pattern)
-+
-+		else:
-+			raise TypeError("pattern:{!r} is not a string, RegexObject, or None.".format(pattern))
-+
-+		super(RegexPattern, self).__init__(include)
-+		self.regex = regex
-+
-+	def __eq__(self, other):
-+		"""
-+		Tests the equality of this regex pattern with *other* (:class:`RegexPattern`)
-+		by comparing their :attr:`~Pattern.include` and :attr:`~RegexPattern.regex`
-+		attributes.
-+		"""
-+		if isinstance(other, RegexPattern):
-+			return self.include == other.include and self.regex == other.regex
-+		else:
-+			return NotImplemented
-+
-+	def match(self, files):
-+		"""
-+		Matches this pattern against the specified files.
-+
-+		*files* (:class:`~collections.abc.Iterable` of :class:`str`)
-+		contains each file relative to the root directory (e.g., "relative/path/to/file").
-+
-+		Returns an :class:`~collections.abc.Iterable` yielding each matched
-+		file path (:class:`str`).
-+		"""
-+		if self.include is not None:
-+			for path in files:
-+				if self.regex.match(path) is not None:
-+					yield path
-+
-+	@classmethod
-+	def pattern_to_regex(cls, pattern):
-+		"""
-+		Convert the pattern into an uncompiled regular expression.
-+
-+		*pattern* (:class:`str`) is the pattern to convert into a regular
-+		expression.
-+
-+		Returns the uncompiled regular expression (:class:`str` or :data:`None`),
-+		and whether matched files should be included (:data:`True`),
-+		excluded (:data:`False`), or is a null-operation (:data:`None`).
-+
-+			.. NOTE:: The default implementation simply returns *pattern* and
-+			   :data:`True`.
-+		"""
-+		return pattern, True
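
Because the default `pattern_to_regex()` simply returns the pattern with `include=True`, a subclass only has to override that one classmethod to define its own syntax. A hypothetical sketch of the contract; `SignedRegexPattern` and its leading-`-` convention are invented for illustration:

```python
# Hypothetical RegexPattern subclass: a leading '-' marks the pattern
# as an exclusion, exercising the (regex, include) contract above.
from pathspec.pattern import RegexPattern

class SignedRegexPattern(RegexPattern):
    # Keep the dict-less class hierarchy.
    __slots__ = ()

    @classmethod
    def pattern_to_regex(cls, pattern):
        if pattern.startswith('-'):
            return pattern[1:], False  # matched files are excluded
        return pattern, True           # matched files are included

pat = SignedRegexPattern(r'^docs/.*$')
print(pat.include)                              # True
print(list(pat.match(['docs/a.md', 'src/b'])))  # ['docs/a.md']

neg = SignedRegexPattern(r'-^docs/internal/.*$')
print(neg.include)                              # False
```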
-diff --git a/third_party/python/pathspec/pathspec/patterns/__init__.py b/third_party/python/pathspec/pathspec/patterns/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pathspec/pathspec/patterns/__init__.py
-@@ -0,0 +1,8 @@
-+# encoding: utf-8
-+"""
-+The *pathspec.patterns* package contains the pattern matching
-+implementations.
-+"""
-+
-+# Load pattern implementations.
-+from .gitwildmatch import GitWildMatchPattern
-diff --git a/third_party/python/pathspec/pathspec/patterns/gitwildmatch.py b/third_party/python/pathspec/pathspec/patterns/gitwildmatch.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pathspec/pathspec/patterns/gitwildmatch.py
-@@ -0,0 +1,330 @@
-+# encoding: utf-8
-+"""
-+This module implements Git's wildmatch pattern matching which itself is
-+derived from Rsync's wildmatch. Git uses wildmatch for its ".gitignore"
-+files.
-+"""
-+from __future__ import unicode_literals
-+
-+import re
-+import warnings
-+
-+from .. import util
-+from ..compat import unicode
-+from ..pattern import RegexPattern
-+
-+#: The encoding to use when parsing a byte string pattern.
-+_BYTES_ENCODING = 'latin1'
-+
-+
-+class GitWildMatchPattern(RegexPattern):
-+	"""
-+	The :class:`GitWildMatchPattern` class represents a compiled Git
-+	wildmatch pattern.
-+	"""
-+
-+	# Keep the dict-less class hierarchy.
-+	__slots__ = ()
-+
-+	@classmethod
-+	def pattern_to_regex(cls, pattern):
-+		"""
-+		Convert the pattern into a regular expression.
-+
-+		*pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
-+		convert into a regular expression.
-+
-+		Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`,
-+		or :data:`None`), and whether matched files should be included
-+		(:data:`True`), excluded (:data:`False`), or if it is a
-+		null-operation (:data:`None`).
-+		"""
-+		if isinstance(pattern, unicode):
-+			return_type = unicode
-+		elif isinstance(pattern, bytes):
-+			return_type = bytes
-+			pattern = pattern.decode(_BYTES_ENCODING)
-+		else:
-+			raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern))
-+
-+		pattern = pattern.strip()
-+
-+		if pattern.startswith('#'):
-+			# A pattern starting with a hash ('#') serves as a comment
-+			# (neither includes nor excludes files). Escape the hash with a
-+			# back-slash to match a literal hash (i.e., '\#').
-+			regex = None
-+			include = None
-+
-+		elif pattern == '/':
-+			# EDGE CASE: According to `git check-ignore` (v2.4.1), a single
-+			# '/' does not match any file.
-+			regex = None
-+			include = None
-+
-+		elif pattern:
-+
-+			if pattern.startswith('!'):
-+				# A pattern starting with an exclamation mark ('!') negates the
-+				# pattern (exclude instead of include). Escape the exclamation
-+				# mark with a back-slash to match a literal exclamation mark
-+				# (i.e., '\!').
-+				include = False
-+				# Remove leading exclamation mark.
-+				pattern = pattern[1:]
-+			else:
-+				include = True
-+
-+			if pattern.startswith('\\'):
-+				# Remove leading back-slash escape for escaped hash ('#') or
-+				# exclamation mark ('!').
-+				pattern = pattern[1:]
-+
-+			# Split pattern into segments.
-+			pattern_segs = pattern.split('/')
-+
-+			# Normalize pattern to make processing easier.
-+
-+			if not pattern_segs[0]:
-+				# A pattern beginning with a slash ('/') will only match paths
-+				# directly on the root directory instead of any descendant
-+				# paths. So, remove empty first segment to make pattern relative
-+				# to root.
-+				del pattern_segs[0]
-+
-+			elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):
-+				# A single pattern without a beginning slash ('/') will match
-+				# any descendant path. This is equivalent to "**/{pattern}". So,
-+				# prepend with double-asterisks to make pattern relative to
-+				# root.
-+				# EDGE CASE: This also holds for a single pattern with a
-+				# trailing slash (e.g. dir/).
-+				if pattern_segs[0] != '**':
-+					pattern_segs.insert(0, '**')
-+
-+			else:
-+				# EDGE CASE: A pattern without a beginning slash ('/') but
-+				# contains at least one prepended directory (e.g.
-+				# "dir/{pattern}") should not match "**/dir/{pattern}",
-+				# according to `git check-ignore` (v2.4.1).
-+				pass
-+
-+			if not pattern_segs[-1] and len(pattern_segs) > 1:
-+				# A pattern ending with a slash ('/') will match all descendant
-+				# paths if it is a directory but not if it is a regular file.
-+				# This is equivalent to "{pattern}/**". So, set last segment to
-+				# double asterisks to include all descendants.
-+				pattern_segs[-1] = '**'
-+
-+			# Build regular expression from pattern.
-+			output = ['^']
-+			need_slash = False
-+			end = len(pattern_segs) - 1
-+			for i, seg in enumerate(pattern_segs):
-+				if seg == '**':
-+					if i == 0 and i == end:
-+						# A pattern consisting solely of double-asterisks ('**')
-+						# will match every path.
-+						output.append('.+')
-+					elif i == 0:
-+						# A normalized pattern beginning with double-asterisks
-+						# ('**') will match any leading path segments.
-+						output.append('(?:.+/)?')
-+						need_slash = False
-+					elif i == end:
-+						# A normalized pattern ending with double-asterisks ('**')
-+						# will match any trailing path segments.
-+						output.append('/.*')
-+					else:
-+						# A pattern with inner double-asterisks ('**') will match
-+						# multiple (or zero) inner path segments.
-+						output.append('(?:/.+)?')
-+						need_slash = True
-+				elif seg == '*':
-+					# Match single path segment.
-+					if need_slash:
-+						output.append('/')
-+					output.append('[^/]+')
-+					need_slash = True
-+				else:
-+					# Match segment glob pattern.
-+					if need_slash:
-+						output.append('/')
-+					output.append(cls._translate_segment_glob(seg))
-+					if i == end and include is True:
-+						# A pattern ending without a slash ('/') will match a file
-+						# or a directory (with paths underneath it). E.g., "foo"
-+						# matches "foo", "foo/bar", "foo/bar/baz", etc.
-+						# EDGE CASE: However, this does not hold for exclusion cases
-+						# according to `git check-ignore` (v2.4.1).
-+						output.append('(?:/.*)?')
-+					need_slash = True
-+			output.append('$')
-+			regex = ''.join(output)
-+
-+		else:
-+			# A blank pattern is a null-operation (neither includes nor
-+			# excludes files).
-+			regex = None
-+			include = None
-+
-+		if regex is not None and return_type is bytes:
-+			regex = regex.encode(_BYTES_ENCODING)
-+
-+		return regex, include
-+
-+	@staticmethod
-+	def _translate_segment_glob(pattern):
-+		"""
-+		Translates the glob pattern to a regular expression. This is used in
-+		the constructor to translate a path segment glob pattern to its
-+		corresponding regular expression.
-+
-+		*pattern* (:class:`str`) is the glob pattern.
-+
-+		Returns the regular expression (:class:`str`).
-+		"""
-+		# NOTE: This is derived from `fnmatch.translate()` and is similar to
-+		# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
-+
-+		escape = False
-+		regex = ''
-+		i, end = 0, len(pattern)
-+		while i < end:
-+			# Get next character.
-+			char = pattern[i]
-+			i += 1
-+
-+			if escape:
-+				# Escape the character.
-+				escape = False
-+				regex += re.escape(char)
-+
-+			elif char == '\\':
-+				# Escape character, escape next character.
-+				escape = True
-+
-+			elif char == '*':
-+				# Multi-character wildcard. Match any string (except slashes),
-+				# including an empty string.
-+				regex += '[^/]*'
-+
-+			elif char == '?':
-+				# Single-character wildcard. Match any single character (except
-+				# a slash).
-+				regex += '[^/]'
-+
-+			elif char == '[':
-+				# Bracket expression wildcard. Except for the beginning
-+				# exclamation mark, the whole bracket expression can be used
-+				# directly as regex, but we have to find where the expression
-+				# ends.
-+				# - "[][!]" matches ']', '[' and '!'.
-+				# - "[]-]" matches ']' and '-'.
-+				# - "[!]a-]" matches any character except ']', 'a' and '-'.
-+				j = i
-+				# Pass the bracket expression negation.
-+				if j < end and pattern[j] == '!':
-+					j += 1
-+				# Pass the first closing bracket if it is at the beginning of the
-+				# expression.
-+				if j < end and pattern[j] == ']':
-+					j += 1
-+				# Find the closing bracket. Stop once we reach the end or find it.
-+				while j < end and pattern[j] != ']':
-+					j += 1
-+
-+				if j < end:
-+					# Found the end of the bracket expression. Increment j to be
-+					# one past the closing bracket:
-+					#
-+					#  [...]
-+					#   ^   ^
-+					#   i   j
-+					#
-+					j += 1
-+					expr = '['
-+
-+					if pattern[i] == '!':
-+						# Bracket expression needs to be negated.
-+						expr += '^'
-+						i += 1
-+					elif pattern[i] == '^':
-+						# POSIX declares that the regex bracket expression negation
-+						# "[^...]" is undefined in a glob pattern. Python's
-+						# `fnmatch.translate()` escapes the caret ('^') as a
-+						# literal. To maintain consistency with undefined behavior,
-+						# I am escaping the '^' as well.
-+						expr += '\\^'
-+						i += 1
-+
-+					# Build the regex bracket expression. Escape slashes so they are
-+					# treated as literal slashes by regex as defined by POSIX.
-+					expr += pattern[i:j].replace('\\', '\\\\')
-+
-+					# Add the regex bracket expression to the regex result.
-+					regex += expr
-+
-+					# Set i to one past the closing bracket.
-+					i = j
-+
-+				else:
-+					# Failed to find the closing bracket; treat the opening
-+					# bracket as a literal instead of as an expression.
-+					regex += '\\['
-+
-+			else:
-+				# Regular character, escape it for regex.
-+				regex += re.escape(char)
-+
-+		return regex
-+
-+	@staticmethod
-+	def escape(s):
-+		"""
-+		Escape special characters in the given string.
-+
-+		*s* (:class:`unicode` or :class:`bytes`) is a filename or a string
-+		that you want to escape, usually before adding it to a `.gitignore`.
-+
-+		Returns the escaped string (:class:`unicode` or :class:`bytes`).
-+		"""
-+		# Reference: https://git-scm.com/docs/gitignore#_pattern_format
-+		meta_characters = r"[]!*#?"
-+
-+		return "".join("\\" + x if x in meta_characters else x for x in s)
-+
-+util.register_pattern('gitwildmatch', GitWildMatchPattern)
-+
-+
-+class GitIgnorePattern(GitWildMatchPattern):
-+	"""
-+	The :class:`GitIgnorePattern` class is deprecated by :class:`GitWildMatchPattern`.
-+	This class only exists to maintain compatibility with v0.4.
-+	"""
-+
-+	def __init__(self, *args, **kw):
-+		"""
-+		Warn about deprecation.
-+		"""
-+		self._deprecated()
-+		return super(GitIgnorePattern, self).__init__(*args, **kw)
-+
-+	@staticmethod
-+	def _deprecated():
-+		"""
-+		Warn about deprecation.
-+		"""
-+		warnings.warn("GitIgnorePattern ('gitignore') is deprecated. Use GitWildMatchPattern ('gitwildmatch') instead.", DeprecationWarning, stacklevel=3)
-+
-+	@classmethod
-+	def pattern_to_regex(cls, *args, **kw):
-+		"""
-+		Warn about deprecation.
-+		"""
-+		cls._deprecated()
-+		return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw)
-+
-+# Register `GitIgnorePattern` as "gitignore" for backward compatibility
-+# with v0.4.
-+util.register_pattern('gitignore', GitIgnorePattern)
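
The combined effect of the normalization rules above is easiest to see by inspecting the translated regexes directly. A small sketch; the printed strings follow from the rules in `pattern_to_regex()`:

```python
# Sketch: how gitwildmatch patterns translate into uncompiled regexes.
from pathspec.patterns import GitWildMatchPattern

for pattern in ['*.py', '/*.py', 'dir/', '!foo', '# comment']:
    regex, include = GitWildMatchPattern.pattern_to_regex(pattern)
    print('{!r}: include={!r} regex={!r}'.format(pattern, include, regex))

# '*.py': include=True regex='^(?:.+/)?[^/]*\\.py(?:/.*)?$'
# '/*.py': include=True regex='^[^/]*\\.py(?:/.*)?$'
# 'dir/': include=True regex='^(?:.+/)?dir/.*$'
# '!foo': include=False regex='^(?:.+/)?foo$'
# '# comment': include=None regex=None

# escape() backslash-escapes the gitwildmatch metacharacters:
print(GitWildMatchPattern.escape('file[1].txt'))  # file\[1\].txt
```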
-diff --git a/third_party/python/pathspec/pathspec/util.py b/third_party/python/pathspec/pathspec/util.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pathspec/pathspec/util.py
-@@ -0,0 +1,350 @@
-+# encoding: utf-8
-+"""
-+This module provides utility methods for dealing with path-specs.
-+"""
-+
-+import os
-+import os.path
-+import posixpath
-+import stat
-+
-+from .compat import collection_type, string_types
-+
-+NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
-+"""
-+*NORMALIZE_PATH_SEPS* (:class:`list` of :class:`str`) contains the path
-+separators that need to be normalized to the POSIX separator for the
-+current operating system. The separators are determined by examining
-+:data:`os.sep` and :data:`os.altsep`.
-+"""
-+
-+_registered_patterns = {}
-+"""
-+*_registered_patterns* (``dict``) maps a name (``str``) to the
-+registered pattern factory (``callable``).
-+"""
-+
-+def iter_tree(root, on_error=None, follow_links=None):
-+	"""
-+	Walks the specified directory for all files.
-+
-+	*root* (:class:`str`) is the root directory to search for files.
-+
-+	*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
-+	optionally is the error handler for file-system exceptions. It will be
-+	called with the exception (:exc:`OSError`). Reraise the exception to
-+	abort the walk. Default is :data:`None` to ignore file-system
-+	exceptions.
-+
-+	*follow_links* (:class:`bool` or :data:`None`) optionally is whether
-+	to walk symbolic links that resolve to directories. Default is
-+	:data:`None` for :data:`True`.
-+
-+	Raises :exc:`RecursionError` if recursion is detected.
-+
-+	Returns an :class:`~collections.abc.Iterable` yielding the path to
-+	each file (:class:`str`) relative to *root*.
-+	"""
-+	if on_error is not None and not callable(on_error):
-+		raise TypeError("on_error:{!r} is not callable.".format(on_error))
-+
-+	if follow_links is None:
-+		follow_links = True
-+
-+	for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
-+		yield file_rel
-+
-+def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
-+	"""
-+	Scan the directory for all descendant files.
-+
-+	*root_full* (:class:`str`) the absolute path to the root directory.
-+
-+	*dir_rel* (:class:`str`) the path to the directory to scan relative to
-+	*root_full*.
-+
-+	*memo* (:class:`dict`) keeps track of ancestor directories
-+	encountered. Maps each ancestor real path (:class:`str`) to relative
-+	path (:class:`str`).
-+
-+	*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
-+	optionally is the error handler for file-system exceptions.
-+
-+	*follow_links* (:class:`bool`) is whether to walk symbolic links that
-+	resolve to directories.
-+	"""
-+	dir_full = os.path.join(root_full, dir_rel)
-+	dir_real = os.path.realpath(dir_full)
-+
-+	# Remember each encountered ancestor directory and its canonical
-+	# (real) path. If a canonical path is encountered more than once,
-+	# recursion has occurred.
-+	if dir_real not in memo:
-+		memo[dir_real] = dir_rel
-+	else:
-+		raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
-+
-+	for node in os.listdir(dir_full):
-+		node_rel = os.path.join(dir_rel, node)
-+		node_full = os.path.join(root_full, node_rel)
-+
-+		# Inspect child node.
-+		try:
-+			node_stat = os.lstat(node_full)
-+		except OSError as e:
-+			if on_error is not None:
-+				on_error(e)
-+			continue
-+
-+		if stat.S_ISLNK(node_stat.st_mode):
-+			# Child node is a link, inspect the target node.
-+			is_link = True
-+			try:
-+				node_stat = os.stat(node_full)
-+			except OSError as e:
-+				if on_error is not None:
-+					on_error(e)
-+				continue
-+		else:
-+			is_link = False
-+
-+		if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
-+			# Child node is a directory, recurse into it and yield its
-+			# descendant files.
-+			for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
-+				yield file_rel
-+
-+		elif stat.S_ISREG(node_stat.st_mode):
-+			# Child node is a file, yield it.
-+			yield node_rel
-+
-+	# NOTE: Make sure to remove the canonical (real) path of the directory
-+	# from the ancestors memo once we are done with it. This allows the
-+	# same directory to appear multiple times. If this is not done, the
-+	# second occurrence of the directory will be incorrectly interpreted as
-+	# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
-+	del memo[dir_real]
-+
-+def lookup_pattern(name):
-+	"""
-+	Looks up a registered pattern factory by name.
-+
-+	*name* (:class:`str`) is the name of the pattern factory.
-+
-+	Returns the registered pattern factory (:class:`~collections.abc.Callable`).
-+	If no pattern factory is registered, raises :exc:`KeyError`.
-+	"""
-+	return _registered_patterns[name]
-+
-+def match_file(patterns, file):
-+	"""
-+	Matches the file to the patterns.
-+
-+	*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
-+	contains the patterns to use.
-+
-+	*file* (:class:`str`) is the normalized file path to be matched
-+	against *patterns*.
-+
-+	Returns :data:`True` if *file* matched; otherwise, :data:`False`.
-+	"""
-+	matched = False
-+	for pattern in patterns:
-+		if pattern.include is not None:
-+			if file in pattern.match((file,)):
-+				matched = pattern.include
-+	return matched
-+
-+def match_files(patterns, files):
-+	"""
-+	Matches the files to the patterns.
-+
-+	*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
-+	contains the patterns to use.
-+
-+	*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
-+	the normalized file paths to be matched against *patterns*.
-+
-+	Returns the matched files (:class:`set` of :class:`str`).
-+	"""
-+	all_files = files if isinstance(files, collection_type) else list(files)
-+	return_files = set()
-+	for pattern in patterns:
-+		if pattern.include is not None:
-+			result_files = pattern.match(all_files)
-+			if pattern.include:
-+				return_files.update(result_files)
-+			else:
-+				return_files.difference_update(result_files)
-+	return return_files
-+
-+def normalize_file(file, separators=None):
-+	"""
-+	Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
-+
-+	*file* (:class:`str`) is the file path.
-+
-+	*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
-+	:data:`None`) optionally contains the path separators to normalize.
-+	This does not need to include the POSIX path separator (``'/'``), but
-+	including it will not affect the results. Default is :data:`None` for
-+	:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
-+	container (e.g., an empty tuple ``()``).
-+
-+	Returns the normalized file path (:class:`str`).
-+	"""
-+	# Normalize path separators.
-+	if separators is None:
-+		separators = NORMALIZE_PATH_SEPS
-+	norm_file = file
-+	for sep in separators:
-+		norm_file = norm_file.replace(sep, posixpath.sep)
-+
-+	# Remove current directory prefix.
-+	if norm_file.startswith('./'):
-+		norm_file = norm_file[2:]
-+
-+	return norm_file
-+
-+def normalize_files(files, separators=None):
-+	"""
-+	Normalizes the file paths to use the POSIX path separator.
-+
-+	*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
-+	the file paths to be normalized.
-+
-+	*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
-+	:data:`None`) optionally contains the path separators to normalize.
-+	See :func:`normalize_file` for more information.
-+
-+	Returns a :class:`dict` mapping each normalized file path (:class:`str`)
-+	to the original file path (:class:`str`).
-+	"""
-+	norm_files = {}
-+	for path in files:
-+		norm_files[normalize_file(path, separators=separators)] = path
-+	return norm_files
-+
-+def register_pattern(name, pattern_factory, override=None):
-+	"""
-+	Registers the specified pattern factory.
-+
-+	*name* (:class:`str`) is the name to register the pattern factory
-+	under.
-+
-+	*pattern_factory* (:class:`~collections.abc.Callable`) is used to
-+	compile patterns. It must accept an uncompiled pattern (:class:`str`)
-+	and return the compiled pattern (:class:`.Pattern`).
-+
-+	*override* (:class:`bool` or :data:`None`) optionally is whether to
-+	allow overriding an already registered pattern under the same name
-+	(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
-+	(:data:`False`). Default is :data:`None` for :data:`False`.
-+	"""
-+	if not isinstance(name, string_types):
-+		raise TypeError("name:{!r} is not a string.".format(name))
-+	if not callable(pattern_factory):
-+		raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
-+	if name in _registered_patterns and not override:
-+		raise AlreadyRegisteredError(name, _registered_patterns[name])
-+	_registered_patterns[name] = pattern_factory
-+
-+
-+class AlreadyRegisteredError(Exception):
-+	"""
-+	The :exc:`AlreadyRegisteredError` exception is raised when a pattern
-+	factory is registered under a name already in use.
-+	"""
-+
-+	def __init__(self, name, pattern_factory):
-+		"""
-+		Initializes the :exc:`AlreadyRegisteredError` instance.
-+
-+		*name* (:class:`str`) is the name of the registered pattern.
-+
-+		*pattern_factory* (:class:`~collections.abc.Callable`) is the
-+		registered pattern factory.
-+		"""
-+		super(AlreadyRegisteredError, self).__init__(name, pattern_factory)
-+
-+	@property
-+	def message(self):
-+		"""
-+		*message* (:class:`str`) is the error message.
-+		"""
-+		return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
-+			name=self.name,
-+			pattern_factory=self.pattern_factory,
-+		)
-+
-+	@property
-+	def name(self):
-+		"""
-+		*name* (:class:`str`) is the name of the registered pattern.
-+		"""
-+		return self.args[0]
-+
-+	@property
-+	def pattern_factory(self):
-+		"""
-+		*pattern_factory* (:class:`~collections.abc.Callable`) is the
-+		registered pattern factory.
-+		"""
-+		return self.args[1]
-+
-+
-+class RecursionError(Exception):
-+	"""
-+	The :exc:`RecursionError` exception is raised when recursion is
-+	detected.
-+	"""
-+
-+	def __init__(self, real_path, first_path, second_path):
-+		"""
-+		Initializes the :exc:`RecursionError` instance.
-+
-+		*real_path* (:class:`str`) is the real path that recursion was
-+		encountered on.
-+
-+		*first_path* (:class:`str`) is the first path encountered for
-+		*real_path*.
-+
-+		*second_path* (:class:`str`) is the second path encountered for
-+		*real_path*.
-+		"""
-+		super(RecursionError, self).__init__(real_path, first_path, second_path)
-+
-+	@property
-+	def first_path(self):
-+		"""
-+		*first_path* (:class:`str`) is the first path encountered for
-+		:attr:`self.real_path <RecursionError.real_path>`.
-+		"""
-+		return self.args[1]
-+
-+	@property
-+	def message(self):
-+		"""
-+		*message* (:class:`str`) is the error message.
-+		"""
-+		return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
-+			real=self.real_path,
-+			first=self.first_path,
-+			second=self.second_path,
-+		)
-+
-+	@property
-+	def real_path(self):
-+		"""
-+		*real_path* (:class:`str`) is the real path that recursion was
-+		encountered on.
-+		"""
-+		return self.args[0]
-+
-+	@property
-+	def second_path(self):
-+		"""
-+		*second_path* (:class:`str`) is the second path encountered for
-+		:attr:`self.real_path <RecursionError.real_path>`.
-+		"""
-+		return self.args[2]
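
Two details above deserve emphasis: in `match_file()` the *last* matching pattern wins (a later negation overrides an earlier include), and `register_pattern()` is what makes string factory names usable with `PathSpec.from_lines()`. A short sketch of both; the 'myregex' name is invented for illustration:

```python
# Sketch: last-match-wins in match_file(), and registering a factory name.
from pathspec import util
from pathspec.pattern import RegexPattern
from pathspec.patterns import GitWildMatchPattern

patterns = [
    GitWildMatchPattern('*.log'),      # include all logs...
    GitWildMatchPattern('!keep.log'),  # ...then carve out an exception
]
print(util.match_file(patterns, 'app.log'))   # True
print(util.match_file(patterns, 'keep.log'))  # False: last match wins

# 'myregex' is a made-up name; registering it makes
# PathSpec.from_lines('myregex', ...) resolve to RegexPattern.
util.register_pattern('myregex', RegexPattern)
assert util.lookup_pattern('myregex') is RegexPattern
```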
-diff --git a/third_party/python/pep487/pep487/__init__.py b/third_party/python/pep487/pep487/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pep487/pep487/__init__.py
-@@ -0,0 +1,81 @@
-+# -*- coding: utf-8 -*-
-+#
-+# Copyright (C) 2017 by Gregor Giesen
-+#
-+# This is a backport of PEP487's simpler customisation of class
-+# creation by Martin Teichmann <https://www.python.org/dev/peps/pep-0487/>
-+# for Python versions before 3.6.
-+#
-+# PEP487 is free software: you can redistribute it and/or modify it
-+# under the terms of the GNU General Public License as published
-+# by the Free Software Foundation, either version 3 of the License,
-+# or (at your option) any later version.
-+#
-+# PEP487 is distributed in the hope that it will be useful, but
-+# WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with PEP487. If not, see <http://www.gnu.org/licenses/>.
-+#
-+"""pep487.py: Simpler customisation of class creation"""
-+
-+import abc
-+import sys
-+import types
-+
-+__all__ = ('PEP487Meta', 'PEP487Object', 'ABCMeta', 'ABC')
-+
-+HAS_PY36 = sys.version_info >= (3, 6)
-+HAS_PEP487 = HAS_PY36
-+
-+if HAS_PEP487:
-+    PEP487Meta = type         # pragma: no cover
-+    ABCMeta = abc.ABCMeta     # pragma: no cover
-+    ABC = abc.ABC             # pragma: no cover
-+    PEP487Base = object       # pragma: no cover
-+    PEP487Object = object     # pragma: no cover
-+else:
-+    class PEP487Meta(type):
-+        def __new__(mcls, name, bases, ns, **kwargs):
-+            init = ns.get('__init_subclass__')
-+            if isinstance(init, types.FunctionType):
-+                ns['__init_subclass__'] = classmethod(init)
-+            cls = super().__new__(mcls, name, bases, ns)
-+            for key, value in cls.__dict__.items():
-+                func = getattr(value, '__set_name__', None)
-+                if func is not None:
-+                    func(cls, key)
-+            super(cls, cls).__init_subclass__(**kwargs)
-+            return cls
-+
-+        def __init__(cls, name, bases, ns, **kwargs):
-+            super().__init__(name, bases, ns)
-+
-+    class ABCMeta(abc.ABCMeta):
-+        def __new__(mcls, name, bases, ns, **kwargs):
-+            init = ns.get('__init_subclass__')
-+            if isinstance(init, types.FunctionType):
-+                ns['__init_subclass__'] = classmethod(init)
-+            cls = super().__new__(mcls, name, bases, ns)
-+            for key, value in cls.__dict__.items():
-+                func = getattr(value, '__set_name__', None)
-+                if func is not None:
-+                    func(cls, key)
-+            super(cls, cls).__init_subclass__(**kwargs)
-+            return cls
-+
-+        def __init__(cls, name, bases, ns, **kwargs):
-+            super().__init__(name, bases, ns)
-+
-+    class PEP487Base:
-+        @classmethod
-+        def __init_subclass__(cls, **kwargs):
-+            pass
-+
-+    class PEP487Object(PEP487Base, metaclass=PEP487Meta):
-+        pass
-+
-+    class ABC(PEP487Base, metaclass=ABCMeta):
-+        pass
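
On Python 3.6 and later these names simply alias the built-in machinery; on older Python 3 interpreters the metaclass reproduces both PEP 487 hooks (`__init_subclass__` and `__set_name__`). A minimal sketch of the classic subclass-registration idiom written against this backport; the plugin classes are made-up examples:

```python
# Minimal sketch of PEP 487's subclass-registration idiom using the
# backported base class; Plugin/CsvPlugin/JsonPlugin are made-up names.
from pep487 import PEP487Object

class Plugin(PEP487Object):
    registry = []

    def __init_subclass__(cls, **kwargs):
        # The backport wraps this into a classmethod automatically.
        super().__init_subclass__(**kwargs)
        cls.registry.append(cls)

class CsvPlugin(Plugin):
    pass

class JsonPlugin(Plugin):
    pass

print(Plugin.registry)
# [<class '__main__.CsvPlugin'>, <class '__main__.JsonPlugin'>]
```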
-diff --git a/third_party/python/pep487/pep487/version.py b/third_party/python/pep487/pep487/version.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pep487/pep487/version.py
-@@ -0,0 +1,27 @@
-+# -*- coding: utf-8 -*-
-+#
-+# Copyright (C) 2017 by Gregor Giesen
-+#
-+# This file is part of PEP487.
-+#
-+# PEP487 is free software: you can redistribute it and/or modify it
-+# under the terms of the GNU General Public License as published
-+# by the Free Software Foundation, either version 3 of the License,
-+# or (at your option) any later version.
-+#
-+# PEP487 is distributed in the hope that it will be useful, but
-+# WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with PEP487. If not, see <http://www.gnu.org/licenses/>.
-+#
-+"""pep487/version.py: version info"""
-+
-+__author__ = "Gregor Giesen"
-+__copyright__ = "Copyright 2017, Gregor Giesen"
-+__credits__ = ["Martin Teichmann", "Gregor Giesen"]
-+__license__ = "GPLv3"
-+__version__ = "1.0.1"
-+__maintainer__ = "Gregor Giesen"
-diff --git a/third_party/python/pyrsistent/CHANGES.txt b/third_party/python/pyrsistent/CHANGES.txt
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/CHANGES.txt
-@@ -0,0 +1,327 @@
-+Revision history
-+----------------
-+0.15.7, 2020-01-07
-+ * NOTE! This is the last version of Pyrsistent that officially supports Python 2.X!
-+ * Fix #186, type errors with more recent versions of MyPy. Thanks @qhesz for this!
-+ * Build and test on ARM during CI. Thanks @ossdev07 for this!
-+ * Set absolute imports for python2 compatibility. Thanks @michalvi for this!
-+
-+0.15.6, 2019-11-23
-+ * Fix #182 moduleinit name clash.
-+
-+0.15.5, 2019-10-27
-+ * Fix #179 Fixed 'ignore_extra' factory parameter for pvector. Thanks @ss18 for this!
-+
-+0.15.4, 2019-07-27
-+ * Fix #174, fix a GC traversal bug in pvector evolver C extension. Thanks @till-varoquaux for finding and fixing this!
-+ * Fix #175, pytest 5 compatibility, this is a quick fix, some more work is needed to get coverage working etc.
-+
-+0.15.3, 2019-07-07
-+ * Fix #172, catch all exceptions during extension build to reduce the chance of corner cases that prevent installation.
-+ * Fix #171, in PVector equality comparison don't assume that the other object has a length; check before calling len.
-+ * Fix #168, write warning about failing build of C extension directly to stderr to avoid that pip silences it.
-+ * Fix #155, update PMapEvolver type stub to better reflect implementation.
-+
-+0.15.2, 2019-05-12
-+ * Fix #166, Propagate 'ignore_extra' param in hierarchy. Thanks @ss18 for this!
-+ * Fix #167, thaw typing. Thanks @nattofriends for this!
-+ * Fix #154, not possible to insert empty pmap as leaf node with transform.
-+
-+0.15.1, 2019-04-26
-+ * Fix #163 installation broken on Python 2 because of fix of #161, thanks @vphilippon for this! Sorry for the
-+   inconvenience.
-+
-+0.15.0, 2019-04-25
-+ * Python 3.4 is no longer officially supported since it reached EOL on 2019-03-18.
-+ * Fix #157, major improvements to type hints. Thanks @je-l for working on this and @nattofriend for reviewing the PR!
-+ * Fix #161, installation fails on some Windows platforms because fallback to Python pvector does not work.
-+   Thanks @MaxTaggart for fixing and verifying this!
-+
-+0.14.11, 2019-02-21
-+ * Fix #152 Don't use __builtin_popcount, this hopefully fixes #147 Error in pvectorc.cp37-win_amd64.pyd file, as well.
-+   Thanks @benrg for this!
-+ * Fix #151 Fix compatibility for hypothesis 4. Thanks @felixonmars for this!
-+
-+0.14.10, 2019-02-09
-+ * Fix #148, only require pytest-runner if running tests. Thanks @ccorbacho for this!
-+
-+0.14.9, 2019-01-06
-+ * Fix #144, Compile pvectormodule.c on windows. Thanks @ganwell for this!
-+
-+0.14.8, 2018-12-19
-+ * Fix #142, Improve type stubs. Thanks @arxanas for this!
-+
-+0.14.7, 2018-11-20
-+ * Fix #102, add PEP 561 type annotation stubs for most pyrsistent types. Thanks @nattofriends for this!
-+
-+0.14.6, 2018-11-17
-+ * Fix #135, Type classes for Python 3 type annotations of pyrsistent types. Thanks @nattofriends for this!
-+ * Fix #128, Allow PClass and PRecord to ignore input parameters to constructor that are not part of the spec
-+   instead of blowing up with a type error. Thanks @agberk for this!
-+
-+0.14.5, 2018-10-14
-+ * Fix #137, deprecation warnings in Python 3.7. Thanks @thombashi for this!
-+ * Fix #129, building via setuptools and setup.py. Thanks @galuszkak for this!
-+
-+0.14.4, 2018-07-08
-+ * Fix #133, minor Python 3.7 compatibility issue. Pyrsistent is now officially Python 3.7 compliant!
-+
-+v0.14.3, 2018-06-11
-+ * Fix #123 regression where type names break sequence fields. Thanks @doozr for this!
-+ * Fix #124 using the class name to make AttributeError on __getattr__ more informative for PRecords.
-+   Thanks @neilvyas for this!
-+ * Fix #125 how fields handle type arguments. Thanks @neilvyas for this!
-+
-+v0.14.2, 2017-12-06
-+ * Fix #121, regression in PClass.set() introduced in 0.14.1.
-+
-+v0.14.1, 2017-11-27
-+ * Equality check performance improvements for pvectors and pmaps. Thanks @dtomas for this!
-+ * Avoid calling factories multiple times for fields that do not change, see PR #120 for
-+   details. Thanks @teepark for this!
-+
-+v0.14.0, 2017-10-08
-+ * Fix #117, pmap now accepts iterators as input to constructor. Thanks @Julian for this!
-+ * Drop support for Python 2.6. Nothing has been done in this release that will explicitly
-+   break pyrsistent for 2.6 but it will not be considered moving forward.  Dropping 2.6
-+   support is the reason for stepping the second decimal instead of the third.
-+
-+v0.13.0, 2017-09-01
-+ * Fix #113, Skip field factories when loading pickled objects. There is a
-+   minor backwards incompatibility in the behaviour because of this. Thanks
-+   @teepark for fixing this!
-+ * Fix #116, negative indexing for pdeques. Thanks @Julian for this!
-+
-+v0.12.3, 2017-06-04
-+ * Fix #83, make it possible to use Python 3 enums as field type without having to wrap it in
-+   a list or tuple. Thanks @douglas-treadwell for this!
-+
-+v0.12.2, 2017-05-30
-+ * Fix #108, now possible to use the values in predicates to transform. Thanks @exarkus for this!
-+ * Fix #107, support multiple level of __invariant__ inheritance. Thanks @exarkus for this!
-+
-+v0.12.1, 2017-02-26
-+ * Fix #97, initialize CheckedPVector from iterator.
-+ * Fix #97, cache hash value on PMap. Thanks @sarum90 for this!
-+
-+v0.12.0, 2017-01-06
-+ * Fix #87, add function get_in() for access to elements in deeply nested structures.
-+ * Fix #91, add method update() to pset and pbag.
-+ * Fix #92, incorrect discard of elements in transform on pvector
-+ * This is a release candidate for 1.0 as I now consider pyrsistent fairly stable.
-+
-+v0.11.13, 2016-04-03
-+ * Fix #84, pvector segfault in CPython 3 when repr of contained object raises Exception.
-+ * Update README to cover for issue described in #83.
-+
-+v0.11.12, 2016-02-06
-+ * Minor modifications of tests to allow testing as requested in #79 and #80.
-+ * Also run CI tests under python 3.5
-+
-+v0.11.11, 2016-01-31
-+ * #78, include tests in pypi dist.
-+
-+v0.11.10, 2015-12-27, NOTE! This release contains a backwards incompatible change
-+                      despite only stepping the patch version number. See below.
-+ * Implement #74, attribute access on PClass evolver
-+ * Implement #75, lazily evaluated invariant messages by providing a
-+   callable with no arguments.
-+ * Initial values on fields can now be evaluated on object creation
-+   by providing a callable with no arguments.
-+
-+   NOTE! If you previously had callables as initial values this change means that those
-+         will be called upon object creation which may not be what you want. As
-+         a temporary workaround a callable returning a callable can be used. This
-+         feature and the concept of initial values will likely change slightly in the future.
-+         See #77 and #76 for more information.
-+
-+v0.11.9, 2015-11-01
-+ * Added PVector.remove(), thanks @radix for initiating this!
-+
-+v0.11.8, 2015-10-18
-+ * Fix #66, UnicodeDecodeError when doing pip install in environments with ascii encoding as default.
-+   Thanks @foolswood!
-+ * Implement support for multiple types in pmap_field(), pvector_field() and pset_field(). Thanks @itamarst!
-+
-+v0.11.7, 2015-10-03
-+ * Fix #52, occasional SEGFAULTs due to misplaced call to PyObject_GC_Track. Thanks @jkbjh for this!
-+ * Fix #42, complete support for delete. Now also on the C-implementation of the PVectorEvolver.
-+   Thanks @itamarst for contributing a whole bunch of Hypothesis test cases covering the evolver operations!
-+
-+v0.11.6, 2015-09-30
-+ * Add +, -, & and | operations to PBag. Thanks @Futrell for this!
-+
-+v0.11.5, 2015-09-29
-+ * Fix bug introduced in 0.11.4 that prevented multi level inheritance from PClass.
-+ * Make PClassMeta public for friendlier subclassing
-+
-+v0.11.4, 2015-09-28
-+ * Fix #59, make it possible to create weakrefs to all collection types.
-+   Thanks @itamarst for reporting it.
-+ * Fix #58, add __str__ to InvariantException. Thanks @tomprince for reporting it.
-+
-+v0.11.3, 2015-09-15
-+  * Fix #57, support pickling of PClasses and PRecords using pmap_field, pvector_field, and pset_field.
-+    Thanks @radix for reporting this and submitting a fix for it!
-+
-+v0.11.2, 2015-09-09
-+  * Fix bug causing potential element loss when reallocating PMap. Thanks to @jml for finding
-+    this and submitting a PR with a fix!
-+  * Removed python 3.2 test build from Travis. There is nothing breaking 3.2 compatibility in this
-+    release but there will be no effort moving forward to keep the 3.2 compatibility.
-+
-+v0.11.1, 2015-08-24
-+  * Fix #51, PClass.set() broken when used with string+value argument.
-+  * #50, make it possible to specify more than one assertion in an invariant
-+  * #48, make it possible to make recursive type references by using a string
-+    as type specification.
-+
-+v0.11.0, 2015-07-11
-+  * #42, delete() function added to PVector to allow deletion of elements by index
-+    and range. Will perform a full copy of the vector, no structural sharing.
-+    Thanks @radix for helping out with this one!
-+  * Fix #39, explicitly disallow ordering for PMap and PBag, Python 3 style
-+  * Fix #37, PMap.values()/keys()/items() now returns PVectors instead of lists
-+
-+v0.10.3, 2015-06-13
-+  * Fix #40, make it possible to disable the C extension by setting the
-+    PYRSISTENT_NO_C_EXTENSION environment variable.
-+
-+v0.10.2, 2015-06-07
-+  * Fix #38, construction from serialized object for pvector/pset/pmap fields.
-+
-+v0.10.1, 2015-04-27
-+  * Fix broken README.rst
-+
-+v0.10.0, 2015-04-27
-+  * New type PClass, a persistent version of a Python object. Related to issues #30 and #32.
-+    Thanks @exarkun and @radix for input on this one!
-+  * Rename PRecordTypeError -> PTypeError, it is now also raised by PClass
-+  * New convenience functions, pvector_field, pmap_field and pset_field to create PRecord/PClass
-+    fields for checked collections. Issues #26 and #36. Thanks to @itamarst for this!
-+  * Removed deprecated function set_in() on PMap and PVector.
-+  * Removed deprecated factory function pclass.
-+  * Major internal restructuring breaking pyrsistent.py into multiple files. This should
-+    not affect those only using the public interface but if you experience problems please
-+    let me know.
-+
-+v0.9.4, 2015-04-20
-+  * Fix #34, PVector now compares against built in list type
-+
-+v0.9.3, 2015-04-06
-+  * Rename pclass back to immutable and deprecate the usage of the pclass function. PClass will be used by
-+    a new, different type in upcoming releases.
-+  * Documentation strings for the exceptions introduced in 0.9.2.
-+
-+v0.9.2, 2015-04-03
-+  * More informative type errors from checked types, issue #30
-+  * Support multiple optional types, issue #28
-+
-+v0.9.1, 2015-02-25
-+  * Multi level serialization for checked types
-+
-+v0.9.0, 2015-02-25, Lots of new stuff in this release!
-+  * Checked types, checked versions of PVector, PMap, PSet that support type and invariant specification.
-+    Currently lacking proper documentation but I'm working on it.
-+  * set_in() on PVector and PMap are now deprecated and will be removed in the next release.
-+    Use transform() instead. set_in() has been updated to use transform() for this release
-+    this means that some corner error cases behave slightly different than before.
-+  * Refactoring of the PVector to unify the type. Should not have any user impact as long as
-+    only the public interface of pyrsistent has been used. PVector is now an abstract base class
-+    with which the different implementations are registered.
-+  * Evolvers have been updated to return themselves for evolving operations to allow function chaining.
-+  * Richer exception messages for KeyErrors and IndexErrors specifying the key/index that caused the failure.
-+    Thanks @radix for this.
-+  * Missing attribute on PMaps when accessing with dot-notation now raises an AttributeError instead of a
-+    KeyError. Issue #21.
-+  * New function decorator @mutant that freezes all input arguments to a function and the return value.
-+  * Add __version__ to pyrsistent.py. Issue #23.
-+  * Fix pickling for pset. Issue #24.
-+
-+v0.8.0, 2015-01-21
-+  * New type PRecord. Subtype of PMap that allows explicit, declarative field specification. Thanks @boxed
-+    for inspiration!
-+  * Efficient transformations of arbitrary complexity on PMap and PVector. Thanks @boxed for inspiration!
-+  * Breaking change to the evolver interface. What used to be .pvector(), .pmap() and .pset()
-+    on the different evolvers has now been unified so that all evolvers have one method .persistent()
-+    to produce the persistent counterpart. Sorry for any inconvenience.
-+  * Removed the tests directory from the package.
-+  * PMap and PSet now contains a copy-function to closer mimic the interface of the dict and set. These
-+    functions will simply return a reference to self.
-+  * Removed deprecated alias 'immutable' from pclass.
-+
-+v0.7.1, 2015-01-17
-+  * Fixes #14 where a file executed (unexpectedly) during installation was not python 3 compatible.
-+
-+v0.7.0, 2015-01-04, No 1.0, instead a bunch of new stuff and one API breaking change to PMap.remove().
-+  * Evolvers for pvector, pmap and pset to allow simple and efficient updates of multiple elements
-+    in the collection. See the documentation for a closer description.
-+  * New method mset on pvector to update multiple values in one operation
-+  * Remove deprecated methods merge and merge_with on PMap
-+  * Change behavior of PMap.remove, it will now raise a KeyError if the element is not present.
-+    New method PMap.discard will instead return the original pmap if the element is not present.
-+    This aligns the PMap with how things are done in the PSet and is closer to the behavior of the
-+    built in counterparts.
-+
-+v0.6.3, 2014-11-27
-+  * Python 2.6 support, thanks @wrmsr!
-+  * PMap.merge/merge_with renamed to update/update_with. merge/merge_with remains but will be
-+    removed for 1.0.
-+  * This is a release candidate for 1.0! Please be aware that PMap.merge/merge_with and immutable()
-+    will be removed for 1.0.
-+
-+v0.6.2, 2014-11-03
-+  * Fix typo causing the pure python vector to be used even if the C implementation was
-+    available. Thanks @zerc for finding it!
-+
-+v0.6.1, 2014-10-31
-+  * Renamed 'immutable' to 'pclass' for consistency but left immutable for compatibility.
-+
-+v0.6.0, 2014-10-25
-+  * New data structure, persistent linked list
-+  * New data structure, persistent double ended queue
-+
-+v0.5.0, 2014-09-24
-+  * New data structure, persistent bag / multiset
-+  * New functions freeze and thaw to recursively convert between python
-+    built in data types and corresponding pyrsistent data types.
-+  * All data structures can now be pickled
-+  * New function merge_in on persistent map which allows a user
-+    supplied function to implement the merge strategy.
-+
-+v0.4.0, 2014-09-20
-+  * Full Python 3 support.
-+  * Immutable object implemented.
-+  * Bug fixes in PVector.__repr__() and PMap.__hash__() and index check of PVector.
-+  * Repr changed to be fully cut and paste compatible
-+  * Changed assoc() -> set(), assoc_in() -> set_in(), massoc() -> mset().
-+    Sorry for the API breaking change but I think those names are more pythonic.
-+  * Improved documentation.
-+
-+v0.3.1, 2014-06-29
-+  * assoc() on PSet renamed back to add()
-+
-+v0.3.0, 2014-06-28
-+  * Full Sequence protocol support for PVector
-+  * Full Mapping protocol support for PMap
-+  * Full Set protocol support for PSet
-+  * assoc_in() support for both PMap and PVector
-+  * merge() support for PMap
-+  * Performance improvements to the PVector C extension speed up allocation
-+
-+v0.2.1, 2014-06-21
-+  * Supply the tests with the distribution
-+
-+v0.2.0, 2014-06-21
-+  * New C extension with an optimized version of the persistent vector
-+  * Updated API slightly
-+
-+v0.1.0, 2013-11-10
-+  * Initial release.
-+
-+
-+TODO (in no particular order)
-+-----------------------------
-+- Versioned data structure where the different versions can be accessed by index?
-+- Ordered sets and maps
-+- A good performance measurement suite
-diff --git a/third_party/python/pyrsistent/LICENCE.mit b/third_party/python/pyrsistent/LICENCE.mit
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/LICENCE.mit
-@@ -0,0 +1,22 @@
-+Copyright (c) 2019 Tobias Gustafsson
-+
-+Permission is hereby granted, free of charge, to any person
-+obtaining a copy of this software and associated documentation
-+files (the "Software"), to deal in the Software without
-+restriction, including without limitation the rights to use,
-+copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the
-+Software is furnished to do so, subject to the following
-+conditions:
-+
-+The above copyright notice and this permission notice shall be
-+included in all copies or substantial portions of the Software.
-+
-+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+OTHER DEALINGS IN THE SOFTWARE.
-\ No newline at end of file
-diff --git a/third_party/python/pyrsistent/MANIFEST.in b/third_party/python/pyrsistent/MANIFEST.in
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/MANIFEST.in
-@@ -0,0 +1,5 @@
-+include *.rst
-+include tests/*_test.py
-+include tests/memory_profiling.py
-+include CHANGES.txt
-+include LICENCE.mit
-\ No newline at end of file
-diff --git a/third_party/python/pyrsistent/PKG-INFO b/third_party/python/pyrsistent/PKG-INFO
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/PKG-INFO
-@@ -0,0 +1,741 @@
-+Metadata-Version: 1.1
-+Name: pyrsistent
-+Version: 0.15.7
-+Summary: Persistent/Functional/Immutable data structures
-+Home-page: http://github.com/tobgu/pyrsistent/
-+Author: Tobias Gustafsson
-+Author-email: tobias.l.gustafsson@gmail.com
-+License: MIT
-+Description: Pyrsistent
-+        ==========
-+        .. image:: https://travis-ci.org/tobgu/pyrsistent.png?branch=master
-+            :target: https://travis-ci.org/tobgu/pyrsistent
-+        
-+        .. image:: https://badge.fury.io/py/pyrsistent.svg
-+            :target: https://badge.fury.io/py/pyrsistent
-+        
-+        .. image:: https://coveralls.io/repos/tobgu/pyrsistent/badge.svg?branch=master&service=github
-+            :target: https://coveralls.io/github/tobgu/pyrsistent?branch=master
-+        
-+        
-+        .. _Pyrthon: https://www.github.com/tobgu/pyrthon/
-+        
-+        Pyrsistent is a number of persistent collections (by some referred to as functional data structures), persistent in
-+        the sense that they are immutable.
-+        
-+        All methods on a data structure that would normally mutate it instead return a new copy of the structure containing the
-+        requested updates. The original structure is left untouched.
-+        
-+        This will simplify the reasoning about what a program does since no hidden side effects ever can take place to these
-+        data structures. You can rest assured that the object you hold a reference to will remain the same throughout its
-+        lifetime and need not worry that somewhere five stack levels below you in the darkest corner of your application
-+        someone has decided to remove that element that you expected to be there.
-+        
-+        Pyrsistent is influenced by persistent data structures such as those found in the standard library of Clojure. The
-+        data structures are designed to share common elements through path copying.
-+        It aims at taking these concepts and making them as pythonic as possible so that they can be easily integrated into any python
-+        program without hassle.
-+        
-+        If you want to go all in on persistent data structures and use literal syntax to define them in your code rather
-+        than function calls check out Pyrthon_.
-+        
-+        Examples
-+        --------
-+        .. _Sequence: collections_
-+        .. _Hashable: collections_
-+        .. _Mapping: collections_
-+        .. _Mappings: collections_
-+        .. _Set: collections_
-+        .. _collections: https://docs.python.org/3/library/collections.abc.html
-+        .. _documentation: http://pyrsistent.readthedocs.org/
-+        
-+        The collection types and key features currently implemented are:
-+        
-+        * PVector_, similar to a python list
-+        * PMap_, similar to dict
-+        * PSet_, similar to set
-+        * PRecord_, a PMap on steroids with fixed fields, optional type and invariant checking and much more
-+        * PClass_, a Python class with fixed fields, optional type and invariant checking and much more
-+        * `Checked collections`_, PVector, PMap and PSet with optional type and invariance checks and more
-+        * PBag, similar to collections.Counter
-+        * PList, a classic singly linked list
-+        * PDeque, similar to collections.deque
-+        * Immutable object type (immutable) built on the named tuple
-+        * freeze_ and thaw_ functions to convert between python's standard collections and pyrsistent collections.
-+        * Flexible transformations_ of arbitrarily complex structures built from PMaps and PVectors.
-+        
-+        Below are examples of common usage patterns for some of the structures and features. More information and
-+        full documentation for all data structures is available in the documentation_.
-+        
-+        .. _PVector:
-+        
-+        PVector
-+        ~~~~~~~
-+        With full support for the Sequence_ protocol, PVector is meant as a drop-in replacement for the built-in list from a reader's
-+        point of view. Write operations of course differ since no in-place mutation is done, but naming should be in line
-+        with corresponding operations on the built-in list.
-+        
-+        Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
-+        
-+        Appends are amortized O(1). Random access and insert is log32(n) where n is the size of the vector.
-+        
-+        .. code:: python
-+        
-+            >>> from pyrsistent import v, pvector
-+        
-+            # No mutation of vectors once created, instead they
-+            # are "evolved" leaving the original untouched
-+            >>> v1 = v(1, 2, 3)
-+            >>> v2 = v1.append(4)
-+            >>> v3 = v2.set(1, 5)
-+            >>> v1
-+            pvector([1, 2, 3])
-+            >>> v2
-+            pvector([1, 2, 3, 4])
-+            >>> v3
-+            pvector([1, 5, 3, 4])
-+        
-+            # Random access and slicing
-+            >>> v3[1]
-+            5
-+            >>> v3[1:3]
-+            pvector([5, 3])
-+        
-+            # Iteration
-+            >>> list(x + 1 for x in v3)
-+            [2, 6, 4, 5]
-+            >>> pvector(2 * x for x in range(3))
-+            pvector([0, 2, 4])
-+        
-+        .. _PMap:
-+        
-+        PMap
-+        ~~~~
-+        With full support for the Mapping_ protocol, PMap is meant as a drop-in replacement for the built-in dict from a
-+        reader's point of view. Support for the Hashable_ protocol also means that it can be used as a key in other Mappings_.
-+        
-+        Random access and insert are O(log32(n)), where n is the size of the map.
-+        
-+        .. code:: python
-+        
-+            >>> from pyrsistent import m, pmap, v
-+        
-+            # No mutation of maps once created, instead they are
-+            # "evolved" leaving the original untouched
-+            >>> m1 = m(a=1, b=2)
-+            >>> m2 = m1.set('c', 3)
-+            >>> m3 = m2.set('a', 5)
-+            >>> m1
-+            pmap({'a': 1, 'b': 2})
-+            >>> m2
-+            pmap({'a': 1, 'c': 3, 'b': 2})
-+            >>> m3
-+            pmap({'a': 5, 'c': 3, 'b': 2})
-+            >>> m3['a']
-+            5
-+        
-+            # Evolution of nested persistent structures
-+            >>> m4 = m(a=5, b=6, c=v(1, 2))
-+            >>> m4.transform(('c', 1), 17)
-+            pmap({'a': 5, 'c': pvector([1, 17]), 'b': 6})
-+            >>> m5 = m(a=1, b=2)
-+        
-+            # Evolve by merging with other mappings
-+            >>> m5.update(m(a=2, c=3), {'a': 17, 'd': 35})
-+            pmap({'a': 17, 'c': 3, 'b': 2, 'd': 35})
-+            >>> pmap({'x': 1, 'y': 2}) + pmap({'y': 3, 'z': 4})
-+            pmap({'y': 3, 'x': 1, 'z': 4})
-+        
-+            # Dict-like methods to convert to list and iterate
-+            >>> m3.items()
-+            pvector([('a', 5), ('c', 3), ('b', 2)])
-+            >>> list(m3)
-+            ['a', 'c', 'b']
-+        
-+        .. _PSet:
-+        
-+        PSet
-+        ~~~~
-+        With full support for the Set_ protocol, PSet is meant as a drop-in replacement for the built-in set from a reader's
-+        point of view. Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
-+        
-+        Random access and insert are O(log32(n)), where n is the size of the set.
-+        
-+        .. code:: python
-+        
-+            >>> from pyrsistent import s
-+        
-+            # No mutation of sets once created, you know the story...
-+            >>> s1 = s(1, 2, 3, 2)
-+            >>> s2 = s1.add(4)
-+            >>> s3 = s1.remove(1)
-+            >>> s1
-+            pset([1, 2, 3])
-+            >>> s2
-+            pset([1, 2, 3, 4])
-+            >>> s3
-+            pset([2, 3])
-+        
-+            # Full support for set operations
-+            >>> s1 | s(3, 4, 5)
-+            pset([1, 2, 3, 4, 5])
-+            >>> s1 & s(3, 4, 5)
-+            pset([3])
-+            >>> s1 < s2
-+            True
-+            >>> s1 < s(3, 4, 5)
-+            False
-+        
-+        .. _PRecord:
-+        
-+        PRecord
-+        ~~~~~~~
-+        A PRecord is a PMap with a fixed set of specified fields. Records are declared as Python classes inheriting
-+        from PRecord. Because it is a PMap, it has full support for all Mapping methods such as iteration and element
-+        access using subscript notation.
-+        
-+        .. code:: python
-+        
-+            >>> from pyrsistent import PRecord, field
-+            >>> class ARecord(PRecord):
-+            ...     x = field()
-+            ...
-+            >>> r = ARecord(x=3)
-+            >>> r
-+            ARecord(x=3)
-+            >>> r.x
-+            3
-+            >>> r.set(x=2)
-+            ARecord(x=2)
-+            >>> r.set(y=2)
-+            Traceback (most recent call last):
-+            AttributeError: 'y' is not among the specified fields for ARecord
-+        
-+        Type information
-+        ****************
-+        It is possible to add type information to the record to enforce type checks. Multiple allowed types can be specified
-+        by providing an iterable of types.
-+        
-+        .. code:: python
-+        
-+            >>> class BRecord(PRecord):
-+            ...     x = field(type=int)
-+            ...     y = field(type=(int, type(None)))
-+            ...
-+            >>> BRecord(x=3, y=None)
-+            BRecord(y=None, x=3)
-+            >>> BRecord(x=3.0)
-+            Traceback (most recent call last):
-+            PTypeError: Invalid type for field BRecord.x, was float
-+        
-+        
-+        Custom types (classes) that are iterable should be wrapped in a tuple to prevent their
-+        members from being added to the set of valid types. Enums in particular are now
-+        supported without wrapping; see #83 for more information.
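-+
-+        As a minimal sketch of the tuple wrapping (an Enum class is itself iterable; current versions also accept a bare
-+        Enum as noted above, and the printed output is illustrative):
-+
-+        .. code:: python
-+
-+            >>> from enum import Enum
-+            >>> from pyrsistent import PRecord, field
-+            >>> class Color(Enum):
-+            ...     RED = 1
-+            ...     GREEN = 2
-+            ...
-+            >>> class GRecord(PRecord):
-+            ...     color = field(type=(Color,))  # tuple-wrapped: Color itself is the valid type
-+            ...
-+            >>> GRecord(color=Color.RED)
-+            GRecord(color=<Color.RED: 1>)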
-+        
-+        Mandatory fields
-+        ****************
-+        Fields are not mandatory by default but can be specified as such. If fields are missing an
-+        *InvariantException* will be thrown which contains information about the missing fields.
-+        
-+        .. code:: python
-+        
-+            >>> from pyrsistent import InvariantException
-+            >>> class CRecord(PRecord):
-+            ...     x = field(mandatory=True)
-+            ...
-+            >>> r = CRecord(x=3)
-+            >>> try:
-+            ...    r.discard('x')
-+            ... except InvariantException as e:
-+            ...    print(e.missing_fields)
-+            ...
-+            ('CRecord.x',)
-+        
-+        Invariants
-+        **********
-+        It is possible to add invariants that must hold when evolving the record. Invariants can be
-+        specified on both the field and the record level. If invariants fail, an *InvariantException* will be
-+        thrown which contains information about the failing invariants. An invariant function should
-+        return a tuple consisting of a boolean that tells whether the invariant holds and an object
-+        describing the invariant. This object can later be used to identify which invariant failed.
-+        
-+        The global invariant function is only executed if all field invariants hold.
-+        
-+        Global invariants are inherited by subclasses.
-+        
-+        .. code:: python
-+        
-+            >>> class RestrictedVector(PRecord):
-+            ...     __invariant__ = lambda r: (r.y >= r.x, 'x larger than y')
-+            ...     x = field(invariant=lambda x: (x > 0, 'x negative'))
-+            ...     y = field(invariant=lambda y: (y > 0, 'y negative'))
-+            ...
-+            >>> r = RestrictedVector(y=3, x=2)
-+            >>> try:
-+            ...    r.set(x=-1, y=-2)
-+            ... except InvariantException as e:
-+            ...    print(e.invariant_errors)
-+            ...
-+            ('y negative', 'x negative')
-+            >>> try:
-+            ...    r.set(x=2, y=1)
-+            ... except InvariantException as e:
-+            ...    print(e.invariant_errors)
-+            ...
-+            ('x larger than y',)
-+        
-+        Invariants may also contain multiple assertions. In those cases the invariant function should
-+        return a tuple of invariant tuples as described above. This structure is reflected in the
-+        invariant_errors attribute of the exception, which will contain tuples with data from all failed
-+        invariants. E.g.:
-+        
-+        .. code:: python
-+        
-+            >>> class EvenX(PRecord):
-+            ...     x = field(invariant=lambda x: ((x > 0, 'x negative'), (x % 2 == 0, 'x odd')))
-+            ...
-+            >>> try:
-+            ...    EvenX(x=-1)
-+            ... except InvariantException as e:
-+            ...    print(e.invariant_errors)
-+            ...
-+            (('x negative', 'x odd'),)
-+        
-+        
-+        Factories
-+        *********
-+        It's possible to specify factory functions for fields. The factory function receives whatever
-+        is supplied as the field value, and the value returned by the factory is assigned to the field,
-+        given that any type and invariant checks hold.
-+        PRecords have a default factory specified as a static function on the class, create(). It takes
-+        a *Mapping* as argument and returns an instance of the specific record.
-+        If a record has fields of type PRecord, the create() method of that record will
-+        be called to create the "sub record" if no factory has explicitly been specified to override
-+        this behaviour.
-+        
-+        .. code:: python
-+        
-+            >>> class DRecord(PRecord):
-+            ...     x = field(factory=int)
-+            ...
-+            >>> class ERecord(PRecord):
-+            ...     d = field(type=DRecord)
-+            ...
-+            >>> ERecord.create({'d': {'x': '1'}})
-+            ERecord(d=DRecord(x=1))
-+        
-+        Collection fields
-+        *****************
-+        It is also possible to have fields with ``pyrsistent`` collections.
-+        
-+        .. code:: python
-+        
-+           >>> from pyrsistent import pset_field, pmap_field, pvector_field
-+           >>> class MultiRecord(PRecord):
-+           ...     set_of_ints = pset_field(int)
-+           ...     map_int_to_str = pmap_field(int, str)
-+           ...     vector_of_strs = pvector_field(str)
-+           ...
-+
-+        Serialization
-+        *************
-+        PRecords support serialization back to dicts. Default serialization will take keys and values
-+        "as is" and output them into a dict. It is possible to specify custom serialization functions
-+        to take care of fields that require special treatment.
-+        
-+        .. code:: python
-+        
-+            >>> from datetime import date
-+            >>> class Person(PRecord):
-+            ...     name = field(type=unicode)
-+            ...     birth_date = field(type=date,
-+            ...                        serializer=lambda format, d: d.strftime(format['date']))
-+            ...
-+            >>> john = Person(name=u'John', birth_date=date(1985, 10, 21))
-+            >>> john.serialize({'date': '%Y-%m-%d'})
-+            {'birth_date': '1985-10-21', 'name': u'John'}
-+        
-+        
-+        .. _instar: https://github.com/boxed/instar/
-+        
-+        .. _PClass:
-+        
-+        PClass
-+        ~~~~~~
-+        A PClass is a Python class with a fixed set of specified fields. PClasses are declared as Python classes inheriting
-+        from PClass. They are defined the same way as PRecords and behave like PRecords in all aspects, except that a PClass
-+        is not a PMap and hence not a collection, but rather a plain Python object.
-+        
-+        .. code:: python
-+        
-+            >>> from pyrsistent import PClass, field
-+            >>> class AClass(PClass):
-+            ...     x = field()
-+            ...
-+            >>> a = AClass(x=3)
-+            >>> a
-+            AClass(x=3)
-+            >>> a.x
-+            3
-+        
-+        
-+        Checked collections
-+        ~~~~~~~~~~~~~~~~~~~
-+        Checked collections currently come in three flavors: CheckedPVector, CheckedPMap and CheckedPSet.
-+        
-+        .. code:: python
-+        
-+            >>> from pyrsistent import CheckedPVector, CheckedPMap, CheckedPSet, thaw
-+            >>> class Positives(CheckedPSet):
-+            ...     __type__ = (long, int)
-+            ...     __invariant__ = lambda n: (n >= 0, 'Negative')
-+            ...
-+            >>> class Lottery(PRecord):
-+            ...     name = field(type=str)
-+            ...     numbers = field(type=Positives, invariant=lambda p: (len(p) > 0, 'No numbers'))
-+            ...
-+            >>> class Lotteries(CheckedPVector):
-+            ...     __type__ = Lottery
-+            ...
-+            >>> class LotteriesByDate(CheckedPMap):
-+            ...     __key_type__ = date
-+            ...     __value_type__ = Lotteries
-+            ...
-+            >>> lotteries = LotteriesByDate.create({date(2015, 2, 15): [{'name': 'SuperLotto', 'numbers': {1, 2, 3}},
-+            ...                                                         {'name': 'MegaLotto',  'numbers': {4, 5, 6}}],
-+            ...                                     date(2015, 2, 16): [{'name': 'SuperLotto', 'numbers': {3, 2, 1}},
-+            ...                                                         {'name': 'MegaLotto',  'numbers': {6, 5, 4}}]})
-+            >>> lotteries
-+            LotteriesByDate({datetime.date(2015, 2, 15): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')]), datetime.date(2015, 2, 16): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])})
-+        
-+            # The checked versions support all operations that the corresponding
-+            # unchecked types do
-+            >>> lottery_0215 = lotteries[date(2015, 2, 15)]
-+            >>> lottery_0215.transform([0, 'name'], 'SuperDuperLotto')
-+            Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperDuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])
-+        
-+            # But also makes asserts that types and invariants hold
-+            >>> lottery_0215.transform([0, 'name'], 999)
-+            Traceback (most recent call last):
-+            PTypeError: Invalid type for field Lottery.name, was int
-+        
-+            >>> lottery_0215.transform([0, 'numbers'], set())
-+            Traceback (most recent call last):
-+            InvariantException: Field invariant failed
-+        
-+            # They can be converted back to python built ins with either thaw()
-+            # or serialize() (which provides possibilities to customize serialization)
-+            >>> thaw(lottery_0215)
-+            [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
-+            >>> lottery_0215.serialize()
-+            [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
-+        
-+        .. _transformations:
-+        
-+        Transformations
-+        ~~~~~~~~~~~~~~~
-+        Transformations are inspired by the cool Clojure library instar_. They let you evolve PMaps and PVectors
-+        with arbitrarily deep/complex nesting using a simple and flexible matching syntax.
-+        
-+        The first argument to transform is the path that points out the value to transform. The
-+        second is the transformation to perform. If the transformation is callable, it will be applied
-+        to the value(s) matching the path. The path may also contain callables. In that case they are
-+        treated as matchers: if a matcher returns True for a specific key, that key is considered for transformation.
-+        
-+        .. code:: python
-+        
-+            # Basic examples
-+            >>> from pyrsistent import inc, freeze, thaw, rex, ny, discard
-+            >>> v1 = freeze([1, 2, 3, 4, 5])
-+            >>> v1.transform([2], inc)
-+            pvector([1, 2, 4, 4, 5])
-+            >>> v1.transform([lambda ix: 0 < ix < 4], 8)
-+            pvector([1, 8, 8, 8, 5])
-+            >>> v1.transform([lambda ix, v: ix == 0 or v == 5], 0)
-+            pvector([0, 2, 3, 4, 0])
-+        
-+            # The (a)ny matcher can be used to match anything
-+            >>> v1.transform([ny], 8)
-+            pvector([8, 8, 8, 8, 8])
-+        
-+            # Regular expressions can be used for matching
-+            >>> scores = freeze({'John': 12, 'Joseph': 34, 'Sara': 23})
-+            >>> scores.transform([rex('^Jo')], 0)
-+            pmap({'Joseph': 0, 'Sara': 23, 'John': 0})
-+        
-+            # Transformations can be done on arbitrarily deep structures
-+            >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
-+            ...                                   {'author': 'Steve', 'content': 'A slightly longer article'}],
-+            ...                      'weather': {'temperature': '11C', 'wind': '5m/s'}})
-+            >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
-+            >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
-+            >>> very_short_news.articles[0].content
-+            'A short article'
-+            >>> very_short_news.articles[1].content
-+            'A slightly long...'
-+        
-+            # When nothing has been transformed the original data structure is kept
-+            >>> short_news is news_paper
-+            True
-+            >>> very_short_news is news_paper
-+            False
-+            >>> very_short_news.articles[0] is news_paper.articles[0]
-+            True
-+        
-+            # There is a special transformation that can be used to discard elements. Also
-+            # multiple transformations can be applied in one call
-+            >>> thaw(news_paper.transform(['weather'], discard, ['articles', ny, 'content'], discard))
-+            {'articles': [{'author': 'Sara'}, {'author': 'Steve'}]}
-+        
-+        Evolvers
-+        ~~~~~~~~
-+        PVector, PMap and PSet all have support for a concept dubbed *evolvers*. An evolver acts like a mutable
-+        view of the underlying persistent data structure with "transaction like" semantics. No updates of the original
-+        data structure are ever performed; it remains fully immutable.
-+        
-+        The evolvers have a very limited API by design to discourage excessive, and inappropriate, usage as that would
-+        take us down the mutable road. In principle only basic mutation and element access functions are supported.
-+        Check out the documentation_ of each data structure for specific examples.
-+        
-+        Examples of when you may want to use an evolver instead of working directly with the data structure include:
-+        
-+        * Multiple updates are done to the same data structure and the intermediate results are of no
-+          interest. In this case using an evolver may be more efficient and easier to work with.
-+        * You need to pass a vector into a legacy function or a function that you have no control
-+          over which performs in place mutations. In this case pass an evolver instance
-+          instead and then create a new pvector from the evolver once the function returns.
-+        
-+        .. code:: python
-+        
-+            >>> from pyrsistent import v
-+        
-+            # In place mutation as when working with the built in counterpart
-+            >>> v1 = v(1, 2, 3)
-+            >>> e = v1.evolver()
-+            >>> e[1] = 22
-+            >>> e = e.append(4)
-+            >>> e = e.extend([5, 6])
-+            >>> e[5] += 1
-+            >>> len(e)
-+            6
-+        
-+            # The evolver is considered *dirty* when it contains changes compared to the underlying vector
-+            >>> e.is_dirty()
-+            True
-+        
-+            # But the underlying pvector still remains untouched
-+            >>> v1
-+            pvector([1, 2, 3])
-+        
-+            # Once satisfied with the updates, you can produce a new pvector containing them.
-+            # The new pvector will share data with the original pvector in the same way that would have
-+            # been done if only using operations on the pvector.
-+            >>> v2 = e.persistent()
-+            >>> v2
-+            pvector([1, 22, 3, 4, 5, 7])
-+        
-+            # The evolver is now no longer considered *dirty* as it contains no differences compared to the
-+            # pvector just produced.
-+            >>> e.is_dirty()
-+            False
-+        
-+            # You may continue to work with the same evolver without affecting the content of v2
-+            >>> e[0] = 11
-+        
-+            # Or create a new evolver from v2. The two evolvers can be updated independently but will both
-+            # share data with v2 where possible.
-+            >>> e2 = v2.evolver()
-+            >>> e2[0] = 1111
-+            >>> e.persistent()
-+            pvector([11, 22, 3, 4, 5, 7])
-+            >>> e2.persistent()
-+            pvector([1111, 22, 3, 4, 5, 7])
-+        
-+        .. _freeze:
-+        .. _thaw:
-+        
-+        freeze and thaw
-+        ~~~~~~~~~~~~~~~
-+        These functions are great when your cozy immutable world has to interact with the evil mutable world outside.
-+        
-+        .. code:: python
-+        
-+            >>> from pyrsistent import freeze, thaw, v, m
-+            >>> freeze([1, {'a': 3}])
-+            pvector([1, pmap({'a': 3})])
-+            >>> thaw(v(1, m(a=3)))
-+            [1, {'a': 3}]
-+        
-+        Compatibility
-+        -------------
-+        
-+        Pyrsistent is developed and tested on Python 2.7, 3.5, 3.6, 3.7 and PyPy (Python 2 and 3 compatible). It will most
-+        likely work on all other versions >= 3.4, but no guarantees are given. :)
-+        
-+        Compatibility issues
-+        ~~~~~~~~~~~~~~~~~~~~
-+        
-+        .. _27: https://github.com/tobgu/pyrsistent/issues/27
-+        
-+        There is currently one known compatibility issue when comparing built-in sets and frozensets to PSets, as discussed in 27_.
-+        It affects Python 2 versions < 2.7.8 and Python 3 versions < 3.4.0 and is due to a bug described in
-+        http://bugs.python.org/issue8743.
-+
-+        Comparisons will fail or be incorrect when the set/frozenset is used as the left-hand side of the comparison. As a
-+        workaround you need to either upgrade Python to a more recent version, avoid comparing sets/frozensets with PSets, or
-+        always make sure to convert both sides of the comparison to the same type before performing the comparison.
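-+
-+        A minimal sketch of the conversion workaround, using the built-in set() constructor to bring both sides of the
-+        comparison to the same type:
-+
-+        .. code:: python
-+
-+            >>> from pyrsistent import s
-+
-+            # Convert the PSet side before comparing with a built-in set
-+            >>> set([1, 2, 3]) == set(s(1, 2, 3))
-+            True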
-+        
-+        Performance
-+        -----------
-+        
-+        Pyrsistent is developed with performance in mind. Still, while some operations are nearly on par with their built-in,
-+        mutable counterparts in terms of speed, other operations are slower. Where optimizations have been attempted,
-+        speed has generally been valued over space.
-+
-+        Pyrsistent comes with two API-compatible flavors of PVector (on which PMap and PSet are based): one pure-Python
-+        implementation and one implemented as a C extension. The latter is generally 2-20 times faster than the former.
-+        The C extension will be used automatically when possible.
-+
-+        The pure-Python implementation is fully PyPy compatible. Running it under PyPy speeds operations up considerably once
-+        the structures are used heavily enough to be JIT compiled; in some cases the performance is almost on par with the built-in counterparts.
-+        
-+        Type hints
-+        ----------
-+        
-+        PEP 561 style type hints for use with mypy and various editors are available for most types and functions in pyrsistent.
-+        
-+        Type classes for annotating your own code with pyrsistent types are also available under pyrsistent.typing.
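-+
-+        For example, function signatures can be annotated with the generic aliases from pyrsistent.typing (a minimal
-+        sketch; the exact set of available aliases depends on the installed version):
-+
-+        .. code:: python
-+
-+            from pyrsistent import pvector
-+            from pyrsistent.typing import PVector
-+
-+            def truncate(xs: PVector[int], n: int) -> PVector[int]:
-+                # Slicing a PVector returns a new PVector; the input is untouched
-+                return xs[:n]
-+
-+            v1: PVector[int] = pvector([1, 2, 3, 4])
-+            v2 = truncate(v1, 2)  # pvector([1, 2])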
-+        
-+        Installation
-+        ------------
-+        
-+        ``pip install pyrsistent``
-+        
-+        Documentation
-+        -------------
-+        
-+        Available at http://pyrsistent.readthedocs.org/
-+        
-+        Brief presentation available at http://slides.com/tobiasgustafsson/immutability-and-python/
-+        
-+        Contributors
-+        ------------
-+        
-+        Tobias Gustafsson https://github.com/tobgu
-+        
-+        Christopher Armstrong https://github.com/radix
-+        
-+        Anders Hovmöller https://github.com/boxed
-+        
-+        Itamar Turner-Trauring https://github.com/itamarst
-+        
-+        Jonathan Lange https://github.com/jml
-+        
-+        Richard Futrell https://github.com/Futrell
-+        
-+        Jakob Hollenstein https://github.com/jkbjh
-+        
-+        David Honour https://github.com/foolswood
-+        
-+        David R. MacIver https://github.com/DRMacIver
-+        
-+        Marcus Ewert https://github.com/sarum90
-+        
-+        Jean-Paul Calderone https://github.com/exarkun
-+        
-+        Douglas Treadwell https://github.com/douglas-treadwell
-+        
-+        Travis Parker https://github.com/teepark
-+        
-+        Julian Berman https://github.com/Julian
-+        
-+        Dennis Tomas https://github.com/dtomas
-+        
-+        Neil Vyas https://github.com/neilvyas
-+        
-+        doozr https://github.com/doozr
-+        
-+        Kamil Galuszka https://github.com/galuszkak
-+        
-+        Tsuyoshi Hombashi https://github.com/thombashi
-+        
-+        nattofriends https://github.com/nattofriends
-+        
-+        agberk https://github.com/agberk
-+        
-+        Waleed Khan https://github.com/arxanas
-+        
-+        Jean-Louis Fuchs https://github.com/ganwell
-+        
-+        Carlos Corbacho https://github.com/ccorbacho
-+        
-+        Felix Yan https://github.com/felixonmars
-+        
-+        benrg https://github.com/benrg
-+        
-+        Jere Lahelma https://github.com/je-l
-+        
-+        Max Taggart https://github.com/MaxTaggart
-+        
-+        Vincent Philippon https://github.com/vphilippon
-+        
-+        Semen Zhydenko https://github.com/ss18
-+        
-+        Till Varoquaux  https://github.com/till-varoquaux
-+        
-+        Michal Kowalik https://github.com/michalvi
-+        
-+        ossdev07 https://github.com/ossdev07
-+        
-+        Kerry Olesen https://github.com/qhesz
-+        
-+        Contributing
-+        ------------
-+        
-+        Want to contribute? That's great! If you experience problems please log them on GitHub. If you want to contribute code,
-+        please fork the repository and submit a pull request.
-+        
-+        Run tests
-+        ~~~~~~~~~
-+        .. _tox: https://tox.readthedocs.io/en/latest/
-+        
-+        Tests can be executed using tox_.
-+        
-+        Install tox: ``pip install tox``
-+        
-+        Run tests for Python 2.7: ``tox -epy27``
-+        
-+        Release
-+        ~~~~~~~
-+        * Update CHANGES.txt
-+        * Update README with any new contributors and other info as needed.
-+        * Update _pyrsistent_version.py
-+        * Build and upload: ``python setup.py sdist upload``
-+        * Commit and tag with the new version: ``git add -u . && git commit -m 'Prepare version vX.Y.Z' && git tag -a vX.Y.Z -m 'vX.Y.Z'``
-+        * Push commit and tags: ``git push && git push --tags``
-+        
-+        Project status
-+        --------------
-+        Pyrsistent can be considered stable and mature (who knows, there may even be a 1.0 some day :-)). The project is
-+        maintained: bugs are fixed, PRs are reviewed and merged, and new releases are made. I currently do not have time to
-+        develop new features or functionality that I don't have use for myself. I'm more than happy to take PRs for new
-+        functionality though!
-+        
-+        There are a bunch of issues marked with ``enhancement`` and ``help wanted`` that contain requests for new functionality
-+        that would be nice to include. The level of difficulty and extent of the issues varies; please reach out to me if you're
-+        interested in working on any of them.
-+        
-+        If you feel that you have a grand master plan for where you would like Pyrsistent to go and have the time to put into
-+        it, please don't hesitate to discuss this with me and submit PRs for it. If all goes well I'd be more than happy to add
-+        additional maintainers to the project!
-+        
-+Platform: UNKNOWN
-+Classifier: Intended Audience :: Developers
-+Classifier: License :: OSI Approved :: MIT License
-+Classifier: Operating System :: OS Independent
-+Classifier: Programming Language :: Python :: 2.7
-+Classifier: Programming Language :: Python :: 3.5
-+Classifier: Programming Language :: Python :: 3.6
-+Classifier: Programming Language :: Python :: 3.7
-+Classifier: Programming Language :: Python :: Implementation :: PyPy
-diff --git a/third_party/python/pyrsistent/README b/third_party/python/pyrsistent/README
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/README
-@@ -0,0 +1,723 @@
-+Pyrsistent
-+==========
-+.. image:: https://travis-ci.org/tobgu/pyrsistent.png?branch=master
-+    :target: https://travis-ci.org/tobgu/pyrsistent
-+
-+.. image:: https://badge.fury.io/py/pyrsistent.svg
-+    :target: https://badge.fury.io/py/pyrsistent
-+
-+.. image:: https://coveralls.io/repos/tobgu/pyrsistent/badge.svg?branch=master&service=github
-+    :target: https://coveralls.io/github/tobgu/pyrsistent?branch=master
-+
-+
-+.. _Pyrthon: https://www.github.com/tobgu/pyrthon/
-+
-+Pyrsistent provides a number of persistent collections (referred to by some as functional data structures), persistent in
-+the sense that they are immutable.
-+
-+All methods on a data structure that would normally mutate it instead return a new copy of the structure containing the
-+requested updates. The original structure is left untouched.
-+
-+This simplifies reasoning about what a program does, since no hidden side effects can ever take place in these
-+data structures. You can rest assured that the object you hold a reference to will remain the same throughout its
-+lifetime and need not worry that somewhere five stack levels below you in the darkest corner of your application
-+someone has decided to remove that element that you expected to be there.
-+
-+Pyrsistent is influenced by persistent data structures such as those found in the standard library of Clojure. The
-+data structures are designed to share common elements through path copying.
-+It aims to take these concepts and make them as Pythonic as possible so that they can be easily integrated into any
-+Python program without hassle.
-+
-+If you want to go all in on persistent data structures and use literal syntax to define them in your code rather
-+than function calls, check out Pyrthon_.
-+
-+Examples
-+--------
-+.. _Sequence: collections_
-+.. _Hashable: collections_
-+.. _Mapping: collections_
-+.. _Mappings: collections_
-+.. _Set: collections_
-+.. _collections: https://docs.python.org/3/library/collections.abc.html
-+.. _documentation: http://pyrsistent.readthedocs.org/
-+
-+The collection types and key features currently implemented are:
-+
-+* PVector_, similar to a python list
-+* PMap_, similar to dict
-+* PSet_, similar to set
-+* PRecord_, a PMap on steroids with fixed fields, optional type and invariant checking and much more
-+* PClass_, a Python class with fixed fields, optional type and invariant checking and much more
-+* `Checked collections`_, PVector, PMap and PSet with optional type and invariant checks and more
-+* PBag, similar to collections.Counter
-+* PList, a classic singly linked list
-+* PDeque, similar to collections.deque
-+* Immutable object type (immutable) built on the named tuple
-+* freeze_ and thaw_ functions to convert between Python's standard collections and pyrsistent collections.
-+* Flexible transformations_ of arbitrarily complex structures built from PMaps and PVectors.
-+
-+Below are examples of common usage patterns for some of the structures and features. More information and
-+full documentation for all data structures is available in the documentation_.
-+
-+.. _PVector:
-+
-+PVector
-+~~~~~~~
-+With full support for the Sequence_ protocol, PVector is meant as a drop-in replacement for the built-in list from a
-+reader's point of view. Write operations of course differ, since no in-place mutation is done, but naming should be in
-+line with the corresponding operations on the built-in list.
-+
-+Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
-+
-+Appends are amortized O(1). Random access and insert are O(log32(n)), where n is the size of the vector.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import v, pvector
-+
-+    # No mutation of vectors once created, instead they
-+    # are "evolved" leaving the original untouched
-+    >>> v1 = v(1, 2, 3)
-+    >>> v2 = v1.append(4)
-+    >>> v3 = v2.set(1, 5)
-+    >>> v1
-+    pvector([1, 2, 3])
-+    >>> v2
-+    pvector([1, 2, 3, 4])
-+    >>> v3
-+    pvector([1, 5, 3, 4])
-+
-+    # Random access and slicing
-+    >>> v3[1]
-+    5
-+    >>> v3[1:3]
-+    pvector([5, 3])
-+
-+    # Iteration
-+    >>> list(x + 1 for x in v3)
-+    [2, 6, 4, 5]
-+    >>> pvector(2 * x for x in range(3))
-+    pvector([0, 2, 4])
-+
-+.. _PMap:
-+
-+PMap
-+~~~~
-+With full support for the Mapping_ protocol, PMap is meant as a drop-in replacement for the built-in dict from a
-+reader's point of view. Support for the Hashable_ protocol also means that it can be used as a key in other Mappings_.
-+
-+Random access and insert are O(log32(n)), where n is the size of the map.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import m, pmap, v
-+
-+    # No mutation of maps once created, instead they are
-+    # "evolved" leaving the original untouched
-+    >>> m1 = m(a=1, b=2)
-+    >>> m2 = m1.set('c', 3)
-+    >>> m3 = m2.set('a', 5)
-+    >>> m1
-+    pmap({'a': 1, 'b': 2})
-+    >>> m2
-+    pmap({'a': 1, 'c': 3, 'b': 2})
-+    >>> m3
-+    pmap({'a': 5, 'c': 3, 'b': 2})
-+    >>> m3['a']
-+    5
-+
-+    # Evolution of nested persistent structures
-+    >>> m4 = m(a=5, b=6, c=v(1, 2))
-+    >>> m4.transform(('c', 1), 17)
-+    pmap({'a': 5, 'c': pvector([1, 17]), 'b': 6})
-+    >>> m5 = m(a=1, b=2)
-+
-+    # Evolve by merging with other mappings
-+    >>> m5.update(m(a=2, c=3), {'a': 17, 'd': 35})
-+    pmap({'a': 17, 'c': 3, 'b': 2, 'd': 35})
-+    >>> pmap({'x': 1, 'y': 2}) + pmap({'y': 3, 'z': 4})
-+    pmap({'y': 3, 'x': 1, 'z': 4})
-+
-+    # Dict-like methods to convert to list and iterate
-+    >>> m3.items()
-+    pvector([('a', 5), ('c', 3), ('b', 2)])
-+    >>> list(m3)
-+    ['a', 'c', 'b']
-+
-+.. _PSet:
-+
-+PSet
-+~~~~
-+With full support for the Set_ protocol, PSet is meant as a drop-in replacement for the built-in set from a reader's
-+point of view. Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
-+
-+Random access and insert are O(log32(n)), where n is the size of the set.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import s
-+
-+    # No mutation of sets once created, you know the story...
-+    >>> s1 = s(1, 2, 3, 2)
-+    >>> s2 = s1.add(4)
-+    >>> s3 = s1.remove(1)
-+    >>> s1
-+    pset([1, 2, 3])
-+    >>> s2
-+    pset([1, 2, 3, 4])
-+    >>> s3
-+    pset([2, 3])
-+
-+    # Full support for set operations
-+    >>> s1 | s(3, 4, 5)
-+    pset([1, 2, 3, 4, 5])
-+    >>> s1 & s(3, 4, 5)
-+    pset([3])
-+    >>> s1 < s2
-+    True
-+    >>> s1 < s(3, 4, 5)
-+    False
-+
-+.. _PRecord:
-+
-+PRecord
-+~~~~~~~
-+A PRecord is a PMap with a fixed set of specified fields. Records are declared as Python classes inheriting
-+from PRecord. Because it is a PMap, it has full support for all Mapping methods such as iteration and element
-+access using subscript notation.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import PRecord, field
-+    >>> class ARecord(PRecord):
-+    ...     x = field()
-+    ...
-+    >>> r = ARecord(x=3)
-+    >>> r
-+    ARecord(x=3)
-+    >>> r.x
-+    3
-+    >>> r.set(x=2)
-+    ARecord(x=2)
-+    >>> r.set(y=2)
-+    Traceback (most recent call last):
-+    AttributeError: 'y' is not among the specified fields for ARecord
-+
-+Type information
-+****************
-+It is possible to add type information to the record to enforce type checks. Multiple allowed types can be specified
-+by providing an iterable of types.
-+
-+.. code:: python
-+
-+    >>> class BRecord(PRecord):
-+    ...     x = field(type=int)
-+    ...     y = field(type=(int, type(None)))
-+    ...
-+    >>> BRecord(x=3, y=None)
-+    BRecord(y=None, x=3)
-+    >>> BRecord(x=3.0)
-+    Traceback (most recent call last):
-+    PTypeError: Invalid type for field BRecord.x, was float
-+
-+
-+Custom types (classes) that are iterable should be wrapped in a tuple to prevent their
-+members from being added to the set of valid types. Enums in particular are now
-+supported without wrapping; see #83 for more information.
-+
-+Mandatory fields
-+****************
-+Fields are not mandatory by default but can be specified as such. If fields are missing an
-+*InvariantException* will be thrown which contains information about the missing fields.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import InvariantException
-+    >>> class CRecord(PRecord):
-+    ...     x = field(mandatory=True)
-+    ...
-+    >>> r = CRecord(x=3)
-+    >>> try:
-+    ...    r.discard('x')
-+    ... except InvariantException as e:
-+    ...    print(e.missing_fields)
-+    ...
-+    ('CRecord.x',)
-+
-+Invariants
-+**********
-+It is possible to add invariants that must hold when evolving the record. Invariants can be
-+specified on both the field and the record level. If invariants fail, an *InvariantException* will be
-+thrown which contains information about the failing invariants. An invariant function should
-+return a tuple consisting of a boolean that tells whether the invariant holds and an object
-+describing the invariant. This object can later be used to identify which invariant failed.
-+
-+The global invariant function is only executed if all field invariants hold.
-+
-+Global invariants are inherited by subclasses.
-+
-+.. code:: python
-+
-+    >>> class RestrictedVector(PRecord):
-+    ...     __invariant__ = lambda r: (r.y >= r.x, 'x larger than y')
-+    ...     x = field(invariant=lambda x: (x > 0, 'x negative'))
-+    ...     y = field(invariant=lambda y: (y > 0, 'y negative'))
-+    ...
-+    >>> r = RestrictedVector(y=3, x=2)
-+    >>> try:
-+    ...    r.set(x=-1, y=-2)
-+    ... except InvariantException as e:
-+    ...    print(e.invariant_errors)
-+    ...
-+    ('y negative', 'x negative')
-+    >>> try:
-+    ...    r.set(x=2, y=1)
-+    ... except InvariantException as e:
-+    ...    print(e.invariant_errors)
-+    ...
-+    ('x larger than y',)
-+
-+Invariants may also contain multiple assertions. In those cases the invariant function should
-+return a tuple of invariant tuples as described above. This structure is reflected in the
-+invariant_errors attribute of the exception, which will contain tuples with data from all failed
-+invariants. E.g.:
-+
-+.. code:: python
-+
-+    >>> class EvenX(PRecord):
-+    ...     x = field(invariant=lambda x: ((x > 0, 'x negative'), (x % 2 == 0, 'x odd')))
-+    ...
-+    >>> try:
-+    ...    EvenX(x=-1)
-+    ... except InvariantException as e:
-+    ...    print(e.invariant_errors)
-+    ...
-+    (('x negative', 'x odd'),)
-+
-+
-+Factories
-+*********
-+It's possible to specify factory functions for fields. The factory function receives whatever
-+is supplied as the field value, and the value returned by the factory is assigned to the field,
-+given that any type and invariant checks hold.
-+PRecords have a default factory specified as a static function on the class, create(). It takes
-+a *Mapping* as argument and returns an instance of the specific record.
-+If a record has fields of type PRecord, the create() method of that record will
-+be called to create the "sub record" if no factory has explicitly been specified to override
-+this behaviour.
-+
-+.. code:: python
-+
-+    >>> class DRecord(PRecord):
-+    ...     x = field(factory=int)
-+    ...
-+    >>> class ERecord(PRecord):
-+    ...     d = field(type=DRecord)
-+    ...
-+    >>> ERecord.create({'d': {'x': '1'}})
-+    ERecord(d=DRecord(x=1))
-+
-+Collection fields
-+*****************
-+It is also possible to have fields with ``pyrsistent`` collections.
-+
-+.. code:: python
-+
-+   >>> from pyrsistent import pset_field, pmap_field, pvector_field
-+   >>> class MultiRecord(PRecord):
-+   ...     set_of_ints = pset_field(int)
-+   ...     map_int_to_str = pmap_field(int, str)
-+   ...     vector_of_strs = pvector_field(str)
-+   ...
-+
-+Serialization
-+*************
-+PRecords support serialization back to dicts. Default serialization will take keys and values
-+"as is" and output them into a dict. It is possible to specify custom serialization functions
-+to take care of fields that require special treatment.
-+
-+.. code:: python
-+
-+    >>> from datetime import date
-+    >>> class Person(PRecord):
-+    ...     name = field(type=unicode)
-+    ...     birth_date = field(type=date,
-+    ...                        serializer=lambda format, d: d.strftime(format['date']))
-+    ...
-+    >>> john = Person(name=u'John', birth_date=date(1985, 10, 21))
-+    >>> john.serialize({'date': '%Y-%m-%d'})
-+    {'birth_date': '1985-10-21', 'name': u'John'}
-+
-+
-+.. _instar: https://github.com/boxed/instar/
-+
-+.. _PClass:
-+
-+PClass
-+~~~~~~
-+A PClass is a Python class with a fixed set of specified fields. PClasses are declared as Python classes inheriting
-+from PClass. They are defined the same way as PRecords and behave like PRecords in all aspects, except that a PClass
-+is not a PMap and hence not a collection, but rather a plain Python object.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import PClass, field
-+    >>> class AClass(PClass):
-+    ...     x = field()
-+    ...
-+    >>> a = AClass(x=3)
-+    >>> a
-+    AClass(x=3)
-+    >>> a.x
-+    3
-+
-+
-+Checked collections
-+~~~~~~~~~~~~~~~~~~~
-+Checked collections currently come in three flavors: CheckedPVector, CheckedPMap and CheckedPSet.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import CheckedPVector, CheckedPMap, CheckedPSet, thaw
-+    >>> class Positives(CheckedPSet):
-+    ...     __type__ = (long, int)
-+    ...     __invariant__ = lambda n: (n >= 0, 'Negative')
-+    ...
-+    >>> class Lottery(PRecord):
-+    ...     name = field(type=str)
-+    ...     numbers = field(type=Positives, invariant=lambda p: (len(p) > 0, 'No numbers'))
-+    ...
-+    >>> class Lotteries(CheckedPVector):
-+    ...     __type__ = Lottery
-+    ...
-+    >>> class LotteriesByDate(CheckedPMap):
-+    ...     __key_type__ = date
-+    ...     __value_type__ = Lotteries
-+    ...
-+    >>> lotteries = LotteriesByDate.create({date(2015, 2, 15): [{'name': 'SuperLotto', 'numbers': {1, 2, 3}},
-+    ...                                                         {'name': 'MegaLotto',  'numbers': {4, 5, 6}}],
-+    ...                                     date(2015, 2, 16): [{'name': 'SuperLotto', 'numbers': {3, 2, 1}},
-+    ...                                                         {'name': 'MegaLotto',  'numbers': {6, 5, 4}}]})
-+    >>> lotteries
-+    LotteriesByDate({datetime.date(2015, 2, 15): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')]), datetime.date(2015, 2, 16): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])})
-+
-+    # The checked versions support all operations that the corresponding
-+    # unchecked types do
-+    >>> lottery_0215 = lotteries[date(2015, 2, 15)]
-+    >>> lottery_0215.transform([0, 'name'], 'SuperDuperLotto')
-+    Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperDuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])
-+
-+    # But also makes asserts that types and invariants hold
-+    >>> lottery_0215.transform([0, 'name'], 999)
-+    Traceback (most recent call last):
-+    PTypeError: Invalid type for field Lottery.name, was int
-+
-+    >>> lottery_0215.transform([0, 'numbers'], set())
-+    Traceback (most recent call last):
-+    InvariantException: Field invariant failed
-+
-+    # They can be converted back to python built ins with either thaw()
-+    # or serialize() (which provides possibilities to customize serialization)
-+    >>> thaw(lottery_0215)
-+    [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
-+    >>> lottery_0215.serialize()
-+    [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
-+
-+.. _transformations:
-+
-+Transformations
-+~~~~~~~~~~~~~~~
-+Transformations are inspired by the cool Clojure library instar_. They let you evolve PMaps and PVectors
-+with arbitrarily deep/complex nesting using a simple and flexible matching syntax.
-+
-+The first argument to transform is the path that points out the value to transform. The
-+second is the transformation to perform. If the transformation is callable, it will be applied
-+to the value(s) matching the path. The path may also contain callables. In that case they are
-+treated as matchers: if a matcher returns True for a specific key, that key is considered for transformation.
-+
-+.. code:: python
-+
-+    # Basic examples
-+    >>> from pyrsistent import inc, freeze, thaw, rex, ny, discard
-+    >>> v1 = freeze([1, 2, 3, 4, 5])
-+    >>> v1.transform([2], inc)
-+    pvector([1, 2, 4, 4, 5])
-+    >>> v1.transform([lambda ix: 0 < ix < 4], 8)
-+    pvector([1, 8, 8, 8, 5])
-+    >>> v1.transform([lambda ix, v: ix == 0 or v == 5], 0)
-+    pvector([0, 2, 3, 4, 0])
-+
-+    # The (a)ny matcher can be used to match anything
-+    >>> v1.transform([ny], 8)
-+    pvector([8, 8, 8, 8, 8])
-+
-+    # Regular expressions can be used for matching
-+    >>> scores = freeze({'John': 12, 'Joseph': 34, 'Sara': 23})
-+    >>> scores.transform([rex('^Jo')], 0)
-+    pmap({'Joseph': 0, 'Sara': 23, 'John': 0})
-+
-+    # Transformations can be done on arbitrarily deep structures
-+    >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
-+    ...                                   {'author': 'Steve', 'content': 'A slightly longer article'}],
-+    ...                      'weather': {'temperature': '11C', 'wind': '5m/s'}})
-+    >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
-+    >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
-+    >>> very_short_news.articles[0].content
-+    'A short article'
-+    >>> very_short_news.articles[1].content
-+    'A slightly long...'
-+
-+    # When nothing has been transformed the original data structure is kept
-+    >>> short_news is news_paper
-+    True
-+    >>> very_short_news is news_paper
-+    False
-+    >>> very_short_news.articles[0] is news_paper.articles[0]
-+    True
-+
-+    # There is a special transformation that can be used to discard elements. Also
-+    # multiple transformations can be applied in one call
-+    >>> thaw(news_paper.transform(['weather'], discard, ['articles', ny, 'content'], discard))
-+    {'articles': [{'author': 'Sara'}, {'author': 'Steve'}]}
-+
-+Evolvers
-+~~~~~~~~
-+PVector, PMap and PSet all have support for a concept dubbed *evolvers*. An evolver acts like a mutable
-+view of the underlying persistent data structure with "transaction like" semantics. No updates of the original
-+data structure are ever performed; it remains fully immutable.
-+
-+The evolvers have a very limited API by design to discourage excessive, and inappropriate, usage as that would
-+take us down the mutable road. In principle only basic mutation and element access functions are supported.
-+Check out the documentation_ of each data structure for specific examples.
-+
-+Examples of when you may want to use an evolver instead of working directly with the data structure include:
-+
-+* Multiple updates are done to the same data structure and the intermediate results are of no
-+  interest. In this case using an evolver may be more efficient and easier to work with.
-+* You need to pass a vector into a legacy function or a function that you have no control
-+  over which performs in place mutations. In this case pass an evolver instance
-+  instead and then create a new pvector from the evolver once the function returns.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import v
-+
-+    # In place mutation as when working with the built in counterpart
-+    >>> v1 = v(1, 2, 3)
-+    >>> e = v1.evolver()
-+    >>> e[1] = 22
-+    >>> e = e.append(4)
-+    >>> e = e.extend([5, 6])
-+    >>> e[5] += 1
-+    >>> len(e)
-+    6
-+
-+    # The evolver is considered *dirty* when it contains changes compared to the underlying vector
-+    >>> e.is_dirty()
-+    True
-+
-+    # But the underlying pvector still remains untouched
-+    >>> v1
-+    pvector([1, 2, 3])
-+
-+    # Once satisfied with the updates, you can produce a new pvector containing them.
-+    # The new pvector will share data with the original pvector in the same way that would have
-+    # been done if only using operations on the pvector.
-+    >>> v2 = e.persistent()
-+    >>> v2
-+    pvector([1, 22, 3, 4, 5, 7])
-+
-+    # The evolver is now no longer considered *dirty* as it contains no differences compared to the
-+    # pvector just produced.
-+    >>> e.is_dirty()
-+    False
-+
-+    # You may continue to work with the same evolver without affecting the content of v2
-+    >>> e[0] = 11
-+
-+    # Or create a new evolver from v2. The two evolvers can be updated independently but will both
-+    # share data with v2 where possible.
-+    >>> e2 = v2.evolver()
-+    >>> e2[0] = 1111
-+    >>> e.persistent()
-+    pvector([11, 22, 3, 4, 5, 7])
-+    >>> e2.persistent()
-+    pvector([1111, 22, 3, 4, 5, 7])
-+
-+.. _freeze:
-+.. _thaw:
-+
-+freeze and thaw
-+~~~~~~~~~~~~~~~
-+These functions are great when your cozy immutable world has to interact with the evil mutable world outside.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import freeze, thaw, v, m
-+    >>> freeze([1, {'a': 3}])
-+    pvector([1, pmap({'a': 3})])
-+    >>> thaw(v(1, m(a=3)))
-+    [1, {'a': 3}]
-+
-+Compatibility
-+-------------
-+
-+Pyrsistent is developed and tested on Python 2.7, 3.5, 3.6, 3.7 and PyPy (Python 2 and 3 compatible). It will most
-+likely work on all other versions >= 3.4, but no guarantees are given. :)
-+
-+Compatibility issues
-+~~~~~~~~~~~~~~~~~~~~
-+
-+.. _27: https://github.com/tobgu/pyrsistent/issues/27
-+
-+There is currently one known compatibility issue when comparing built-in sets and frozensets to PSets, as discussed in 27_.
-+It affects Python 2 versions < 2.7.8 and Python 3 versions < 3.4.0 and is due to a bug described in
-+http://bugs.python.org/issue8743.
-+
-+Comparisons will fail or be incorrect when the set/frozenset is used as the left-hand side of the comparison. As a
-+workaround you need to either upgrade Python to a more recent version, avoid comparing sets/frozensets with PSets, or
-+always make sure to convert both sides of the comparison to the same type before performing the comparison.
-+
-+Performance
-+-----------
-+
-+Pyrsistent is developed with performance in mind. Still, while some operations are nearly on par with their built-in,
-+mutable counterparts in terms of speed, other operations are slower. Where optimizations have been attempted,
-+speed has generally been valued over space.
-+
-+Pyrsistent comes with two API-compatible flavors of PVector (on which PMap and PSet are based): one pure-Python
-+implementation and one implemented as a C extension. The latter is generally 2-20 times faster than the former.
-+The C extension will be used automatically when possible.
-+
-+The pure-Python implementation is fully PyPy compatible. Running it under PyPy speeds operations up considerably once
-+the structures are used heavily enough to be JIT compiled; in some cases the performance is almost on par with the built-in counterparts.
-+
-+Type hints
-+----------
-+
-+PEP 561 style type hints for use with mypy and various editors are available for most types and functions in pyrsistent.
-+
-+Type classes for annotating your own code with pyrsistent types are also available under pyrsistent.typing.
-+
-+Installation
-+------------
-+
-+``pip install pyrsistent``
-+
-+Documentation
-+-------------
-+
-+Available at http://pyrsistent.readthedocs.org/
-+
-+Brief presentation available at http://slides.com/tobiasgustafsson/immutability-and-python/
-+
-+Contributors
-+------------
-+
-+Tobias Gustafsson https://github.com/tobgu
-+
-+Christopher Armstrong https://github.com/radix
-+
-+Anders Hovmöller https://github.com/boxed
-+
-+Itamar Turner-Trauring https://github.com/itamarst
-+
-+Jonathan Lange https://github.com/jml
-+
-+Richard Futrell https://github.com/Futrell
-+
-+Jakob Hollenstein https://github.com/jkbjh
-+
-+David Honour https://github.com/foolswood
-+
-+David R. MacIver https://github.com/DRMacIver
-+
-+Marcus Ewert https://github.com/sarum90
-+
-+Jean-Paul Calderone https://github.com/exarkun
-+
-+Douglas Treadwell https://github.com/douglas-treadwell
-+
-+Travis Parker https://github.com/teepark
-+
-+Julian Berman https://github.com/Julian
-+
-+Dennis Tomas https://github.com/dtomas
-+
-+Neil Vyas https://github.com/neilvyas
-+
-+doozr https://github.com/doozr
-+
-+Kamil Galuszka https://github.com/galuszkak
-+
-+Tsuyoshi Hombashi https://github.com/thombashi
-+
-+nattofriends https://github.com/nattofriends
-+
-+agberk https://github.com/agberk
-+
-+Waleed Khan https://github.com/arxanas
-+
-+Jean-Louis Fuchs https://github.com/ganwell
-+
-+Carlos Corbacho https://github.com/ccorbacho
-+
-+Felix Yan https://github.com/felixonmars
-+
-+benrg https://github.com/benrg
-+
-+Jere Lahelma https://github.com/je-l
-+
-+Max Taggart https://github.com/MaxTaggart
-+
-+Vincent Philippon https://github.com/vphilippon
-+
-+Semen Zhydenko https://github.com/ss18
-+
-+Till Varoquaux  https://github.com/till-varoquaux
-+
-+Michal Kowalik https://github.com/michalvi
-+
-+ossdev07 https://github.com/ossdev07
-+
-+Kerry Olesen https://github.com/qhesz
-+
-+Contributing
-+------------
-+
-+Want to contribute? That's great! If you experience problems please log them on GitHub. If you want to contribute code,
-+please fork the repository and submit a pull request.
-+
-+Run tests
-+~~~~~~~~~
-+.. _tox: https://tox.readthedocs.io/en/latest/
-+
-+Tests can be executed using tox_.
-+
-+Install tox: ``pip install tox``
-+
-+Run tests for Python 2.7: ``tox -epy27``
-+
-+Release
-+~~~~~~~
-+* Update CHANGES.txt
-+* Update README with any new contributors and other info as needed.
-+* Update _pyrsistent_version.py
-+* Build and upload: ``python setup.py sdist upload``
-+* Commit and tag with the new version: ``git add -u . && git commit -m 'Prepare version vX.Y.Z' && git tag -a vX.Y.Z -m 'vX.Y.Z'``
-+* Push commit and tags: ``git push && git push --tags``
-+
-+Project status
-+--------------
-+Pyrsistent can be considered stable and mature (who knows, there may even be a 1.0 some day :-)). The project is
-+maintained: bugs are fixed, PRs are reviewed and merged, and new releases are made. I currently do not have time to
-+develop new features or functionality that I don't have use for myself. I'm more than happy to take PRs for new
-+functionality though!
-+
-+There are a bunch of issues marked with ``enhancement`` and ``help wanted`` that contain requests for new functionality
-+that would be nice to include. The level of difficulty and extent of the issues varies; please reach out to me if you're
-+interested in working on any of them.
-+
-+If you feel that you have a grand master plan for where you would like Pyrsistent to go and have the time to put into
-+it, please don't hesitate to discuss this with me and submit PRs for it. If all goes well I'd be more than happy to add
-+additional maintainers to the project!
-diff --git a/third_party/python/pyrsistent/README.rst b/third_party/python/pyrsistent/README.rst
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/README.rst
-@@ -0,0 +1,723 @@
-+Pyrsistent
-+==========
-+.. image:: https://travis-ci.org/tobgu/pyrsistent.png?branch=master
-+    :target: https://travis-ci.org/tobgu/pyrsistent
-+
-+.. image:: https://badge.fury.io/py/pyrsistent.svg
-+    :target: https://badge.fury.io/py/pyrsistent
-+
-+.. image:: https://coveralls.io/repos/tobgu/pyrsistent/badge.svg?branch=master&service=github
-+    :target: https://coveralls.io/github/tobgu/pyrsistent?branch=master
-+
-+
-+.. _Pyrthon: https://www.github.com/tobgu/pyrthon/
-+
-+Pyrsistent provides a number of persistent collections (referred to by some as functional data structures),
-+persistent in the sense that they are immutable.
-+
-+All methods on a data structure that would normally mutate it instead return a new copy of the structure containing the
-+requested updates. The original structure is left untouched.
-+
-+This simplifies reasoning about what a program does, since no hidden side effects can ever take place in these
-+data structures. You can rest assured that the object you hold a reference to will remain the same throughout its
-+lifetime and need not worry that somewhere, five stack levels below you, in the darkest corner of your application,
-+someone has decided to remove the element that you expected to be there.
-+
-+Pyrsistent is influenced by persistent data structures such as those found in the standard library of Clojure. The
-+data structures are designed to share common elements through path copying.
-+It aims to take these concepts and make them as pythonic as possible so that they can be easily integrated into any
-+Python program without hassle.
-+
-+If you want to go all in on persistent data structures and use literal syntax to define them in your code rather
-+than function calls check out Pyrthon_.
-+
-+Examples
-+--------
-+.. _Sequence: collections_
-+.. _Hashable: collections_
-+.. _Mapping: collections_
-+.. _Mappings: collections_
-+.. _Set: collections_
-+.. _collections: https://docs.python.org/3/library/collections.abc.html
-+.. _documentation: http://pyrsistent.readthedocs.org/
-+
-+The collection types and key features currently implemented are:
-+
-+* PVector_, similar to a python list
-+* PMap_, similar to dict
-+* PSet_, similar to set
-+* PRecord_, a PMap on steroids with fixed fields, optional type and invariant checking and much more
-+* PClass_, a Python class with fixed fields, optional type and invariant checking and much more
-+* `Checked collections`_, PVector, PMap and PSet with optional type and invariant checks and more
-+* PBag, similar to collections.Counter
-+* PList, a classic singly linked list
-+* PDeque, similar to collections.deque
-+* Immutable object type (immutable) built on the named tuple
-+* freeze_ and thaw_ functions to convert between Python's standard collections and pyrsistent collections.
-+* Flexible transformations_ of arbitrarily complex structures built from PMaps and PVectors.
-+
-+Below are examples of common usage patterns for some of the structures and features. More information and
-+full documentation for all data structures is available in the documentation_.
-+
-+.. _PVector:
-+
-+PVector
-+~~~~~~~
-+With full support for the Sequence_ protocol, PVector is meant as a drop-in replacement for the built-in list from a
-+reader's point of view. Write operations of course differ, since no in-place mutation is done, but naming should be in
-+line with the corresponding operations on the built-in list.
-+
-+Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
-+
-+Appends are amortized O(1). Random access and insert are log32(n) where n is the size of the vector.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import v, pvector
-+
-+    # No mutation of vectors once created, instead they
-+    # are "evolved" leaving the original untouched
-+    >>> v1 = v(1, 2, 3)
-+    >>> v2 = v1.append(4)
-+    >>> v3 = v2.set(1, 5)
-+    >>> v1
-+    pvector([1, 2, 3])
-+    >>> v2
-+    pvector([1, 2, 3, 4])
-+    >>> v3
-+    pvector([1, 5, 3, 4])
-+
-+    # Random access and slicing
-+    >>> v3[1]
-+    5
-+    >>> v3[1:3]
-+    pvector([5, 3])
-+
-+    # Iteration
-+    >>> list(x + 1 for x in v3)
-+    [2, 6, 4, 5]
-+    >>> pvector(2 * x for x in range(3))
-+    pvector([0, 2, 4])
-+
-+.. _PMap:
-+
-+PMap
-+~~~~
-+With full support for the Mapping_ protocol, PMap is meant as a drop-in replacement for the built-in dict from a
-+reader's point of view. Support for the Hashable_ protocol also means that it can be used as a key in other Mappings_.
-+
-+Random access and insert are log32(n) where n is the size of the map.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import m, pmap, v
-+
-+    # No mutation of maps once created, instead they are
-+    # "evolved" leaving the original untouched
-+    >>> m1 = m(a=1, b=2)
-+    >>> m2 = m1.set('c', 3)
-+    >>> m3 = m2.set('a', 5)
-+    >>> m1
-+    pmap({'a': 1, 'b': 2})
-+    >>> m2
-+    pmap({'a': 1, 'c': 3, 'b': 2})
-+    >>> m3
-+    pmap({'a': 5, 'c': 3, 'b': 2})
-+    >>> m3['a']
-+    5
-+
-+    # Evolution of nested persistent structures
-+    >>> m4 = m(a=5, b=6, c=v(1, 2))
-+    >>> m4.transform(('c', 1), 17)
-+    pmap({'a': 5, 'c': pvector([1, 17]), 'b': 6})
-+    >>> m5 = m(a=1, b=2)
-+
-+    # Evolve by merging with other mappings
-+    >>> m5.update(m(a=2, c=3), {'a': 17, 'd': 35})
-+    pmap({'a': 17, 'c': 3, 'b': 2, 'd': 35})
-+    >>> pmap({'x': 1, 'y': 2}) + pmap({'y': 3, 'z': 4})
-+    pmap({'y': 3, 'x': 1, 'z': 4})
-+
-+    # Dict-like methods to convert to list and iterate
-+    >>> m3.items()
-+    pvector([('a', 5), ('c', 3), ('b', 2)])
-+    >>> list(m3)
-+    ['a', 'c', 'b']
-+
-+.. _PSet:
-+
-+PSet
-+~~~~
-+With full support for the Set_ protocol, PSet is meant as a drop-in replacement for the built-in set from a reader's
-+point of view. Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
-+
-+Random access and insert are log32(n) where n is the size of the set.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import s
-+
-+    # No mutation of sets once created, you know the story...
-+    >>> s1 = s(1, 2, 3, 2)
-+    >>> s2 = s1.add(4)
-+    >>> s3 = s1.remove(1)
-+    >>> s1
-+    pset([1, 2, 3])
-+    >>> s2
-+    pset([1, 2, 3, 4])
-+    >>> s3
-+    pset([2, 3])
-+
-+    # Full support for set operations
-+    >>> s1 | s(3, 4, 5)
-+    pset([1, 2, 3, 4, 5])
-+    >>> s1 & s(3, 4, 5)
-+    pset([3])
-+    >>> s1 < s2
-+    True
-+    >>> s1 < s(3, 4, 5)
-+    False
-+
-+.. _PRecord:
-+
-+PRecord
-+~~~~~~~
-+A PRecord is a PMap with a fixed set of specified fields. Records are declared as python classes inheriting
-+from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element
-+access using subscript notation.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import PRecord, field
-+    >>> class ARecord(PRecord):
-+    ...     x = field()
-+    ...
-+    >>> r = ARecord(x=3)
-+    >>> r
-+    ARecord(x=3)
-+    >>> r.x
-+    3
-+    >>> r.set(x=2)
-+    ARecord(x=2)
-+    >>> r.set(y=2)
-+    Traceback (most recent call last):
-+    AttributeError: 'y' is not among the specified fields for ARecord
-+
-+Type information
-+****************
-+It is possible to add type information to the record to enforce type checks. Multiple allowed types can be specified
-+by providing an iterable of types.
-+
-+.. code:: python
-+
-+    >>> class BRecord(PRecord):
-+    ...     x = field(type=int)
-+    ...     y = field(type=(int, type(None)))
-+    ...
-+    >>> BRecord(x=3, y=None)
-+    BRecord(y=None, x=3)
-+    >>> BRecord(x=3.0)
-+    Traceback (most recent call last):
-+    PTypeError: Invalid type for field BRecord.x, was float
-+
-+
-+Custom types (classes) that are iterable should be wrapped in a tuple to prevent their
-+members from being added to the set of valid types. Enums in particular are now
-+supported without wrapping; see #83 for more information.
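-+
-+A minimal sketch of such wrapping (the ``Color`` enum here is only illustrative, and per the note above
-+recent versions accept Enums without it):
-+
-+.. code:: python
-+
-+    >>> from enum import Enum
-+    >>> class Color(Enum):
-+    ...     red = 1
-+    ...     blue = 2
-+    ...
-+    >>> class Pixel(PRecord):
-+    ...     # The tuple keeps Color itself as the allowed type instead of its members
-+    ...     color = field(type=(Color,))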
-+
-+Mandatory fields
-+****************
-+Fields are not mandatory by default but can be specified as such. If fields are missing, an
-+*InvariantException* will be thrown which contains information about the missing fields.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import InvariantException
-+    >>> class CRecord(PRecord):
-+    ...     x = field(mandatory=True)
-+    ...
-+    >>> r = CRecord(x=3)
-+    >>> try:
-+    ...    r.discard('x')
-+    ... except InvariantException as e:
-+    ...    print(e.missing_fields)
-+    ...
-+    ('CRecord.x',)
-+
-+Invariants
-+**********
-+It is possible to add invariants that must hold when evolving the record. Invariants can be
-+specified on both the field and record level. If invariants fail, an *InvariantException* will be
-+thrown which contains information about the failing invariants. An invariant function should
-+return a tuple consisting of a boolean that tells whether the invariant holds and an object
-+describing the invariant. This object can later be used to identify which invariant failed.
-+
-+The global invariant function is only executed if all field invariants hold.
-+
-+Global invariants are inherited by subclasses.
-+
-+.. code:: python
-+
-+    >>> class RestrictedVector(PRecord):
-+    ...     __invariant__ = lambda r: (r.y >= r.x, 'x larger than y')
-+    ...     x = field(invariant=lambda x: (x > 0, 'x negative'))
-+    ...     y = field(invariant=lambda y: (y > 0, 'y negative'))
-+    ...
-+    >>> r = RestrictedVector(y=3, x=2)
-+    >>> try:
-+    ...    r.set(x=-1, y=-2)
-+    ... except InvariantException as e:
-+    ...    print(e.invariant_errors)
-+    ...
-+    ('y negative', 'x negative')
-+    >>> try:
-+    ...    r.set(x=2, y=1)
-+    ... except InvariantException as e:
-+    ...    print(e.invariant_errors)
-+    ...
-+    ('x larger than y',)
-+
-+Invariants may also contain multiple assertions. For those cases the invariant function should
-+return a tuple of invariant tuples as described above. This structure is reflected in the
-+invariant_errors attribute of the exception, which will contain tuples with data from all failed
-+invariants, e.g.:
-+
-+.. code:: python
-+
-+    >>> class EvenX(PRecord):
-+    ...     x = field(invariant=lambda x: ((x > 0, 'x negative'), (x % 2 == 0, 'x odd')))
-+    ...
-+    >>> try:
-+    ...    EvenX(x=-1)
-+    ... except InvariantException as e:
-+    ...    print(e.invariant_errors)
-+    ...
-+    (('x negative', 'x odd'),)
-+
-+
-+Factories
-+*********
-+It's possible to specify factory functions for fields. The factory function receives whatever
-+is supplied as the field value, and the value returned by the factory is assigned to the field,
-+given that any type and invariant checks hold.
-+PRecords have a default factory specified as a static function on the class, create(). It takes
-+a *Mapping* as argument and returns an instance of the specific record.
-+If a record has fields of type PRecord, the create() method of that record will
-+be called to create the "sub record" if no factory has explicitly been specified to override
-+this behaviour.
-+
-+.. code:: python
-+
-+    >>> class DRecord(PRecord):
-+    ...     x = field(factory=int)
-+    ...
-+    >>> class ERecord(PRecord):
-+    ...     d = field(type=DRecord)
-+    ...
-+    >>> ERecord.create({'d': {'x': '1'}})
-+    ERecord(d=DRecord(x=1))
-+
-+Collection fields
-+*****************
-+It is also possible to have fields with ``pyrsistent`` collections.
-+
-+.. code:: python
-+
-+   >>> from pyrsistent import pset_field, pmap_field, pvector_field
-+   >>> class MultiRecord(PRecord):
-+   ...     set_of_ints = pset_field(int)
-+   ...     map_int_to_str = pmap_field(int, str)
-+   ...     vector_of_strs = pvector_field(str)
-+   ...
-+
-+Serialization
-+*************
-+PRecords support serialization back to dicts. Default serialization will take keys and values
-+"as is" and output them into a dict. It is possible to specify custom serialization functions
-+to take care of fields that require special treatment.
-+
-+.. code:: python
-+
-+    >>> from datetime import date
-+    >>> class Person(PRecord):
-+    ...     name = field(type=unicode)
-+    ...     birth_date = field(type=date,
-+    ...                        serializer=lambda format, d: d.strftime(format['date']))
-+    ...
-+    >>> john = Person(name=u'John', birth_date=date(1985, 10, 21))
-+    >>> john.serialize({'date': '%Y-%m-%d'})
-+    {'birth_date': '1985-10-21', 'name': u'John'}
-+
-+
-+.. _instar: https://github.com/boxed/instar/
-+
-+.. _PClass:
-+
-+PClass
-+~~~~~~
-+A PClass is a python class with a fixed set of specified fields. PClasses are declared as python classes inheriting
-+from PClass. It is defined the same way that PRecords are and behaves like a PRecord in all aspects except that it
-+is not a PMap and hence not a collection but rather a plain Python object.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import PClass, field
-+    >>> class AClass(PClass):
-+    ...     x = field()
-+    ...
-+    >>> a = AClass(x=3)
-+    >>> a
-+    AClass(x=3)
-+    >>> a.x
-+    3
-+
-+
-+Checked collections
-+~~~~~~~~~~~~~~~~~~~
-+Checked collections currently come in three flavors: CheckedPVector, CheckedPMap and CheckedPSet.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import CheckedPVector, CheckedPMap, CheckedPSet, thaw
-+    >>> class Positives(CheckedPSet):
-+    ...     __type__ = (long, int)
-+    ...     __invariant__ = lambda n: (n >= 0, 'Negative')
-+    ...
-+    >>> class Lottery(PRecord):
-+    ...     name = field(type=str)
-+    ...     numbers = field(type=Positives, invariant=lambda p: (len(p) > 0, 'No numbers'))
-+    ...
-+    >>> class Lotteries(CheckedPVector):
-+    ...     __type__ = Lottery
-+    ...
-+    >>> class LotteriesByDate(CheckedPMap):
-+    ...     __key_type__ = date
-+    ...     __value_type__ = Lotteries
-+    ...
-+    >>> lotteries = LotteriesByDate.create({date(2015, 2, 15): [{'name': 'SuperLotto', 'numbers': {1, 2, 3}},
-+    ...                                                         {'name': 'MegaLotto',  'numbers': {4, 5, 6}}],
-+    ...                                     date(2015, 2, 16): [{'name': 'SuperLotto', 'numbers': {3, 2, 1}},
-+    ...                                                         {'name': 'MegaLotto',  'numbers': {6, 5, 4}}]})
-+    >>> lotteries
-+    LotteriesByDate({datetime.date(2015, 2, 15): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')]), datetime.date(2015, 2, 16): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])})
-+
-+    # The checked versions support all operations that the corresponding
-+    # unchecked types do
-+    >>> lottery_0215 = lotteries[date(2015, 2, 15)]
-+    >>> lottery_0215.transform([0, 'name'], 'SuperDuperLotto')
-+    Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperDuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])
-+
-+    # But also makes asserts that types and invariants hold
-+    >>> lottery_0215.transform([0, 'name'], 999)
-+    Traceback (most recent call last):
-+    PTypeError: Invalid type for field Lottery.name, was int
-+
-+    >>> lottery_0215.transform([0, 'numbers'], set())
-+    Traceback (most recent call last):
-+    InvariantException: Field invariant failed
-+
-+    # They can be converted back to python built ins with either thaw()
-+    # or serialize() (which provides possibilities to customize serialization)
-+    >>> thaw(lottery_0215)
-+    [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
-+    >>> lottery_0215.serialize()
-+    [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
-+
-+.. _transformations:
-+
-+Transformations
-+~~~~~~~~~~~~~~~
-+Transformations are inspired by the cool library instar_ for Clojure. They let you evolve PMaps and PVectors
-+with arbitrarily deep/complex nesting using a simple and flexible matching syntax.
-+
-+The first argument to a transformation is the path that points out the value to transform. The
-+second is the transformation to perform. If the transformation is callable it will be applied
-+to the value(s) matching the path. The path may also contain callables. In that case they are
-+treated as matchers. If the matcher returns True for a specific key it is considered for transformation.
-+
-+.. code:: python
-+
-+    # Basic examples
-+    >>> from pyrsistent import inc, freeze, thaw, rex, ny, discard
-+    >>> v1 = freeze([1, 2, 3, 4, 5])
-+    >>> v1.transform([2], inc)
-+    pvector([1, 2, 4, 4, 5])
-+    >>> v1.transform([lambda ix: 0 < ix < 4], 8)
-+    pvector([1, 8, 8, 8, 5])
-+    >>> v1.transform([lambda ix, v: ix == 0 or v == 5], 0)
-+    pvector([0, 2, 3, 4, 0])
-+
-+    # The (a)ny matcher can be used to match anything
-+    >>> v1.transform([ny], 8)
-+    pvector([8, 8, 8, 8, 8])
-+
-+    # Regular expressions can be used for matching
-+    >>> scores = freeze({'John': 12, 'Joseph': 34, 'Sara': 23})
-+    >>> scores.transform([rex('^Jo')], 0)
-+    pmap({'Joseph': 0, 'Sara': 23, 'John': 0})
-+
-+    # Transformations can be done on arbitrarily deep structures
-+    >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
-+    ...                                   {'author': 'Steve', 'content': 'A slightly longer article'}],
-+    ...                      'weather': {'temperature': '11C', 'wind': '5m/s'}})
-+    >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
-+    >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
-+    >>> very_short_news.articles[0].content
-+    'A short article'
-+    >>> very_short_news.articles[1].content
-+    'A slightly long...'
-+
-+    # When nothing has been transformed the original data structure is kept
-+    >>> short_news is news_paper
-+    True
-+    >>> very_short_news is news_paper
-+    False
-+    >>> very_short_news.articles[0] is news_paper.articles[0]
-+    True
-+
-+    # There is a special transformation that can be used to discard elements. Also
-+    # multiple transformations can be applied in one call
-+    >>> thaw(news_paper.transform(['weather'], discard, ['articles', ny, 'content'], discard))
-+    {'articles': [{'author': 'Sara'}, {'author': 'Steve'}]}
-+
-+Evolvers
-+~~~~~~~~
-+PVector, PMap and PSet all have support for a concept dubbed *evolvers*. An evolver acts like a mutable
-+view of the underlying persistent data structure with "transaction like" semantics. No updates of the original
-+data structure are ever performed; it remains fully immutable.
-+
-+The evolvers have a very limited API by design to discourage excessive, and inappropriate, usage as that would
-+take us down the mutable road. In principle only basic mutation and element access functions are supported.
-+Check out the documentation_ of each data structure for specific examples.
-+
-+Examples of when you may want to use an evolver instead of working directly with the data structure include:
-+
-+* Multiple updates are done to the same data structure and the intermediate results are of no
-+  interest. In this case using an evolver may be more efficient and easier to work with.
-+* You need to pass a vector into a legacy function, or a function that you have no control
-+  over, that performs in-place mutations. In this case pass an evolver instance
-+  instead and then create a new pvector from the evolver once the function returns.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import v
-+
-+    # In place mutation as when working with the built in counterpart
-+    >>> v1 = v(1, 2, 3)
-+    >>> e = v1.evolver()
-+    >>> e[1] = 22
-+    >>> e = e.append(4)
-+    >>> e = e.extend([5, 6])
-+    >>> e[5] += 1
-+    >>> len(e)
-+    6
-+
-+    # The evolver is considered *dirty* when it contains changes compared to the underlying vector
-+    >>> e.is_dirty()
-+    True
-+
-+    # But the underlying pvector still remains untouched
-+    >>> v1
-+    pvector([1, 2, 3])
-+
-+    # Once satisfied with the updates you can produce a new pvector containing the updates.
-+    # The new pvector will share data with the original pvector in the same way that would have
-+    # been done if only using operations on the pvector.
-+    >>> v2 = e.persistent()
-+    >>> v2
-+    pvector([1, 22, 3, 4, 5, 7])
-+
-+    # The evolver is now no longer considered *dirty* as it contains no differences compared to the
-+    # pvector just produced.
-+    >>> e.is_dirty()
-+    False
-+
-+    # You may continue to work with the same evolver without affecting the content of v2
-+    >>> e[0] = 11
-+
-+    # Or create a new evolver from v2. The two evolvers can be updated independently but will both
-+    # share data with v2 where possible.
-+    >>> e2 = v2.evolver()
-+    >>> e2[0] = 1111
-+    >>> e.persistent()
-+    pvector([11, 22, 3, 4, 5, 7])
-+    >>> e2.persistent()
-+    pvector([1111, 22, 3, 4, 5, 7])
-+
-+.. _freeze:
-+.. _thaw:
-+
-+freeze and thaw
-+~~~~~~~~~~~~~~~
-+These functions are great when your cozy immutable world has to interact with the evil mutable world outside.
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import freeze, thaw, v, m
-+    >>> freeze([1, {'a': 3}])
-+    pvector([1, pmap({'a': 3})])
-+    >>> thaw(v(1, m(a=3)))
-+    [1, {'a': 3}]
-+
-+Compatibility
-+-------------
-+
-+Pyrsistent is developed and tested on Python 2.7, 3.5, 3.6, 3.7 and PyPy (Python 2 and 3 compatible). It will most
-+likely work on all other versions >= 3.4 but no guarantees are given. :)
-+
-+Compatibility issues
-+~~~~~~~~~~~~~~~~~~~~
-+
-+.. _27: https://github.com/tobgu/pyrsistent/issues/27
-+
-+There is currently one known compatibility issue when comparing built in sets and frozensets to PSets as discussed in 27_.
-+It affects python 2 versions < 2.7.8 and python 3 versions < 3.4.0 and is due to a bug described in
-+http://bugs.python.org/issue8743.
-+
-+Comparisons will fail or be incorrect when using the set/frozenset as the left-hand side of the comparison. As a
-+workaround you need to either upgrade Python to a more recent version, avoid comparing sets/frozensets with PSets, or
-+always make sure to convert both sides of the comparison to the same type before performing the comparison.
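-+
-+A sketch of the conversion workaround (only needed on the affected interpreter versions):
-+
-+.. code:: python
-+
-+    >>> from pyrsistent import s
-+    >>> frozenset(s(1, 2)) == frozenset([1, 2])  # compare the same types on both sides
-+    True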
-+
-+Performance
-+-----------
-+
-+Pyrsistent is developed with performance in mind. Still, while some operations are nearly on par with their built-in,
-+mutable counterparts in terms of speed, other operations are slower. In the cases where attempts at
-+optimizations have been done, speed has generally been valued over space.
-+
-+Pyrsistent comes with two API compatible flavors of PVector (on which PMap and PSet are based): one pure Python
-+implementation and one implemented as a C extension. The latter is generally 2 to 20 times faster than the former.
-+The C extension will be used automatically when possible.
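-+
-+A quick sketch to check which flavor is active; this relies on the C extension being importable as
-+``pvectorc`` only when it has been built:
-+
-+.. code:: python
-+
-+    try:
-+        import pvectorc  # the C extension module
-+        print("C extension in use")
-+    except ImportError:
-+        print("pure Python implementation in use")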
-+
-+The pure Python implementation is fully PyPy compatible. Running it under PyPy speeds operations up considerably if
-+the structures are used heavily (if JITed); in some cases the performance is almost on par with the built-in counterparts.
-+
-+Type hints
-+----------
-+
-+PEP 561 style type hints for use with mypy and various editors are available for most types and functions in pyrsistent.
-+
-+Type classes for annotating your own code with pyrsistent types are also available under pyrsistent.typing.
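-+
-+A minimal sketch of annotating code with these classes (the function name is only illustrative; assumes a
-+release where ``pyrsistent.typing`` is importable):
-+
-+.. code:: python
-+
-+    from pyrsistent import pvector
-+    from pyrsistent.typing import PVector
-+
-+    def sorted_scores(scores: PVector[int]) -> PVector[int]:
-+        # pvector() builds a new persistent vector from any iterable
-+        return pvector(sorted(scores))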
-+
-+Installation
-+------------
-+
-+``pip install pyrsistent``
-+
-+Documentation
-+-------------
-+
-+Available at http://pyrsistent.readthedocs.org/
-+
-+Brief presentation available at http://slides.com/tobiasgustafsson/immutability-and-python/
-+
-+Contributors
-+------------
-+
-+Tobias Gustafsson https://github.com/tobgu
-+
-+Christopher Armstrong https://github.com/radix
-+
-+Anders Hovmöller https://github.com/boxed
-+
-+Itamar Turner-Trauring https://github.com/itamarst
-+
-+Jonathan Lange https://github.com/jml
-+
-+Richard Futrell https://github.com/Futrell
-+
-+Jakob Hollenstein https://github.com/jkbjh
-+
-+David Honour https://github.com/foolswood
-+
-+David R. MacIver https://github.com/DRMacIver
-+
-+Marcus Ewert https://github.com/sarum90
-+
-+Jean-Paul Calderone https://github.com/exarkun
-+
-+Douglas Treadwell https://github.com/douglas-treadwell
-+
-+Travis Parker https://github.com/teepark
-+
-+Julian Berman https://github.com/Julian
-+
-+Dennis Tomas https://github.com/dtomas
-+
-+Neil Vyas https://github.com/neilvyas
-+
-+doozr https://github.com/doozr
-+
-+Kamil Galuszka https://github.com/galuszkak
-+
-+Tsuyoshi Hombashi https://github.com/thombashi
-+
-+nattofriends https://github.com/nattofriends
-+
-+agberk https://github.com/agberk
-+
-+Waleed Khan https://github.com/arxanas
-+
-+Jean-Louis Fuchs https://github.com/ganwell
-+
-+Carlos Corbacho https://github.com/ccorbacho
-+
-+Felix Yan https://github.com/felixonmars
-+
-+benrg https://github.com/benrg
-+
-+Jere Lahelma https://github.com/je-l
-+
-+Max Taggart https://github.com/MaxTaggart
-+
-+Vincent Philippon https://github.com/vphilippon
-+
-+Semen Zhydenko https://github.com/ss18
-+
-+Till Varoquaux  https://github.com/till-varoquaux
-+
-+Michal Kowalik https://github.com/michalvi
-+
-+ossdev07 https://github.com/ossdev07
-+
-+Kerry Olesen https://github.com/qhesz
-+
-+Contributing
-+------------
-+
-+Want to contribute? That's great! If you experience problems please log them on GitHub. If you want to contribute code,
-+please fork the repository and submit a pull request.
-+
-+Run tests
-+~~~~~~~~~
-+.. _tox: https://tox.readthedocs.io/en/latest/
-+
-+Tests can be executed using tox_.
-+
-+Install tox: ``pip install tox``
-+
-+Run tests for Python 2.7: ``tox -epy27``
-+
-+Release
-+~~~~~~~
-+* Update CHANGES.txt
-+* Update README with any new contributors and any other information needed.
-+* Update _pyrsistent_version.py
-+* python setup.py sdist upload
-+* Commit and tag with new version: git add -u . && git commit -m 'Prepare version vX.Y.Z' && git tag -a vX.Y.Z -m 'vX.Y.Z'
-+* Push commit and tags: git push && git push --tags
-+
-+Project status
-+--------------
-+Pyrsistent can be considered stable and mature (who knows, there may even be a 1.0 some day :-)). The project is
-+maintained, bugs fixed, PRs reviewed and merged and new releases made. I currently do not have time for development
-+of new features or functionality which I don't have use for myself. I'm more than happy to take PRs for new
-+functionality though!
-+
-+There are a bunch of issues marked with ``enhancement`` and ``help wanted`` that contain requests for new functionality
-+that would be nice to include. The difficulty and extent of the issues vary; please reach out to me if you're
-+interested in working on any of them.
-+
-+If you feel that you have a grand master plan for where you would like Pyrsistent to go and have the time to put into
-+it, please don't hesitate to discuss this with me and submit PRs for it. If all goes well I'd be more than happy to add
-+additional maintainers to the project!
-diff --git a/third_party/python/pyrsistent/_pyrsistent_version.py b/third_party/python/pyrsistent/_pyrsistent_version.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/_pyrsistent_version.py
-@@ -0,0 +1,1 @@
-+__version__ = '0.15.7'
-diff --git a/third_party/python/pyrsistent/pvectorcmodule.c b/third_party/python/pyrsistent/pvectorcmodule.c
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pvectorcmodule.c
-@@ -0,0 +1,1642 @@
-+#include <Python.h>
-+#include <structmember.h>
-+
-+/*
-+Persistent/Immutable/Functional vector and helper types. 
-+
-+Please note that they are anything but immutable at this level since
-+there is a whole lot of reference counting going on. That's the way
-+CPython works though and the GIL makes them appear immutable.
-+
-+To the programmer using them from Python they appear immutable and
-+behave immutably at least.
-+
-+Naming conventions
-+------------------
-+initpyrsistentc - This is the method that initializes the whole module
-+pyrsistent_* -    Methods part of the interface
-+<typename>_* -    Instance methods of types. For example PVector_append(...)
-+
-+All other methods are camel cased without prefix. All methods are static, none should
-+need to be exposed outside of this module.
-+*/
-+
-+#define SHIFT 5
-+#define BRANCH_FACTOR (1 << SHIFT)
-+#define BIT_MASK (BRANCH_FACTOR - 1)
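-+
-+/* The vector is a 32-way trie: each level consumes SHIFT bits of the index,
-+   so nodes have 2^SHIFT = 32 slots and BIT_MASK extracts one level's index. */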
-+
-+static PyTypeObject PVectorType;
-+static PyTypeObject PVectorEvolverType;
-+
-+typedef struct {
-+  void *items[BRANCH_FACTOR];
-+  unsigned int refCount;
-+} VNode;
-+
-+#define NODE_CACHE_MAX_SIZE 1024
-+
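-+/* A small free list of VNodes so that allocating and releasing nodes does not
-+   hit the memory allocator every time, see allocNode()/freeNode() below. */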
-+typedef struct {
-+  unsigned int size;
-+  VNode* nodes[NODE_CACHE_MAX_SIZE];
-+} vNodeCache;
-+
-+static vNodeCache nodeCache;
-+
-+typedef struct {
-+  PyObject_HEAD
-+  unsigned int count;   // Perhaps ditch this one in favor of ob_size/Py_SIZE()
-+  unsigned int shift;
-+  VNode *root;
-+  VNode *tail;
-+  PyObject *in_weakreflist; /* List of weak references */
-+} PVector;
-+
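-+/* An evolver keeps a reference to the vector it was created from, the vector
-+   being evolved (initially the same object) and a list buffering appends. */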
-+typedef struct {
-+  PyObject_HEAD
-+  PVector* originalVector;
-+  PVector* newVector;
-+  PyObject* appendList;
-+} PVectorEvolver;
-+
-+
-+static PVector* EMPTY_VECTOR = NULL;
-+static PyObject* transform_fn = NULL;
-+
-+static PyObject* transform(PVector* self, PyObject* args) {
-+  if(transform_fn == NULL) {
-+    // Import transform lazily to avoid circular import problems
-+    transform_fn = PyObject_GetAttrString(PyImport_ImportModule("pyrsistent._transformations"), "transform");
-+  }
-+
-+  return PyObject_CallFunctionObjArgs(transform_fn, self, args, NULL);
-+}
-+
-+
-+// No access to internal members
-+static PyMemberDef PVector_members[] = {
-+	{NULL}  /* Sentinel */
-+};
-+
-+#define debug(...)
-+// #define debug printf
-+
-+#define NODE_REF_COUNT(n) ((n)->refCount)
-+#define SET_NODE_REF_COUNT(n, c) (NODE_REF_COUNT(n) = (c))
-+#define INC_NODE_REF_COUNT(n) (NODE_REF_COUNT(n)++)
-+#define DEC_NODE_REF_COUNT(n) (NODE_REF_COUNT(n)--)
-+
-+static VNode* allocNode(void) {
-+  if(nodeCache.size > 0) {
-+    nodeCache.size--;
-+    return nodeCache.nodes[nodeCache.size];
-+  }
-+
-+  return PyMem_Malloc(sizeof(VNode));
-+}
-+
-+static void freeNode(VNode *node) {
-+  if(nodeCache.size < NODE_CACHE_MAX_SIZE) {
-+    nodeCache.nodes[nodeCache.size] = node;
-+    nodeCache.size++;
-+  } else {
-+    PyMem_Free(node);
-+  }
-+}
-+
-+static VNode* newNode(void) {
-+  VNode* result = allocNode();
-+  memset(result, 0x0, sizeof(VNode));
-+  SET_NODE_REF_COUNT(result, 1);
-+  debug("newNode() %p\n", result);
-+  return result;
-+}
-+
-+static VNode* copyNode(VNode* source) {
-+  /* NB: Only to be used for internal nodes, eg. nodes that do not
-+         hold direct references to python objects but only to other nodes. */
-+  int i;
-+  VNode* result = allocNode();
-+  debug("copyNode() %p\n", result);
-+  memcpy(result->items, source->items, sizeof(source->items));
-+  
-+  for(i = 0; i < BRANCH_FACTOR; i++) {
-+    // TODO-OPT: Any need to go on when the first NULL has been found?
-+    if(result->items[i] != NULL) {
-+      INC_NODE_REF_COUNT((VNode*)result->items[i]);
-+    }
-+  }
-+
-+  SET_NODE_REF_COUNT(result, 1);
-+  return result;
-+}
-+
-+static PVector* emptyNewPvec(void);
-+static PVector* copyPVector(PVector *original);
-+static void extendWithItem(PVector *newVec, PyObject *item);
-+
-+static PyObject *PVectorEvolver_persistent(PVectorEvolver *);
-+static int PVectorEvolver_set_item(PVectorEvolver *, PyObject*, PyObject*);
-+
-+static Py_ssize_t PVector_len(PVector *self) {
-+  return self->count;
-+}
-+
-+/* Convenience macros */
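-+/* TAIL_OFF is the index where the tail starts, i.e. the number of elements that
-+   live in the tree; the tail holds the remaining (at most BRANCH_FACTOR) items. */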
-+#define ROOT_NODE_FULL(vec) ((vec->count >> SHIFT) > (1 << vec->shift))
-+#define TAIL_OFF(vec) ((vec->count < BRANCH_FACTOR) ? 0 : (((vec->count - 1) >> SHIFT) << SHIFT))
-+#define TAIL_SIZE(vec) (vec->count - TAIL_OFF(vec))
-+#define PVector_CheckExact(op) (Py_TYPE(op) == &PVectorType)
-+
-+static VNode* nodeFor(PVector *self, int i){
-+  int level;
-+  if((i >= 0) && (i < self->count)) {
-+    if(i >= TAIL_OFF(self)) {
-+      return self->tail;
-+    }
-+
-+    VNode* node = self->root;
-+    for(level = self->shift; level > 0; level -= SHIFT) {
-+      node = (VNode*) node->items[(i >> level) & BIT_MASK];
-+    }
-+
-+    return node;
-+  }
-+
-+  PyErr_Format(PyExc_IndexError, "Index out of range: %i", i);
-+  return NULL;
-+}
-+
-+static PyObject* _get_item(PVector *self, Py_ssize_t pos) {
-+  VNode* node = nodeFor((PVector*)self, pos);
-+  PyObject *result = NULL;
-+  if(node != NULL) {
-+    result = node->items[pos & BIT_MASK];
-+  }
-+  return result;
-+}
-+
-+/*
-+ Returns a new reference as specified by the PySequence_GetItem function.
-+*/
-+static PyObject* PVector_get_item(PVector *self, Py_ssize_t pos) {
-+  if (pos < 0) {
-+    pos += self->count;
-+  }
-+
-+  PyObject* obj = _get_item(self, pos);
-+  Py_XINCREF(obj);
-+  return obj;  
-+}
-+
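-+/* Decrements the node's internal ref count and, when it drops to zero, recursively
-+   releases children: inner VNodes above level 0, Python objects at level 0. */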
-+static void releaseNode(int level, VNode *node) {
-+  if(node == NULL) {
-+    return;
-+  }
-+
-+  debug("releaseNode(): node=%p, level=%i, refCount=%i\n", node, level, NODE_REF_COUNT(node));
-+
-+  int i;
-+
-+  DEC_NODE_REF_COUNT(node);
-+  debug("Refcount when trying to release: %u\n", NODE_REF_COUNT(node));
-+  if(NODE_REF_COUNT(node) == 0) {
-+    if(level > 0) {
-+      for(i = 0; i < BRANCH_FACTOR; i++) {
-+        if(node->items[i] != NULL) {
-+          releaseNode(level - SHIFT, node->items[i]);
-+        }
-+      }
-+      freeNode(node);
-+    } else {
-+      for(i = 0; i < BRANCH_FACTOR; i++) {
-+         Py_XDECREF(node->items[i]);
-+      }
-+      freeNode(node);
-+    }
-+  }
-+
-+  debug("releaseNode(): Done! node=%p!\n", node);
-+}
-+
-+/*
-+ Returns all references to PyObjects that have been stolen. Also decrements
-+ the internal reference counts used for shared memory structures and deallocates
-+ those if needed.
-+*/
-+static void PVector_dealloc(PVector *self) {
-+  debug("Dealloc(): self=%p, self->count=%u, tail->refCount=%u, root->refCount=%u, self->shift=%u, self->tail=%p, self->root=%p\n",
-+        self, self->count, NODE_REF_COUNT(self->tail), NODE_REF_COUNT(self->root), self->shift, self->tail, self->root);
-+
-+  if (self->in_weakreflist != NULL) {
-+    PyObject_ClearWeakRefs((PyObject *) self);
-+  }
-+  
-+  PyObject_GC_UnTrack((PyObject*)self);
-+  Py_TRASHCAN_SAFE_BEGIN(self);
-+
-+  releaseNode(0, self->tail);
-+  releaseNode(self->shift, self->root);
-+  
-+  PyObject_GC_Del(self);
-+  Py_TRASHCAN_SAFE_END(self);
-+}
-+
-+static PyObject *PVector_toList(PVector *self) {
-+  Py_ssize_t i;
-+  PyObject *list = PyList_New(self->count);
-+  for (i = 0; i < self->count; ++i) {
-+    PyObject *o = _get_item(self, i);
-+    Py_INCREF(o);
-+    PyList_SET_ITEM(list, i, o);
-+  }
-+
-+  return list;
-+}
-+
-+
-+static PyObject *PVector_repr(PVector *self) {
-+  // Reuse the list repr code, a bit less efficient but saves some code
-+  PyObject *list = PVector_toList(self);
-+  PyObject *list_repr = PyObject_Repr(list);
-+  Py_DECREF(list);
-+
-+  if(list_repr == NULL) {
-+    // Exception raised during call to repr
-+    return NULL;
-+  }
-+  
-+  // Repr for list implemented differently in python 2 and 3. Need to
-+  // handle this or core dump will occur.
-+#if PY_MAJOR_VERSION >= 3
-+  PyObject *s = PyUnicode_FromFormat("%s%U%s", "pvector(", list_repr, ")");
-+  Py_DECREF(list_repr);
-+#else
-+  PyObject *s = PyString_FromString("pvector(");
-+  PyString_ConcatAndDel(&s, list_repr);
-+  PyString_ConcatAndDel(&s, PyString_FromString(")"));
-+#endif
-+
-+  return s;
-+}
-+
-+
-+static long PVector_hash(PVector *self) {
-+  // Follows the pattern of the tuple hash
-+  long x, y;
-+  Py_ssize_t i;
-+  long mult = 1000003L;
-+  x = 0x456789L;
-+  for(i=0; i<self->count; i++) {
-+      y = PyObject_Hash(_get_item(self, i));
-+      if (y == -1) {
-+        return -1;
-+      }
-+      x = (x ^ y) * mult;
-+      mult += (long)(82520L + i + i);
-+  }
-+
-+  x += 97531L;
-+  if(x == -1) {
-+    x = -2;
-+  }
-+
-+  return x;
-+}
-+
-+static PyObject* compareSizes(long vlen, long wlen, int op) {
-+    int cmp;
-+    PyObject *res;
-+    switch (op) {
-+      case Py_LT: cmp = vlen <  wlen; break;
-+      case Py_LE: cmp = vlen <= wlen; break;
-+      case Py_EQ: cmp = vlen == wlen; break;
-+      case Py_NE: cmp = vlen != wlen; break;
-+      case Py_GT: cmp = vlen >  wlen; break;
-+      case Py_GE: cmp = vlen >= wlen; break;
-+      default: return NULL; /* cannot happen */
-+    }
-+
-+    if (cmp) {
-+      res = Py_True;
-+    } else {
-+      res = Py_False;
-+    }
-+
-+    Py_INCREF(res);
-+    return res;
-+}
-+
-+static PyObject* PVector_richcompare(PyObject *v, PyObject *w, int op) {
-+    // Follows the principles of the tuple comparison
-+    PVector *vt, *wt;
-+    Py_ssize_t i;
-+    Py_ssize_t vlen, wlen;
-+    PyObject *list;
-+    PyObject *result;
-+
-+    if(!PVector_CheckExact(v) || !PVector_CheckExact(w)) {
-+      if(PVector_CheckExact(v)) {
-+        list = PVector_toList((PVector*)v);
-+        result = PyObject_RichCompare(list , w, op);
-+        Py_DECREF(list);
-+        return result; 
-+      }
-+
-+      if(PVector_CheckExact(w)) {
-+        list = PVector_toList((PVector*)w);
-+        result = PyObject_RichCompare(v, list, op);
-+        Py_DECREF(list);
-+        return result; 
-+      }
-+
-+      Py_INCREF(Py_NotImplemented);
-+      return Py_NotImplemented;
-+    }
-+
-+    if((op == Py_EQ) && (v == w)) {
-+        Py_INCREF(Py_True);
-+        return Py_True;
-+    }
-+
-+    vt = (PVector *)v;
-+    wt = (PVector *)w;
-+
-+    vlen = vt->count;
-+    wlen = wt->count;
-+
-+    if (vlen != wlen) {
-+        if (op == Py_EQ) {
-+            Py_INCREF(Py_False);
-+            return Py_False;
-+        } else if (op == Py_NE) {
-+            Py_INCREF(Py_True);
-+            return Py_True;
-+        }
-+    }
-+
-+    /* Search for the first index where items are different. */
-+    PyObject *left = NULL;
-+    PyObject *right = NULL;
-+    for (i = 0; i < vlen && i < wlen; i++) {
-+        left = _get_item(vt, i);
-+        right = _get_item(wt, i);
-+        int k = PyObject_RichCompareBool(left, right, Py_EQ);
-+        if (k < 0) {
-+            return NULL;
-+        }
-+        if (!k) {
-+           break;
-+        }
-+    }
-+
-+    if (i >= vlen || i >= wlen) {
-+        /* No more items to compare -- compare sizes */
-+        return compareSizes(vlen, wlen, op);
-+    }
-+
-+    /* We have an item that differs -- shortcuts for EQ/NE */
-+    if (op == Py_EQ) {
-+        Py_INCREF(Py_False);
-+        return Py_False;
-+    } else if (op == Py_NE) {
-+        Py_INCREF(Py_True);
-+        return Py_True;
-+    } else {
-+      /* Compare the final item again using the proper operator */
-+      return PyObject_RichCompare(left, right, op);
-+    }
-+}
-+
-+
-+static PyObject* PVector_repeat(PVector *self, Py_ssize_t n) {
-+  if (n < 0) {
-+      n = 0;
-+  }
-+
-+  if ((n == 0) || (self->count == 0)) {
-+    Py_INCREF(EMPTY_VECTOR);
-+    return (PyObject *)EMPTY_VECTOR;
-+  } else if (n == 1) {
-+    Py_INCREF(self);
-+    return (PyObject *)self;
-+  } else if ((self->count * n)/self->count != n) {
-+    return PyErr_NoMemory();
-+  } else {
-+    int i, j;
-+    PVector *newVec = copyPVector(self);
-+    for(i=0; i<(n-1); i++) {
-+      for(j=0; j<self->count; j++) {
-+        extendWithItem(newVec, PVector_get_item(self, j));
-+      }
-+    }
-+    return (PyObject*)newVec;
-+  }
-+}
-+
-+static int PVector_traverse(PVector *o, visitproc visit, void *arg) {
-+    // Naive traverse
-+    Py_ssize_t i;
-+    for (i = o->count; --i >= 0; ) {
-+      Py_VISIT(_get_item(o, i));
-+    }
-+
-+    return 0;
-+}
-+
-+
-+static PyObject* PVector_index(PVector *self, PyObject *args) {
-+  // A direct rip-off of the tuple version
-+  Py_ssize_t i, start=0, stop=self->count;
-+  PyObject *value;
-+  
-+  if (!PyArg_ParseTuple(args, "O|O&O&:index", &value,
-+			_PyEval_SliceIndex, &start,
-+			_PyEval_SliceIndex, &stop)) {
-+    return NULL;
-+  }
-+  
-+  if (start < 0) {
-+    start += self->count;
-+    if (start < 0) {
-+      start = 0;
-+    }
-+  }
-+  
-+  if (stop < 0) {
-+    stop += self->count;
-+      if (stop < 0) {
-+	stop = 0;
-+      }
-+  }
-+  
-+  for (i = start; i < stop && i < self->count; i++) {
-+    int cmp = PyObject_RichCompareBool(_get_item(self, i), value, Py_EQ);
-+    if (cmp > 0) {
-+#if PY_MAJOR_VERSION >= 3
-+      return PyLong_FromSsize_t(i);
-+#else
-+      return PyInt_FromSsize_t(i);
-+#endif
-+    } else if (cmp < 0) {
-+      return NULL;
-+    }
-+  }
-+
-+  PyErr_SetString(PyExc_ValueError, "PVector.index(x): x not in vector");
-+  return NULL;
-+}
-+
-+static PyObject* PVector_count(PVector *self, PyObject *value) {
-+  Py_ssize_t count = 0;
-+  Py_ssize_t i;
-+
-+  for (i = 0; i < self->count; i++) {
-+    int cmp = PyObject_RichCompareBool(_get_item(self, i), value, Py_EQ);
-+    if (cmp > 0) {
-+      count++;
-+    } else if (cmp < 0) {
-+      return NULL;
-+    }
-+  }
-+
-+#if PY_MAJOR_VERSION >= 3
-+      return PyLong_FromSsize_t(count);
-+#else
-+      return PyInt_FromSsize_t(count);
-+#endif
-+}
-+
-+static PyObject* PVector_pickle_reduce(PVector *self) {
-+
-+  PyObject* module = PyImport_ImportModule("pvectorc");
-+  PyObject* pvector_fn = PyObject_GetAttrString(module, "pvector");
-+  Py_DECREF(module);
-+
-+  PyObject *list = PVector_toList(self);
-+  PyObject *arg_tuple = PyTuple_New(1);
-+  PyTuple_SET_ITEM(arg_tuple, 0, list);
-+
-+  PyObject *result_tuple = PyTuple_New(2);
-+  PyTuple_SET_ITEM(result_tuple, 0, pvector_fn);
-+  PyTuple_SET_ITEM(result_tuple, 1, arg_tuple);
-+
-+  return result_tuple;
-+}
-+
-+static PVector* rawCopyPVector(PVector* vector) {
-+  PVector* newVector = PyObject_GC_New(PVector, &PVectorType);
-+  newVector->count = vector->count;
-+  newVector->shift = vector->shift;
-+  newVector->root = vector->root;
-+  newVector->tail = vector->tail;
-+  newVector->in_weakreflist = NULL;
-+  PyObject_GC_Track((PyObject*)newVector);
-+  return newVector;
-+}
-+
-+static void initializeEvolver(PVectorEvolver* evolver, PVector* vector, PyObject* appendList) {
-+  // Need to hold a reference to the underlying vector to manage
-+  // the ref counting properly.
-+  evolver->originalVector = vector;
-+  evolver->newVector = vector;
-+
-+  if(appendList == NULL) {
-+    evolver->appendList = PyList_New(0);
-+  } else {
-+    evolver->appendList = appendList;
-+  }
-+}
-+
-+static PyObject * PVector_evolver(PVector *self) {
-+  PVectorEvolver *evolver = PyObject_GC_New(PVectorEvolver, &PVectorEvolverType);
-+  if (evolver == NULL) {
-+    return NULL;
-+  }
-+  initializeEvolver(evolver, self, NULL);
-+  PyObject_GC_Track(evolver);
-+  Py_INCREF(self);
-+  return (PyObject *)evolver;
-+}
-+
-+
-+static void copyInsert(void** dest, void** src, Py_ssize_t pos, void *obj) {
-+  memcpy(dest, src, BRANCH_FACTOR * sizeof(void*));
-+  dest[pos] = obj;
-+}
-+
-+static PyObject* PVector_append(PVector *self, PyObject *obj);
-+
-+static PyObject* PVector_transform(PVector *self, PyObject *obj);
-+
-+static PyObject* PVector_set(PVector *self, PyObject *obj);
-+
-+static PyObject* PVector_mset(PVector *self, PyObject *args);
-+
-+static PyObject* PVector_subscript(PVector* self, PyObject* item);
-+
-+static PyObject* PVector_extend(PVector *self, PyObject *args);
-+
-+static PyObject* PVector_delete(PVector *self, PyObject *args);
-+
-+static PyObject* PVector_remove(PVector *self, PyObject *args);
-+
-+static PySequenceMethods PVector_sequence_methods = {
-+    (lenfunc)PVector_len,            /* sq_length */
-+    (binaryfunc)PVector_extend,      /* sq_concat */
-+    (ssizeargfunc)PVector_repeat,    /* sq_repeat */
-+    (ssizeargfunc)PVector_get_item,  /* sq_item */
-+    // TODO might want to move the slice function to here
-+    NULL,                            /* sq_slice */
-+    NULL,                            /* sq_ass_item */
-+    NULL,                            /* sq_ass_slice */
-+    NULL,                            /* sq_contains */
-+    NULL,                            /* sq_inplace_concat */
-+    NULL,                            /* sq_inplace_repeat */
-+};
-+
-+static PyMappingMethods PVector_mapping_methods = {
-+    (lenfunc)PVector_len,
-+    (binaryfunc)PVector_subscript,
-+    NULL
-+};
-+
-+
-+static PyMethodDef PVector_methods[] = {
-+	{"append",      (PyCFunction)PVector_append, METH_O,       "Appends an element"},
-+	{"set",         (PyCFunction)PVector_set, METH_VARARGS, "Inserts an element at the specified position"},
-+	{"extend",      (PyCFunction)PVector_extend, METH_O|METH_COEXIST, "Extend"},
-+        {"transform",   (PyCFunction)PVector_transform, METH_VARARGS, "Apply one or more transformations"},
-+        {"index",       (PyCFunction)PVector_index, METH_VARARGS, "Return first index of value"},
-+	{"count",       (PyCFunction)PVector_count, METH_O, "Return number of occurrences of value"},
-+        {"__reduce__",  (PyCFunction)PVector_pickle_reduce, METH_NOARGS, "Pickle support method"},
-+        {"evolver",     (PyCFunction)PVector_evolver, METH_NOARGS, "Return new evolver for pvector"},
-+	{"mset",        (PyCFunction)PVector_mset, METH_VARARGS, "Inserts multiple elements at the specified positions"},
-+        {"tolist",      (PyCFunction)PVector_toList, METH_NOARGS, "Convert to list"},
-+        {"delete",      (PyCFunction)PVector_delete, METH_VARARGS, "Delete element(s) by index"},
-+        {"remove",      (PyCFunction)PVector_remove, METH_VARARGS, "Remove element(s) by equality"},
-+	{NULL}
-+};
-+
-+static PyObject * PVectorIter_iter(PyObject *seq);
-+
-+static PyTypeObject PVectorType = {
-+  PyVarObject_HEAD_INIT(NULL, 0)
-+  "pvectorc.PVector",                         /* tp_name        */
-+  sizeof(PVector),                            /* tp_basicsize   */
-+  0,		                              /* tp_itemsize    */
-+  (destructor)PVector_dealloc,                /* tp_dealloc     */
-+  0,                                          /* tp_print       */
-+  0,                                          /* tp_getattr     */
-+  0,                                          /* tp_setattr     */
-+  0,                                          /* tp_compare     */
-+  (reprfunc)PVector_repr,                     /* tp_repr        */
-+  0,                                          /* tp_as_number   */
-+  &PVector_sequence_methods,                  /* tp_as_sequence */
-+  &PVector_mapping_methods,                   /* tp_as_mapping  */
-+  (hashfunc)PVector_hash,                     /* tp_hash        */
-+  0,                                          /* tp_call        */
-+  0,                                          /* tp_str         */
-+  0,                                          /* tp_getattro    */
-+  0,                                          /* tp_setattro    */
-+  0,                                          /* tp_as_buffer   */
-+  Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags       */
-+  "Persistent vector",   	              /* tp_doc         */
-+  (traverseproc)PVector_traverse,             /* tp_traverse       */
-+  0,                                          /* tp_clear          */
-+  PVector_richcompare,                        /* tp_richcompare    */
-+  offsetof(PVector, in_weakreflist),          /* tp_weaklistoffset */
-+  PVectorIter_iter,                           /* tp_iter           */
-+  0,                                          /* tp_iternext       */
-+  PVector_methods,                            /* tp_methods        */
-+  PVector_members,                            /* tp_members        */
-+  0,                                          /* tp_getset         */
-+  0,                                          /* tp_base           */
-+  0,                                          /* tp_dict           */
-+  0,                                          /* tp_descr_get      */
-+  0,                                          /* tp_descr_set      */
-+  0,                                          /* tp_dictoffset     */
-+};
-+
-+static PyObject* pyrsistent_pvec(PyObject *self, PyObject *args) {
-+    debug("pyrsistent_pvec(): %x\n", args);
-+
-+    PyObject *argObj = NULL;  /* list of arguments */
-+
-+    if(!PyArg_ParseTuple(args, "|O", &argObj)) {
-+      return NULL;
-+    }
-+
-+    if(argObj == NULL) {
-+      Py_INCREF(EMPTY_VECTOR);
-+      return (PyObject*)EMPTY_VECTOR;
-+    }
-+
-+    return PVector_extend(EMPTY_VECTOR, argObj);
-+}
-+
-+static PVector* emptyNewPvec(void) {
-+  PVector *pvec = PyObject_GC_New(PVector, &PVectorType);
-+  debug("pymem alloc_new %x, ref cnt: %u\n", pvec, pvec->ob_refcnt);
-+  pvec->count = (Py_ssize_t)0;
-+  pvec->shift = SHIFT;
-+  pvec->root = newNode();
-+  pvec->tail = newNode();
-+  pvec->in_weakreflist = NULL;
-+  PyObject_GC_Track((PyObject*)pvec);
-+  return pvec;
-+}
-+
-+static void incRefs(PyObject **obj) {
-+  // TODO-OPT: Would it be OK to exit on first NULL? Should not be any
-+  //           non NULLs beyond a NULL.
-+  int i;
-+  for(i = 0; i < BRANCH_FACTOR; i++) {
-+    Py_XINCREF(obj[i]);
-+  }
-+}
-+
-+
-+static PVector* newPvec(unsigned int count, unsigned int shift, VNode *root) {
-+  // TODO-OPT: Introduce object cache
-+  PVector *pvec = PyObject_GC_New(PVector, &PVectorType);
-+  debug("pymem alloc_copy %x, ref cnt: %u\n", pvec, pvec->ob_refcnt);
-+  pvec->count = count;
-+  pvec->shift = shift;
-+  pvec->root = root;
-+  pvec->tail = newNode();
-+  pvec->in_weakreflist = NULL;
-+  PyObject_GC_Track((PyObject*)pvec);
-+  return pvec;
-+}
-+
-+static VNode* newPath(unsigned int level, VNode* node){
-+  if(level == 0) {
-+    INC_NODE_REF_COUNT(node);
-+    return node;
-+  }
-+  
-+  VNode* result = newNode();
-+  result->items[0] = newPath(level - SHIFT, node);
-+  return result;
-+}
-+
-+static VNode* pushTail(unsigned int level, unsigned int count, VNode* parent, VNode* tail) {
-+  int subIndex = ((count - 1) >> level) & BIT_MASK;
-+  VNode* result = copyNode(parent);
-+  VNode* nodeToInsert;
-+  VNode* child;
-+  debug("pushTail(): count = %i, subIndex = %i\n", count, subIndex);
-+
-+  if(level == SHIFT) {
-+    // We're at the bottom
-+    INC_NODE_REF_COUNT(tail);
-+    nodeToInsert = tail;
-+  } else {
-+    // More levels available in the tree
-+    child = parent->items[subIndex];
-+
-+    if(child != NULL) {
-+      nodeToInsert = pushTail(level - SHIFT, count, child, tail);
-+
-+      // Need to make an adjustment of the ref COUNT for the child node here since
-+      // it was incremented in an earlier stage when the node was copied. Now the child
-+      // node will be part of the path copy so the number of references to the original
-+      // child will not increase at all.
-+      DEC_NODE_REF_COUNT(child);
-+    } else {
-+      nodeToInsert = newPath(level - SHIFT, tail);
-+    }
-+  }
-+  
-+  result->items[subIndex] = nodeToInsert;
-+  return result;
-+}
-+
-+static PVector* copyPVector(PVector *original) {
-+  PVector *newVec = newPvec(original->count, original->shift, original->root);
-+  INC_NODE_REF_COUNT(original->root);
-+  memcpy(newVec->tail->items, original->tail->items, TAIL_SIZE(original) * sizeof(void*));
-+  incRefs((PyObject**)newVec->tail->items);
-+  return newVec;
-+}
-+
-+/* Does not steal a reference, this must be managed outside of this function */
-+static void extendWithItem(PVector *newVec, PyObject *item) {
-+  unsigned int tail_size = TAIL_SIZE(newVec);
-+
-+  if(tail_size >= BRANCH_FACTOR) {
-+    VNode* new_root;
-+    if(ROOT_NODE_FULL(newVec)) {
-+      new_root = newNode();
-+      new_root->items[0] = newVec->root;
-+      new_root->items[1] = newPath(newVec->shift, newVec->tail);
-+      newVec->shift += SHIFT;
-+    } else {
-+      new_root = pushTail(newVec->shift, newVec->count, newVec->root, newVec->tail);
-+      releaseNode(newVec->shift, newVec->root);
-+    }
-+
-+    newVec->root = new_root;
-+
-+    // Need to adjust the ref count of the old tail here since no new references were
-+    // actually created, we just moved the tail.
-+    DEC_NODE_REF_COUNT(newVec->tail);
-+    newVec->tail = newNode();
-+    tail_size = 0;
-+  }
-+
-+  newVec->tail->items[tail_size] = item;    
-+  newVec->count++;
-+}
-+
-+
-+#if PY_MAJOR_VERSION >= 3
-+// This was changed in 3.2 but we do not claim compatibility with any older version of python 3.
-+#define SLICE_CAST
-+#else
-+#define SLICE_CAST (PySliceObject *)
-+#endif
-+
-+static PyObject *PVector_subscript(PVector* self, PyObject* item) {
-+  if (PyIndex_Check(item)) {
-+    Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
-+    if (i == -1 && PyErr_Occurred()) {
-+      return NULL;
-+    }
-+    
-+    return PVector_get_item(self, i);
-+  } else if (PySlice_Check(item)) {
-+    Py_ssize_t start, stop, step, slicelength, cur, i;
-+    if (PySlice_GetIndicesEx(SLICE_CAST item, self->count,
-+                             &start, &stop, &step, &slicelength) < 0) {
-+      return NULL;
-+    }
-+    
-+    debug("start=%i, stop=%i, step=%i\n", start, stop, step);
-+    
-+    if (slicelength <= 0) {
-+      Py_INCREF(EMPTY_VECTOR);
-+      return (PyObject*)EMPTY_VECTOR;
-+    } else if((slicelength == self->count) && (step > 0)) {
-+      Py_INCREF(self);
-+      return (PyObject*)self;
-+    } else {
-+      PVector *newVec = copyPVector(EMPTY_VECTOR);
-+      for (cur=start, i=0; i<slicelength; cur += (size_t)step, i++) {
-+        extendWithItem(newVec, PVector_get_item(self, cur));
-+      }
-+      
-+      return (PyObject*)newVec;
-+    }
-+  } else {
-+    PyErr_Format(PyExc_TypeError, "pvector indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
-+    return NULL;
-+  }
-+} 
-+
-+/* A hack to get some of the error handling code away from the function
-+   doing the actual work */
-+#define HANDLE_ITERATION_ERROR()                         \
-+    if (PyErr_Occurred()) {                              \
-+      if (PyErr_ExceptionMatches(PyExc_StopIteration)) { \
-+        PyErr_Clear();                                   \
-+      } else {                                           \
-+        return NULL;                                     \
-+      }                                                  \
-+    }
-+
-+
-+/* Returns a new vector that is extended with the iterable b.
-+   Takes a copy of the original vector and performs the extension in place on this
-+   one for efficiency. 
-+
-+   There are some optimizations that could be done to this function;
-+   these are not considered important enough yet though.
-+   - Use the PySequence_Fast ops if the iterable is a list or a tuple (which it
-+     would probably often be)
-+   - Only copy the original tail if it is not full
-+   - No need to try to increment ref count in tail for the whole tail
-+*/
-+static PyObject* PVector_extend(PVector *self, PyObject *iterable) {
-+    PyObject *it;
-+    PyObject *(*iternext)(PyObject *);
-+
-+    it = PyObject_GetIter(iterable);
-+    if (it == NULL) {
-+        return NULL;
-+    }
-+    
-+    // TODO-OPT: Use special fast iterator if available
-+    iternext = *Py_TYPE(it)->tp_iternext;
-+    PyObject *item = iternext(it);
-+    if (item == NULL) {
-+      Py_DECREF(it);
-+      HANDLE_ITERATION_ERROR()
-+      Py_INCREF(self);
-+      return (PyObject *)self;
-+    } else {
-+      PVector *newVec = copyPVector(self);
-+      // TODO-OPT test using special case code here for extension to
-+      // avoid recalculating tail length all the time.
-+      while(item != NULL) {
-+        extendWithItem(newVec, item);
-+        item = iternext(it);
-+      }
-+
-+      Py_DECREF(it);
-+      HANDLE_ITERATION_ERROR()
-+      return (PyObject*)newVec;
-+    }
-+}
-+
-+/*
-+ Steals a reference to the object that is appended to the list.
-+*/
-+static PyObject* PVector_append(PVector *self, PyObject *obj) {
-+  assert (obj != NULL);
-+
-+  unsigned int tail_size = TAIL_SIZE(self);
-+  debug("append(): count = %u, tail_size = %u\n", self->count, tail_size);
-+
-+  // Does the new object fit in the tail? If so, take a copy of the tail and
-+  // insert the new element in that.
-+  if(tail_size < BRANCH_FACTOR) {
-+    INC_NODE_REF_COUNT(self->root);
-+    PVector *new_pvec = newPvec(self->count + 1, self->shift, self->root);
-+    // TODO-OPT No need to copy more than the current tail length
-+    // TODO-OPT No need to incRefs for all elements all the time
-+    copyInsert(new_pvec->tail->items, self->tail->items, tail_size, obj);
-+    incRefs((PyObject**)new_pvec->tail->items);
-+    debug("append(): new_pvec=%p, new_pvec->tail=%p, new_pvec->root=%p\n",
-+    new_pvec, new_pvec->tail, new_pvec->root);
-+
-+    return (PyObject*)new_pvec;
-+  }
-+
-+  // Tail is full, need to push it into the tree  
-+  VNode* new_root;
-+  unsigned int new_shift;
-+  if(ROOT_NODE_FULL(self)) {
-+    new_root = newNode();
-+    new_root->items[0] = self->root;
-+    INC_NODE_REF_COUNT(self->root);
-+    new_root->items[1] = newPath(self->shift, self->tail);
-+    new_shift = self->shift + SHIFT;
-+  } else {
-+    new_root = pushTail(self->shift, self->count, self->root, self->tail);
-+    new_shift = self->shift;
-+  }
-+
-+  PVector* pvec = newPvec(self->count + 1, new_shift, new_root);
-+  pvec->tail->items[0] = obj;
-+  Py_XINCREF(obj);
-+  debug("append_push(): pvec=%p, pvec->tail=%p, pvec->root=%p\n", pvec, pvec->tail, pvec->root);
-+  return (PyObject*)pvec;
-+}
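-+
-+/* Sketch of the persistent append semantics, assuming append() is
-+   exposed as a method on PVector (as in the pyrsistent API):
-+   >>> v1 = pvector([1, 2, 3])
-+   >>> v1.append(4)
-+   pvector([1, 2, 3, 4])
-+   >>> v1                       # v1 shares structure with the result
-+   pvector([1, 2, 3])
-+*/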
-+
-+static VNode* doSet(VNode* node, unsigned int level, unsigned int position, PyObject* value) {
-+  debug("doSet(): level == %i\n", level);
-+  if(level == 0) {
-+    // TODO-OPT: Perhaps an alloc followed by a reset of reference
-+    // count is enough here since we overwrite all subnodes below.
-+    VNode* theNewNode = newNode();
-+    copyInsert(theNewNode->items, node->items, position & BIT_MASK, value);
-+    incRefs((PyObject**)theNewNode->items);
-+    return theNewNode;
-+  } else {
-+    VNode* theNewNode = copyNode(node);
-+    Py_ssize_t index = (position >> level) & BIT_MASK;
-+
-+    // Drop reference to this node since we're about to replace it
-+    DEC_NODE_REF_COUNT((VNode*)theNewNode->items[index]);
-+    theNewNode->items[index] = doSet(node->items[index], level - SHIFT, position, value); 
-+    return theNewNode;
-+  }
-+}
-+
-+
-+static PyObject* internalSet(PVector *self, Py_ssize_t position, PyObject *argObj) {
-+  if(position < 0) {
-+    position += self->count;
-+  }
-+
-+  if((0 <= position) && (position < self->count)) {
-+    if(position >= TAIL_OFF(self)) {
-+      // Reuse the root, replace the tail
-+      INC_NODE_REF_COUNT(self->root);
-+      PVector *new_pvec = newPvec(self->count, self->shift, self->root);
-+      copyInsert(new_pvec->tail->items, self->tail->items, position & BIT_MASK, argObj);
-+      incRefs((PyObject**)new_pvec->tail->items);
-+      return (PyObject*)new_pvec;
-+    } else {
-+      // Keep the tail, replace the root
-+      VNode *newRoot = doSet(self->root, self->shift, position, argObj);
-+      PVector *new_pvec = newPvec(self->count, self->shift, newRoot);
-+
-+      // Free the tail and replace it with a reference to the tail of the original vector
-+      freeNode(new_pvec->tail);
-+      new_pvec->tail = self->tail;
-+      INC_NODE_REF_COUNT(self->tail);
-+      return (PyObject*)new_pvec;
-+    }
-+  } else if (position == self->count) {
-+    // TODO Remove this case?
-+    return PVector_append(self, argObj);
-+  } else {
-+    PyErr_Format(PyExc_IndexError, "Index out of range: %zd", position);
-+    return NULL;
-+  }
-+}
-+
-+static PyObject* PVector_transform(PVector *self, PyObject *obj) {
-+  return transform(self, obj);
-+}
-+
-+/*
-+ Steals a reference to the object that is inserted in the vector.
-+*/
-+static PyObject* PVector_set(PVector *self, PyObject *args) {
-+  PyObject *argObj = NULL;  /* argument to insert */
-+  Py_ssize_t position;
-+
-+  /* The n parses for size, the O parses for a Python object */
-+  if(!PyArg_ParseTuple(args, "nO", &position, &argObj)) {
-+    return NULL;
-+  }
-+
-+  return internalSet(self, position, argObj);
-+}
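-+
-+/* Sketch of set() as seen from Python, assuming the method is exposed
-+   on PVector (as in the pyrsistent API). Note the internalSet() cases
-+   above: negative positions count from the end, and position == count
-+   falls through to append:
-+   >>> v = pvector([1, 2, 3])
-+   >>> v.set(1, 99)
-+   pvector([1, 99, 3])
-+   >>> v.set(-1, 99)
-+   pvector([1, 2, 99])
-+   >>> v.set(3, 4)
-+   pvector([1, 2, 3, 4])
-+*/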
-+
-+
-+static PyObject* PVector_mset(PVector *self, PyObject *args) {
-+  Py_ssize_t size = PyTuple_Size(args);
-+  if(size % 2) {
-+    PyErr_SetString(PyExc_TypeError, "mset expected an even number of arguments");
-+    return NULL;
-+  }
-+
-+  PVectorEvolver* evolver = (PVectorEvolver*)PVector_evolver(self);
-+  Py_ssize_t i;
-+  for(i=0; i<size; i+=2) {
-+    if(PVectorEvolver_set_item(evolver, PyTuple_GetItem(args, i), PyTuple_GetItem(args, i + 1)) < 0) {
-+      Py_DECREF(evolver);
-+      return NULL;
-+    }
-+  }
-+
-+  PyObject* vector = PVectorEvolver_persistent(evolver);
-+  Py_DECREF(evolver);
-+  return vector;
-+}
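-+
-+/* Sketch of mset(), which takes alternating position/value pairs and
-+   applies them all through a single evolver, assuming the method is
-+   exposed on PVector (as in the pyrsistent API):
-+   >>> pvector(['a', 'b', 'c']).mset(0, 'x', 2, 'z')
-+   pvector(['x', 'b', 'z'])
-+*/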
-+
-+
-+static PyObject* internalDelete(PVector *self, Py_ssize_t index, PyObject *stop_obj) {
-+  Py_ssize_t stop;
-+  PyObject *list;
-+  PyObject *result;
-+
-+  if (index < 0) {
-+    index += self->count;
-+  }
-+
-+  if (stop_obj != NULL) {
-+    if (PyIndex_Check(stop_obj)) {
-+      stop = PyNumber_AsSsize_t(stop_obj, PyExc_IndexError);
-+      if (stop == -1 && PyErr_Occurred()) {
-+        return NULL;
-+      }
-+    } else {
-+      PyErr_Format(PyExc_TypeError, "Stop index must be integer, not %.200s", Py_TYPE(stop_obj)->tp_name);
-+      return NULL;
-+    }
-+
-+    if (stop < 0) {
-+      stop += self->count;
-+    }
-+  } else {
-+    if (index < 0 || index >= self->count) {
-+      PyErr_SetString(PyExc_IndexError, "delete index out of range");
-+      return NULL;
-+    }
-+
-+    stop = index + 1;
-+  }
-+
-+  list = PVector_toList(self);
-+  if(list == NULL) {
-+    return NULL;
-+  }
-+
-+  // Release the temporary list on failure to avoid leaking it
-+  if(PyList_SetSlice(list, index, stop, NULL) < 0) {
-+    Py_DECREF(list);
-+    return NULL;
-+  }
-+
-+  result = PVector_extend(EMPTY_VECTOR, list);
-+  Py_DECREF(list);
-+  return result;
-+}
-+
-+static PyObject* PVector_delete(PVector *self, PyObject *args) {
-+  Py_ssize_t index;
-+  PyObject *stop_obj = NULL;
-+
-+  if(!PyArg_ParseTuple(args, "n|O:delete", &index, &stop_obj)) {
-+    return NULL;
-+  }
-+
-+  return internalDelete(self, index, stop_obj);
-+}
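-+
-+/* Sketch of delete() with and without the optional stop index, assuming
-+   the method is exposed on PVector (as in the pyrsistent API). The
-+   half-open [index, stop) range mirrors the PyList_SetSlice call above:
-+   >>> pvector([1, 2, 3, 4]).delete(1)
-+   pvector([1, 3, 4])
-+   >>> pvector([1, 2, 3, 4]).delete(1, 3)
-+   pvector([1, 4])
-+*/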
-+
-+static PyObject* PVector_remove(PVector *self, PyObject *args) {
-+  Py_ssize_t index;
-+  PyObject* py_index = PVector_index(self, args);
-+
-+  if(py_index != NULL) {
-+#if PY_MAJOR_VERSION >= 3
-+      index = PyLong_AsSsize_t(py_index);
-+#else
-+      index = PyInt_AsSsize_t(py_index);
-+#endif
-+    Py_DECREF(py_index);
-+    return internalDelete(self, index, NULL);
-+  }
-+
-+  PyErr_SetString(PyExc_ValueError, "PVector.remove(x): x not in vector");
-+  return NULL;
-+}
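-+
-+/* Sketch of remove(), which deletes the first occurrence of a value and
-+   raises ValueError when the value is absent, assuming the method is
-+   exposed on PVector (as in the pyrsistent API):
-+   >>> pvector([1, 2, 3, 2]).remove(2)
-+   pvector([1, 3, 2])
-+*/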
-+
-+
-+/*********************** PVector Iterator **************************/
-+
-+/* 
-+The Sequence class provides us with a default iterator but the runtime
-+overhead of using that compared to the iterator below is huge.
-+*/
-+
-+typedef struct {
-+    PyObject_HEAD
-+    Py_ssize_t it_index;
-+    PVector *it_seq; /* Set to NULL when iterator is exhausted */
-+} PVectorIter;
-+
-+static void PVectorIter_dealloc(PVectorIter *);
-+static int PVectorIter_traverse(PVectorIter *, visitproc, void *);
-+static PyObject *PVectorIter_next(PVectorIter *);
-+
-+static PyMethodDef PVectorIter_methods[] = {
-+    {NULL,              NULL}           /* sentinel */
-+};
-+
-+static PyTypeObject PVectorIterType = {
-+    PyVarObject_HEAD_INIT(NULL, 0)
-+    "pvector_iterator",                         /* tp_name */
-+    sizeof(PVectorIter),                        /* tp_basicsize */
-+    0,                                          /* tp_itemsize */
-+    /* methods */
-+    (destructor)PVectorIter_dealloc,            /* tp_dealloc */
-+    0,                                          /* tp_print */
-+    0,                                          /* tp_getattr */
-+    0,                                          /* tp_setattr */
-+    0,                                          /* tp_compare */
-+    0,                                          /* tp_repr */
-+    0,                                          /* tp_as_number */
-+    0,                                          /* tp_as_sequence */
-+    0,                                          /* tp_as_mapping */
-+    0,                                          /* tp_hash */
-+    0,                                          /* tp_call */
-+    0,                                          /* tp_str */
-+    PyObject_GenericGetAttr,                    /* tp_getattro */
-+    0,                                          /* tp_setattro */
-+    0,                                          /* tp_as_buffer */
-+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
-+    0,                                          /* tp_doc */
-+    (traverseproc)PVectorIter_traverse,         /* tp_traverse */
-+    0,                                          /* tp_clear */
-+    0,                                          /* tp_richcompare */
-+    0,                                          /* tp_weaklistoffset */
-+    PyObject_SelfIter,                          /* tp_iter */
-+    (iternextfunc)PVectorIter_next,             /* tp_iternext */
-+    PVectorIter_methods,                        /* tp_methods */
-+    0,                                          /* tp_members */
-+};
-+
-+static PyObject *PVectorIter_iter(PyObject *seq) {
-+    PVectorIter *it = PyObject_GC_New(PVectorIter, &PVectorIterType);
-+    if (it == NULL) {
-+        return NULL;
-+    }
-+
-+    it->it_index = 0;
-+    Py_INCREF(seq);
-+    it->it_seq = (PVector *)seq;
-+    PyObject_GC_Track(it);
-+    return (PyObject *)it;
-+}
-+
-+static void PVectorIter_dealloc(PVectorIter *it) {
-+    PyObject_GC_UnTrack(it);
-+    Py_XDECREF(it->it_seq);
-+    PyObject_GC_Del(it);
-+}
-+
-+static int PVectorIter_traverse(PVectorIter *it, visitproc visit, void *arg) {
-+    Py_VISIT(it->it_seq);
-+    return 0;
-+}
-+
-+static PyObject *PVectorIter_next(PVectorIter *it) {
-+    assert(it != NULL);
-+    PVector *seq = it->it_seq;
-+    if (seq == NULL) {
-+        return NULL;
-+    }
-+
-+    if (it->it_index < seq->count) {
-+        PyObject *item = _get_item(seq, it->it_index);
-+        ++it->it_index;
-+        Py_INCREF(item);
-+        return item;
-+    }
-+
-+    Py_DECREF(seq);
-+    it->it_seq = NULL;
-+    return NULL;
-+}
-+
-+
-+/*********************** PVector Evolver **************************/
-+
-+/*
-+An evolver makes batched updates easier to express and more efficient
-+than a series of individual persistent updates.
-+*/
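-+
-+/* A minimal usage sketch, assuming evolver() is exposed on PVector and
-+   the methods below are registered on the evolver type (as in the
-+   pyrsistent API):
-+   >>> v = pvector([1, 2, 3])
-+   >>> e = v.evolver()
-+   >>> e[1] = 22                # updates the evolver, not v
-+   >>> _ = e.append(4)
-+   >>> e.persistent()
-+   pvector([1, 22, 3, 4])
-+   >>> v
-+   pvector([1, 2, 3])
-+*/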
-+
-+static void PVectorEvolver_dealloc(PVectorEvolver *);
-+static PyObject *PVectorEvolver_append(PVectorEvolver *, PyObject *);
-+static PyObject *PVectorEvolver_extend(PVectorEvolver *, PyObject *);
-+static PyObject *PVectorEvolver_set(PVectorEvolver *, PyObject *);
-+static PyObject *PVectorEvolver_delete(PVectorEvolver *self, PyObject *args);
-+static PyObject *PVectorEvolver_subscript(PVectorEvolver *, PyObject *);
-+static PyObject *PVectorEvolver_persistent(PVectorEvolver *);
-+static Py_ssize_t PVectorEvolver_len(PVectorEvolver *);
-+static PyObject *PVectorEvolver_is_dirty(PVectorEvolver *);
-+static int PVectorEvolver_traverse(PVectorEvolver *self, visitproc visit, void *arg);
-+
-+static PyMappingMethods PVectorEvolver_mapping_methods = {
-+  (lenfunc)PVectorEvolver_len,
-+  (binaryfunc)PVectorEvolver_subscript,
-+  (objobjargproc)PVectorEvolver_set_item,
-+};
-+
-+
-+static PyMethodDef PVectorEvolver_methods[] = {
-+    {"append",      (PyCFunction)PVectorEvolver_append,     METH_O,              "Appends an element"},
-+    {"extend",      (PyCFunction)PVectorEvolver_extend,     METH_O|METH_COEXIST, "Extend"},
-+    {"set",         (PyCFunction)PVectorEvolver_set,        METH_VARARGS,        "Set item"},
-+    {"delete",      (PyCFunction)PVectorEvolver_delete,     METH_VARARGS,        "Delete item"},
-+    {"persistent",  (PyCFunction)PVectorEvolver_persistent, METH_NOARGS,         "Create PVector from evolver"},
-+    {"is_dirty",    (PyCFunction)PVectorEvolver_is_dirty,   METH_NOARGS,         "Check if evolver contains modifications"},
-+    {NULL,          NULL}                                   /* sentinel */
-+};
-+
-+static PyTypeObject PVectorEvolverType = {
-+    PyVarObject_HEAD_INIT(NULL, 0)
-+    "pvector_evolver",                          /* tp_name */
-+    sizeof(PVectorEvolver),                     /* tp_basicsize */
-+    0,                                          /* tp_itemsize */
-+    /* methods */
-+    (destructor)PVectorEvolver_dealloc,         /* tp_dealloc */
-+    0,                                          /* tp_print */
-+    0,                                          /* tp_getattr */
-+    0,                                          /* tp_setattr */
-+    0,                                          /* tp_compare */
-+    0,                                          /* tp_repr */
-+    0,                                          /* tp_as_number */
-+    0,                                          /* tp_as_sequence */
-+    &PVectorEvolver_mapping_methods,             /* tp_as_mapping */
-+    0,                                          /* tp_hash */
-+    0,                                          /* tp_call */
-+    0,                                          /* tp_str */
-+    PyObject_GenericGetAttr,                    /* tp_getattro */
-+    0,                                          /* tp_setattro */
-+    0,                                          /* tp_as_buffer */
-+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
-+    0,                                          /* tp_doc */
-+    (traverseproc)PVectorEvolver_traverse,      /* tp_traverse       */
-+    0,                                          /* tp_clear */
-+    0,                                          /* tp_richcompare */
-+    0,                                          /* tp_weaklistoffset */
-+    0,                                          /* tp_iter */
-+    0,                                          /* tp_iternext */
-+    PVectorEvolver_methods,                     /* tp_methods */
-+    0,                                          /* tp_members */
-+};
-+
-+
-+// Indicate that a node is "dirty" (has been updated by the evolver)
-+// by setting the MSB of the refCount. This will be cleared when
-+// creating a pvector from the evolver (cleaning it).
-+#define DIRTY_BIT 0x80000000
-+#define REF_COUNT_MASK (~DIRTY_BIT)
-+#define IS_DIRTY(node) ((node)->refCount & DIRTY_BIT)
-+#define SET_DIRTY(node) ((node)->refCount |= DIRTY_BIT)
-+#define CLEAR_DIRTY(node) ((node)->refCount &= REF_COUNT_MASK)
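-+
-+/* Worked example of the encoding above: a node with refCount 3 that is
-+   marked dirty stores 0x80000003. IS_DIRTY() is then non-zero, and
-+   CLEAR_DIRTY() restores the plain count 0x00000003. */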
-+
-+
-+static void cleanNodeRecursively(VNode *node, int level) {
-+  debug("Cleaning recursively node=%p, level=%u\n", node, level);
-+
-+  int i;
-+  CLEAR_DIRTY(node);
-+  SET_NODE_REF_COUNT(node, 1);
-+  if(level > 0) {
-+    for(i = 0; i < BRANCH_FACTOR; i++) {
-+      VNode *nextNode = (VNode*)node->items[i];
-+      if((nextNode != NULL) && IS_DIRTY(nextNode)) {
-+        cleanNodeRecursively(nextNode, level - SHIFT);
-+      }
-+    }
-+  }
-+}
-+
-+static void cleanVector(PVector *vector) {
-+  // Cleaning the vector means that all dirty indications are cleared
-+  // and that the nodes that were dirty get a ref count of 1 since
-+  // they are brand new. Once cleaned the vector can be released into
-+  // the wild.
-+  if(IS_DIRTY(vector->tail)) {
-+    cleanNodeRecursively(vector->tail, 0);
-+  } else {
-+    INC_NODE_REF_COUNT(vector->tail);
-+  }
-+
-+  if(IS_DIRTY(vector->root)) {
-+    cleanNodeRecursively(vector->root, vector->shift);
-+  } else {
-+    INC_NODE_REF_COUNT(vector->root);
-+  }
-+}
-+
-+static void PVectorEvolver_dealloc(PVectorEvolver *self) {
-+  PyObject_GC_UnTrack(self);
-+  Py_TRASHCAN_SAFE_BEGIN(self);
-+
-+  if(self->originalVector != self->newVector) {
-+    cleanVector(self->newVector);
-+    Py_DECREF(self->newVector);
-+  }
-+
-+  Py_DECREF(self->originalVector);
-+  Py_DECREF(self->appendList);
-+
-+  PyObject_GC_Del(self);
-+  Py_TRASHCAN_SAFE_END(self);
-+}
-+
-+static PyObject *PVectorEvolver_append(PVectorEvolver *self, PyObject *args) {
-+  if (PyList_Append(self->appendList, args) == 0) {
-+    Py_INCREF(self);
-+    return (PyObject*)self;
-+  }
-+
-+  return NULL;
-+}
-+
-+static PyObject *PVectorEvolver_extend(PVectorEvolver *self, PyObject *args) {
-+  PyObject *retVal = _PyList_Extend((PyListObject *)self->appendList, args);
-+  if (retVal == NULL) {
-+    return NULL;
-+  }
-+
-+  Py_DECREF(retVal);
-+  Py_INCREF(self);
-+  return (PyObject*)self;
-+}
-+
-+static PyObject *PVectorEvolver_subscript(PVectorEvolver *self, PyObject *item) {
-+  if (PyIndex_Check(item)) {
-+    Py_ssize_t position = PyNumber_AsSsize_t(item, PyExc_IndexError);
-+    if (position == -1 && PyErr_Occurred()) {
-+      return NULL;
-+    }
-+
-+    if (position < 0) {
-+      position += self->newVector->count + PyList_GET_SIZE(self->appendList);
-+    }
-+
-+    if(0 <= position && position < self->newVector->count) {
-+      PyObject *result = _get_item(self->newVector, position);
-+      Py_XINCREF(result);
-+      return result;
-+    } else if (0 <= position && position < (self->newVector->count + PyList_GET_SIZE(self->appendList))) {
-+      PyObject *result = PyList_GetItem(self->appendList, position - self->newVector->count);
-+      Py_INCREF(result);
-+      return result;
-+    } else {
-+      PyErr_SetString(PyExc_IndexError, "Index out of range");
-+    }
-+  } else {
-+    PyErr_Format(PyExc_TypeError, "Indices must be integers, not %.200s", item->ob_type->tp_name);
-+  }
-+
-+  return NULL;
-+}
-+
-+static VNode* doSetWithDirty(VNode* node, unsigned int level, unsigned int position, PyObject* value) {
-+  VNode* resultNode;
-+  debug("doSetWithDirty(): level == %i\n", level);
-+  if(level == 0) {
-+    if(!IS_DIRTY(node)) {
-+      resultNode = allocNode();
-+      copyInsert(resultNode->items, node->items, position & BIT_MASK, value);
-+      incRefs((PyObject**)resultNode->items);
-+      SET_DIRTY(resultNode);
-+    } else {
-+      resultNode = node;
-+      Py_INCREF(value);
-+      Py_DECREF(resultNode->items[position & BIT_MASK]);
-+      resultNode->items[position & BIT_MASK] = value;
-+    }
-+  } else {
-+    if(!IS_DIRTY(node)) {
-+      resultNode = copyNode(node);
-+      SET_DIRTY(resultNode);
-+    } else {
-+      resultNode = node;
-+    }    
-+
-+    Py_ssize_t index = (position >> level) & BIT_MASK;
-+    VNode* oldNode = (VNode*)resultNode->items[index];
-+    resultNode->items[index] = doSetWithDirty(resultNode->items[index], level - SHIFT, position, value);
-+
-+    if(resultNode->items[index] != oldNode) {
-+      // Node replaced, drop references to old node
-+      DEC_NODE_REF_COUNT(oldNode);
-+    }
-+  }
-+
-+  return resultNode;
-+}
-+
-+/*
-+ Steals a reference to the object that is inserted in the vector.
-+*/
-+static PyObject *PVectorEvolver_set(PVectorEvolver *self, PyObject *args) {
-+  PyObject *argObj = NULL;  /* argument to insert */
-+  PyObject *position = NULL;
-+
-+  /* Both Os parse Python objects: the position and the value to insert */
-+  if(!PyArg_ParseTuple(args, "OO", &position, &argObj)) {
-+    return NULL;
-+  }
-+
-+  if(PVectorEvolver_set_item(self, position, argObj) < 0) {
-+    return NULL;
-+  }
-+
-+  Py_INCREF(self);
-+  return (PyObject*)self;
-+}
-+
-+static PyObject *PVectorEvolver_delete(PVectorEvolver *self, PyObject *args) {
-+  PyObject *position = NULL;
-+
-+  /* The O parses a Python object: the position to delete at */
-+  if(!PyArg_ParseTuple(args, "O", &position)) {
-+    return NULL;
-+  }
-+
-+  if(PVectorEvolver_set_item(self, position, NULL) < 0) {
-+    return NULL;
-+  }
-+
-+  Py_INCREF(self);
-+  return (PyObject*)self;
-+}
-+
-+
-+static int internalPVectorDelete(PVectorEvolver *self, Py_ssize_t position) {
-+  // Deleting an element should be unusual, so this is a simple but expensive
-+  // operation that reuses the delete code of the vector: realize the vector,
-+  // delete on it and then reset the evolver to work on the new vector.
-+  PVector *temp = (PVector*)PVectorEvolver_persistent(self);
-+  PVector *temp2 = (PVector*)internalDelete(temp, position, NULL);
-+  Py_DECREF(temp);
-+
-+  if(temp2 == NULL) {
-+    return -1;
-+  }
-+
-+  Py_DECREF(self->originalVector);
-+  self->originalVector = temp2;
-+  self->newVector = self->originalVector;
-+  return 0;
-+}
-+
-+static int PVectorEvolver_set_item(PVectorEvolver *self, PyObject* item, PyObject* value) {
-+  if (PyIndex_Check(item)) {
-+    Py_ssize_t position = PyNumber_AsSsize_t(item, PyExc_IndexError);
-+    if (position == -1 && PyErr_Occurred()) {
-+      return -1;
-+    }
-+         
-+    if (position < 0) {
-+      position += self->newVector->count + PyList_GET_SIZE(self->appendList);
-+    }
-+
-+    if((0 <= position) && (position < self->newVector->count)) {
-+      if(self->originalVector == self->newVector) {
-+        // Create new vector since we're about to modify the original
-+        self->newVector = rawCopyPVector(self->originalVector);
-+      }
-+
-+      if(value != NULL) {
-+        if(position < TAIL_OFF(self->newVector)) {
-+          self->newVector->root = doSetWithDirty(self->newVector->root, self->newVector->shift, position, value);
-+        } else {
-+          self->newVector->tail = doSetWithDirty(self->newVector->tail, 0, position, value);
-+        }
-+
-+        return 0;
-+      }
-+
-+      return internalPVectorDelete(self, position);
-+    } else if((0 <= position) && (position < (self->newVector->count + PyList_GET_SIZE(self->appendList)))) {
-+      if (value != NULL) {
-+        // PyList_SetItem steals a reference to value (even on failure), so
-+        // take one up front on behalf of the list.
-+        Py_INCREF(value);
-+        return PyList_SetItem(self->appendList, position - self->newVector->count, value);
-+      }
-+
-+      return internalPVectorDelete(self, position);
-+    } else if((0 <= position)
-+              && (position < (self->newVector->count + PyList_GET_SIZE(self->appendList) + 1))
-+              && (value != NULL)) {
-+        return PyList_Append(self->appendList, value);
-+    } else {
-+      PyErr_Format(PyExc_IndexError, "Index out of range: %zd", position);
-+    }
-+  } else {
-+    PyErr_Format(PyExc_TypeError, "Indices must be integers, not %.200s", item->ob_type->tp_name);
-+  }
-+  return -1;
-+}
-+
-+static PyObject *PVectorEvolver_persistent(PVectorEvolver *self) {
-+  PVector *resultVector;
-+  if(self->newVector != self->originalVector) {
-+    cleanVector(self->newVector);
-+    Py_DECREF(self->originalVector);
-+  }
-+
-+  resultVector = self->newVector;
-+
-+  if(PyList_GET_SIZE(self->appendList)) {
-+    PVector *oldVector = resultVector;
-+    resultVector = (PVector*)PVector_extend(resultVector, self->appendList);
-+    Py_DECREF(oldVector);
-+    Py_DECREF(self->appendList);
-+    self->appendList = NULL;
-+  }
-+
-+  initializeEvolver(self, resultVector, self->appendList);
-+  Py_INCREF(resultVector);  
-+  return (PyObject*)resultVector;
-+}
-+
-+static Py_ssize_t PVectorEvolver_len(PVectorEvolver *self) {
-+  return self->newVector->count + PyList_GET_SIZE(self->appendList);
-+}
-+
-+static PyObject* PVectorEvolver_is_dirty(PVectorEvolver *self) {
-+  if((self->newVector != self->originalVector) || (PyList_GET_SIZE(self->appendList) > 0)) {
-+    Py_INCREF(Py_True);
-+    return Py_True;
-+  }
-+
-+  Py_INCREF(Py_False);
-+  return Py_False;
-+}
-+
-+static int PVectorEvolver_traverse(PVectorEvolver *self, visitproc visit, void *arg) {
-+  Py_VISIT(self->newVector);
-+  if (self->newVector != self->originalVector) {
-+      Py_VISIT(self->originalVector);
-+  }
-+  Py_VISIT(self->appendList);
-+  return 0;
-+}
-+
-+static PyMethodDef PyrsistentMethods[] = {
-+  {"pvector", pyrsistent_pvec, METH_VARARGS, 
-+   "pvector([iterable])\n"
-+   "Create a new persistent vector containing the elements in iterable.\n\n"
-+   ">>> v1 = pvector([1, 2, 3])\n"
-+   ">>> v1\n"
-+   "pvector([1, 2, 3])"},
-+  {NULL, NULL, 0, NULL}
-+};
-+
-+
-+/********************* Python module initialization ************************/
-+
-+#if PY_MAJOR_VERSION >= 3
-+  static struct PyModuleDef moduledef = {
-+    PyModuleDef_HEAD_INIT,
-+    "pvectorc",          /* m_name */
-+    "Persistent vector", /* m_doc */
-+    -1,                  /* m_size */
-+    PyrsistentMethods,   /* m_methods */
-+    NULL,                /* m_reload */
-+    NULL,                /* m_traverse */
-+    NULL,                /* m_clear */
-+    NULL,                /* m_free */
-+  };
-+#endif
-+
-+static PyObject* pyrsistent_pvectorc_moduleinit(void) {
-+  PyObject* m;
-+  
-+  // Only allow creation/initialization through factory method pvec
-+  PVectorType.tp_init = NULL;
-+  PVectorType.tp_new = NULL;
-+
-+  if (PyType_Ready(&PVectorType) < 0) {
-+    return NULL;
-+  }
-+  if (PyType_Ready(&PVectorIterType) < 0) {
-+    return NULL;
-+  }
-+  if (PyType_Ready(&PVectorEvolverType) < 0) {
-+    return NULL;
-+  }
-+
-+
-+#if PY_MAJOR_VERSION >= 3
-+  m = PyModule_Create(&moduledef);
-+#else
-+  m = Py_InitModule3("pvectorc", PyrsistentMethods, "Persistent vector");  
-+#endif
-+
-+  if (m == NULL) {
-+    return NULL;
-+  }
-+
-+  if(EMPTY_VECTOR == NULL) {
-+    EMPTY_VECTOR = emptyNewPvec();
-+  }
-+
-+  nodeCache.size = 0;
-+
-+  Py_INCREF(&PVectorType);
-+  PyModule_AddObject(m, "PVector", (PyObject *)&PVectorType);
-+
-+  return m;
-+}
-+
-+#if PY_MAJOR_VERSION >= 3
-+PyMODINIT_FUNC PyInit_pvectorc(void) {
-+  return pyrsistent_pvectorc_moduleinit();
-+}
-+#else
-+PyMODINIT_FUNC initpvectorc(void) {
-+  pyrsistent_pvectorc_moduleinit();
-+}
-+#endif
-diff --git a/third_party/python/pyrsistent/pyrsistent/__init__.py b/third_party/python/pyrsistent/pyrsistent/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/__init__.py
-@@ -0,0 +1,47 @@
-+# -*- coding: utf-8 -*-
-+
-+from pyrsistent._pmap import pmap, m, PMap
-+
-+from pyrsistent._pvector import pvector, v, PVector
-+
-+from pyrsistent._pset import pset, s, PSet
-+
-+from pyrsistent._pbag import pbag, b, PBag
-+
-+from pyrsistent._plist import plist, l, PList
-+
-+from pyrsistent._pdeque import pdeque, dq, PDeque
-+
-+from pyrsistent._checked_types import (
-+    CheckedPMap, CheckedPVector, CheckedPSet, InvariantException, CheckedKeyTypeError,
-+    CheckedValueTypeError, CheckedType, optional)
-+
-+from pyrsistent._field_common import (
-+    field, PTypeError, pset_field, pmap_field, pvector_field)
-+
-+from pyrsistent._precord import PRecord
-+
-+from pyrsistent._pclass import PClass, PClassMeta
-+
-+from pyrsistent._immutable import immutable
-+
-+from pyrsistent._helpers import freeze, thaw, mutant
-+
-+from pyrsistent._transformations import inc, discard, rex, ny
-+
-+from pyrsistent._toolz import get_in
-+
-+
-+__all__ = ('pmap', 'm', 'PMap',
-+           'pvector', 'v', 'PVector',
-+           'pset', 's', 'PSet',
-+           'pbag', 'b', 'PBag',
-+           'plist', 'l', 'PList',
-+           'pdeque', 'dq', 'PDeque',
-+           'CheckedPMap', 'CheckedPVector', 'CheckedPSet', 'InvariantException', 'CheckedKeyTypeError', 'CheckedValueTypeError', 'CheckedType', 'optional',
-+           'PRecord', 'field', 'pset_field', 'pmap_field', 'pvector_field',
-+           'PClass', 'PClassMeta',
-+           'immutable',
-+           'freeze', 'thaw', 'mutant',
-+           'get_in',
-+           'inc', 'discard', 'rex', 'ny')
-diff --git a/third_party/python/pyrsistent/pyrsistent/__init__.pyi b/third_party/python/pyrsistent/pyrsistent/__init__.pyi
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/__init__.pyi
-@@ -0,0 +1,213 @@
-+# flake8: noqa: E704
-+# from https://gist.github.com/WuTheFWasThat/091a17d4b5cab597dfd5d4c2d96faf09
-+# Stubs for pyrsistent (Python 3.6)
-+
-+from typing import Any
-+from typing import AnyStr
-+from typing import Callable
-+from typing import Iterable
-+from typing import Iterator
-+from typing import List
-+from typing import Optional
-+from typing import Mapping
-+from typing import MutableMapping
-+from typing import Sequence
-+from typing import Set
-+from typing import Union
-+from typing import Tuple
-+from typing import Type
-+from typing import TypeVar
-+from typing import overload
-+
-+# see commit 08519aa for explanation of the re-export
-+from pyrsistent.typing import CheckedKeyTypeError as CheckedKeyTypeError
-+from pyrsistent.typing import CheckedPMap as CheckedPMap
-+from pyrsistent.typing import CheckedPSet as CheckedPSet
-+from pyrsistent.typing import CheckedPVector as CheckedPVector
-+from pyrsistent.typing import CheckedType as CheckedType
-+from pyrsistent.typing import CheckedValueTypeError as CheckedValueTypeError
-+from pyrsistent.typing import InvariantException as InvariantException
-+from pyrsistent.typing import PClass as PClass
-+from pyrsistent.typing import PBag as PBag
-+from pyrsistent.typing import PDeque as PDeque
-+from pyrsistent.typing import PList as PList
-+from pyrsistent.typing import PMap as PMap
-+from pyrsistent.typing import PMapEvolver as PMapEvolver
-+from pyrsistent.typing import PSet as PSet
-+from pyrsistent.typing import PSetEvolver as PSetEvolver
-+from pyrsistent.typing import PTypeError as PTypeError
-+from pyrsistent.typing import PVector as PVector
-+from pyrsistent.typing import PVectorEvolver as PVectorEvolver
-+
-+T = TypeVar('T')
-+KT = TypeVar('KT')
-+VT = TypeVar('VT')
-+
-+def pmap(initial: Union[Mapping[KT, VT], Iterable[Tuple[KT, VT]]] = {}, pre_size: int = 0) -> PMap[KT, VT]: ...
-+def m(**kwargs: VT) -> PMap[str, VT]: ...
-+
-+def pvector(iterable: Iterable[T] = ...) -> PVector[T]: ...
-+def v(*iterable: T) -> PVector[T]: ...
-+
-+def pset(iterable: Iterable[T] = (), pre_size: int = 8) -> PSet[T]: ...
-+def s(*iterable: T) -> PSet[T]: ...
-+
-+# see class_test.py for use cases
-+Invariant = Tuple[bool, Optional[Union[str, Callable[[], str]]]]
-+
-+@overload
-+def field(
-+    type: Union[Type[T], Sequence[Type[T]]] = ...,
-+    invariant: Callable[[Any], Union[Invariant, Iterable[Invariant]]] = lambda _: (True, None),
-+    initial: Any = object(),
-+    mandatory: bool = False,
-+    factory: Callable[[Any], T] = lambda x: x,
-+    serializer: Callable[[Any, T], Any] = lambda _, value: value,
-+) -> T: ...
-+# The actual return value (_PField) is irrelevant after a PRecord has been instantiated,
-+# see https://github.com/tobgu/pyrsistent/blob/master/pyrsistent/_precord.py#L10
-+@overload
-+def field(
-+    type: Any = ...,
-+    invariant: Callable[[Any], Union[Invariant, Iterable[Invariant]]] = lambda _: (True, None),
-+    initial: Any = object(),
-+    mandatory: bool = False,
-+    factory: Callable[[Any], Any] = lambda x: x,
-+    serializer: Callable[[Any, Any], Any] = lambda _, value: value,
-+) -> Any: ...
-+
-+# Use precise types for the simplest use cases, but fall back to Any for
-+# everything else. See record_test.py for the wide range of possible types for
-+# item_type
-+@overload
-+def pset_field(
-+    item_type: Type[T],
-+    optional: bool = False,
-+    initial: Iterable[T] = ...,
-+) -> PSet[T]: ...
-+@overload
-+def pset_field(
-+    item_type: Any,
-+    optional: bool = False,
-+    initial: Any = (),
-+) -> PSet[Any]: ...
-+
-+@overload
-+def pmap_field(
-+    key_type: Type[KT],
-+    value_type: Type[VT],
-+    optional: bool = False,
-+    invariant: Callable[[Any], Tuple[bool, Optional[str]]] = lambda _: (True, None),
-+) -> PMap[KT, VT]: ...
-+@overload
-+def pmap_field(
-+    key_type: Any,
-+    value_type: Any,
-+    optional: bool = False,
-+    invariant: Callable[[Any], Tuple[bool, Optional[str]]] = lambda _: (True, None),
-+) -> PMap[Any, Any]: ...
-+
-+@overload
-+def pvector_field(
-+    item_type: Type[T],
-+    optional: bool = False,
-+    initial: Iterable[T] = ...,
-+) -> PVector[T]: ...
-+@overload
-+def pvector_field(
-+    item_type: Any,
-+    optional: bool = False,
-+    initial: Any = (),
-+) -> PVector[Any]: ...
-+
-+def pbag(elements: Iterable[T]) -> PBag[T]: ...
-+def b(*elements: T) -> PBag[T]: ...
-+
-+def plist(iterable: Iterable[T] = (), reverse: bool = False) -> PList[T]: ...
-+def l(*elements: T) -> PList[T]: ...
-+
-+def pdeque(iterable: Optional[Iterable[T]] = None, maxlen: Optional[int] = None) -> PDeque[T]: ...
-+def dq(*iterable: T) -> PDeque[T]: ...
-+
-+@overload
-+def optional(type: T) -> Tuple[T, Type[None]]: ...
-+@overload
-+def optional(*typs: Any) -> Tuple[Any, ...]: ...
-+
-+T_PRecord = TypeVar('T_PRecord', bound='PRecord')
-+class PRecord(PMap[AnyStr, Any]):
-+    _precord_fields: Mapping
-+    _precord_initial_values: Mapping
-+
-+    def __hash__(self) -> int: ...
-+    def __init__(self, **kwargs: Any) -> None: ...
-+    def __iter__(self) -> Iterator[Any]: ...
-+    def __len__(self) -> int: ...
-+    @classmethod
-+    def create(
-+        cls: Type[T_PRecord],
-+        kwargs: Mapping,
-+        _factory_fields: Optional[Iterable] = None,
-+        ignore_extra: bool = False,
-+    ) -> T_PRecord: ...
-+    # This is OK because T_PRecord is a concrete type
-+    def discard(self: T_PRecord, key: KT) -> T_PRecord: ...
-+    def remove(self: T_PRecord, key: KT) -> T_PRecord: ...
-+
-+    def serialize(self, format: Optional[Any] = ...) -> MutableMapping: ...
-+
-+    # From pyrsistent documentation:
-+    #   This set function differs slightly from that in the PMap
-+    #   class. First of all it accepts key-value pairs. Second it accepts multiple key-value
-+    #   pairs to perform one, atomic, update of multiple fields.
-+    @overload
-+    def set(self, key: KT, val: VT) -> Any: ...
-+    @overload
-+    def set(self, **kwargs: VT) -> Any: ...
-+
-+def immutable(
-+    members: Union[str, Iterable[str]] = '',
-+    name: str = 'Immutable',
-+    verbose: bool = False,
-+) -> Tuple: ...  # actually a namedtuple
-+
-+# ignore mypy warning "Overloaded function signatures 1 and 5 overlap with
-+# incompatible return types"
-+@overload
-+def freeze(o: Mapping[KT, VT]) -> PMap[KT, VT]: ... # type: ignore
-+@overload
-+def freeze(o: List[T]) -> PVector[T]: ... # type: ignore
-+@overload
-+def freeze(o: Tuple[T, ...]) -> Tuple[T, ...]: ...
-+@overload
-+def freeze(o: Set[T]) -> PSet[T]: ... # type: ignore
-+@overload
-+def freeze(o: T) -> T: ...
-+
-+
-+@overload
-+def thaw(o: PMap[KT, VT]) -> MutableMapping[KT, VT]: ... # type: ignore
-+@overload
-+def thaw(o: PVector[T]) -> List[T]: ... # type: ignore
-+@overload
-+def thaw(o: Tuple[T, ...]) -> Tuple[T, ...]: ...
-+# collections.abc.MutableSet is kind of garbage:
-+# https://stackoverflow.com/questions/24977898/why-does-collections-mutableset-not-bestow-an-update-method
-+@overload
-+def thaw(o: PSet[T]) -> Set[T]: ... # type: ignore
-+@overload
-+def thaw(o: T) -> T: ...
-+
-+def mutant(fn: Callable) -> Callable: ...
-+
-+def inc(x: int) -> int: ...
-+@overload
-+def discard(evolver: PMapEvolver[KT, VT], key: KT) -> None: ...
-+@overload
-+def discard(evolver: PVectorEvolver[T], key: int) -> None: ...
-+@overload
-+def discard(evolver: PSetEvolver[T], key: T) -> None: ...
-+def rex(expr: str) -> Callable[[Any], bool]: ...
-+def ny(_: Any) -> bool: ...
-+
-+def get_in(keys: Iterable, coll: Mapping, default: Optional[Any] = None, no_default: bool = False) -> Any: ...
-diff --git a/third_party/python/pyrsistent/pyrsistent/_checked_types.py b/third_party/python/pyrsistent/pyrsistent/_checked_types.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_checked_types.py
-@@ -0,0 +1,542 @@
-+import six
-+
-+from pyrsistent._compat import Enum, Iterable, string_types
-+from pyrsistent._pmap import PMap, pmap
-+from pyrsistent._pset import PSet, pset
-+from pyrsistent._pvector import PythonPVector, python_pvector
-+
-+
-+class CheckedType(object):
-+    """
-+    Marker class to enable creation and serialization of checked object graphs.
-+    """
-+    __slots__ = ()
-+
-+    @classmethod
-+    def create(cls, source_data, _factory_fields=None):
-+        raise NotImplementedError()
-+
-+    def serialize(self, format=None):
-+        raise NotImplementedError()
-+
-+
-+def _restore_pickle(cls, data):
-+    return cls.create(data, _factory_fields=set())
-+
-+
-+class InvariantException(Exception):
-+    """
-+    Exception raised from a :py:class:`CheckedType` when invariant tests fail or when a mandatory
-+    field is missing.
-+
-+    Contains two fields of interest:
-+    invariant_errors, a tuple of error data for the failing invariants
-+    missing_fields, a tuple of strings specifying the missing names
-+    """
-+
-+    def __init__(self, error_codes=(), missing_fields=(), *args, **kwargs):
-+        self.invariant_errors = tuple(e() if callable(e) else e for e in error_codes)
-+        self.missing_fields = missing_fields
-+        super(InvariantException, self).__init__(*args, **kwargs)
-+
-+    def __str__(self):
-+        return super(InvariantException, self).__str__() + \
-+            ", invariant_errors=[{invariant_errors}], missing_fields=[{missing_fields}]".format(
-+            invariant_errors=', '.join(str(e) for e in self.invariant_errors),
-+            missing_fields=', '.join(self.missing_fields))
-+
-+
-+_preserved_iterable_types = (
-+        Enum,
-+)
-+"""Some types are themselves iterable, but we want to use the type itself and
-+not its members for the type specification. This defines a set of such types
-+that we explicitly preserve.
-+
-+Note that strings are not such types because the string inputs we pass in are
-+values, not types.
-+"""
-+
-+
-+def maybe_parse_user_type(t):
-+    """Try to coerce a user-supplied type directive into a list of types.
-+
-+    This function should be used in all places where a user specifies a type,
-+    for consistency.
-+
-+    The policy for what defines valid user input should be clear from the implementation.
-+    """
-+    is_type = isinstance(t, type)
-+    is_preserved = isinstance(t, type) and issubclass(t, _preserved_iterable_types)
-+    is_string = isinstance(t, string_types)
-+    is_iterable = isinstance(t, Iterable)
-+
-+    if is_preserved:
-+        return [t]
-+    elif is_string:
-+        return [t]
-+    elif is_type and not is_iterable:
-+        return [t]
-+    elif is_iterable:
-+        # Recur to validate contained types as well.
-+        ts = t
-+        return tuple(e for t in ts for e in maybe_parse_user_type(t))
-+    else:
-+        # If this raises because `t` cannot be formatted, so be it.
-+        raise TypeError(
-+            'Type specifications must be types or strings. Input: {}'.format(t)
-+        )
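-+
-+# For example, under the policy above:
-+#   maybe_parse_user_type(int)           -> [int]
-+#   maybe_parse_user_type('foo.Bar')     -> ['foo.Bar']  (fully qualified name)
-+#   maybe_parse_user_type([int, float])  -> (int, float)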
-+
-+
-+def maybe_parse_many_user_types(ts):
-+    # Just a different name to communicate that you're parsing multiple user
-+    # inputs. `maybe_parse_user_type` handles the iterable case anyway.
-+    return maybe_parse_user_type(ts)
-+
-+
-+def _store_types(dct, bases, destination_name, source_name):
-+    maybe_types = maybe_parse_many_user_types([
-+        d[source_name]
-+        for d in ([dct] + [b.__dict__ for b in bases]) if source_name in d
-+    ])
-+
-+    dct[destination_name] = maybe_types
-+
-+
-+def _merge_invariant_results(result):
-+    verdict = True
-+    data = []
-+    for verd, dat in result:
-+        if not verd:
-+            verdict = False
-+            data.append(dat)
-+
-+    return verdict, tuple(data)
-+
-+
-+def wrap_invariant(invariant):
-+    # Invariant functions may return the outcome of several tests
-+    # In those cases the results have to be merged before being passed
-+    # back to the client.
-+    def f(*args, **kwargs):
-+        result = invariant(*args, **kwargs)
-+        if isinstance(result[0], bool):
-+            return result
-+
-+        return _merge_invariant_results(result)
-+
-+    return f
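-+
-+# Sketch with a hypothetical invariant: an invariant that returns several
-+# test outcomes is merged into a single (verdict, data) pair.
-+#   inv = wrap_invariant(lambda x: ((x > 0, 'not positive'), (x < 10, 'too big')))
-+#   inv(12)  ->  (False, ('too big',))
-+#   inv(5)   ->  (True, ())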
-+
-+
-+def _all_dicts(bases, seen=None):
-+    """
-+    Yield each class in ``bases`` and each of their base classes.
-+    """
-+    if seen is None:
-+        seen = set()
-+    for cls in bases:
-+        if cls in seen:
-+            continue
-+        seen.add(cls)
-+        yield cls.__dict__
-+        for b in _all_dicts(cls.__bases__, seen):
-+            yield b
-+
-+
-+def store_invariants(dct, bases, destination_name, source_name):
-+    # Invariants are inherited
-+    invariants = []
-+    for ns in [dct] + list(_all_dicts(bases)):
-+        try:
-+            invariant = ns[source_name]
-+        except KeyError:
-+            continue
-+        invariants.append(invariant)
-+
-+    if not all(callable(invariant) for invariant in invariants):
-+        raise TypeError('Invariants must be callable')
-+    dct[destination_name] = tuple(wrap_invariant(inv) for inv in invariants)
-+
-+
-+class _CheckedTypeMeta(type):
-+    def __new__(mcs, name, bases, dct):
-+        _store_types(dct, bases, '_checked_types', '__type__')
-+        store_invariants(dct, bases, '_checked_invariants', '__invariant__')
-+
-+        def default_serializer(self, _, value):
-+            if isinstance(value, CheckedType):
-+                return value.serialize()
-+            return value
-+
-+        dct.setdefault('__serializer__', default_serializer)
-+
-+        dct['__slots__'] = ()
-+
-+        return super(_CheckedTypeMeta, mcs).__new__(mcs, name, bases, dct)
-+
-+
-+class CheckedTypeError(TypeError):
-+    def __init__(self, source_class, expected_types, actual_type, actual_value, *args, **kwargs):
-+        super(CheckedTypeError, self).__init__(*args, **kwargs)
-+        self.source_class = source_class
-+        self.expected_types = expected_types
-+        self.actual_type = actual_type
-+        self.actual_value = actual_value
-+
-+
-+class CheckedKeyTypeError(CheckedTypeError):
-+    """
-+    Raised when trying to set a value using a key with a type that doesn't match the declared type.
-+
-+    Attributes:
-+    source_class -- The class of the collection
-+    expected_types  -- Allowed types
-+    actual_type -- The non matching type
-+    actual_value -- Value of the variable with the non matching type
-+    """
-+    pass
-+
-+
-+class CheckedValueTypeError(CheckedTypeError):
-+    """
-+    Raised when trying to set a value with a type that doesn't match the declared type.
-+
-+    Attributes:
-+    source_class -- The class of the collection
-+    expected_types  -- Allowed types
-+    actual_type -- The non matching type
-+    actual_value -- Value of the variable with the non matching type
-+    """
-+    pass
-+
-+
-+def _get_class(type_name):
-+    module_name, class_name = type_name.rsplit('.', 1)
-+    module = __import__(module_name, fromlist=[class_name])
-+    return getattr(module, class_name)
-+
-+
-+def get_type(typ):
-+    if isinstance(typ, type):
-+        return typ
-+
-+    return _get_class(typ)
-+
-+
-+def get_types(typs):
-+    return [get_type(typ) for typ in typs]
-+
-+
-+def _check_types(it, expected_types, source_class, exception_type=CheckedValueTypeError):
-+    if expected_types:
-+        for e in it:
-+            if not any(isinstance(e, get_type(t)) for t in expected_types):
-+                actual_type = type(e)
-+                msg = "Type {source_class} can only be used with {expected_types}, not {actual_type}".format(
-+                    source_class=source_class.__name__,
-+                    expected_types=tuple(get_type(et).__name__ for et in expected_types),
-+                    actual_type=actual_type.__name__)
-+                raise exception_type(source_class, expected_types, actual_type, e, msg)
-+
-+
-+def _invariant_errors(elem, invariants):
-+    return [data for valid, data in (invariant(elem) for invariant in invariants) if not valid]
-+
-+
-+def _invariant_errors_iterable(it, invariants):
-+    return sum([_invariant_errors(elem, invariants) for elem in it], [])
-+
-+
-+def optional(*typs):
-+    """ Convenience function to specify that a value may be of any of the types in type 'typs' or None """
-+    return tuple(typs) + (type(None),)
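-+
-+# For example, optional(int, str) returns (int, str, type(None)), so None
-+# becomes an accepted value alongside the listed types.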
-+
-+
-+def _checked_type_create(cls, source_data, _factory_fields=None, ignore_extra=False):
-+    if isinstance(source_data, cls):
-+        return source_data
-+
-+    # Recursively apply the create methods of checked types if the type of the
-+    # supplied data does not match any of the valid types.
-+    types = get_types(cls._checked_types)
-+    checked_type = next((t for t in types if issubclass(t, CheckedType)), None)
-+    if checked_type:
-+        return cls([checked_type.create(data, ignore_extra=ignore_extra)
-+                    if not any(isinstance(data, t) for t in types) else data
-+                    for data in source_data])
-+
-+    return cls(source_data)
-+
-+@six.add_metaclass(_CheckedTypeMeta)
-+class CheckedPVector(PythonPVector, CheckedType):
-+    """
-+    A CheckedPVector is a PVector which allows specifying type and invariant checks.
-+
-+    >>> class Positives(CheckedPVector):
-+    ...     __type__ = (long, int)
-+    ...     __invariant__ = lambda n: (n >= 0, 'Negative')
-+    ...
-+    >>> Positives([1, 2, 3])
-+    Positives([1, 2, 3])
-+    """
-+
-+    __slots__ = ()
-+
-+    def __new__(cls, initial=()):
-+        if type(initial) == PythonPVector:
-+            return super(CheckedPVector, cls).__new__(cls, initial._count, initial._shift, initial._root, initial._tail)
-+
-+        return CheckedPVector.Evolver(cls, python_pvector()).extend(initial).persistent()
-+
-+    def set(self, key, value):
-+        return self.evolver().set(key, value).persistent()
-+
-+    def append(self, val):
-+        return self.evolver().append(val).persistent()
-+
-+    def extend(self, it):
-+        return self.evolver().extend(it).persistent()
-+
-+    create = classmethod(_checked_type_create)
-+
-+    def serialize(self, format=None):
-+        serializer = self.__serializer__
-+        return list(serializer(format, v) for v in self)
-+
-+    def __reduce__(self):
-+        # Pickling support
-+        return _restore_pickle, (self.__class__, list(self),)
-+
-+    class Evolver(PythonPVector.Evolver):
-+        __slots__ = ('_destination_class', '_invariant_errors')
-+
-+        def __init__(self, destination_class, vector):
-+            super(CheckedPVector.Evolver, self).__init__(vector)
-+            self._destination_class = destination_class
-+            self._invariant_errors = []
-+
-+        def _check(self, it):
-+            _check_types(it, self._destination_class._checked_types, self._destination_class)
-+            error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants)
-+            self._invariant_errors.extend(error_data)
-+
-+        def __setitem__(self, key, value):
-+            self._check([value])
-+            return super(CheckedPVector.Evolver, self).__setitem__(key, value)
-+
-+        def append(self, elem):
-+            self._check([elem])
-+            return super(CheckedPVector.Evolver, self).append(elem)
-+
-+        def extend(self, it):
-+            it = list(it)
-+            self._check(it)
-+            return super(CheckedPVector.Evolver, self).extend(it)
-+
-+        def persistent(self):
-+            if self._invariant_errors:
-+                raise InvariantException(error_codes=self._invariant_errors)
-+
-+            result = self._orig_pvector
-+            if self.is_dirty() or (self._destination_class != type(self._orig_pvector)):
-+                pv = super(CheckedPVector.Evolver, self).persistent().extend(self._extra_tail)
-+                result = self._destination_class(pv)
-+                self._reset(result)
-+
-+            return result
-+
-+    def __repr__(self):
-+        return self.__class__.__name__ + "({0})".format(self.tolist())
-+
-+    __str__ = __repr__
-+
-+    def evolver(self):
-+        return CheckedPVector.Evolver(self.__class__, self)
-+
-+
-+@six.add_metaclass(_CheckedTypeMeta)
-+class CheckedPSet(PSet, CheckedType):
-+    """
-+    A CheckedPSet is a PSet which allows specifying type and invariant checks.
-+
-+    >>> class Positives(CheckedPSet):
-+    ...     __type__ = (long, int)
-+    ...     __invariant__ = lambda n: (n >= 0, 'Negative')
-+    ...
-+    >>> Positives([1, 2, 3])
-+    Positives([1, 2, 3])
-+    """
-+
-+    __slots__ = ()
-+
-+    def __new__(cls, initial=()):
-+        if type(initial) is PMap:
-+            return super(CheckedPSet, cls).__new__(cls, initial)
-+
-+        evolver = CheckedPSet.Evolver(cls, pset())
-+        for e in initial:
-+            evolver.add(e)
-+
-+        return evolver.persistent()
-+
-+    def __repr__(self):
-+        return self.__class__.__name__ + super(CheckedPSet, self).__repr__()[4:]
-+
-+    def __str__(self):
-+        return self.__repr__()
-+
-+    def serialize(self, format=None):
-+        serializer = self.__serializer__
-+        return set(serializer(format, v) for v in self)
-+
-+    create = classmethod(_checked_type_create)
-+
-+    def __reduce__(self):
-+        # Pickling support
-+        return _restore_pickle, (self.__class__, list(self),)
-+
-+    def evolver(self):
-+        return CheckedPSet.Evolver(self.__class__, self)
-+
-+    class Evolver(PSet._Evolver):
-+        __slots__ = ('_destination_class', '_invariant_errors')
-+
-+        def __init__(self, destination_class, original_set):
-+            super(CheckedPSet.Evolver, self).__init__(original_set)
-+            self._destination_class = destination_class
-+            self._invariant_errors = []
-+
-+        def _check(self, it):
-+            _check_types(it, self._destination_class._checked_types, self._destination_class)
-+            error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants)
-+            self._invariant_errors.extend(error_data)
-+
-+        def add(self, element):
-+            self._check([element])
-+            self._pmap_evolver[element] = True
-+            return self
-+
-+        def persistent(self):
-+            if self._invariant_errors:
-+                raise InvariantException(error_codes=self._invariant_errors)
-+
-+            if self.is_dirty() or self._destination_class != type(self._original_pset):
-+                return self._destination_class(self._pmap_evolver.persistent())
-+
-+            return self._original_pset
-+
-+
-+class _CheckedMapTypeMeta(type):
-+    def __new__(mcs, name, bases, dct):
-+        _store_types(dct, bases, '_checked_key_types', '__key_type__')
-+        _store_types(dct, bases, '_checked_value_types', '__value_type__')
-+        store_invariants(dct, bases, '_checked_invariants', '__invariant__')
-+
-+        def default_serializer(self, _, key, value):
-+            sk = key
-+            if isinstance(key, CheckedType):
-+                sk = key.serialize()
-+
-+            sv = value
-+            if isinstance(value, CheckedType):
-+                sv = value.serialize()
-+
-+            return sk, sv
-+
-+        dct.setdefault('__serializer__', default_serializer)
-+
-+        dct['__slots__'] = ()
-+
-+        return super(_CheckedMapTypeMeta, mcs).__new__(mcs, name, bases, dct)
-+
-+# Marker object
-+_UNDEFINED_CHECKED_PMAP_SIZE = object()
-+
-+
-+@six.add_metaclass(_CheckedMapTypeMeta)
-+class CheckedPMap(PMap, CheckedType):
-+    """
-+    A CheckedPMap is a PMap which allows specifying type and invariant checks.
-+
-+    >>> class IntToFloatMap(CheckedPMap):
-+    ...     __key_type__ = int
-+    ...     __value_type__ = float
-+    ...     __invariant__ = lambda k, v: (int(v) == k, 'Invalid mapping')
-+    ...
-+    >>> IntToFloatMap({1: 1.5, 2: 2.25})
-+    IntToFloatMap({1: 1.5, 2: 2.25})
-+    """
-+
-+    __slots__ = ()
-+
-+    def __new__(cls, initial={}, size=_UNDEFINED_CHECKED_PMAP_SIZE):
-+        if size is not _UNDEFINED_CHECKED_PMAP_SIZE:
-+            return super(CheckedPMap, cls).__new__(cls, size, initial)
-+
-+        evolver = CheckedPMap.Evolver(cls, pmap())
-+        for k, v in initial.items():
-+            evolver.set(k, v)
-+
-+        return evolver.persistent()
-+
-+    def evolver(self):
-+        return CheckedPMap.Evolver(self.__class__, self)
-+
-+    def __repr__(self):
-+        return self.__class__.__name__ + "({0})".format(str(dict(self)))
-+
-+    __str__ = __repr__
-+
-+    def serialize(self, format=None):
-+        serializer = self.__serializer__
-+        return dict(serializer(format, k, v) for k, v in self.items())
-+
-+    @classmethod
-+    def create(cls, source_data, _factory_fields=None):
-+        if isinstance(source_data, cls):
-+            return source_data
-+
-+        # Recursively apply the create methods of checked types if the type of
-+        # the supplied data does not match any of the valid types.
-+        key_types = get_types(cls._checked_key_types)
-+        checked_key_type = next((t for t in key_types if issubclass(t, CheckedType)), None)
-+        value_types = get_types(cls._checked_value_types)
-+        checked_value_type = next((t for t in value_types if issubclass(t, CheckedType)), None)
-+
-+        if checked_key_type or checked_value_type:
-+            return cls(dict((checked_key_type.create(key) if checked_key_type and not any(isinstance(key, t) for t in key_types) else key,
-+                             checked_value_type.create(value) if checked_value_type and not any(isinstance(value, t) for t in value_types) else value)
-+                            for key, value in source_data.items()))
-+
-+        return cls(source_data)
-+
-+    def __reduce__(self):
-+        # Pickling support
-+        return _restore_pickle, (self.__class__, dict(self),)
-+
-+    class Evolver(PMap._Evolver):
-+        __slots__ = ('_destination_class', '_invariant_errors')
-+
-+        def __init__(self, destination_class, original_map):
-+            super(CheckedPMap.Evolver, self).__init__(original_map)
-+            self._destination_class = destination_class
-+            self._invariant_errors = []
-+
-+        def set(self, key, value):
-+            _check_types([key], self._destination_class._checked_key_types, self._destination_class, CheckedKeyTypeError)
-+            _check_types([value], self._destination_class._checked_value_types, self._destination_class)
-+            self._invariant_errors.extend(data for valid, data in (invariant(key, value)
-+                                                                   for invariant in self._destination_class._checked_invariants)
-+                                          if not valid)
-+
-+            return super(CheckedPMap.Evolver, self).set(key, value)
-+
-+        def persistent(self):
-+            if self._invariant_errors:
-+                raise InvariantException(error_codes=self._invariant_errors)
-+
-+            if self.is_dirty() or type(self._original_pmap) != self._destination_class:
-+                return self._destination_class(self._buckets_evolver.persistent(), self._size)
-+
-+            return self._original_pmap
-diff --git a/third_party/python/pyrsistent/pyrsistent/_compat.py b/third_party/python/pyrsistent/pyrsistent/_compat.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_compat.py
-@@ -0,0 +1,31 @@
-+from six import string_types
-+
-+
-+# enum compat
-+try:
-+    from enum import Enum
-+except ImportError:
-+    # Fallback for interpreters without the enum module; no objects will
-+    # ever be instances of this placeholder class.
-+    class Enum(object): pass
-+
-+# collections compat
-+try:
-+    from collections.abc import (
-+        Container,
-+        Hashable,
-+        Iterable,
-+        Mapping,
-+        Sequence,
-+        Set,
-+        Sized,
-+    )
-+except ImportError:
-+    from collections import (
-+        Container,
-+        Hashable,
-+        Iterable,
-+        Mapping,
-+        Sequence,
-+        Set,
-+        Sized,
-+    )
-diff --git a/third_party/python/pyrsistent/pyrsistent/_field_common.py b/third_party/python/pyrsistent/pyrsistent/_field_common.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_field_common.py
-@@ -0,0 +1,330 @@
-+import six
-+import sys
-+
-+from pyrsistent._checked_types import (
-+    CheckedPMap,
-+    CheckedPSet,
-+    CheckedPVector,
-+    CheckedType,
-+    InvariantException,
-+    _restore_pickle,
-+    get_type,
-+    maybe_parse_user_type,
-+    maybe_parse_many_user_types,
-+)
-+from pyrsistent._checked_types import optional as optional_type
-+from pyrsistent._checked_types import wrap_invariant
-+import inspect
-+
-+PY2 = sys.version_info[0] < 3
-+
-+
-+def set_fields(dct, bases, name):
-+    dct[name] = dict(sum([list(b.__dict__.get(name, {}).items()) for b in bases], []))
-+
-+    for k, v in list(dct.items()):
-+        if isinstance(v, _PField):
-+            dct[name][k] = v
-+            del dct[k]
-+
-+
-+def check_global_invariants(subject, invariants):
-+    error_codes = tuple(error_code for is_ok, error_code in
-+                        (invariant(subject) for invariant in invariants) if not is_ok)
-+    if error_codes:
-+        raise InvariantException(error_codes, (), 'Global invariant failed')
-+
-+
-+def serialize(serializer, format, value):
-+    if isinstance(value, CheckedType) and serializer is PFIELD_NO_SERIALIZER:
-+        return value.serialize(format)
-+
-+    return serializer(format, value)
-+
-+
-+def check_type(destination_cls, field, name, value):
-+    if field.type and not any(isinstance(value, get_type(t)) for t in field.type):
-+        actual_type = type(value)
-+        message = "Invalid type for field {0}.{1}, was {2}".format(destination_cls.__name__, name, actual_type.__name__)
-+        raise PTypeError(destination_cls, name, field.type, actual_type, message)
-+
-+
-+def is_type_cls(type_cls, field_type):
-+    if type(field_type) is set:
-+        return True
-+    types = tuple(field_type)
-+    if len(types) == 0:
-+        return False
-+    return issubclass(get_type(types[0]), type_cls)
-+
-+
-+def is_field_ignore_extra_complaint(type_cls, field, ignore_extra):
-+    # The ignore_extra param defaults to False; for speed there is no need to
-+    # propagate a False value.
-+    if not ignore_extra:
-+        return False
-+
-+    if not is_type_cls(type_cls, field.type):
-+        return False
-+
-+    if PY2:
-+        return 'ignore_extra' in inspect.getargspec(field.factory).args
-+    else:
-+        return 'ignore_extra' in inspect.signature(field.factory).parameters
-+
-+
-+
-+class _PField(object):
-+    __slots__ = ('type', 'invariant', 'initial', 'mandatory', '_factory', 'serializer')
-+
-+    def __init__(self, type, invariant, initial, mandatory, factory, serializer):
-+        self.type = type
-+        self.invariant = invariant
-+        self.initial = initial
-+        self.mandatory = mandatory
-+        self._factory = factory
-+        self.serializer = serializer
-+
-+    @property
-+    def factory(self):
-+        # If no factory is specified and the type is another CheckedType, use the factory method of that CheckedType
-+        if self._factory is PFIELD_NO_FACTORY and len(self.type) == 1:
-+            typ = get_type(tuple(self.type)[0])
-+            if issubclass(typ, CheckedType):
-+                return typ.create
-+
-+        return self._factory
-+
-+PFIELD_NO_TYPE = ()
-+PFIELD_NO_INVARIANT = lambda _: (True, None)
-+PFIELD_NO_FACTORY = lambda x: x
-+PFIELD_NO_INITIAL = object()
-+PFIELD_NO_SERIALIZER = lambda _, value: value
-+
-+
-+def field(type=PFIELD_NO_TYPE, invariant=PFIELD_NO_INVARIANT, initial=PFIELD_NO_INITIAL,
-+          mandatory=False, factory=PFIELD_NO_FACTORY, serializer=PFIELD_NO_SERIALIZER):
-+    """
-+    Field specification factory for :py:class:`PRecord`.
-+
-+    :param type: a type or iterable with types that are allowed for this field
-+    :param invariant: a function specifying an invariant that must hold for the field
-+    :param initial: value of field if not specified when instantiating the record
-+    :param mandatory: boolean specifying if the field is mandatory or not
-+    :param factory: function called when field is set.
-+    :param serializer: function that returns a serialized version of the field
-+    """
-+
-+    # NB: We have to check this predicate separately from the predicates in
-+    # `maybe_parse_user_type` et al. because this one is related to supporting
-+    # the argspec for `field`, while those are related to supporting the valid
-+    # ways to specify types.
-+
-+    # Multiple types must be passed in one of the following containers. Note
-+    # that a type that is a subclass of one of these containers, like a
-+    # `collections.namedtuple`, will work as expected, since we check
-+    # `isinstance` and not `issubclass`.
-+    if isinstance(type, (list, set, tuple)):
-+        types = set(maybe_parse_many_user_types(type))
-+    else:
-+        types = set(maybe_parse_user_type(type))
-+
-+    invariant_function = wrap_invariant(invariant) if invariant != PFIELD_NO_INVARIANT and callable(invariant) else invariant
-+    field = _PField(type=types, invariant=invariant_function, initial=initial,
-+                    mandatory=mandatory, factory=factory, serializer=serializer)
-+
-+    _check_field_parameters(field)
-+
-+    return field
-+
-+
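field() only builds a _PField specification; it does its work when placed on a record class. A brief sketch of typical usage with PRecord (defined elsewhere in the package), assuming the top-level pyrsistent exports:

    from pyrsistent import PRecord, field

    class Point(PRecord):
        x = field(type=int, mandatory=True)
        # Invariants return (is_ok, error_code), matching PFIELD_NO_INVARIANT above.
        y = field(type=int, mandatory=True,
                  invariant=lambda y: (y >= 0, 'y must be non-negative'))

    p = Point(x=1, y=2)
    p2 = p.set(x=3)     # returns a new record; p is unchanged
    # Point(x=1, y=-1)  # would raise InvariantException with error code 'y must be non-negative'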
-+def _check_field_parameters(field):
-+    for t in field.type:
-+        if not isinstance(t, type) and not isinstance(t, six.string_types):
-+            raise TypeError('Type parameter expected, not {0}'.format(type(t)))
-+
-+    if field.initial is not PFIELD_NO_INITIAL and \
-+            not callable(field.initial) and \
-+            field.type and not any(isinstance(field.initial, t) for t in field.type):
-+        raise TypeError('Initial has invalid type {0}'.format(type(field.initial)))
-+
-+    if not callable(field.invariant):
-+        raise TypeError('Invariant must be callable')
-+
-+    if not callable(field.factory):
-+        raise TypeError('Factory must be callable')
-+
-+    if not callable(field.serializer):
-+        raise TypeError('Serializer must be callable')
-+
-+
-+class PTypeError(TypeError):
-+    """
-+    Raised when trying to assign a value with a type that doesn't match the declared type.
-+
-+    Attributes:
-+    source_class -- The class of the record
-+    field -- Field name
-+    expected_types  -- Types allowed for the field
-+    actual_type -- The non matching type
-+    """
-+    def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs):
-+        super(PTypeError, self).__init__(*args, **kwargs)
-+        self.source_class = source_class
-+        self.field = field
-+        self.expected_types = expected_types
-+        self.actual_type = actual_type
-+
-+
-+SEQ_FIELD_TYPE_SUFFIXES = {
-+    CheckedPVector: "PVector",
-+    CheckedPSet: "PSet",
-+}
-+
-+# Global dictionary to hold auto-generated field types: used for unpickling
-+_seq_field_types = {}
-+
-+def _restore_seq_field_pickle(checked_class, item_type, data):
-+    """Unpickling function for auto-generated PVec/PSet field types."""
-+    type_ = _seq_field_types[checked_class, item_type]
-+    return _restore_pickle(type_, data)
-+
-+def _types_to_names(types):
-+    """Convert a tuple of types to a human-readable string."""
-+    return "".join(get_type(typ).__name__.capitalize() for typ in types)
-+
-+def _make_seq_field_type(checked_class, item_type):
-+    """Create a subclass of the given checked class with the given item type."""
-+    type_ = _seq_field_types.get((checked_class, item_type))
-+    if type_ is not None:
-+        return type_
-+
-+    class TheType(checked_class):
-+        __type__ = item_type
-+
-+        def __reduce__(self):
-+            return (_restore_seq_field_pickle,
-+                    (checked_class, item_type, list(self)))
-+
-+    suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class]
-+    TheType.__name__ = _types_to_names(TheType._checked_types) + suffix
-+    _seq_field_types[checked_class, item_type] = TheType
-+    return TheType
-+
-+def _sequence_field(checked_class, item_type, optional, initial):
-+    """
-+    Create checked field for either ``PSet`` or ``PVector``.
-+
-+    :param checked_class: ``CheckedPSet`` or ``CheckedPVector``.
-+    :param item_type: The required type for the items in the set.
-+    :param optional: If true, ``None`` can be used as a value for
-+        this field.
-+    :param initial: Initial value to pass to factory.
-+
-+    :return: A ``field`` containing a checked class.
-+    """
-+    TheType = _make_seq_field_type(checked_class, item_type)
-+
-+    if optional:
-+        def factory(argument):
-+            if argument is None:
-+                return None
-+            else:
-+                return TheType.create(argument)
-+    else:
-+        factory = TheType.create
-+
-+    return field(type=optional_type(TheType) if optional else TheType,
-+                 factory=factory, mandatory=True,
-+                 initial=factory(initial))
-+
-+
-+def pset_field(item_type, optional=False, initial=()):
-+    """
-+    Create checked ``PSet`` field.
-+
-+    :param item_type: The required type for the items in the set.
-+    :param optional: If true, ``None`` can be used as a value for
-+        this field.
-+    :param initial: Initial value to pass to factory if no value is given
-+        for the field.
-+
-+    :return: A ``field`` containing a ``CheckedPSet`` of the given type.
-+    """
-+    return _sequence_field(CheckedPSet, item_type, optional,
-+                           initial)
-+
-+
-+def pvector_field(item_type, optional=False, initial=()):
-+    """
-+    Create checked ``PVector`` field.
-+
-+    :param item_type: The required type for the items in the vector.
-+    :param optional: If true, ``None`` can be used as a value for
-+        this field.
-+    :param initial: Initial value to pass to factory if no value is given
-+        for the field.
-+
-+    :return: A ``field`` containing a ``CheckedPVector`` of the given type.
-+    """
-+    return _sequence_field(CheckedPVector, item_type, optional,
-+                           initial)
-+
-+
-+_valid = lambda item: (True, "")
-+
-+
-+# Global dictionary to hold auto-generated field types: used for unpickling
-+_pmap_field_types = {}
-+
-+def _restore_pmap_field_pickle(key_type, value_type, data):
-+    """Unpickling function for auto-generated PMap field types."""
-+    type_ = _pmap_field_types[key_type, value_type]
-+    return _restore_pickle(type_, data)
-+
-+def _make_pmap_field_type(key_type, value_type):
-+    """Create a subclass of CheckedPMap with the given key and value types."""
-+    type_ = _pmap_field_types.get((key_type, value_type))
-+    if type_ is not None:
-+        return type_
-+
-+    class TheMap(CheckedPMap):
-+        __key_type__ = key_type
-+        __value_type__ = value_type
-+
-+        def __reduce__(self):
-+            return (_restore_pmap_field_pickle,
-+                    (self.__key_type__, self.__value_type__, dict(self)))
-+
-+    TheMap.__name__ = "{0}To{1}PMap".format(
-+        _types_to_names(TheMap._checked_key_types),
-+        _types_to_names(TheMap._checked_value_types))
-+    _pmap_field_types[key_type, value_type] = TheMap
-+    return TheMap
-+
-+
-+def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT):
-+    """
-+    Create a checked ``PMap`` field.
-+
-+    :param key: The required type for the keys of the map.
-+    :param value: The required type for the values of the map.
-+    :param optional: If true, ``None`` can be used as a value for
-+        this field.
-+    :param invariant: Pass-through to ``field``.
-+
-+    :return: A ``field`` containing a ``CheckedPMap``.
-+    """
-+    TheMap = _make_pmap_field_type(key_type, value_type)
-+
-+    if optional:
-+        def factory(argument):
-+            if argument is None:
-+                return None
-+            else:
-+                return TheMap.create(argument)
-+    else:
-+        factory = TheMap.create
-+
-+    return field(mandatory=True, initial=TheMap(),
-+                 type=optional_type(TheMap) if optional else TheMap,
-+                 factory=factory, invariant=invariant)
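The three helpers above wrap field() so that the collection members themselves are type-checked. A hedged sketch of how they compose on a PRecord (again assuming the top-level pyrsistent exports):

    from pyrsistent import PRecord, pmap_field, pvector_field

    class Roster(PRecord):
        names = pvector_field(str)       # CheckedPVector of str; defaults to empty
        scores = pmap_field(str, int)    # CheckedPMap from str to int

    r = Roster(names=['ada', 'lin'], scores={'ada': 3})
    r2 = r.set(names=r.names.append('bo'))   # plain list/dict inputs go through the factory
    # Roster(names=['ada', 1])               # would fail the per-item type check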
-diff --git a/third_party/python/pyrsistent/pyrsistent/_helpers.py b/third_party/python/pyrsistent/pyrsistent/_helpers.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_helpers.py
-@@ -0,0 +1,82 @@
-+from functools import wraps
-+import six
-+from pyrsistent._pmap import PMap, pmap
-+from pyrsistent._pset import PSet, pset
-+from pyrsistent._pvector import PVector, pvector
-+
-+
-+def freeze(o):
-+    """
-+    Recursively convert simple Python containers into pyrsistent versions
-+    of those containers.
-+
-+    - list is converted to pvector, recursively
-+    - dict is converted to pmap, recursively on values (but not keys)
-+    - set is converted to pset, but not recursively
-+    - tuple is converted to tuple, recursively.
-+
-+    Sets and dict keys are not recursively frozen because they do not contain
-+    mutable data by convention. The main exception to this rule is that
-+    dict keys and set elements are often instances of mutable objects that
-+    support hash-by-id, which this function can't convert anyway.
-+
-+    >>> freeze(set([1, 2]))
-+    pset([1, 2])
-+    >>> freeze([1, {'a': 3}])
-+    pvector([1, pmap({'a': 3})])
-+    >>> freeze((1, []))
-+    (1, pvector([]))
-+    """
-+    typ = type(o)
-+    if typ is dict:
-+        return pmap(dict((k, freeze(v)) for k, v in six.iteritems(o)))
-+    if typ is list:
-+        return pvector(map(freeze, o))
-+    if typ is tuple:
-+        return tuple(map(freeze, o))
-+    if typ is set:
-+        return pset(o)
-+    return o
-+
-+
-+def thaw(o):
-+    """
-+    Recursively convert pyrsistent containers into simple Python containers.
-+
-+    - pvector is converted to list, recursively
-+    - pmap is converted to dict, recursively on values (but not keys)
-+    - pset is converted to set, but not recursively
-+    - tuple is converted to tuple, recursively.
-+
-+    >>> from pyrsistent import s, m, v
-+    >>> thaw(s(1, 2))
-+    set([1, 2])
-+    >>> thaw(v(1, m(a=3)))
-+    [1, {'a': 3}]
-+    >>> thaw((1, v()))
-+    (1, [])
-+    """
-+    if isinstance(o, PVector):
-+        return list(map(thaw, o))
-+    if isinstance(o, PMap):
-+        return dict((k, thaw(v)) for k, v in o.iteritems())
-+    if isinstance(o, PSet):
-+        return set(o)
-+    if type(o) is tuple:
-+        return tuple(map(thaw, o))
-+    return o
-+
-+
-+def mutant(fn):
-+    """
-+    Convenience decorator to isolate mutation within the decorated function (with respect
-+    to the input arguments).
-+
-+    All arguments to the decorated function will be frozen so that they are guaranteed not to change.
-+    The return value is also frozen.
-+    """
-+    @wraps(fn)
-+    def inner_f(*args, **kwargs):
-+        return freeze(fn(*[freeze(e) for e in args], **dict(freeze(item) for item in kwargs.items())))
-+
-+    return inner_f
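Unlike freeze and thaw, mutant carries no doctest above, so a short sketch of what the decorator buys you:

    from pyrsistent import mutant

    @mutant
    def tally(counts, word):
        # counts arrives frozen (a pmap even if the caller passed a dict),
        # so set() returns a new map instead of mutating the argument.
        return counts.set(word, counts.get(word, 0) + 1)

    result = tally({}, 'spam')   # returns pmap({'spam': 1}); the input dict is untouched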
-diff --git a/third_party/python/pyrsistent/pyrsistent/_immutable.py b/third_party/python/pyrsistent/pyrsistent/_immutable.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_immutable.py
-@@ -0,0 +1,105 @@
-+import sys
-+
-+import six
-+
-+
-+def immutable(members='', name='Immutable', verbose=False):
-+    """
-+    Produces a class that either can be used standalone or as a base class for persistent classes.
-+
-+    This is a thin wrapper around a named tuple.
-+
-+    Constructing a type and using it to instantiate objects:
-+
-+    >>> Point = immutable('x, y', name='Point')
-+    >>> p = Point(1, 2)
-+    >>> p2 = p.set(x=3)
-+    >>> p
-+    Point(x=1, y=2)
-+    >>> p2
-+    Point(x=3, y=2)
-+
-+    Inheriting from a constructed type. In this case no type name needs to be supplied:
-+
-+    >>> class PositivePoint(immutable('x, y')):
-+    ...     __slots__ = tuple()
-+    ...     def __new__(cls, x, y):
-+    ...         if x > 0 and y > 0:
-+    ...             return super(PositivePoint, cls).__new__(cls, x, y)
-+    ...         raise Exception('Coordinates must be positive!')
-+    ...
-+    >>> p = PositivePoint(1, 2)
-+    >>> p.set(x=3)
-+    PositivePoint(x=3, y=2)
-+    >>> p.set(y=-3)
-+    Traceback (most recent call last):
-+    Exception: Coordinates must be positive!
-+
-+    The persistent class also supports the notion of frozen members. The value of a frozen member
-+    cannot be updated. For example it could be used to implement an ID that should remain the same
-+    over time. A frozen member is denoted by a trailing underscore.
-+
-+    >>> Point = immutable('x, y, id_', name='Point')
-+    >>> p = Point(1, 2, id_=17)
-+    >>> p.set(x=3)
-+    Point(x=3, y=2, id_=17)
-+    >>> p.set(id_=18)
-+    Traceback (most recent call last):
-+    AttributeError: Cannot set frozen members id_
-+    """
-+
-+    if isinstance(members, six.string_types):
-+        members = members.replace(',', ' ').split()
-+
-+    def frozen_member_test():
-+        frozen_members = ["'%s'" % f for f in members if f.endswith('_')]
-+        if frozen_members:
-+            return """
-+        frozen_fields = fields_to_modify & set([{frozen_members}])
-+        if frozen_fields:
-+            raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields))
-+            """.format(frozen_members=', '.join(frozen_members))
-+
-+        return ''
-+
-+    verbose_string = ""
-+    if sys.version_info < (3, 7):
-+        # Verbose is no longer supported in Python 3.7
-+        verbose_string = ", verbose={verbose}".format(verbose=verbose)
-+
-+    quoted_members = ', '.join("'%s'" % m for m in members)
-+    template = """
-+class {class_name}(namedtuple('ImmutableBase', [{quoted_members}]{verbose_string})):
-+    __slots__ = tuple()
-+
-+    def __repr__(self):
-+        return super({class_name}, self).__repr__().replace('ImmutableBase', self.__class__.__name__)
-+
-+    def set(self, **kwargs):
-+        if not kwargs:
-+            return self
-+
-+        fields_to_modify = set(kwargs.keys())
-+        if not fields_to_modify <= {member_set}:
-+            raise AttributeError("'%s' is not a member" % ', '.join(fields_to_modify - {member_set}))
-+
-+        {frozen_member_test}
-+
-+        return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self))
-+""".format(quoted_members=quoted_members,
-+               member_set="set([%s])" % quoted_members if quoted_members else 'set()',
-+               frozen_member_test=frozen_member_test(),
-+               verbose_string=verbose_string,
-+               class_name=name)
-+
-+    if verbose:
-+        print(template)
-+
-+    from collections import namedtuple
-+    namespace = dict(namedtuple=namedtuple, __name__='pyrsistent_immutable')
-+    try:
-+        six.exec_(template, namespace)
-+    except SyntaxError as e:
-+        raise SyntaxError(str(e) + ':\n' + template)  # str(e): .message exists on Python 2 only
-+
-+    return namespace[name]
-\ No newline at end of file
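Since the generated class is just a namedtuple subclass, instances keep all tuple behaviour in addition to set(); a small sketch:

    from pyrsistent import immutable

    Point = immutable('x, y', name='Point')
    p = Point(1, 2)

    x, y = p                          # tuple unpacking still works
    assert p[0] == 1                  # as does indexing
    assert p.set(y=5) == Point(1, 5)  # set() builds a new instance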
-diff --git a/third_party/python/pyrsistent/pyrsistent/_pbag.py b/third_party/python/pyrsistent/pyrsistent/_pbag.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_pbag.py
-@@ -0,0 +1,267 @@
-+from ._compat import Container, Iterable, Sized, Hashable
-+from functools import reduce
-+from pyrsistent._pmap import pmap
-+
-+
-+def _add_to_counters(counters, element):
-+    return counters.set(element, counters.get(element, 0) + 1)
-+
-+
-+class PBag(object):
-+    """
-+    A persistent bag/multiset type.
-+
-+    Requires elements to be hashable, and allows duplicates, but has no
-+    ordering. Bags are hashable.
-+
-+    Do not instantiate directly, instead use the factory functions :py:func:`b`
-+    or :py:func:`pbag` to create an instance.
-+
-+    Some examples:
-+
-+    >>> s = pbag([1, 2, 3, 1])
-+    >>> s2 = s.add(4)
-+    >>> s3 = s2.remove(1)
-+    >>> s
-+    pbag([1, 1, 2, 3])
-+    >>> s2
-+    pbag([1, 1, 2, 3, 4])
-+    >>> s3
-+    pbag([1, 2, 3, 4])
-+    """
-+
-+    __slots__ = ('_counts', '__weakref__')
-+
-+    def __init__(self, counts):
-+        self._counts = counts
-+
-+    def add(self, element):
-+        """
-+        Add an element to the bag.
-+
-+        >>> s = pbag([1])
-+        >>> s2 = s.add(1)
-+        >>> s3 = s.add(2)
-+        >>> s2
-+        pbag([1, 1])
-+        >>> s3
-+        pbag([1, 2])
-+        """
-+        return PBag(_add_to_counters(self._counts, element))
-+
-+    def update(self, iterable):
-+        """
-+        Update bag with all elements in iterable.
-+
-+        >>> s = pbag([1])
-+        >>> s.update([1, 2])
-+        pbag([1, 1, 2])
-+        """
-+        if iterable:
-+            return PBag(reduce(_add_to_counters, iterable, self._counts))
-+
-+        return self
-+
-+    def remove(self, element):
-+        """
-+        Remove an element from the bag.
-+
-+        >>> s = pbag([1, 1, 2])
-+        >>> s2 = s.remove(1)
-+        >>> s3 = s.remove(2)
-+        >>> s2
-+        pbag([1, 2])
-+        >>> s3
-+        pbag([1, 1])
-+        """
-+        if element not in self._counts:
-+            raise KeyError(element)
-+        elif self._counts[element] == 1:
-+            newc = self._counts.remove(element)
-+        else:
-+            newc = self._counts.set(element, self._counts[element] - 1)
-+        return PBag(newc)
-+
-+    def count(self, element):
-+        """
-+        Return the number of times an element appears.
-+
-+
-+        >>> pbag([]).count('non-existent')
-+        0
-+        >>> pbag([1, 1, 2]).count(1)
-+        2
-+        """
-+        return self._counts.get(element, 0)
-+
-+    def __len__(self):
-+        """
-+        Return the length including duplicates.
-+
-+        >>> len(pbag([1, 1, 2]))
-+        3
-+        """
-+        return sum(self._counts.itervalues())
-+
-+    def __iter__(self):
-+        """
-+        Return an iterator of all elements, including duplicates.
-+
-+        >>> list(pbag([1, 1, 2]))
-+        [1, 1, 2]
-+        >>> list(pbag([1, 2]))
-+        [1, 2]
-+        """
-+        for elt, count in self._counts.iteritems():
-+            for i in range(count):
-+                yield elt
-+
-+    def __contains__(self, elt):
-+        """
-+        Check if an element is in the bag.
-+
-+        >>> 1 in pbag([1, 1, 2])
-+        True
-+        >>> 0 in pbag([1, 2])
-+        False
-+        """
-+        return elt in self._counts
-+
-+    def __repr__(self):
-+        return "pbag({0})".format(list(self))
-+
-+    def __eq__(self, other):
-+        """
-+        Check if two bags are equivalent, honoring the number of duplicates,
-+        and ignoring insertion order.
-+
-+        >>> pbag([1, 1, 2]) == pbag([1, 2])
-+        False
-+        >>> pbag([2, 1, 0]) == pbag([0, 1, 2])
-+        True
-+        """
-+        if type(other) is not PBag:
-+            raise TypeError("Can only compare PBag with PBags")
-+        return self._counts == other._counts
-+
-+    def __lt__(self, other):
-+        raise TypeError('PBags are not orderable')
-+
-+    __le__ = __lt__
-+    __gt__ = __lt__
-+    __ge__ = __lt__
-+
-+    # Multiset-style operations similar to collections.Counter
-+
-+    def __add__(self, other):
-+        """ 
-+        Combine elements from two PBags.
-+
-+        >>> pbag([1, 2, 2]) + pbag([2, 3, 3])
-+        pbag([1, 2, 2, 2, 3, 3])
-+        """
-+        if not isinstance(other, PBag):
-+            return NotImplemented
-+        result = self._counts.evolver()
-+        for elem, other_count in other._counts.iteritems():
-+            result[elem] = self.count(elem) + other_count
-+        return PBag(result.persistent())
-+
-+    def __sub__(self, other):
-+        """ 
-+        Remove elements from one PBag that are present in another.
-+
-+        >>> pbag([1, 2, 2, 2, 3]) - pbag([2, 3, 3, 4])
-+        pbag([1, 2, 2])
-+        """
-+        if not isinstance(other, PBag):
-+            return NotImplemented
-+        result = self._counts.evolver()
-+        for elem, other_count in other._counts.iteritems():
-+            newcount = self.count(elem) - other_count
-+            if newcount > 0:
-+                result[elem] = newcount
-+            elif elem in self:
-+                result.remove(elem)
-+        return PBag(result.persistent())
-+        
-+    def __or__(self, other):
-+        """ 
-+        Union: Keep elements that are present in either of two PBags.
-+
-+        >>> pbag([1, 2, 2, 2]) | pbag([2, 3, 3])
-+        pbag([1, 2, 2, 2, 3, 3])
-+        """
-+        if not isinstance(other, PBag):
-+            return NotImplemented
-+        result = self._counts.evolver()
-+        for elem, other_count in other._counts.iteritems():
-+            count = self.count(elem)
-+            newcount = max(count, other_count)
-+            result[elem] = newcount
-+        return PBag(result.persistent())
-+        
-+    def __and__(self, other):
-+        """
-+        Intersection: Only keep elements that are present in both PBags.
-+        
-+        >>> pbag([1, 2, 2, 2]) & pbag([2, 3, 3])
-+        pbag([2])
-+        """
-+        if not isinstance(other, PBag):
-+            return NotImplemented
-+        result = pmap().evolver()
-+        for elem, count in self._counts.iteritems():
-+            newcount = min(count, other.count(elem))
-+            if newcount > 0:
-+                result[elem] = newcount
-+        return PBag(result.persistent())
-+    
-+    def __hash__(self):
-+        """
-+        Hash based on value of elements.
-+
-+        >>> m = pmap({pbag([1, 2]): "it's here!"})
-+        >>> m[pbag([2, 1])]
-+        "it's here!"
-+        >>> pbag([1, 1, 2]) in m
-+        False
-+        """
-+        return hash(self._counts)
-+
-+
-+Container.register(PBag)
-+Iterable.register(PBag)
-+Sized.register(PBag)
-+Hashable.register(PBag)
-+
-+
-+def b(*elements):
-+    """
-+    Construct a persistent bag.
-+
-+    Takes an arbitrary number of arguments to insert into the new persistent
-+    bag.
-+
-+    >>> b(1, 2, 3, 2)
-+    pbag([1, 2, 2, 3])
-+    """
-+    return pbag(elements)
-+
-+
-+def pbag(elements):
-+    """
-+    Convert an iterable to a persistent bag.
-+
-+    Takes an iterable with elements to insert.
-+
-+    >>> pbag([1, 2, 3, 2])
-+    pbag([1, 2, 2, 3])
-+    """
-+    if not elements:
-+        return _EMPTY_PBAG
-+    return PBag(reduce(_add_to_counters, elements, pmap()))
-+
-+
-+_EMPTY_PBAG = PBag(pmap())
-+
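Taken together, the operators above let PBag serve as a persistent analogue of collections.Counter; a short illustration:

    from pyrsistent import pbag

    draft = pbag('the cat sat on the mat'.split())
    final = pbag('the cat sat on the hat'.split())

    assert draft.count('the') == 2
    assert draft - final == pbag(['mat'])   # words only in the draft
    assert final - draft == pbag(['hat'])   # words only in the final
    both = draft & final                    # intersection keeps the minimum counts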
-diff --git a/third_party/python/pyrsistent/pyrsistent/_pclass.py b/third_party/python/pyrsistent/pyrsistent/_pclass.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_pclass.py
-@@ -0,0 +1,264 @@
-+import six
-+from pyrsistent._checked_types import (InvariantException, CheckedType, _restore_pickle, store_invariants)
-+from pyrsistent._field_common import (
-+    set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
-+)
-+from pyrsistent._transformations import transform
-+
-+
-+def _is_pclass(bases):
-+    return len(bases) == 1 and bases[0] == CheckedType
-+
-+
-+class PClassMeta(type):
-+    def __new__(mcs, name, bases, dct):
-+        set_fields(dct, bases, name='_pclass_fields')
-+        store_invariants(dct, bases, '_pclass_invariants', '__invariant__')
-+        dct['__slots__'] = ('_pclass_frozen',) + tuple(key for key in dct['_pclass_fields'])
-+
-+        # There must only be one __weakref__ entry in the inheritance hierarchy,
-+        # lets put it on the top level class.
-+        if _is_pclass(bases):
-+            dct['__slots__'] += ('__weakref__',)
-+
-+        return super(PClassMeta, mcs).__new__(mcs, name, bases, dct)
-+
-+_MISSING_VALUE = object()
-+
-+
-+def _check_and_set_attr(cls, field, name, value, result, invariant_errors):
-+    check_type(cls, field, name, value)
-+    is_ok, error_code = field.invariant(value)
-+    if not is_ok:
-+        invariant_errors.append(error_code)
-+    else:
-+        setattr(result, name, value)
-+
-+
-+@six.add_metaclass(PClassMeta)
-+class PClass(CheckedType):
-+    """
-+    A PClass is a Python class with a fixed set of specified fields. PClasses are declared as Python classes inheriting
-+    from PClass. It is defined the same way that PRecords are and behaves like a PRecord in all aspects except that it
-+    is not a PMap and hence not a collection but rather a plain Python object.
-+
-+
-+    More documentation and examples of PClass usage are available at https://github.com/tobgu/pyrsistent
-+    """
-+    def __new__(cls, **kwargs):    # Support *args?
-+        result = super(PClass, cls).__new__(cls)
-+        factory_fields = kwargs.pop('_factory_fields', None)
-+        ignore_extra = kwargs.pop('ignore_extra', None)
-+        missing_fields = []
-+        invariant_errors = []
-+        for name, field in cls._pclass_fields.items():
-+            if name in kwargs:
-+                if factory_fields is None or name in factory_fields:
-+                    if is_field_ignore_extra_complaint(PClass, field, ignore_extra):
-+                        value = field.factory(kwargs[name], ignore_extra=ignore_extra)
-+                    else:
-+                        value = field.factory(kwargs[name])
-+                else:
-+                    value = kwargs[name]
-+                _check_and_set_attr(cls, field, name, value, result, invariant_errors)
-+                del kwargs[name]
-+            elif field.initial is not PFIELD_NO_INITIAL:
-+                initial = field.initial() if callable(field.initial) else field.initial
-+                _check_and_set_attr(
-+                    cls, field, name, initial, result, invariant_errors)
-+            elif field.mandatory:
-+                missing_fields.append('{0}.{1}'.format(cls.__name__, name))
-+
-+        if invariant_errors or missing_fields:
-+            raise InvariantException(tuple(invariant_errors), tuple(missing_fields), 'Field invariant failed')
-+
-+        if kwargs:
-+            raise AttributeError("'{0}' are not among the specified fields for {1}".format(
-+                ', '.join(kwargs), cls.__name__))
-+
-+        check_global_invariants(result, cls._pclass_invariants)
-+
-+        result._pclass_frozen = True
-+        return result
-+
-+    def set(self, *args, **kwargs):
-+        """
-+        Set a field in the instance. Returns a new instance with the updated value. The original instance remains
-+        unmodified. Accepts key-value pairs or a single string representing the field name and a value.
-+
-+        >>> from pyrsistent import PClass, field
-+        >>> class AClass(PClass):
-+        ...     x = field()
-+        ...
-+        >>> a = AClass(x=1)
-+        >>> a2 = a.set(x=2)
-+        >>> a3 = a.set('x', 3)
-+        >>> a
-+        AClass(x=1)
-+        >>> a2
-+        AClass(x=2)
-+        >>> a3
-+        AClass(x=3)
-+        """
-+        if args:
-+            kwargs[args[0]] = args[1]
-+
-+        factory_fields = set(kwargs)
-+
-+        for key in self._pclass_fields:
-+            if key not in kwargs:
-+                value = getattr(self, key, _MISSING_VALUE)
-+                if value is not _MISSING_VALUE:
-+                    kwargs[key] = value
-+
-+        return self.__class__(_factory_fields=factory_fields, **kwargs)
-+
-+    @classmethod
-+    def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
-+        """
-+        Factory method. Will create a new PClass of the current type and assign the values
-+        specified in kwargs.
-+
-+        :param ignore_extra: A boolean which, when set to True, causes any keys in kwargs that are not
-+                             in the set of fields on the PClass to be ignored.
-+        """
-+        if isinstance(kwargs, cls):
-+            return kwargs
-+
-+        if ignore_extra:
-+            kwargs = {k: kwargs[k] for k in cls._pclass_fields if k in kwargs}
-+
-+        return cls(_factory_fields=_factory_fields, ignore_extra=ignore_extra, **kwargs)
-+
-+    def serialize(self, format=None):
-+        """
-+        Serialize the current PClass using custom serializer functions for fields where
-+        they have been supplied.
-+        """
-+        result = {}
-+        for name in self._pclass_fields:
-+            value = getattr(self, name, _MISSING_VALUE)
-+            if value is not _MISSING_VALUE:
-+                result[name] = serialize(self._pclass_fields[name].serializer, format, value)
-+
-+        return result
-+
-+    def transform(self, *transformations):
-+        """
-+        Apply transformations to the current PClass. For more details on transformations see
-+        the documentation for PMap. Transformations on PClasses do not support key matching
-+        since the PClass is not a collection. Apart from that the transformations available
-+        for other persistent types work as expected.
-+        """
-+        return transform(self, transformations)
-+
-+    def __eq__(self, other):
-+        if isinstance(other, self.__class__):
-+            for name in self._pclass_fields:
-+                if getattr(self, name, _MISSING_VALUE) != getattr(other, name, _MISSING_VALUE):
-+                    return False
-+
-+            return True
-+
-+        return NotImplemented
-+
-+    def __ne__(self, other):
-+        return not self == other
-+
-+    def __hash__(self):
-+        # May want to optimize this by caching the hash somehow
-+        return hash(tuple((key, getattr(self, key, _MISSING_VALUE)) for key in self._pclass_fields))
-+
-+    def __setattr__(self, key, value):
-+        if getattr(self, '_pclass_frozen', False):
-+            raise AttributeError("Can't set attribute, key={0}, value={1}".format(key, value))
-+
-+        super(PClass, self).__setattr__(key, value)
-+
-+    def __delattr__(self, key):
-+            raise AttributeError("Can't delete attribute, key={0}, use remove()".format(key))
-+
-+    def _to_dict(self):
-+        result = {}
-+        for key in self._pclass_fields:
-+            value = getattr(self, key, _MISSING_VALUE)
-+            if value is not _MISSING_VALUE:
-+                result[key] = value
-+
-+        return result
-+
-+    def __repr__(self):
-+        return "{0}({1})".format(self.__class__.__name__,
-+                                 ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self._to_dict().items()))
-+
-+    def __reduce__(self):
-+        # Pickling support
-+        data = dict((key, getattr(self, key)) for key in self._pclass_fields if hasattr(self, key))
-+        return _restore_pickle, (self.__class__, data,)
-+
-+    def evolver(self):
-+        """
-+        Returns an evolver for this object.
-+        """
-+        return _PClassEvolver(self, self._to_dict())
-+
-+    def remove(self, name):
-+        """
-+        Remove attribute given by name from the current instance. Raises AttributeError if the
-+        attribute doesn't exist.
-+        """
-+        evolver = self.evolver()
-+        del evolver[name]
-+        return evolver.persistent()
-+
-+
-+class _PClassEvolver(object):
-+    __slots__ = ('_pclass_evolver_original', '_pclass_evolver_data', '_pclass_evolver_data_is_dirty', '_factory_fields')
-+
-+    def __init__(self, original, initial_dict):
-+        self._pclass_evolver_original = original
-+        self._pclass_evolver_data = initial_dict
-+        self._pclass_evolver_data_is_dirty = False
-+        self._factory_fields = set()
-+
-+    def __getitem__(self, item):
-+        return self._pclass_evolver_data[item]
-+
-+    def set(self, key, value):
-+        if self._pclass_evolver_data.get(key, _MISSING_VALUE) is not value:
-+            self._pclass_evolver_data[key] = value
-+            self._factory_fields.add(key)
-+            self._pclass_evolver_data_is_dirty = True
-+
-+        return self
-+
-+    def __setitem__(self, key, value):
-+        self.set(key, value)
-+
-+    def remove(self, item):
-+        if item in self._pclass_evolver_data:
-+            del self._pclass_evolver_data[item]
-+            self._factory_fields.discard(item)
-+            self._pclass_evolver_data_is_dirty = True
-+            return self
-+
-+        raise AttributeError(item)
-+
-+    def __delitem__(self, item):
-+        self.remove(item)
-+
-+    def persistent(self):
-+        if self._pclass_evolver_data_is_dirty:
-+            return self._pclass_evolver_original.__class__(_factory_fields=self._factory_fields,
-+                                                           **self._pclass_evolver_data)
-+
-+        return self._pclass_evolver_original
-+
-+    def __setattr__(self, key, value):
-+        if key not in self.__slots__:
-+            self.set(key, value)
-+        else:
-+            super(_PClassEvolver, self).__setattr__(key, value)
-+
-+    def __getattr__(self, item):
-+        return self[item]
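A brief sketch tying the pieces together: a PClass with a field invariant, updated through the evolver defined above (assuming the top-level pyrsistent exports):

    from pyrsistent import PClass, field

    class Account(PClass):
        owner = field(type=str, mandatory=True)
        balance = field(type=int, initial=0,
                        invariant=lambda b: (b >= 0, 'balance must be non-negative'))

    acct = Account(owner='ada')
    ev = acct.evolver()
    ev.balance = 100              # attribute assignment routes through _PClassEvolver.set()
    acct2 = ev.persistent()       # new instance; acct keeps balance == 0

    assert acct2.serialize() == {'owner': 'ada', 'balance': 100}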
-diff --git a/third_party/python/pyrsistent/pyrsistent/_pdeque.py b/third_party/python/pyrsistent/pyrsistent/_pdeque.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_pdeque.py
-@@ -0,0 +1,376 @@
-+from ._compat import Sequence, Hashable
-+from itertools import islice, chain
-+from numbers import Integral
-+from pyrsistent._plist import plist
-+
-+
-+class PDeque(object):
-+    """
-+    Persistent double ended queue (deque). Allows quick appends and pops in both ends. Implemented
-+    using two persistent lists.
-+
-+    A maximum length can be specified to create a bounded queue.
-+
-+    Fully supports the Sequence and Hashable protocols including indexing and slicing but
-+    if you need fast random access go for the PVector instead.
-+
-+    Do not instantiate directly, instead use the factory functions :py:func:`dq` or :py:func:`pdeque` to
-+    create an instance.
-+
-+    Some examples:
-+
-+    >>> x = pdeque([1, 2, 3])
-+    >>> x.left
-+    1
-+    >>> x.right
-+    3
-+    >>> x[0] == x.left
-+    True
-+    >>> x[-1] == x.right
-+    True
-+    >>> x.pop()
-+    pdeque([1, 2])
-+    >>> x.pop() == x[:-1]
-+    True
-+    >>> x.popleft()
-+    pdeque([2, 3])
-+    >>> x.append(4)
-+    pdeque([1, 2, 3, 4])
-+    >>> x.appendleft(4)
-+    pdeque([4, 1, 2, 3])
-+
-+    >>> y = pdeque([1, 2, 3], maxlen=3)
-+    >>> y.append(4)
-+    pdeque([2, 3, 4], maxlen=3)
-+    >>> y.appendleft(4)
-+    pdeque([4, 1, 2], maxlen=3)
-+    """
-+    __slots__ = ('_left_list', '_right_list', '_length', '_maxlen', '__weakref__')
-+
-+    def __new__(cls, left_list, right_list, length, maxlen=None):
-+        instance = super(PDeque, cls).__new__(cls)
-+        instance._left_list = left_list
-+        instance._right_list = right_list
-+        instance._length = length
-+
-+        if maxlen is not None:
-+            if not isinstance(maxlen, Integral):
-+                raise TypeError('An integer is required as maxlen')
-+
-+            if maxlen < 0:
-+                raise ValueError("maxlen must be non-negative")
-+
-+        instance._maxlen = maxlen
-+        return instance
-+
-+    @property
-+    def right(self):
-+        """
-+        Rightmost element in the deque.
-+        """
-+        return PDeque._tip_from_lists(self._right_list, self._left_list)
-+
-+    @property
-+    def left(self):
-+        """
-+        Leftmost element in the deque.
-+        """
-+        return PDeque._tip_from_lists(self._left_list, self._right_list)
-+
-+    @staticmethod
-+    def _tip_from_lists(primary_list, secondary_list):
-+        if primary_list:
-+            return primary_list.first
-+
-+        if secondary_list:
-+            return secondary_list[-1]
-+
-+        raise IndexError('No elements in empty deque')
-+
-+    def __iter__(self):
-+        return chain(self._left_list, self._right_list.reverse())
-+
-+    def __repr__(self):
-+        return "pdeque({0}{1})".format(list(self),
-+                                       ', maxlen={0}'.format(self._maxlen) if self._maxlen is not None else '')
-+    __str__ = __repr__
-+
-+    @property
-+    def maxlen(self):
-+        """
-+        Maximum length of the queue.
-+        """
-+        return self._maxlen
-+
-+    def pop(self, count=1):
-+        """
-+        Return new deque with rightmost element removed. Popping the empty queue
-+        will return the empty queue. An optional count can be given to indicate the
-+        number of elements to pop. Popping with a negative count is the same as
-+        popleft. Executes in amortized O(k) where k is the number of elements to pop.
-+
-+        >>> pdeque([1, 2]).pop()
-+        pdeque([1])
-+        >>> pdeque([1, 2]).pop(2)
-+        pdeque([])
-+        >>> pdeque([1, 2]).pop(-1)
-+        pdeque([2])
-+        """
-+        if count < 0:
-+            return self.popleft(-count)
-+
-+        new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count)
-+        return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
-+
-+    def popleft(self, count=1):
-+        """
-+        Return new deque with leftmost element removed. Otherwise functionally
-+        equivalent to pop().
-+
-+        >>> pdeque([1, 2]).popleft()
-+        pdeque([2])
-+        """
-+        if count < 0:
-+            return self.pop(-count)
-+
-+        new_left_list, new_right_list = PDeque._pop_lists(self._left_list, self._right_list, count)
-+        return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
-+
-+    @staticmethod
-+    def _pop_lists(primary_list, secondary_list, count):
-+        new_primary_list = primary_list
-+        new_secondary_list = secondary_list
-+
-+        while count > 0 and (new_primary_list or new_secondary_list):
-+            count -= 1
-+            if new_primary_list.rest:
-+                new_primary_list = new_primary_list.rest
-+            elif new_primary_list:
-+                new_primary_list = new_secondary_list.reverse()
-+                new_secondary_list = plist()
-+            else:
-+                new_primary_list = new_secondary_list.reverse().rest
-+                new_secondary_list = plist()
-+
-+        return new_primary_list, new_secondary_list
-+
-+    def _is_empty(self):
-+        return not self._left_list and not self._right_list
-+
-+    def __lt__(self, other):
-+        if not isinstance(other, PDeque):
-+            return NotImplemented
-+
-+        return tuple(self) < tuple(other)
-+
-+    def __eq__(self, other):
-+        if not isinstance(other, PDeque):
-+            return NotImplemented
-+
-+        if tuple(self) == tuple(other):
-+            # Sanity check of the length value since it is redundant (there for performance)
-+            assert len(self) == len(other)
-+            return True
-+
-+        return False
-+
-+    def __hash__(self):
-+        return hash(tuple(self))
-+
-+    def __len__(self):
-+        return self._length
-+
-+    def append(self, elem):
-+        """
-+        Return new deque with elem as the rightmost element.
-+
-+        >>> pdeque([1, 2]).append(3)
-+        pdeque([1, 2, 3])
-+        """
-+        new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem)
-+        return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
-+
-+    def appendleft(self, elem):
-+        """
-+        Return new deque with elem as the leftmost element.
-+
-+        >>> pdeque([1, 2]).appendleft(3)
-+        pdeque([3, 1, 2])
-+        """
-+        new_right_list, new_left_list, new_length = self._append(self._right_list, self._left_list, elem)
-+        return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
-+
-+    def _append(self, primary_list, secondary_list, elem):
-+        if self._maxlen is not None and self._length == self._maxlen:
-+            if self._maxlen == 0:
-+                return primary_list, secondary_list, 0
-+            new_primary_list, new_secondary_list = PDeque._pop_lists(primary_list, secondary_list, 1)
-+            return new_primary_list, new_secondary_list.cons(elem), self._length
-+
-+        return primary_list, secondary_list.cons(elem), self._length + 1
-+
-+    @staticmethod
-+    def _extend_list(the_list, iterable):
-+        count = 0
-+        for elem in iterable:
-+            the_list = the_list.cons(elem)
-+            count += 1
-+
-+        return the_list, count
-+
-+    def _extend(self, primary_list, secondary_list, iterable):
-+        new_primary_list, extend_count = PDeque._extend_list(primary_list, iterable)
-+        new_secondary_list = secondary_list
-+        current_len = self._length + extend_count
-+        if self._maxlen is not None and current_len > self._maxlen:
-+            pop_len = current_len - self._maxlen
-+            new_secondary_list, new_primary_list = PDeque._pop_lists(new_secondary_list, new_primary_list, pop_len)
-+            extend_count -= pop_len
-+
-+        return new_primary_list, new_secondary_list, extend_count
-+
-+    def extend(self, iterable):
-+        """
-+        Return new deque with all elements of iterable appended to the right.
-+
-+        >>> pdeque([1, 2]).extend([3, 4])
-+        pdeque([1, 2, 3, 4])
-+        """
-+        new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable)
-+        return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
-+
-+    def extendleft(self, iterable):
-+        """
-+        Return new deque with all elements of iterable appended to the left.
-+
-+        NB! The elements will be inserted in reverse order compared to the order in the iterable.
-+
-+        >>> pdeque([1, 2]).extendleft([3, 4])
-+        pdeque([4, 3, 1, 2])
-+        """
-+        new_left_list, new_right_list, extend_count = self._extend(self._left_list, self._right_list, iterable)
-+        return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
-+
-+    def count(self, elem):
-+        """
-+        Return the number of elements equal to elem present in the queue
-+
-+        >>> pdeque([1, 2, 1]).count(1)
-+        2
-+        """
-+        return self._left_list.count(elem) + self._right_list.count(elem)
-+
-+    def remove(self, elem):
-+        """
-+        Return new deque with first element from left equal to elem removed. If no such element is found
-+        a ValueError is raised.
-+
-+        >>> pdeque([2, 1, 2]).remove(2)
-+        pdeque([1, 2])
-+        """
-+        try:
-+            return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1)
-+        except ValueError:
-+            # Value not found in left list, try the right list
-+            try:
-+                # This is severely inefficient with a double reverse, should perhaps implement a remove_last()?
-+                return PDeque(self._left_list,
-+                               self._right_list.reverse().remove(elem).reverse(), self._length - 1)
-+            except ValueError:
-+                raise ValueError('{0} not found in PDeque'.format(elem))
-+
-+    def reverse(self):
-+        """
-+        Return reversed deque.
-+
-+        >>> pdeque([1, 2, 3]).reverse()
-+        pdeque([3, 2, 1])
-+
-+        Also supports the standard Python reversed function.
-+
-+        >>> reversed(pdeque([1, 2, 3]))
-+        pdeque([3, 2, 1])
-+        """
-+        return PDeque(self._right_list, self._left_list, self._length)
-+    __reversed__ = reverse
-+
-+    def rotate(self, steps):
-+        """
-+        Return deque with the elements rotated by steps positions.
-+
-+        >>> x = pdeque([1, 2, 3])
-+        >>> x.rotate(1)
-+        pdeque([3, 1, 2])
-+        >>> x.rotate(-2)
-+        pdeque([3, 1, 2])
-+        """
-+        popped_deque = self.pop(steps)
-+        if steps >= 0:
-+            return popped_deque.extendleft(islice(self.reverse(), steps))
-+
-+        return popped_deque.extend(islice(self, -steps))
-+
-+    def __reduce__(self):
-+        # Pickling support
-+        return pdeque, (list(self), self._maxlen)
-+
-+    def __getitem__(self, index):
-+        if isinstance(index, slice):
-+            if index.step is not None and index.step != 1:
-+                # Too difficult, no structural sharing possible
-+                return pdeque(tuple(self)[index], maxlen=self._maxlen)
-+
-+            result = self
-+            if index.start is not None:
-+                result = result.popleft(index.start % self._length)
-+            if index.stop is not None:
-+                result = result.pop(self._length - (index.stop % self._length))
-+
-+            return result
-+
-+        if not isinstance(index, Integral):
-+            raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
-+
-+        if index >= 0:
-+            return self.popleft(index).left
-+
-+        shifted = len(self) + index
-+        if shifted < 0:
-+            raise IndexError(
-+                "pdeque index {0} out of range {1}".format(index, len(self)),
-+            )
-+        return self.popleft(shifted).left
-+
-+    index = Sequence.index
-+
-+Sequence.register(PDeque)
-+Hashable.register(PDeque)
-+
-+
-+def pdeque(iterable=(), maxlen=None):
-+    """
-+    Return deque containing the elements of iterable. If maxlen is specified then
-+    len(iterable) - maxlen elements are discarded from the left if len(iterable) > maxlen.
-+
-+    >>> pdeque([1, 2, 3])
-+    pdeque([1, 2, 3])
-+    >>> pdeque([1, 2, 3, 4], maxlen=2)
-+    pdeque([3, 4], maxlen=2)
-+    """
-+    t = tuple(iterable)
-+    if maxlen is not None:
-+        t = t[-maxlen:]
-+    length = len(t)
-+    pivot = int(length / 2)
-+    left = plist(t[:pivot])
-+    right = plist(t[pivot:], reverse=True)
-+    return PDeque(left, right, length, maxlen)
-+
-+def dq(*elements):
-+    """
-+    Return deque containing all arguments.
-+
-+    >>> dq(1, 2, 3)
-+    pdeque([1, 2, 3])
-+    """
-+    return pdeque(elements)
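The maxlen machinery above makes a bounded pdeque behave like a persistent sliding window; a short sketch:

    from pyrsistent import pdeque

    window = pdeque(maxlen=3)
    for sample in [10, 20, 30, 40]:
        window = window.append(sample)   # the oldest element falls off the left

    assert list(window) == [20, 30, 40]
    assert window.left == 20 and window.right == 40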
-diff --git a/third_party/python/pyrsistent/pyrsistent/_plist.py b/third_party/python/pyrsistent/pyrsistent/_plist.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_plist.py
-@@ -0,0 +1,313 @@
-+from ._compat import Sequence, Hashable
-+from numbers import Integral
-+from functools import reduce
-+
-+
-+class _PListBuilder(object):
-+    """
-+    Helper class to allow construction of a list without
-+    having to reverse it in the end.
-+    """
-+    __slots__ = ('_head', '_tail')
-+
-+    def __init__(self):
-+        self._head = _EMPTY_PLIST
-+        self._tail = _EMPTY_PLIST
-+
-+    def _append(self, elem, constructor):
-+        if not self._tail:
-+            self._head = constructor(elem)
-+            self._tail = self._head
-+        else:
-+            self._tail.rest = constructor(elem)
-+            self._tail = self._tail.rest
-+
-+        return self._head
-+
-+    def append_elem(self, elem):
-+        return self._append(elem, lambda e: PList(e, _EMPTY_PLIST))
-+
-+    def append_plist(self, pl):
-+        return self._append(pl, lambda l: l)
-+
-+    def build(self):
-+        return self._head
-+
-+
-+class _PListBase(object):
-+    __slots__ = ('__weakref__',)
-+
-+    # Selected implementations can be taken straight from the Sequence
-+    # class, other are less suitable. Especially those that work with
-+    # index lookups.
-+    count = Sequence.count
-+    index = Sequence.index
-+
-+    def __reduce__(self):
-+        # Pickling support
-+        return plist, (list(self),)
-+
-+    def __len__(self):
-+        """
-+        Return the length of the list, computed by traversing it.
-+
-+        This is obviously O(n) but with the current implementation
-+        where a list is also a node the overhead of storing the length
-+        in every node would be quite significant.
-+        """
-+        return sum(1 for _ in self)
-+
-+    def __repr__(self):
-+        return "plist({0})".format(list(self))
-+    __str__ = __repr__
-+
-+    def cons(self, elem):
-+        """
-+        Return a new list with elem inserted as new head.
-+
-+        >>> plist([1, 2]).cons(3)
-+        plist([3, 1, 2])
-+        """
-+        return PList(elem, self)
-+
-+    def mcons(self, iterable):
-+        """
-+        Return a new list with all elements of iterable repeatedly cons'ed onto the current list.
-+        NB! The elements will be inserted in the reverse order of the iterable.
-+        Runs in O(len(iterable)).
-+
-+        >>> plist([1, 2]).mcons([3, 4])
-+        plist([4, 3, 1, 2])
-+        """
-+        head = self
-+        for elem in iterable:
-+            head = head.cons(elem)
-+
-+        return head
-+
-+    def reverse(self):
-+        """
-+        Return a reversed version of list. Runs in O(n) where n is the length of the list.
-+
-+        >>> plist([1, 2, 3]).reverse()
-+        plist([3, 2, 1])
-+
-+        Also supports the standard reversed function.
-+
-+        >>> reversed(plist([1, 2, 3]))
-+        plist([3, 2, 1])
-+        """
-+        result = plist()
-+        head = self
-+        while head:
-+            result = result.cons(head.first)
-+            head = head.rest
-+
-+        return result
-+    __reversed__ = reverse
-+
-+    def split(self, index):
-+        """
-+        Split the list at the position specified by index. Returns a tuple containing the
-+        list up to index and the list after the index. Runs in O(index).
-+
-+        >>> plist([1, 2, 3, 4]).split(2)
-+        (plist([1, 2]), plist([3, 4]))
-+        """
-+        lb = _PListBuilder()
-+        right_list = self
-+        i = 0
-+        while right_list and i < index:
-+            lb.append_elem(right_list.first)
-+            right_list = right_list.rest
-+            i += 1
-+
-+        if not right_list:
-+            # Just a small optimization in the cases where no split occurred
-+            return self, _EMPTY_PLIST
-+
-+        return lb.build(), right_list
-+
-+    def __iter__(self):
-+        li = self
-+        while li:
-+            yield li.first
-+            li = li.rest
-+
-+    def __lt__(self, other):
-+        if not isinstance(other, _PListBase):
-+            return NotImplemented
-+
-+        return tuple(self) < tuple(other)
-+
-+    def __eq__(self, other):
-+        """
-+        Traverses the lists, checking equality of elements.
-+        
-+        This is an O(n) operation, but preserves the standard semantics of list equality.
-+        """
-+        if not isinstance(other, _PListBase):
-+            return NotImplemented
-+
-+        self_head = self
-+        other_head = other
-+        while self_head and other_head:
-+            if not self_head.first == other_head.first:
-+                return False
-+            self_head = self_head.rest
-+            other_head = other_head.rest
-+
-+        return not self_head and not other_head
-+
-+    def __getitem__(self, index):
-+        # Don't use this data structure if you plan to do a lot of indexing, it is
-+        # very inefficient! Use a PVector instead!
-+
-+        if isinstance(index, slice):
-+            if index.start is not None and index.stop is None and (index.step is None or index.step == 1):
-+                return self._drop(index.start)
-+
-+            # Take the easy way out for all other slicing cases, not much structural reuse possible anyway
-+            return plist(tuple(self)[index])
-+
-+        if not isinstance(index, Integral):
-+            raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
-+
-+        if index < 0:
-+            # NB: O(n)!
-+            index += len(self)
-+
-+        try:
-+            return self._drop(index).first
-+        except AttributeError:
-+            raise IndexError("PList index out of range")
-+
-+    def _drop(self, count):
-+        if count < 0:
-+            raise IndexError("PList index out of range")
-+
-+        head = self
-+        while count > 0:
-+            head = head.rest
-+            count -= 1
-+
-+        return head
-+
-+    def __hash__(self):
-+        return hash(tuple(self))
-+
-+    def remove(self, elem):
-+        """
-+        Return new list with first element equal to elem removed. O(k) where k is the position
-+        of the element that is removed.
-+
-+        Raises ValueError if no matching element is found.
-+
-+        >>> plist([1, 2, 1]).remove(1)
-+        plist([2, 1])
-+        """
-+
-+        builder = _PListBuilder()
-+        head = self
-+        while head:
-+            if head.first == elem:
-+                return builder.append_plist(head.rest)
-+
-+            builder.append_elem(head.first)
-+            head = head.rest
-+
-+        raise ValueError('{0} not found in PList'.format(elem))
-+
-+
-+class PList(_PListBase):
-+    """
-+    Classical Lisp style singly linked list. Adding elements to the head using cons is O(1).
-+    Element access is O(k) where k is the position of the element in the list. Taking the
-+    length of the list is O(n).
-+
-+    Fully supports the Sequence and Hashable protocols including indexing and slicing but
-+    if you need fast random access go for the PVector instead.
-+
-+    Do not instantiate directly, instead use the factory functions :py:func:`l` or :py:func:`plist` to
-+    create an instance.
-+
-+    Some examples:
-+
-+    >>> x = plist([1, 2])
-+    >>> y = x.cons(3)
-+    >>> x
-+    plist([1, 2])
-+    >>> y
-+    plist([3, 1, 2])
-+    >>> y.first
-+    3
-+    >>> y.rest == x
-+    True
-+    >>> y[:2]
-+    plist([3, 1])
-+    """
-+    __slots__ = ('first', 'rest')
-+
-+    def __new__(cls, first, rest):
-+        instance = super(PList, cls).__new__(cls)
-+        instance.first = first
-+        instance.rest = rest
-+        return instance
-+
-+    def __bool__(self):
-+        return True
-+    __nonzero__ = __bool__
-+
-+
-+Sequence.register(PList)
-+Hashable.register(PList)
-+
-+
-+class _EmptyPList(_PListBase):
-+    __slots__ = ()
-+
-+    def __bool__(self):
-+        return False
-+    __nonzero__ = __bool__
-+
-+    @property
-+    def first(self):
-+        raise AttributeError("Empty PList has no first")
-+
-+    @property
-+    def rest(self):
-+        return self
-+
-+
-+Sequence.register(_EmptyPList)
-+Hashable.register(_EmptyPList)
-+
-+_EMPTY_PLIST = _EmptyPList()
-+
-+
-+def plist(iterable=(), reverse=False):
-+    """
-+    Creates a new persistent list containing all elements of iterable.
-+    Optional parameter reverse specifies if the elements should be inserted in
-+    reverse order or not.
-+
-+    >>> plist([1, 2, 3])
-+    plist([1, 2, 3])
-+    >>> plist([1, 2, 3], reverse=True)
-+    plist([3, 2, 1])
-+    """
-+    if not reverse:
-+        iterable = list(iterable)
-+        iterable.reverse()
-+
-+    return reduce(lambda pl, elem: pl.cons(elem), iterable, _EMPTY_PLIST)
-+
-+
-+def l(*elements):
-+    """
-+    Creates a new persistent list containing all arguments.
-+
-+    >>> l(1, 2, 3)
-+    plist([1, 2, 3])
-+    """
-+    return plist(elements)
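-+
-+
-+# Usage sketch (illustrative only, not part of the vendored source): cons
-+# prepends in O(1) and shares the entire tail with the source list:
-+#
-+#     >>> base = plist([2, 3])
-+#     >>> extended = base.cons(1)
-+#     >>> extended.rest is base
-+#     True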
-diff --git a/third_party/python/pyrsistent/pyrsistent/_pmap.py b/third_party/python/pyrsistent/pyrsistent/_pmap.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_pmap.py
-@@ -0,0 +1,460 @@
-+from ._compat import Mapping, Hashable
-+from itertools import chain
-+import six
-+from pyrsistent._pvector import pvector
-+from pyrsistent._transformations import transform
-+
-+
-+class PMap(object):
-+    """
-+    Persistent map/dict. Tries to follow the same naming conventions as the built in dict where feasible.
-+
-+    Do not instantiate directly, instead use the factory functions :py:func:`m` or :py:func:`pmap` to
-+    create an instance.
-+
-+    Was originally written as a very close copy of the Clojure equivalent but was later rewritten to more
-+    closely resemble the Python dict. This means that a sparse vector (a PVector) of buckets is used. The keys are
-+    hashed and the elements inserted at position hash % len(bucket_vector). Whenever the map size exceeds 2/3 of
-+    the containing vector's size the map is reallocated to a vector of double the size. This is done to avoid
-+    excessive hash collisions.
-+
-+    This structure corresponds most closely to the built in dict type and is intended as a replacement. Where the
-+    semantics are the same (more or less) the same function names have been used, but in some cases that is not
-+    possible, for example for assignment and deletion of values.
-+
-+    PMap implements the Mapping protocol and is Hashable. It also supports dot-notation for
-+    element access.
-+
-+    Random access and insert are O(log32(n)) where n is the size of the map.
-+
-+    The following are examples of some common operations on persistent maps
-+
-+    >>> m1 = m(a=1, b=3)
-+    >>> m2 = m1.set('c', 3)
-+    >>> m3 = m2.remove('a')
-+    >>> m1
-+    pmap({'a': 1, 'b': 3})
-+    >>> m2
-+    pmap({'a': 1, 'c': 3, 'b': 3})
-+    >>> m3
-+    pmap({'c': 3, 'b': 3})
-+    >>> m3['c']
-+    3
-+    >>> m3.c
-+    3
-+    """
-+    __slots__ = ('_size', '_buckets', '__weakref__', '_cached_hash')
-+
-+    def __new__(cls, size, buckets):
-+        self = super(PMap, cls).__new__(cls)
-+        self._size = size
-+        self._buckets = buckets
-+        return self
-+
-+    @staticmethod
-+    def _get_bucket(buckets, key):
-+        index = hash(key) % len(buckets)
-+        bucket = buckets[index]
-+        return index, bucket
-+
-+    @staticmethod
-+    def _getitem(buckets, key):
-+        _, bucket = PMap._get_bucket(buckets, key)
-+        if bucket:
-+            for k, v in bucket:
-+                if k == key:
-+                    return v
-+
-+        raise KeyError(key)
-+
-+    def __getitem__(self, key):
-+        return PMap._getitem(self._buckets, key)
-+
-+    @staticmethod
-+    def _contains(buckets, key):
-+        _, bucket = PMap._get_bucket(buckets, key)
-+        if bucket:
-+            for k, _ in bucket:
-+                if k == key:
-+                    return True
-+
-+            return False
-+
-+        return False
-+
-+    def __contains__(self, key):
-+        return self._contains(self._buckets, key)
-+
-+    get = Mapping.get
-+
-+    def __iter__(self):
-+        return self.iterkeys()
-+
-+    def __getattr__(self, key):
-+        try:
-+            return self[key]
-+        except KeyError:
-+            raise AttributeError(
-+                "{0} has no attribute '{1}'".format(type(self).__name__, key)
-+            )
-+
-+    def iterkeys(self):
-+        for k, _ in self.iteritems():
-+            yield k
-+
-+    # These are more efficient implementations compared to the original
-+    # methods that are based on the keys iterator and then calls the
-+    # accessor functions to access the value for the corresponding key
-+    def itervalues(self):
-+        for _, v in self.iteritems():
-+            yield v
-+
-+    def iteritems(self):
-+        for bucket in self._buckets:
-+            if bucket:
-+                for k, v in bucket:
-+                    yield k, v
-+
-+    def values(self):
-+        return pvector(self.itervalues())
-+
-+    def keys(self):
-+        return pvector(self.iterkeys())
-+
-+    def items(self):
-+        return pvector(self.iteritems())
-+
-+    def __len__(self):
-+        return self._size
-+
-+    def __repr__(self):
-+        return 'pmap({0})'.format(str(dict(self)))
-+
-+    def __eq__(self, other):
-+        if self is other:
-+            return True
-+        if not isinstance(other, Mapping):
-+            return NotImplemented
-+        if len(self) != len(other):
-+            return False
-+        if isinstance(other, PMap):
-+            if (hasattr(self, '_cached_hash') and hasattr(other, '_cached_hash')
-+                    and self._cached_hash != other._cached_hash):
-+                return False
-+            if self._buckets == other._buckets:
-+                return True
-+            return dict(self.iteritems()) == dict(other.iteritems())
-+        elif isinstance(other, dict):
-+            return dict(self.iteritems()) == other
-+        return dict(self.iteritems()) == dict(six.iteritems(other))
-+
-+    __ne__ = Mapping.__ne__
-+
-+    def __lt__(self, other):
-+        raise TypeError('PMaps are not orderable')
-+
-+    __le__ = __lt__
-+    __gt__ = __lt__
-+    __ge__ = __lt__
-+
-+    def __str__(self):
-+        return self.__repr__()
-+
-+    def __hash__(self):
-+        if not hasattr(self, '_cached_hash'):
-+            self._cached_hash = hash(frozenset(self.iteritems()))
-+        return self._cached_hash
-+
-+    def set(self, key, val):
-+        """
-+        Return a new PMap with key and val inserted.
-+
-+        >>> m1 = m(a=1, b=2)
-+        >>> m2 = m1.set('a', 3)
-+        >>> m3 = m1.set('c', 4)
-+        >>> m1
-+        pmap({'a': 1, 'b': 2})
-+        >>> m2
-+        pmap({'a': 3, 'b': 2})
-+        >>> m3
-+        pmap({'a': 1, 'c': 4, 'b': 2})
-+        """
-+        return self.evolver().set(key, val).persistent()
-+
-+    def remove(self, key):
-+        """
-+        Return a new PMap without the element specified by key. Raises KeyError if the element
-+        is not present.
-+
-+        >>> m1 = m(a=1, b=2)
-+        >>> m1.remove('a')
-+        pmap({'b': 2})
-+        """
-+        return self.evolver().remove(key).persistent()
-+
-+    def discard(self, key):
-+        """
-+        Return a new PMap without the element specified by key. Returns reference to itself
-+        if element is not present.
-+
-+        >>> m1 = m(a=1, b=2)
-+        >>> m1.discard('a')
-+        pmap({'b': 2})
-+        >>> m1 is m1.discard('c')
-+        True
-+        """
-+        try:
-+            return self.remove(key)
-+        except KeyError:
-+            return self
-+
-+    def update(self, *maps):
-+        """
-+        Return a new PMap with the items in the given Mappings inserted. If the same key is present in multiple
-+        maps the rightmost (last) value is inserted.
-+
-+        >>> m1 = m(a=1, b=2)
-+        >>> m1.update(m(a=2, c=3), {'a': 17, 'd': 35})
-+        pmap({'a': 17, 'c': 3, 'b': 2, 'd': 35})
-+        """
-+        return self.update_with(lambda l, r: r, *maps)
-+
-+    def update_with(self, update_fn, *maps):
-+        """
-+        Return a new PMap with the items in the given Mappings inserted. If the same key is present in multiple
-+        maps the values will be merged using update_fn going from left to right.
-+
-+        >>> from operator import add
-+        >>> m1 = m(a=1, b=2)
-+        >>> m1.update_with(add, m(a=2))
-+        pmap({'a': 3, 'b': 2})
-+
-+        The reverse behaviour of the regular merge. Keep the leftmost element instead of the rightmost.
-+
-+        >>> m1 = m(a=1)
-+        >>> m1.update_with(lambda l, r: l, m(a=2), {'a':3})
-+        pmap({'a': 1})
-+        """
-+        evolver = self.evolver()
-+        for map in maps:
-+            for key, value in map.items():
-+                evolver.set(key, update_fn(evolver[key], value) if key in evolver else value)
-+
-+        return evolver.persistent()
-+
-+    def __add__(self, other):
-+        return self.update(other)
-+
-+    def __reduce__(self):
-+        # Pickling support
-+        return pmap, (dict(self),)
-+
-+    def transform(self, *transformations):
-+        """
-+        Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
-+        consists of two parts. One match expression that specifies which elements to transform
-+        and one transformation function that performs the actual transformation.
-+
-+        >>> from pyrsistent import freeze, ny
-+        >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
-+        ...                                   {'author': 'Steve', 'content': 'A slightly longer article'}],
-+        ...                      'weather': {'temperature': '11C', 'wind': '5m/s'}})
-+        >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
-+        >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
-+        >>> very_short_news.articles[0].content
-+        'A short article'
-+        >>> very_short_news.articles[1].content
-+        'A slightly long...'
-+
-+        When nothing has been transformed the original data structure is kept
-+
-+        >>> short_news is news_paper
-+        True
-+        >>> very_short_news is news_paper
-+        False
-+        >>> very_short_news.articles[0] is news_paper.articles[0]
-+        True
-+        """
-+        return transform(self, transformations)
-+
-+    def copy(self):
-+        return self
-+
-+    class _Evolver(object):
-+        __slots__ = ('_buckets_evolver', '_size', '_original_pmap')
-+
-+        def __init__(self, original_pmap):
-+            self._original_pmap = original_pmap
-+            self._buckets_evolver = original_pmap._buckets.evolver()
-+            self._size = original_pmap._size
-+
-+        def __getitem__(self, key):
-+            return PMap._getitem(self._buckets_evolver, key)
-+
-+        def __setitem__(self, key, val):
-+            self.set(key, val)
-+
-+        def set(self, key, val):
-+            if len(self._buckets_evolver) < 0.67 * self._size:
-+                self._reallocate(2 * len(self._buckets_evolver))
-+
-+            kv = (key, val)
-+            index, bucket = PMap._get_bucket(self._buckets_evolver, key)
-+            if bucket:
-+                for k, v in bucket:
-+                    if k == key:
-+                        if v is not val:
-+                            new_bucket = [(k2, v2) if k2 != k else (k2, val) for k2, v2 in bucket]
-+                            self._buckets_evolver[index] = new_bucket
-+
-+                        return self
-+
-+                new_bucket = [kv]
-+                new_bucket.extend(bucket)
-+                self._buckets_evolver[index] = new_bucket
-+                self._size += 1
-+            else:
-+                self._buckets_evolver[index] = [kv]
-+                self._size += 1
-+
-+            return self
-+
-+        def _reallocate(self, new_size):
-+            new_list = new_size * [None]
-+            buckets = self._buckets_evolver.persistent()
-+            for k, v in chain.from_iterable(x for x in buckets if x):
-+                index = hash(k) % new_size
-+                if new_list[index]:
-+                    new_list[index].append((k, v))
-+                else:
-+                    new_list[index] = [(k, v)]
-+
-+            # A reallocation should always result in a dirty buckets evolver to avoid
-+            # possible loss of elements when doing the reallocation.
-+            self._buckets_evolver = pvector().evolver()
-+            self._buckets_evolver.extend(new_list)
-+
-+        def is_dirty(self):
-+            return self._buckets_evolver.is_dirty()
-+
-+        def persistent(self):
-+            if self.is_dirty():
-+                self._original_pmap = PMap(self._size, self._buckets_evolver.persistent())
-+
-+            return self._original_pmap
-+
-+        def __len__(self):
-+            return self._size
-+
-+        def __contains__(self, key):
-+            return PMap._contains(self._buckets_evolver, key)
-+
-+        def __delitem__(self, key):
-+            self.remove(key)
-+
-+        def remove(self, key):
-+            index, bucket = PMap._get_bucket(self._buckets_evolver, key)
-+
-+            if bucket:
-+                new_bucket = [(k, v) for (k, v) in bucket if k != key]
-+                if len(bucket) > len(new_bucket):
-+                    self._buckets_evolver[index] = new_bucket if new_bucket else None
-+                    self._size -= 1
-+                    return self
-+
-+            raise KeyError('{0}'.format(key))
-+
-+    def evolver(self):
-+        """
-+        Create a new evolver for this pmap. For a discussion on evolvers in general see the
-+        documentation for the pvector evolver.
-+
-+        Create the evolver and perform various mutating updates to it:
-+
-+        >>> m1 = m(a=1, b=2)
-+        >>> e = m1.evolver()
-+        >>> e['c'] = 3
-+        >>> len(e)
-+        3
-+        >>> del e['a']
-+
-+        The underlying pmap remains the same:
-+
-+        >>> m1
-+        pmap({'a': 1, 'b': 2})
-+
-+        The changes are kept in the evolver. An updated pmap can be created using the
-+        persistent() function on the evolver.
-+
-+        >>> m2 = e.persistent()
-+        >>> m2
-+        pmap({'c': 3, 'b': 2})
-+
-+        The new pmap will share data with the original pmap in the same way that would have
-+        been done if only using operations on the pmap.
-+        """
-+        return self._Evolver(self)
-+
-+Mapping.register(PMap)
-+Hashable.register(PMap)
-+
-+
-+def _turbo_mapping(initial, pre_size):
-+    if pre_size:
-+        size = pre_size
-+    else:
-+        try:
-+            size = 2 * len(initial) or 8
-+        except Exception:
-+            # Guess we can't figure out the length. Give up on length hinting,
-+            # we can always reallocate later.
-+            size = 8
-+
-+    buckets = size * [None]
-+
-+    if not isinstance(initial, Mapping):
-+        # Make a dictionary of the initial data if it isn't one already;
-+        # that will save us some work further down since we can assume no
-+        # key collisions
-+        initial = dict(initial)
-+
-+    for k, v in six.iteritems(initial):
-+        h = hash(k)
-+        index = h % size
-+        bucket = buckets[index]
-+
-+        if bucket:
-+            bucket.append((k, v))
-+        else:
-+            buckets[index] = [(k, v)]
-+
-+    return PMap(len(initial), pvector().extend(buckets))
-+
-+
-+_EMPTY_PMAP = _turbo_mapping({}, 0)
-+
-+
-+def pmap(initial={}, pre_size=0):
-+    """
-+    Create new persistent map, inserts all elements in initial into the newly created map.
-+    The optional argument pre_size may be used to specify an initial size of the underlying bucket vector. This
-+    may have a positive performance impact in the cases where you know beforehand that a large number of elements
-+    will be inserted into the map eventually since it will reduce the number of reallocations required.
-+
-+    >>> pmap({'a': 13, 'b': 14})
-+    pmap({'a': 13, 'b': 14})
-+    """
-+    if not initial:
-+        return _EMPTY_PMAP
-+
-+    return _turbo_mapping(initial, pre_size)
-+
-+
-+def m(**kwargs):
-+    """
-+    Creates a new persistent map. Inserts all key-value arguments into the newly created map.
-+
-+    >>> m(a=13, b=14)
-+    pmap({'a': 13, 'b': 14})
-+    """
-+    return pmap(kwargs)
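-+
-+
-+# Illustrative sketch (editorial, not part of the vendored source): per the
-+# pmap() docstring above, pre_size reserves bucket space up front so that
-+# later insertions avoid the doubling reallocation in _Evolver.set():
-+#
-+#     >>> m0 = pmap({'seed': 0}, pre_size=2048)
-+#     >>> m0.set('a', 1)['a']
-+#     1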
-diff --git a/third_party/python/pyrsistent/pyrsistent/_precord.py b/third_party/python/pyrsistent/pyrsistent/_precord.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_precord.py
-@@ -0,0 +1,169 @@
-+import six
-+from pyrsistent._checked_types import CheckedType, _restore_pickle, InvariantException, store_invariants
-+from pyrsistent._field_common import (
-+    set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
-+)
-+from pyrsistent._pmap import PMap, pmap
-+
-+
-+class _PRecordMeta(type):
-+    def __new__(mcs, name, bases, dct):
-+        set_fields(dct, bases, name='_precord_fields')
-+        store_invariants(dct, bases, '_precord_invariants', '__invariant__')
-+
-+        dct['_precord_mandatory_fields'] = \
-+            set(name for name, field in dct['_precord_fields'].items() if field.mandatory)
-+
-+        dct['_precord_initial_values'] = \
-+            dict((k, field.initial) for k, field in dct['_precord_fields'].items() if field.initial is not PFIELD_NO_INITIAL)
-+
-+
-+        dct['__slots__'] = ()
-+
-+        return super(_PRecordMeta, mcs).__new__(mcs, name, bases, dct)
-+
-+
-+@six.add_metaclass(_PRecordMeta)
-+class PRecord(PMap, CheckedType):
-+    """
-+    A PRecord is a PMap with a fixed set of specified fields. Records are declared as python classes inheriting
-+    from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element
-+    access using subscript notation.
-+
-+    More documentation and examples of PRecord usage are available at https://github.com/tobgu/pyrsistent
-+    """
-+    def __new__(cls, **kwargs):
-+        # Hack total! If these two special attributes exist that means we can create
-+        # ourselves. Otherwise we need to go through the Evolver to create the structures
-+        # for us.
-+        if '_precord_size' in kwargs and '_precord_buckets' in kwargs:
-+            return super(PRecord, cls).__new__(cls, kwargs['_precord_size'], kwargs['_precord_buckets'])
-+
-+        factory_fields = kwargs.pop('_factory_fields', None)
-+        ignore_extra = kwargs.pop('_ignore_extra', False)
-+
-+        initial_values = kwargs
-+        if cls._precord_initial_values:
-+            initial_values = dict((k, v() if callable(v) else v)
-+                                  for k, v in cls._precord_initial_values.items())
-+            initial_values.update(kwargs)
-+
-+        e = _PRecordEvolver(cls, pmap(), _factory_fields=factory_fields, _ignore_extra=ignore_extra)
-+        for k, v in initial_values.items():
-+            e[k] = v
-+
-+        return e.persistent()
-+
-+    def set(self, *args, **kwargs):
-+        """
-+        Set a field in the record. This set function differs slightly from that in the PMap
-+        class. First of all it accepts key-value pairs. Second it accepts multiple key-value
-+        pairs to perform one atomic update of multiple fields.
-+        """
-+
-+        # The PRecord set() can accept kwargs since all fields that have been declared are
-+        # valid python identifiers. Also allow multiple fields to be set in one operation.
-+        if args:
-+            return super(PRecord, self).set(args[0], args[1])
-+
-+        return self.update(kwargs)
-+
-+    def evolver(self):
-+        """
-+        Returns an evolver of this object.
-+        """
-+        return _PRecordEvolver(self.__class__, self)
-+
-+    def __repr__(self):
-+        return "{0}({1})".format(self.__class__.__name__,
-+                                 ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self.items()))
-+
-+    @classmethod
-+    def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
-+        """
-+        Factory method. Will create a new PRecord of the current type and assign the values
-+        specified in kwargs.
-+
-+        :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not
-+                             in the set of fields on the PRecord.
-+        """
-+        if isinstance(kwargs, cls):
-+            return kwargs
-+
-+        if ignore_extra:
-+            kwargs = {k: kwargs[k] for k in cls._precord_fields if k in kwargs}
-+
-+        return cls(_factory_fields=_factory_fields, _ignore_extra=ignore_extra, **kwargs)
-+
-+    def __reduce__(self):
-+        # Pickling support
-+        return _restore_pickle, (self.__class__, dict(self),)
-+
-+    def serialize(self, format=None):
-+        """
-+        Serialize the current PRecord using custom serializer functions for fields where
-+        such have been supplied.
-+        """
-+        return dict((k, serialize(self._precord_fields[k].serializer, format, v)) for k, v in self.items())
-+
-+
-+class _PRecordEvolver(PMap._Evolver):
-+    __slots__ = ('_destination_cls', '_invariant_error_codes', '_missing_fields', '_factory_fields', '_ignore_extra')
-+
-+    def __init__(self, cls, original_pmap, _factory_fields=None, _ignore_extra=False):
-+        super(_PRecordEvolver, self).__init__(original_pmap)
-+        self._destination_cls = cls
-+        self._invariant_error_codes = []
-+        self._missing_fields = []
-+        self._factory_fields = _factory_fields
-+        self._ignore_extra = _ignore_extra
-+
-+    def __setitem__(self, key, original_value):
-+        self.set(key, original_value)
-+
-+    def set(self, key, original_value):
-+        field = self._destination_cls._precord_fields.get(key)
-+        if field:
-+            if self._factory_fields is None or field in self._factory_fields:
-+                try:
-+                    if is_field_ignore_extra_complaint(PRecord, field, self._ignore_extra):
-+                        value = field.factory(original_value, ignore_extra=self._ignore_extra)
-+                    else:
-+                        value = field.factory(original_value)
-+                except InvariantException as e:
-+                    self._invariant_error_codes += e.invariant_errors
-+                    self._missing_fields += e.missing_fields
-+                    return self
-+            else:
-+                value = original_value
-+
-+            check_type(self._destination_cls, field, key, value)
-+
-+            is_ok, error_code = field.invariant(value)
-+            if not is_ok:
-+                self._invariant_error_codes.append(error_code)
-+
-+            return super(_PRecordEvolver, self).set(key, value)
-+        else:
-+            raise AttributeError("'{0}' is not among the specified fields for {1}".format(key, self._destination_cls.__name__))
-+
-+    def persistent(self):
-+        cls = self._destination_cls
-+        is_dirty = self.is_dirty()
-+        pm = super(_PRecordEvolver, self).persistent()
-+        if is_dirty or not isinstance(pm, cls):
-+            result = cls(_precord_buckets=pm._buckets, _precord_size=pm._size)
-+        else:
-+            result = pm
-+
-+        if cls._precord_mandatory_fields:
-+            self._missing_fields += tuple('{0}.{1}'.format(cls.__name__, f) for f
-+                                          in (cls._precord_mandatory_fields - set(result.keys())))
-+
-+        if self._invariant_error_codes or self._missing_fields:
-+            raise InvariantException(tuple(self._invariant_error_codes), tuple(self._missing_fields),
-+                                     'Field invariant failed')
-+
-+        check_global_invariants(result, cls._precord_invariants)
-+
-+        return result
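-+
-+
-+# Usage sketch (illustrative only; ``field`` is provided by the pyrsistent
-+# package alongside this module):
-+#
-+#     >>> from pyrsistent import PRecord, field
-+#     >>> class Point(PRecord):
-+#     ...     x = field(type=int, mandatory=True)
-+#     ...     y = field(type=int, initial=0)
-+#     >>> Point(x=1).y
-+#     0
-+#     >>> Point(x=1).set(x=2, y=3).x
-+#     2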
-diff --git a/third_party/python/pyrsistent/pyrsistent/_pset.py b/third_party/python/pyrsistent/pyrsistent/_pset.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_pset.py
-@@ -0,0 +1,229 @@
-+from ._compat import Set, Hashable
-+import sys
-+from pyrsistent._pmap import pmap
-+
-+PY2 = sys.version_info[0] < 3
-+
-+
-+class PSet(object):
-+    """
-+    Persistent set implementation. Built on top of the persistent map. The set supports all operations
-+    in the Set protocol and is Hashable.
-+
-+    Do not instantiate directly, instead use the factory functions :py:func:`s` or :py:func:`pset`
-+    to create an instance.
-+
-+    Random access and insert are O(log32(n)) where n is the size of the set.
-+
-+    Some examples:
-+
-+    >>> s = pset([1, 2, 3, 1])
-+    >>> s2 = s.add(4)
-+    >>> s3 = s2.remove(2)
-+    >>> s
-+    pset([1, 2, 3])
-+    >>> s2
-+    pset([1, 2, 3, 4])
-+    >>> s3
-+    pset([1, 3, 4])
-+    """
-+    __slots__ = ('_map', '__weakref__')
-+
-+    def __new__(cls, m):
-+        self = super(PSet, cls).__new__(cls)
-+        self._map = m
-+        return self
-+
-+    def __contains__(self, element):
-+        return element in self._map
-+
-+    def __iter__(self):
-+        return iter(self._map)
-+
-+    def __len__(self):
-+        return len(self._map)
-+
-+    def __repr__(self):
-+        if PY2 or not self:
-+            return 'p' + str(set(self))
-+
-+        return 'pset([{0}])'.format(str(set(self))[1:-1])
-+
-+    def __str__(self):
-+        return self.__repr__()
-+
-+    def __hash__(self):
-+        return hash(self._map)
-+
-+    def __reduce__(self):
-+        # Pickling support
-+        return pset, (list(self),)
-+
-+    @classmethod
-+    def _from_iterable(cls, it, pre_size=8):
-+        return PSet(pmap(dict((k, True) for k in it), pre_size=pre_size))
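-+
-+    # Design note (editorial, not in the vendored source): a PSet is just a
-+    # PMap whose keys are the members and whose values are all True, so the
-+    # membership tests, iteration, length and hashing above all delegate
-+    # straight to the underlying map.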
-+
-+    def add(self, element):
-+        """
-+        Return a new PSet with element added
-+
-+        >>> s1 = s(1, 2)
-+        >>> s1.add(3)
-+        pset([1, 2, 3])
-+        """
-+        return self.evolver().add(element).persistent()
-+
-+    def update(self, iterable):
-+        """
-+        Return a new PSet with elements in iterable added
-+
-+        >>> s1 = s(1, 2)
-+        >>> s1.update([3, 4, 4])
-+        pset([1, 2, 3, 4])
-+        """
-+        e = self.evolver()
-+        for element in iterable:
-+            e.add(element)
-+
-+        return e.persistent()
-+
-+    def remove(self, element):
-+        """
-+        Return a new PSet with element removed. Raises KeyError if element is not present.
-+
-+        >>> s1 = s(1, 2)
-+        >>> s1.remove(2)
-+        pset([1])
-+        """
-+        if element in self._map:
-+            return self.evolver().remove(element).persistent()
-+
-+        raise KeyError("Element '%s' not present in PSet" % element)
-+
-+    def discard(self, element):
-+        """
-+        Return a new PSet with element removed. Returns itself if element is not present.
-+        """
-+        if element in self._map:
-+            return self.evolver().remove(element).persistent()
-+
-+        return self
-+
-+    class _Evolver(object):
-+        __slots__ = ('_original_pset', '_pmap_evolver')
-+
-+        def __init__(self, original_pset):
-+            self._original_pset = original_pset
-+            self._pmap_evolver = original_pset._map.evolver()
-+
-+        def add(self, element):
-+            self._pmap_evolver[element] = True
-+            return self
-+
-+        def remove(self, element):
-+            del self._pmap_evolver[element]
-+            return self
-+
-+        def is_dirty(self):
-+            return self._pmap_evolver.is_dirty()
-+
-+        def persistent(self):
-+            if not self.is_dirty():
-+                return self._original_pset
-+
-+            return PSet(self._pmap_evolver.persistent())
-+
-+        def __len__(self):
-+            return len(self._pmap_evolver)
-+
-+    def copy(self):
-+        return self
-+
-+    def evolver(self):
-+        """
-+        Create a new evolver for this pset. For a discussion on evolvers in general see the
-+        documentation for the pvector evolver.
-+
-+        Create the evolver and perform various mutating updates to it:
-+
-+        >>> s1 = s(1, 2, 3)
-+        >>> e = s1.evolver()
-+        >>> _ = e.add(4)
-+        >>> len(e)
-+        4
-+        >>> _ = e.remove(1)
-+
-+        The underlying pset remains the same:
-+
-+        >>> s1
-+        pset([1, 2, 3])
-+
-+        The changes are kept in the evolver. An updated pset can be created using the
-+        persistent() function on the evolver.
-+
-+        >>> s2 = e.persistent()
-+        >>> s2
-+        pset([2, 3, 4])
-+
-+        The new pset will share data with the original pset in the same way that would have
-+        been done if only using operations on the pset.
-+        """
-+        return PSet._Evolver(self)
-+
-+    # All the operations and comparisons you would expect on a set.
-+    #
-+    # This is not very beautiful. If we avoid inheriting from PSet we can use the
-+    # __slots__ concepts (which requires a new style class) and hopefully save some memory.
-+    __le__ = Set.__le__
-+    __lt__ = Set.__lt__
-+    __gt__ = Set.__gt__
-+    __ge__ = Set.__ge__
-+    __eq__ = Set.__eq__
-+    __ne__ = Set.__ne__
-+
-+    __and__ = Set.__and__
-+    __or__ = Set.__or__
-+    __sub__ = Set.__sub__
-+    __xor__ = Set.__xor__
-+
-+    issubset = __le__
-+    issuperset = __ge__
-+    union = __or__
-+    intersection = __and__
-+    difference = __sub__
-+    symmetric_difference = __xor__
-+
-+    isdisjoint = Set.isdisjoint
-+
-+Set.register(PSet)
-+Hashable.register(PSet)
-+
-+_EMPTY_PSET = PSet(pmap())
-+
-+
-+def pset(iterable=(), pre_size=8):
-+    """
-+    Creates a persistent set from iterable. Optionally takes a sizing parameter equivalent to that
-+    used for :py:func:`pmap`.
-+
-+    >>> s1 = pset([1, 2, 3, 2])
-+    >>> s1
-+    pset([1, 2, 3])
-+    """
-+    if not iterable:
-+        return _EMPTY_PSET
-+
-+    return PSet._from_iterable(iterable, pre_size=pre_size)
-+
-+
-+def s(*elements):
-+    """
-+    Create a persistent set.
-+
-+    Takes an arbitrary number of arguments to insert into the new set.
-+
-+    >>> s1 = s(1, 2, 3, 2)
-+    >>> s1
-+    pset([1, 2, 3])
-+    """
-+    return pset(elements)
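-+
-+
-+# Usage sketch (illustrative only, not part of the vendored source): the
-+# Set mixins assigned above give psets the familiar set operators:
-+#
-+#     >>> s(1, 2) | s(2, 3)
-+#     pset([1, 2, 3])
-+#     >>> s(1, 2) & s(2, 3)
-+#     pset([2])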
-diff --git a/third_party/python/pyrsistent/pyrsistent/_pvector.py b/third_party/python/pyrsistent/pyrsistent/_pvector.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_pvector.py
-@@ -0,0 +1,713 @@
-+from abc import abstractmethod, ABCMeta
-+from ._compat import Sequence, Hashable
-+from numbers import Integral
-+import operator
-+import six
-+from pyrsistent._transformations import transform
-+
-+
-+def _bitcount(val):
-+    return bin(val).count("1")
-+
-+BRANCH_FACTOR = 32
-+BIT_MASK = BRANCH_FACTOR - 1
-+SHIFT = _bitcount(BIT_MASK)
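-+
-+# Index arithmetic sketch (editorial note, not in the vendored source):
-+# with BRANCH_FACTOR 32, SHIFT is 5 and each 5-bit slice of an index
-+# selects one child per trie level, e.g. for index 70 in a two-level trie:
-+#
-+#     (70 >> SHIFT) & BIT_MASK   # -> 2, slot in the root node
-+#     70 & BIT_MASK              # -> 6, slot in the leaf node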
-+
-+
-+def compare_pvector(v, other, operator):
-+    return operator(v.tolist(), other.tolist() if isinstance(other, PVector) else other)
-+
-+
-+def _index_or_slice(index, stop):
-+    if stop is None:
-+        return index
-+
-+    return slice(index, stop)
-+
-+
-+class PythonPVector(object):
-+    """
-+    Support structure for PVector that implements structural sharing for vectors using a trie.
-+    """
-+    __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '__weakref__')
-+
-+    def __new__(cls, count, shift, root, tail):
-+        self = super(PythonPVector, cls).__new__(cls)
-+        self._count = count
-+        self._shift = shift
-+        self._root = root
-+        self._tail = tail
-+
-+        # Derived attribute stored for performance
-+        self._tail_offset = self._count - len(self._tail)
-+        return self
-+
-+    def __len__(self):
-+        return self._count
-+
-+    def __getitem__(self, index):
-+        if isinstance(index, slice):
-+            # There are more conditions than the below where it would be OK to
-+            # return ourselves, implement those...
-+            if index.start is None and index.stop is None and index.step is None:
-+                return self
-+
-+            # This is a bit nasty realizing the whole structure as a list before
-+            # slicing it but it is the fastest way I've found to date, and it's easy :-)
-+            return _EMPTY_PVECTOR.extend(self.tolist()[index])
-+
-+        if index < 0:
-+            index += self._count
-+
-+        return PythonPVector._node_for(self, index)[index & BIT_MASK]
-+
-+    def __add__(self, other):
-+        return self.extend(other)
-+
-+    def __repr__(self):
-+        return 'pvector({0})'.format(str(self.tolist()))
-+
-+    def __str__(self):
-+        return self.__repr__()
-+
-+    def __iter__(self):
-+        # This is kind of lazy and will produce some memory overhead but it is the fastest method
-+        # by far of those tried since it uses the speed of the built in python list directly.
-+        return iter(self.tolist())
-+
-+    def __ne__(self, other):
-+        return not self.__eq__(other)
-+
-+    def __eq__(self, other):
-+        return self is other or (hasattr(other, '__len__') and self._count == len(other)) and compare_pvector(self, other, operator.eq)
-+
-+    def __gt__(self, other):
-+        return compare_pvector(self, other, operator.gt)
-+
-+    def __lt__(self, other):
-+        return compare_pvector(self, other, operator.lt)
-+
-+    def __ge__(self, other):
-+        return compare_pvector(self, other, operator.ge)
-+
-+    def __le__(self, other):
-+        return compare_pvector(self, other, operator.le)
-+
-+    def __mul__(self, times):
-+        if times <= 0 or self is _EMPTY_PVECTOR:
-+            return _EMPTY_PVECTOR
-+
-+        if times == 1:
-+            return self
-+
-+        return _EMPTY_PVECTOR.extend(times * self.tolist())
-+
-+    __rmul__ = __mul__
-+
-+    def _fill_list(self, node, shift, the_list):
-+        if shift:
-+            shift -= SHIFT
-+            for n in node:
-+                self._fill_list(n, shift, the_list)
-+        else:
-+            the_list.extend(node)
-+
-+    def tolist(self):
-+        """
-+        The fastest way to convert the vector into a python list.
-+        """
-+        the_list = []
-+        self._fill_list(self._root, self._shift, the_list)
-+        the_list.extend(self._tail)
-+        return the_list
-+
-+    def _totuple(self):
-+        """
-+        Returns the content as a python tuple.
-+        """
-+        return tuple(self.tolist())
-+
-+    def __hash__(self):
-+        # Taking the easy way out again...
-+        return hash(self._totuple())
-+
-+    def transform(self, *transformations):
-+        return transform(self, transformations)
-+
-+    def __reduce__(self):
-+        # Pickling support
-+        return pvector, (self.tolist(),)
-+
-+    def mset(self, *args):
-+        if len(args) % 2:
-+            raise TypeError("mset expected an even number of arguments")
-+
-+        evolver = self.evolver()
-+        for i in range(0, len(args), 2):
-+            evolver[args[i]] = args[i+1]
-+
-+        return evolver.persistent()
-+
-+    class Evolver(object):
-+        __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '_dirty_nodes',
-+                     '_extra_tail', '_cached_leafs', '_orig_pvector')
-+
-+        def __init__(self, v):
-+            self._reset(v)
-+
-+        def __getitem__(self, index):
-+            if not isinstance(index, Integral):
-+                raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
-+
-+            if index < 0:
-+                index += self._count + len(self._extra_tail)
-+
-+            if self._count <= index < self._count + len(self._extra_tail):
-+                return self._extra_tail[index - self._count]
-+
-+            return PythonPVector._node_for(self, index)[index & BIT_MASK]
-+
-+        def _reset(self, v):
-+            self._count = v._count
-+            self._shift = v._shift
-+            self._root = v._root
-+            self._tail = v._tail
-+            self._tail_offset = v._tail_offset
-+            self._dirty_nodes = {}
-+            self._cached_leafs = {}
-+            self._extra_tail = []
-+            self._orig_pvector = v
-+
-+        def append(self, element):
-+            self._extra_tail.append(element)
-+            return self
-+
-+        def extend(self, iterable):
-+            self._extra_tail.extend(iterable)
-+            return self
-+
-+        def set(self, index, val):
-+            self[index] = val
-+            return self
-+
-+        def __setitem__(self, index, val):
-+            if not isinstance(index, Integral):
-+                raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
-+
-+            if index < 0:
-+                index += self._count + len(self._extra_tail)
-+
-+            if 0 <= index < self._count:
-+                node = self._cached_leafs.get(index >> SHIFT)
-+                if node:
-+                    node[index & BIT_MASK] = val
-+                elif index >= self._tail_offset:
-+                    if id(self._tail) not in self._dirty_nodes:
-+                        self._tail = list(self._tail)
-+                        self._dirty_nodes[id(self._tail)] = True
-+                        self._cached_leafs[index >> SHIFT] = self._tail
-+                    self._tail[index & BIT_MASK] = val
-+                else:
-+                    self._root = self._do_set(self._shift, self._root, index, val)
-+            elif self._count <= index < self._count + len(self._extra_tail):
-+                self._extra_tail[index - self._count] = val
-+            elif index == self._count + len(self._extra_tail):
-+                self._extra_tail.append(val)
-+            else:
-+                raise IndexError("Index out of range: %s" % (index,))
-+
-+        def _do_set(self, level, node, i, val):
-+            if id(node) in self._dirty_nodes:
-+                ret = node
-+            else:
-+                ret = list(node)
-+                self._dirty_nodes[id(ret)] = True
-+
-+            if level == 0:
-+                ret[i & BIT_MASK] = val
-+                self._cached_leafs[i >> SHIFT] = ret
-+            else:
-+                sub_index = (i >> level) & BIT_MASK  # >>>
-+                ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
-+
-+            return ret
-+
-+        def delete(self, index):
-+            del self[index]
-+            return self
-+
-+        def __delitem__(self, key):
-+            if self._orig_pvector:
-+                # All structural sharing bets are off, base evolver on _extra_tail only
-+                l = PythonPVector(self._count, self._shift, self._root, self._tail).tolist()
-+                l.extend(self._extra_tail)
-+                self._reset(_EMPTY_PVECTOR)
-+                self._extra_tail = l
-+
-+            del self._extra_tail[key]
-+
-+        def persistent(self):
-+            result = self._orig_pvector
-+            if self.is_dirty():
-+                result = PythonPVector(self._count, self._shift, self._root, self._tail).extend(self._extra_tail)
-+                self._reset(result)
-+
-+            return result
-+
-+        def __len__(self):
-+            return self._count + len(self._extra_tail)
-+
-+        def is_dirty(self):
-+            return bool(self._dirty_nodes or self._extra_tail)
-+
-+    def evolver(self):
-+        return PythonPVector.Evolver(self)
-+
-+    def set(self, i, val):
-+        # This method could be implemented by a call to mset() but doing so would cause
-+        # a ~5 X performance penalty on PyPy (considered the primary platform for this implementation
-+        #  of PVector) so we're keeping this implementation for now.
-+
-+        if not isinstance(i, Integral):
-+            raise TypeError("'%s' object cannot be interpreted as an index" % type(i).__name__)
-+
-+        if i < 0:
-+            i += self._count
-+
-+        if 0 <= i < self._count:
-+            if i >= self._tail_offset:
-+                new_tail = list(self._tail)
-+                new_tail[i & BIT_MASK] = val
-+                return PythonPVector(self._count, self._shift, self._root, new_tail)
-+
-+            return PythonPVector(self._count, self._shift, self._do_set(self._shift, self._root, i, val), self._tail)
-+
-+        if i == self._count:
-+            return self.append(val)
-+
-+        raise IndexError("Index out of range: %s" % (i,))
-+
-+    def _do_set(self, level, node, i, val):
-+        ret = list(node)
-+        if level == 0:
-+            ret[i & BIT_MASK] = val
-+        else:
-+            sub_index = (i >> level) & BIT_MASK  # >>>
-+            ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
-+
-+        return ret
-+
-+    @staticmethod
-+    def _node_for(pvector_like, i):
-+        if 0 <= i < pvector_like._count:
-+            if i >= pvector_like._tail_offset:
-+                return pvector_like._tail
-+
-+            node = pvector_like._root
-+            for level in range(pvector_like._shift, 0, -SHIFT):
-+                node = node[(i >> level) & BIT_MASK]  # >>>
-+
-+            return node
-+
-+        raise IndexError("Index out of range: %s" % (i,))
-+
-+    def _create_new_root(self):
-+        new_shift = self._shift
-+
-+        # Overflow root?
-+        if (self._count >> SHIFT) > (1 << self._shift): # >>>
-+            new_root = [self._root, self._new_path(self._shift, self._tail)]
-+            new_shift += SHIFT
-+        else:
-+            new_root = self._push_tail(self._shift, self._root, self._tail)
-+
-+        return new_root, new_shift
-+
-+    def append(self, val):
-+        if len(self._tail) < BRANCH_FACTOR:
-+            new_tail = list(self._tail)
-+            new_tail.append(val)
-+            return PythonPVector(self._count + 1, self._shift, self._root, new_tail)
-+
-+        # Full tail, push into tree
-+        new_root, new_shift = self._create_new_root()
-+        return PythonPVector(self._count + 1, new_shift, new_root, [val])
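-+
-+    # Illustrative numbers (editorial, not in the vendored source): the
-+    # tail above absorbs appends until it holds BRANCH_FACTOR items, and
-+    # only then is a node pushed into the trie, keeping append amortized
-+    # O(1):
-+    #
-+    #     >>> v32 = python_pvector(range(32))
-+    #     >>> len(v32.append(99)._tail)   # full tail was pushed; new tail
-+    #     1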
-+
-+    def _new_path(self, level, node):
-+        if level == 0:
-+            return node
-+
-+        return [self._new_path(level - SHIFT, node)]
-+
-+    def _mutating_insert_tail(self):
-+        self._root, self._shift = self._create_new_root()
-+        self._tail = []
-+
-+    def _mutating_fill_tail(self, offset, sequence):
-+        max_delta_len = BRANCH_FACTOR - len(self._tail)
-+        delta = sequence[offset:offset + max_delta_len]
-+        self._tail.extend(delta)
-+        delta_len = len(delta)
-+        self._count += delta_len
-+        return offset + delta_len
-+
-+    def _mutating_extend(self, sequence):
-+        offset = 0
-+        sequence_len = len(sequence)
-+        while offset < sequence_len:
-+            offset = self._mutating_fill_tail(offset, sequence)
-+            if len(self._tail) == BRANCH_FACTOR:
-+                self._mutating_insert_tail()
-+
-+        self._tail_offset = self._count - len(self._tail)
-+
-+    def extend(self, obj):
-+        # Mutates the new vector directly for efficiency but that's only an
-+        # implementation detail, once it is returned it should be considered immutable
-+        l = obj.tolist() if isinstance(obj, PythonPVector) else list(obj)
-+        if l:
-+            new_vector = self.append(l[0])
-+            new_vector._mutating_extend(l[1:])
-+            return new_vector
-+
-+        return self
-+
-+    def _push_tail(self, level, parent, tail_node):
-+        """
-+        if parent is leaf, insert node,
-+        else does it map to an existing child? ->
-+             node_to_insert = push node one more level
-+        else alloc new path
-+
-+        return  node_to_insert placed in copy of parent
-+        """
-+        ret = list(parent)
-+
-+        if level == SHIFT:
-+            ret.append(tail_node)
-+            return ret
-+
-+        sub_index = ((self._count - 1) >> level) & BIT_MASK  # >>>
-+        if len(parent) > sub_index:
-+            ret[sub_index] = self._push_tail(level - SHIFT, parent[sub_index], tail_node)
-+            return ret
-+
-+        ret.append(self._new_path(level - SHIFT, tail_node))
-+        return ret
-+
-+    def index(self, value, *args, **kwargs):
-+        return self.tolist().index(value, *args, **kwargs)
-+
-+    def count(self, value):
-+        return self.tolist().count(value)
-+
-+    def delete(self, index, stop=None):
-+        l = self.tolist()
-+        del l[_index_or_slice(index, stop)]
-+        return _EMPTY_PVECTOR.extend(l)
-+
-+    def remove(self, value):
-+        l = self.tolist()
-+        l.remove(value)
-+        return _EMPTY_PVECTOR.extend(l)
-+
-+@six.add_metaclass(ABCMeta)
-+class PVector(object):
-+    """
-+    Persistent vector implementation. Meant as a replacement for the cases where you would normally
-+    use a Python list.
-+
-+    Do not instantiate directly, instead use the factory functions :py:func:`v` and :py:func:`pvector` to
-+    create an instance.
-+
-+    Heavily influenced by the persistent vector available in Clojure. Initially this was more or
-+    less just a port of the Java code for the Clojure vector. It has since been modified and to
-+    some extent optimized for usage in Python.
-+
-+    The vector is organized as a trie; any mutating method will return a new vector that contains the changes. No
-+    updates are done to the original vector. Structural sharing between vectors is applied where possible to save
-+    space and to avoid making complete copies.
-+
-+    This structure corresponds most closely to the built in list type and is intended as a replacement. Where the
-+    semantics are the same (more or less) the same function names have been used, but in some cases that is not
-+    possible, for example for assignments.
-+
-+    The PVector implements the Sequence protocol and is Hashable.
-+
-+    Inserts are amortized O(1). Random access is O(log32(n)) where n is the size of the vector.
-+
-+    The following are examples of some common operations on persistent vectors:
-+
-+    >>> p = v(1, 2, 3)
-+    >>> p2 = p.append(4)
-+    >>> p3 = p2.extend([5, 6, 7])
-+    >>> p
-+    pvector([1, 2, 3])
-+    >>> p2
-+    pvector([1, 2, 3, 4])
-+    >>> p3
-+    pvector([1, 2, 3, 4, 5, 6, 7])
-+    >>> p3[5]
-+    6
-+    >>> p.set(1, 99)
-+    pvector([1, 99, 3])
-+    >>>
-+    """
-+
-+    @abstractmethod
-+    def __len__(self):
-+        """
-+        >>> len(v(1, 2, 3))
-+        3
-+        """
-+
-+    @abstractmethod
-+    def __getitem__(self, index):
-+        """
-+        Get value at index. Full slicing support.
-+
-+        >>> v1 = v(5, 6, 7, 8)
-+        >>> v1[2]
-+        7
-+        >>> v1[1:3]
-+        pvector([6, 7])
-+        """
-+
-+    @abstractmethod
-+    def __add__(self, other):
-+        """
-+        >>> v1 = v(1, 2)
-+        >>> v2 = v(3, 4)
-+        >>> v1 + v2
-+        pvector([1, 2, 3, 4])
-+        """
-+
-+    @abstractmethod
-+    def __mul__(self, times):
-+        """
-+        >>> v1 = v(1, 2)
-+        >>> 3 * v1
-+        pvector([1, 2, 1, 2, 1, 2])
-+        """
-+
-+    @abstractmethod
-+    def __hash__(self):
-+        """
-+        >>> v1 = v(1, 2, 3)
-+        >>> v2 = v(1, 2, 3)
-+        >>> hash(v1) == hash(v2)
-+        True
-+        """
-+
-+    @abstractmethod
-+    def evolver(self):
-+        """
-+        Create a new evolver for this pvector. The evolver acts as a mutable view of the vector
-+        with "transaction like" semantics. No part of the underlying vector is updated; it is still
-+        fully immutable. Furthermore, multiple evolvers created from the same pvector do not
-+        interfere with each other.
-+
-+        You may want to use an evolver instead of working directly with the pvector in the
-+        following cases:
-+
-+        * Multiple updates are done to the same vector and the intermediate results are of no
-+          interest. In this case using an evolver may be more efficient and easier to work with.
-+        * You need to pass a vector into a legacy function or a function that you have no control
-+          over which performs in place mutations of lists. In this case pass an evolver instance
-+          instead and then create a new pvector from the evolver once the function returns.
-+
-+        The following example illustrates a typical workflow when working with evolvers. It also
-+        displays most of the API (which is kept small by design; you should not be tempted to
-+        use evolvers in excess ;-)).
-+
-+        Create the evolver and perform various mutating updates to it:
-+
-+        >>> v1 = v(1, 2, 3, 4, 5)
-+        >>> e = v1.evolver()
-+        >>> e[1] = 22
-+        >>> _ = e.append(6)
-+        >>> _ = e.extend([7, 8, 9])
-+        >>> e[8] += 1
-+        >>> len(e)
-+        9
-+
-+        The underlying pvector remains the same:
-+
-+        >>> v1
-+        pvector([1, 2, 3, 4, 5])
-+
-+        The changes are kept in the evolver. An updated pvector can be created using the
-+        persistent() function on the evolver.
-+
-+        >>> v2 = e.persistent()
-+        >>> v2
-+        pvector([1, 22, 3, 4, 5, 6, 7, 8, 10])
-+
-+        The new pvector will share data with the original pvector in the same way that would have
-+        been done if only using operations on the pvector.
-+        """
-+
-+    @abstractmethod
-+    def mset(self, *args):
-+        """
-+        Return a new vector with elements in specified positions replaced by values (multi set).
-+
-+        Elements on even positions in the argument list are interpreted as indexes while
-+        elements on odd positions are considered values.
-+
-+        >>> v1 = v(1, 2, 3)
-+        >>> v1.mset(0, 11, 2, 33)
-+        pvector([11, 2, 33])
-+        """
-+
-+    @abstractmethod
-+    def set(self, i, val):
-+        """
-+        Return a new vector with element at position i replaced with val. The original vector remains unchanged.
-+
-+        Setting a value one step beyond the end of the vector is equal to appending. Setting beyond that will
-+        result in an IndexError.
-+
-+        >>> v1 = v(1, 2, 3)
-+        >>> v1.set(1, 4)
-+        pvector([1, 4, 3])
-+        >>> v1.set(3, 4)
-+        pvector([1, 2, 3, 4])
-+        >>> v1.set(-1, 4)
-+        pvector([1, 2, 4])
-+        """
-+
-+    @abstractmethod
-+    def append(self, val):
-+        """
-+        Return a new vector with val appended.
-+
-+        >>> v1 = v(1, 2)
-+        >>> v1.append(3)
-+        pvector([1, 2, 3])
-+        """
-+
-+    @abstractmethod
-+    def extend(self, obj):
-+        """
-+        Return a new vector with all values in obj appended to it. Obj may be another
-+        PVector or any other Iterable.
-+
-+        >>> v1 = v(1, 2, 3)
-+        >>> v1.extend([4, 5])
-+        pvector([1, 2, 3, 4, 5])
-+        """
-+
-+    @abstractmethod
-+    def index(self, value, *args, **kwargs):
-+        """
-+        Return first index of value. Additional indexes may be supplied to limit the search to a
-+        sub range of the vector.
-+
-+        >>> v1 = v(1, 2, 3, 4, 3)
-+        >>> v1.index(3)
-+        2
-+        >>> v1.index(3, 3, 5)
-+        4
-+        """
-+
-+    @abstractmethod
-+    def count(self, value):
-+        """
-+        Return the number of times that value appears in the vector.
-+
-+        >>> v1 = v(1, 4, 3, 4)
-+        >>> v1.count(4)
-+        2
-+        """
-+
-+    @abstractmethod
-+    def transform(self, *transformations):
-+        """
-+        Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
-+        consists of two parts. One match expression that specifies which elements to transform
-+        and one transformation function that performs the actual transformation.
-+
-+        >>> from pyrsistent import freeze, ny
-+        >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
-+        ...                                   {'author': 'Steve', 'content': 'A slightly longer article'}],
-+        ...                      'weather': {'temperature': '11C', 'wind': '5m/s'}})
-+        >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
-+        >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
-+        >>> very_short_news.articles[0].content
-+        'A short article'
-+        >>> very_short_news.articles[1].content
-+        'A slightly long...'
-+
-+        When nothing has been transformed the original data structure is kept
-+
-+        >>> short_news is news_paper
-+        True
-+        >>> very_short_news is news_paper
-+        False
-+        >>> very_short_news.articles[0] is news_paper.articles[0]
-+        True
-+        """
-+
-+    @abstractmethod
-+    def delete(self, index, stop=None):
-+        """
-+        Delete a portion of the vector by index or range.
-+
-+        >>> v1 = v(1, 2, 3, 4, 5)
-+        >>> v1.delete(1)
-+        pvector([1, 3, 4, 5])
-+        >>> v1.delete(1, 3)
-+        pvector([1, 4, 5])
-+        """
-+
-+    @abstractmethod
-+    def remove(self, value):
-+        """
-+        Remove the first occurrence of a value from the vector.
-+
-+        >>> v1 = v(1, 2, 3, 2, 1)
-+        >>> v2 = v1.remove(1)
-+        >>> v2
-+        pvector([2, 3, 2, 1])
-+        >>> v2.remove(1)
-+        pvector([2, 3, 2])
-+        """
-+
-+
-+_EMPTY_PVECTOR = PythonPVector(0, SHIFT, [], [])
-+PVector.register(PythonPVector)
-+Sequence.register(PVector)
-+Hashable.register(PVector)
-+
-+def python_pvector(iterable=()):
-+    """
-+    Create a new persistent vector containing the elements in iterable.
-+
-+    >>> v1 = python_pvector([1, 2, 3])
-+    >>> v1
-+    pvector([1, 2, 3])
-+    """
-+    return _EMPTY_PVECTOR.extend(iterable)
-+
-+try:
-+    # Use the C extension as underlying trie implementation if it is available
-+    import os
-+    if os.environ.get('PYRSISTENT_NO_C_EXTENSION'):
-+        pvector = python_pvector
-+    else:
-+        from pvectorc import pvector
-+        PVector.register(type(pvector()))
-+except ImportError:
-+    pvector = python_pvector
-+
-+
-+def v(*elements):
-+    """
-+    Create a new persistent vector containing all parameters to this function.
-+
-+    >>> v1 = v(1, 2, 3)
-+    >>> v1
-+    pvector([1, 2, 3])
-+    """
-+    return pvector(elements)
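-+
-+
-+# Editorial note (not in the vendored source): the import fallback above
-+# can also be forced from the environment before pyrsistent is imported,
-+# e.g.
-+#
-+#     PYRSISTENT_NO_C_EXTENSION=1 python script.py
-+#
-+# in which case ``pvector`` is the pure-Python ``python_pvector``.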
-diff --git a/third_party/python/pyrsistent/pyrsistent/_toolz.py b/third_party/python/pyrsistent/pyrsistent/_toolz.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_toolz.py
-@@ -0,0 +1,83 @@
-+"""
-+Functionality copied from the toolz package to avoid having
-+to add toolz as a dependency.
-+
-+See https://github.com/pytoolz/toolz/.
-+
-+toolz is released under BSD licence. Below is the licence text
-+from toolz as it appeared when copying the code.
-+
-+--------------------------------------------------------------
-+
-+Copyright (c) 2013 Matthew Rocklin
-+
-+All rights reserved.
-+
-+Redistribution and use in source and binary forms, with or without
-+modification, are permitted provided that the following conditions are met:
-+
-+  a. Redistributions of source code must retain the above copyright notice,
-+     this list of conditions and the following disclaimer.
-+  b. Redistributions in binary form must reproduce the above copyright
-+     notice, this list of conditions and the following disclaimer in the
-+     documentation and/or other materials provided with the distribution.
-+  c. Neither the name of toolz nor the names of its contributors
-+     may be used to endorse or promote products derived from this software
-+     without specific prior written permission.
-+
-+
-+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
-+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-+DAMAGE.
-+"""
-+import operator
-+from six.moves import reduce
-+
-+
-+def get_in(keys, coll, default=None, no_default=False):
-+    """
-+    NB: This is a straight copy of the get_in implementation found in
-+        the toolz library (https://github.com/pytoolz/toolz/). It works
-+        with persistent data structures as well as the corresponding
-+        data structures from the stdlib.
-+
-+    Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.
-+
-+    If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless
-+    ``no_default`` is specified, then it raises KeyError or IndexError.
-+
-+    ``get_in`` is a generalization of ``operator.getitem`` for nested data
-+    structures such as dictionaries and lists.
-+    >>> from pyrsistent import freeze
-+    >>> transaction = freeze({'name': 'Alice',
-+    ...                       'purchase': {'items': ['Apple', 'Orange'],
-+    ...                                    'costs': [0.50, 1.25]},
-+    ...                       'credit card': '5555-1234-1234-1234'})
-+    >>> get_in(['purchase', 'items', 0], transaction)
-+    'Apple'
-+    >>> get_in(['name'], transaction)
-+    'Alice'
-+    >>> get_in(['purchase', 'total'], transaction)
-+    >>> get_in(['purchase', 'items', 'apple'], transaction)
-+    >>> get_in(['purchase', 'items', 10], transaction)
-+    >>> get_in(['purchase', 'total'], transaction, 0)
-+    0
-+    >>> get_in(['y'], {}, no_default=True)
-+    Traceback (most recent call last):
-+        ...
-+    KeyError: 'y'
-+    """
-+    try:
-+        return reduce(operator.getitem, keys, coll)
-+    except (KeyError, IndexError, TypeError):
-+        if no_default:
-+            raise
-+        return default
-\ No newline at end of file
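
A short sketch of `get_in` in use, mirroring its docstring (assuming `get_in` and `freeze` are re-exported from the top-level `pyrsistent` package, as in upstream releases):

    from pyrsistent import freeze, get_in

    tx = freeze({'purchase': {'items': ['Apple', 'Orange']}})
    assert get_in(['purchase', 'items', 1], tx) == 'Orange'
    # A missing path falls back to the default instead of raising.
    assert get_in(['purchase', 'total'], tx, default=0) == 0
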
-diff --git a/third_party/python/pyrsistent/pyrsistent/_transformations.py b/third_party/python/pyrsistent/pyrsistent/_transformations.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/_transformations.py
-@@ -0,0 +1,143 @@
-+import re
-+import six
-+try:
-+    from inspect import Parameter, signature
-+except ImportError:
-+    signature = None
-+    try:
-+        from inspect import getfullargspec as getargspec
-+    except ImportError:
-+        from inspect import getargspec
-+
-+
-+_EMPTY_SENTINEL = object()
-+
-+
-+def inc(x):
-+    """ Add one to the current value """
-+    return x + 1
-+
-+
-+def dec(x):
-+    """ Subtract one from the current value """
-+    return x - 1
-+
-+
-+def discard(evolver, key):
-+    """ Discard the element and returns a structure without the discarded elements """
-+    try:
-+        del evolver[key]
-+    except KeyError:
-+        pass
-+
-+
-+# Matchers
-+def rex(expr):
-+    """ Regular expression matcher to use together with transform functions """
-+    r = re.compile(expr)
-+    return lambda key: isinstance(key, six.string_types) and r.match(key)
-+
-+
-+def ny(_):
-+    """ Matcher that matches any value """
-+    return True
-+
-+
-+# Support functions
-+def _chunks(l, n):
-+    for i in range(0, len(l), n):
-+        yield l[i:i + n]
-+
-+
-+def transform(structure, transformations):
-+    r = structure
-+    for path, command in _chunks(transformations, 2):
-+        r = _do_to_path(r, path, command)
-+    return r
-+
-+
-+def _do_to_path(structure, path, command):
-+    if not path:
-+        return command(structure) if callable(command) else command
-+
-+    kvs = _get_keys_and_values(structure, path[0])
-+    return _update_structure(structure, kvs, path[1:], command)
-+
-+
-+def _items(structure):
-+    try:
-+        return structure.items()
-+    except AttributeError:
-+        # Support a wider range of structures by adding a transform_items() or similar?
-+        return list(enumerate(structure))
-+
-+
-+def _get(structure, key, default):
-+    try:
-+        if hasattr(structure, '__getitem__'):
-+            return structure[key]
-+
-+        return getattr(structure, key)
-+
-+    except (IndexError, KeyError):
-+        return default
-+
-+
-+def _get_keys_and_values(structure, key_spec):
-+    if callable(key_spec):
-+        # Support predicates as callable objects in the path
-+        arity = _get_arity(key_spec)
-+        if arity == 1:
-+            # Unary predicates are called with the "key" of the path
-+            # - eg a key in a mapping, an index in a sequence.
-+            return [(k, v) for k, v in _items(structure) if key_spec(k)]
-+        elif arity == 2:
-+            # Binary predicates are called with the key and the corresponding
-+            # value.
-+            return [(k, v) for k, v in _items(structure) if key_spec(k, v)]
-+        else:
-+            # Other arities are an error.
-+            raise ValueError(
-+                "callable in transform path must take 1 or 2 arguments"
-+            )
-+
-+    # Non-callables are used as-is as a key.
-+    return [(key_spec, _get(structure, key_spec, _EMPTY_SENTINEL))]
-+
-+
-+if signature is None:
-+    def _get_arity(f):
-+        argspec = getargspec(f)
-+        return len(argspec.args) - len(argspec.defaults or ())
-+else:
-+    def _get_arity(f):
-+        return sum(
-+            1
-+            for p
-+            in signature(f).parameters.values()
-+            if p.default is Parameter.empty
-+            and p.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
-+        )
-+
-+
-+def _update_structure(structure, kvs, path, command):
-+    from pyrsistent._pmap import pmap
-+    e = structure.evolver()
-+    if not path and command is discard:
-+        # Do this in reverse to avoid index problems with vectors. See #92.
-+        for k, v in reversed(kvs):
-+            discard(e, k)
-+    else:
-+        for k, v in kvs:
-+            is_empty = False
-+            if v is _EMPTY_SENTINEL:
-+                # Allow expansion of structure but make sure to cover the case
-+                # when an empty pmap is added as leaf node. See #154.
-+                is_empty = True
-+                v = pmap()
-+
-+            result = _do_to_path(v, path, command)
-+            if result is not v or is_empty:
-+                e[k] = result
-+
-+    return e.persistent()
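
A sketch of the transformation machinery above in use (assuming `freeze`, `inc`, `rex` and `discard` are re-exported from the top-level `pyrsistent` package, as upstream does):

    from pyrsistent import freeze, inc, rex, discard

    m = freeze({'foo_count': 1, 'bar_count': 2, 'tmp': 'x'})
    # Increment every value whose key matches the regular expression ...
    m2 = m.transform([rex(r'.*_count')], inc)
    # ... then drop the 'tmp' key.
    m3 = m2.transform(['tmp'], discard)
    assert m3 == freeze({'foo_count': 2, 'bar_count': 3})
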
-diff --git a/third_party/python/pyrsistent/pyrsistent/py.typed b/third_party/python/pyrsistent/pyrsistent/py.typed
-new file mode 100644
-diff --git a/third_party/python/pyrsistent/pyrsistent/typing.py b/third_party/python/pyrsistent/pyrsistent/typing.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/typing.py
-@@ -0,0 +1,80 @@
-+"""Helpers for use with type annotation.
-+
-+Use the empty classes in this module when annotating the types of Pyrsistent
-+objects, instead of using the actual collection class.
-+
-+For example,
-+
-+    from pyrsistent import pvector
-+    from pyrsistent.typing import PVector
-+
-+    myvector: PVector[str] = pvector(['a', 'b', 'c'])
-+
-+"""
-+from __future__ import absolute_import
-+
-+try:
-+    from typing import Container
-+    from typing import Hashable
-+    from typing import Generic
-+    from typing import Iterable
-+    from typing import Mapping
-+    from typing import Sequence
-+    from typing import Sized
-+    from typing import TypeVar
-+
-+    __all__ = [
-+        'CheckedPMap',
-+        'CheckedPSet',
-+        'CheckedPVector',
-+        'PBag',
-+        'PDeque',
-+        'PList',
-+        'PMap',
-+        'PSet',
-+        'PVector',
-+    ]
-+
-+    T = TypeVar('T')
-+    KT = TypeVar('KT')
-+    VT = TypeVar('VT')
-+
-+    class CheckedPMap(Mapping[KT, VT], Hashable):
-+        pass
-+
-+    # PSet.add and PSet.discard have different type signatures than that of Set.
-+    class CheckedPSet(Generic[T], Hashable):
-+        pass
-+
-+    class CheckedPVector(Sequence[T], Hashable):
-+        pass
-+
-+    class PBag(Container[T], Iterable[T], Sized, Hashable):
-+        pass
-+
-+    class PDeque(Sequence[T], Hashable):
-+        pass
-+
-+    class PList(Sequence[T], Hashable):
-+        pass
-+
-+    class PMap(Mapping[KT, VT], Hashable):
-+        pass
-+
-+    # PSet.add and PSet.discard have different type signatures than that of Set.
-+    class PSet(Generic[T], Hashable):
-+        pass
-+
-+    class PVector(Sequence[T], Hashable):
-+        pass
-+
-+    class PVectorEvolver(Generic[T]):
-+        pass
-+
-+    class PMapEvolver(Generic[KT, VT]):
-+        pass
-+
-+    class PSetEvolver(Generic[T]):
-+        pass
-+except ImportError:
-+    pass
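
A sketch of the annotation style this module enables, matching its docstring:

    from pyrsistent import pmap, pvector
    from pyrsistent.typing import PMap, PVector

    names: PVector[str] = pvector(['a', 'b', 'c'])
    ages: PMap[str, int] = pmap({'alice': 30})
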
-diff --git a/third_party/python/pyrsistent/pyrsistent/typing.pyi b/third_party/python/pyrsistent/pyrsistent/typing.pyi
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/pyrsistent/typing.pyi
-@@ -0,0 +1,292 @@
-+# flake8: noqa: E704
-+# from https://gist.github.com/WuTheFWasThat/091a17d4b5cab597dfd5d4c2d96faf09
-+# Stubs for pyrsistent (Python 3.6)
-+#
-+from typing import Any
-+from typing import Callable
-+from typing import Dict
-+from typing import Generic
-+from typing import Hashable
-+from typing import Iterator
-+from typing import Iterable
-+from typing import List
-+from typing import Mapping
-+from typing import Optional
-+from typing import Sequence
-+from typing import AbstractSet
-+from typing import Sized
-+from typing import Set
-+from typing import Tuple
-+from typing import TypeVar
-+from typing import Type
-+from typing import Union
-+from typing import overload
-+
-+T = TypeVar('T')
-+KT = TypeVar('KT')
-+VT = TypeVar('VT')
-+
-+
-+class PMap(Mapping[KT, VT], Hashable):
-+    def __add__(self, other: PMap[KT, VT]) -> PMap[KT, VT]: ...
-+    def __getitem__(self, key: KT) -> VT: ...
-+    def __getattr__(self, key: str) -> VT: ...
-+    def __hash__(self) -> int: ...
-+    def __iter__(self) -> Iterator[KT]: ...
-+    def __len__(self) -> int: ...
-+    def copy(self) -> PMap[KT, VT]: ...
-+    def discard(self, key: KT) -> PMap[KT, VT]: ...
-+    def evolver(self) -> PMapEvolver[KT, VT]: ...
-+    def iteritems(self) -> Iterable[Tuple[KT, VT]]: ...
-+    def iterkeys(self) -> Iterable[KT]: ...
-+    def itervalues(self) -> Iterable[VT]: ...
-+    def remove(self, key: KT) -> PMap[KT, VT]: ...
-+    def set(self, key: KT, val: VT) -> PMap[KT, VT]: ...
-+    def transform(self, *transformations: Any) -> PMap[KT, VT]: ...
-+    def update(self, *args: Mapping): ...
-+    def update_with(self, update_fn: Callable[[VT, VT], VT], *args: Mapping) -> Any: ...
-+
-+
-+class PMapEvolver(Generic[KT, VT]):
-+    def __delitem__(self, key: KT) -> None: ...
-+    def __getitem__(self, key: KT) -> VT: ...
-+    def __len__(self) -> int: ...
-+    def __setitem__(self, key: KT, val: VT) -> None: ...
-+    def is_dirty(self) -> bool: ...
-+    def persistent(self) -> PMap[KT, VT]: ...
-+    def remove(self, key: KT) -> PMapEvolver[KT, VT]: ...
-+    def set(self, key: KT, val: VT) -> PMapEvolver[KT, VT]: ...
-+
-+
-+class PVector(Sequence[T], Hashable):
-+    def __add__(self, other: PVector[T]) -> PVector[T]: ...
-+    @overload
-+    def __getitem__(self, index: int) -> T: ...
-+    @overload
-+    def __getitem__(self, index: slice) -> PVector[T]: ...
-+    def __hash__(self) -> int: ...
-+    def __len__(self) -> int: ...
-+    def __mul__(self, other: PVector[T]) -> PVector[T]: ...
-+    def append(self, val: T) -> PVector[T]: ...
-+    def delete(self, index: int, stop: Optional[int]) -> PVector[T]: ...
-+    def evolver(self) -> PVectorEvolver[T]: ...
-+    def extend(self, obj: Iterable[T]) -> PVector[T]: ...
-+    def tolist(self) -> List[T]: ...
-+    def mset(self, *args: Iterable[Union[T, int]]) -> PVector[T]: ...
-+    def remove(self, value: T) -> PVector[T]: ...
-+    # Not compatible with MutableSequence
-+    def set(self, i: int, val: T) -> PVector[T]: ...
-+    def transform(self, *transformations: Any) -> PVector[T]: ...
-+
-+
-+class PVectorEvolver(Sequence[T], Sized):
-+    def __delitem__(self, i: Union[int, slice]) -> None: ...
-+    @overload
-+    def __getitem__(self, index: int) -> T: ...
-+    # Not actually supported
-+    @overload
-+    def __getitem__(self, index: slice) -> PVectorEvolver[T]: ...
-+    def __len__(self) -> int: ...
-+    def __setitem__(self, index: int, val: T) -> None: ...
-+    def append(self, val: T) -> PVectorEvolver[T]: ...
-+    def delete(self, value: T) -> PVectorEvolver[T]: ...
-+    def extend(self, obj: Iterable[T]) -> PVectorEvolver[T]: ...
-+    def is_dirty(self) -> bool: ...
-+    def persistent(self) -> PVector[T]: ...
-+    def set(self, i: int, val: T) -> PVectorEvolver[T]: ...
-+
-+
-+class PSet(AbstractSet[T], Hashable):
-+    def __contains__(self, element: object) -> bool: ...
-+    def __hash__(self) -> int: ...
-+    def __iter__(self) -> Iterator[T]: ...
-+    def __len__(self) -> int: ...
-+    def add(self, element: T) -> PSet[T]: ...
-+    def copy(self) -> PSet[T]: ...
-+    def difference(self, iterable: Iterable) -> PSet[T]: ...
-+    def discard(self, element: T) -> PSet[T]: ...
-+    def evolver(self) -> PSetEvolver[T]: ...
-+    def intersection(self, iterable: Iterable) -> PSet[T]: ...
-+    def issubset(self, iterable: Iterable) -> bool: ...
-+    def issuperset(self, iterable: Iterable) -> bool: ...
-+    def remove(self, element: T) -> PSet[T]: ...
-+    def symmetric_difference(self, iterable: Iterable[T]) -> PSet[T]: ...
-+    def union(self, iterable: Iterable[T]) -> PSet[T]: ...
-+    def update(self, iterable: Iterable[T]) -> PSet[T]: ...
-+
-+
-+class PSetEvolver(Generic[T], Sized):
-+    def __len__(self) -> int: ...
-+    def add(self, element: T) -> PSetEvolver[T]: ...
-+    def is_dirty(self) -> bool: ...
-+    def persistent(self) -> PSet[T]: ...
-+    def remove(self, element: T) -> PSetEvolver[T]: ...
-+
-+
-+class PBag(Generic[T], Sized, Hashable):
-+    def __add__(self, other: PBag[T]) -> PBag[T]: ...
-+    def __and__(self, other: PBag[T]) -> PBag[T]: ...
-+    def __contains__(self, elem: object) -> bool: ...
-+    def __hash__(self) -> int: ...
-+    def __iter__(self) -> Iterator[T]: ...
-+    def __len__(self) -> int: ...
-+    def __or__(self, other: PBag[T]) -> PBag[T]: ...
-+    def __sub__(self, other: PBag[T]) -> PBag[T]: ...
-+    def add(self, elem: T) -> PBag[T]: ...
-+    def count(self, elem: T) -> int: ...
-+    def remove(self, elem: T) -> PBag[T]: ...
-+    def update(self, iterable: Iterable[T]) -> PBag[T]: ...
-+
-+
-+class PDeque(Sequence[T], Hashable):
-+    @overload
-+    def __getitem__(self, index: int) -> T: ...
-+    @overload
-+    def __getitem__(self, index: slice) -> PDeque[T]: ...
-+    def __hash__(self) -> int: ...
-+    def __len__(self) -> int: ...
-+    def __lt__(self, other: PDeque[T]) -> bool: ...
-+    def append(self, elem: T) -> PDeque[T]: ...
-+    def appendleft(self, elem: T) -> PDeque[T]: ...
-+    def extend(self, iterable: Iterable[T]) -> PDeque[T]: ...
-+    def extendleft(self, iterable: Iterable[T]) -> PDeque[T]: ...
-+    @property
-+    def left(self) -> T: ...
-+    # The real return type is Integral according to what pyrsistent
-+    # checks at runtime but mypy doesn't deal in numeric.*:
-+    # https://github.com/python/mypy/issues/2636
-+    @property
-+    def maxlen(self) -> int: ...
-+    def pop(self, count: int = 1) -> PDeque[T]: ...
-+    def popleft(self, count: int = 1) -> PDeque[T]: ...
-+    def remove(self, elem: T) -> PDeque[T]: ...
-+    def reverse(self) -> PDeque[T]: ...
-+    @property
-+    def right(self) -> T: ...
-+    def rotate(self, steps: int) -> PDeque[T]: ...
-+
-+
-+class PList(Sequence[T], Hashable):
-+    @overload
-+    def __getitem__(self, index: int) -> T: ...
-+    @overload
-+    def __getitem__(self, index: slice) -> PList[T]: ...
-+    def __hash__(self) -> int: ...
-+    def __len__(self) -> int: ...
-+    def __lt__(self, other: PList[T]) -> bool: ...
-+    def __gt__(self, other: PList[T]) -> bool: ...
-+    def cons(self, elem: T) -> PList[T]: ...
-+    @property
-+    def first(self) -> T: ...
-+    def mcons(self, iterable: Iterable[T]) -> PList[T]: ...
-+    def remove(self, elem: T) -> PList[T]: ...
-+    @property
-+    def rest(self) -> PList[T]: ...
-+    def reverse(self) -> PList[T]: ...
-+    def split(self, index: int) -> Tuple[PList[T], PList[T]]: ...
-+
-+T_PClass = TypeVar('T_PClass', bound='PClass')
-+
-+class PClass(Hashable):
-+    def __new__(cls, **kwargs: Any): ...
-+    def set(self: T_PClass, *args: Any, **kwargs: Any) -> T_PClass: ...
-+    @classmethod
-+    def create(
-+        cls: Type[T_PClass],
-+        kwargs: Any,
-+        _factory_fields: Optional[Any] = ...,
-+        ignore_extra: bool = ...,
-+    ) -> T_PClass: ...
-+    def serialize(self, format: Optional[Any] = ...): ...
-+    def transform(self, *transformations: Any): ...
-+    def __eq__(self, other: object): ...
-+    def __ne__(self, other: object): ...
-+    def __hash__(self): ...
-+    def __reduce__(self): ...
-+    def evolver(self) -> PClassEvolver: ...
-+    def remove(self: T_PClass, name: Any) -> T_PClass: ...
-+
-+class PClassEvolver:
-+    def __init__(self, original: Any, initial_dict: Any) -> None: ...
-+    def __getitem__(self, item: Any): ...
-+    def set(self, key: Any, value: Any): ...
-+    def __setitem__(self, key: Any, value: Any) -> None: ...
-+    def remove(self, item: Any): ...
-+    def __delitem__(self, item: Any) -> None: ...
-+    def persistent(self) -> PClass: ...
-+    def __getattr__(self, item: Any): ...
-+
-+
-+
-+class CheckedPMap(PMap[KT, VT]):
-+    __key_type__: Type[KT]
-+    __value_type__: Type[VT]
-+    def __new__(cls, source: Mapping[KT, VT] = ..., size: int = ...) -> CheckedPMap: ...
-+    @classmethod
-+    def create(cls, source_data: Mapping[KT, VT], _factory_fields: Any = ...) -> CheckedPMap[KT, VT]: ...
-+    def serialize(self, format: Optional[Any] = ...) -> Dict[KT, VT]: ...
-+
-+
-+class CheckedPVector(PVector[T]):
-+    __type__: Type[T]
-+    def __new__(cls, initial: Iterable[T] = ...) -> CheckedPVector: ...
-+    @classmethod
-+    def create(cls, source_data: Iterable[T], _factory_fields: Any = ...) -> CheckedPVector[T]: ...
-+    def serialize(self, format: Optional[Any] = ...) -> List[T]: ...
-+
-+
-+class CheckedPSet(PSet[T]):
-+    __type__: Type[T]
-+    def __new__(cls, initial: Iterable[T] = ...) -> CheckedPSet: ...
-+    @classmethod
-+    def create(cls, source_data: Iterable[T], _factory_fields: Any = ...) -> CheckedPSet[T]: ...
-+    def serialize(self, format: Optional[Any] = ...) -> Set[T]: ...
-+
-+
-+class InvariantException(Exception):
-+    invariant_errors: Tuple[Any, ...] = ...  # possibly nested tuple
-+    missing_fields: Tuple[str, ...] = ...
-+    def __init__(
-+        self,
-+        error_codes: Any = ...,
-+        missing_fields: Any = ...,
-+        *args: Any,
-+        **kwargs: Any
-+    ) -> None: ...
-+
-+
-+class CheckedTypeError(TypeError):
-+    source_class: Type[Any]
-+    expected_types: Tuple[Any, ...]
-+    actual_type: Type[Any]
-+    actual_value: Any
-+    def __init__(
-+        self,
-+        source_class: Any,
-+        expected_types: Any,
-+        actual_type: Any,
-+        actual_value: Any,
-+        *args: Any,
-+        **kwargs: Any
-+    ) -> None: ...
-+
-+
-+class CheckedKeyTypeError(CheckedTypeError): ...
-+class CheckedValueTypeError(CheckedTypeError): ...
-+class CheckedType: ...
-+
-+
-+class PTypeError(TypeError):
-+    source_class: Type[Any] = ...
-+    field: str = ...
-+    expected_types: Tuple[Any, ...] = ...
-+    actual_type: Type[Any] = ...
-+    def __init__(
-+        self,
-+        source_class: Any,
-+        field: Any,
-+        expected_types: Any,
-+        actual_type: Any,
-+        *args: Any,
-+        **kwargs: Any
-+    ) -> None: ...
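
With these stubs, a type checker can follow persistent updates through user code; a minimal sketch:

    from pyrsistent import pmap
    from pyrsistent.typing import PMap

    def bump(counts: PMap[str, int], key: str) -> PMap[str, int]:
        # set() returns a new map, so the annotation stays PMap[str, int]
        return counts.set(key, counts.get(key, 0) + 1)

    assert bump(pmap({'a': 1}), 'a')['a'] == 2
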
-diff --git a/third_party/python/pyrsistent/setup.cfg b/third_party/python/pyrsistent/setup.cfg
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/setup.cfg
-@@ -0,0 +1,7 @@
-+[aliases]
-+test = pytest
-+
-+[egg_info]
-+tag_build = 
-+tag_date = 0
-+
-diff --git a/third_party/python/pyrsistent/setup.py b/third_party/python/pyrsistent/setup.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/pyrsistent/setup.py
-@@ -0,0 +1,81 @@
-+import os
-+from setuptools import setup, Extension
-+import sys
-+import platform
-+import warnings
-+import codecs
-+from distutils.command.build_ext import build_ext
-+from distutils.errors import CCompilerError
-+from distutils.errors import DistutilsPlatformError, DistutilsExecError
-+from _pyrsistent_version import __version__
-+
-+readme_path = os.path.join(os.path.dirname(__file__), 'README.rst')
-+with codecs.open(readme_path, encoding='utf8') as f:
-+    readme = f.read()
-+
-+extensions = []
-+if platform.python_implementation() == 'CPython':
-+    extensions = [Extension('pvectorc', sources=['pvectorcmodule.c'])]
-+
-+needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
-+pytest_runner = ['pytest-runner'] if needs_pytest else []
-+
-+
-+class custom_build_ext(build_ext):
-+    """Allow C extension building to fail."""
-+
-+    warning_message = """
-+********************************************************************************
-+WARNING: Could not build the %s.
-+         Pyrsistent will still work but performance may be degraded.
-+         %s
-+********************************************************************************
-+"""
-+
-+    def run(self):
-+        try:
-+            build_ext.run(self)
-+        except Exception:
-+            e = sys.exc_info()[1]
-+            sys.stderr.write('%s\n' % str(e))
-+            sys.stderr.write(self.warning_message % ("extension modules", "There was an issue with your platform configuration - see above."))
-+
-+    def build_extension(self, ext):
-+        name = ext.name
-+        try:
-+            build_ext.build_extension(self, ext)
-+        except Exception:
-+            e = sys.exc_info()[1]
-+            sys.stderr.write('%s\n' % str(e))
-+            sys.stderr.write(self.warning_message % ("%s extension module" % name, "The output above this warning shows how the compilation failed."))
-+
-+setup(
-+    name='pyrsistent',
-+    version=__version__,
-+    description='Persistent/Functional/Immutable data structures',
-+    long_description=readme,
-+    author='Tobias Gustafsson',
-+    author_email='tobias.l.gustafsson@gmail.com',
-+    url='http://github.com/tobgu/pyrsistent/',
-+    license='MIT',
-+    py_modules=['_pyrsistent_version'],
-+    classifiers=[
-+        'Intended Audience :: Developers',
-+        'License :: OSI Approved :: MIT License',
-+        'Operating System :: OS Independent',
-+        'Programming Language :: Python :: 2.7',
-+        'Programming Language :: Python :: 3.5',
-+        'Programming Language :: Python :: 3.6',
-+        'Programming Language :: Python :: 3.7',
-+        'Programming Language :: Python :: Implementation :: PyPy',
-+    ],
-+    test_suite='tests',
-+    tests_require=['pytest<5', 'hypothesis<5'],
-+    scripts=[],
-+    setup_requires=pytest_runner,
-+    ext_modules=extensions,
-+    cmdclass={'build_ext': custom_build_ext},
-+    install_requires=['six'],
-+    packages=['pyrsistent'],
-+    package_data={'pyrsistent': ['py.typed', '__init__.pyi', 'typing.pyi']},
-+)
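
Because `custom_build_ext` above downgrades compiler failures to warnings, an installed copy may or may not ship the C extension; a small runtime probe (a sketch, not part of the package):

    try:
        import pvectorc  # noqa: F401  (the optional C extension)
        impl = 'C extension'
    except ImportError:
        impl = 'pure Python'
    print('pyrsistent vector implementation:', impl)
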
-diff --git a/third_party/python/yamllint/yamllint/__init__.py b/third_party/python/yamllint/yamllint/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/__init__.py
-@@ -0,0 +1,31 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""A linter for YAML files.
-+
-+yamllint not only checks for syntax validity, but also for weirdnesses like
-+key repetition and cosmetic problems such as line length, trailing spaces,
-+indentation, etc."""
-+
-+
-+APP_NAME = 'yamllint'
-+APP_VERSION = '1.20.0'
-+APP_DESCRIPTION = __doc__
-+
-+__author__ = u'Adrien Vergé'
-+__copyright__ = u'Copyright 2016, Adrien Vergé'
-+__license__ = 'GPLv3'
-+__version__ = APP_VERSION
-diff --git a/third_party/python/yamllint/yamllint/__main__.py b/third_party/python/yamllint/yamllint/__main__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/__main__.py
-@@ -0,0 +1,4 @@
-+from yamllint.cli import run
-+
-+if __name__ == '__main__':
-+    run()
-diff --git a/third_party/python/yamllint/yamllint/cli.py b/third_party/python/yamllint/yamllint/cli.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/cli.py
-@@ -0,0 +1,206 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+from __future__ import print_function
-+
-+import argparse
-+import os
-+import platform
-+import sys
-+
-+from yamllint import APP_DESCRIPTION, APP_NAME, APP_VERSION
-+from yamllint import linter
-+from yamllint.config import YamlLintConfig, YamlLintConfigError
-+from yamllint.linter import PROBLEM_LEVELS
-+
-+
-+def find_files_recursively(items, conf):
-+    for item in items:
-+        if os.path.isdir(item):
-+            for root, dirnames, filenames in os.walk(item):
-+                for f in filenames:
-+                    filepath = os.path.join(root, f)
-+                    if conf.is_yaml_file(filepath):
-+                        yield filepath
-+        else:
-+            yield item
-+
-+
-+def supports_color():
-+    supported_platform = not (platform.system() == 'Windows' and not
-+                              ('ANSICON' in os.environ or
-+                               ('TERM' in os.environ and
-+                                os.environ['TERM'] == 'ANSI')))
-+    return (supported_platform and
-+            hasattr(sys.stdout, 'isatty') and sys.stdout.isatty())
-+
-+
-+class Format(object):
-+    @staticmethod
-+    def parsable(problem, filename):
-+        return ('%(file)s:%(line)s:%(column)s: [%(level)s] %(message)s' %
-+                {'file': filename,
-+                 'line': problem.line,
-+                 'column': problem.column,
-+                 'level': problem.level,
-+                 'message': problem.message})
-+
-+    @staticmethod
-+    def standard(problem, filename):
-+        line = '  %d:%d' % (problem.line, problem.column)
-+        line += max(12 - len(line), 0) * ' '
-+        line += problem.level
-+        line += max(21 - len(line), 0) * ' '
-+        line += problem.desc
-+        if problem.rule:
-+            line += '  (%s)' % problem.rule
-+        return line
-+
-+    @staticmethod
-+    def standard_color(problem, filename):
-+        line = '  \033[2m%d:%d\033[0m' % (problem.line, problem.column)
-+        line += max(20 - len(line), 0) * ' '
-+        if problem.level == 'warning':
-+            line += '\033[33m%s\033[0m' % problem.level
-+        else:
-+            line += '\033[31m%s\033[0m' % problem.level
-+        line += max(38 - len(line), 0) * ' '
-+        line += problem.desc
-+        if problem.rule:
-+            line += '  \033[2m(%s)\033[0m' % problem.rule
-+        return line
-+
-+
-+def show_problems(problems, file, args_format, no_warn):
-+    max_level = 0
-+    first = True
-+
-+    for problem in problems:
-+        max_level = max(max_level, PROBLEM_LEVELS[problem.level])
-+        if no_warn and (problem.level != 'error'):
-+            continue
-+        if args_format == 'parsable':
-+            print(Format.parsable(problem, file))
-+        elif args_format == 'colored' or \
-+                (args_format == 'auto' and supports_color()):
-+            if first:
-+                print('\033[4m%s\033[0m' % file)
-+                first = False
-+            print(Format.standard_color(problem, file))
-+        else:
-+            if first:
-+                print(file)
-+                first = False
-+            print(Format.standard(problem, file))
-+
-+    if not first and args_format != 'parsable':
-+        print('')
-+
-+    return max_level
-+
-+
-+def run(argv=None):
-+    parser = argparse.ArgumentParser(prog=APP_NAME,
-+                                     description=APP_DESCRIPTION)
-+    files_group = parser.add_mutually_exclusive_group(required=True)
-+    files_group.add_argument('files', metavar='FILE_OR_DIR', nargs='*',
-+                             default=(),
-+                             help='files to check')
-+    files_group.add_argument('-', action='store_true', dest='stdin',
-+                             help='read from standard input')
-+    config_group = parser.add_mutually_exclusive_group()
-+    config_group.add_argument('-c', '--config-file', dest='config_file',
-+                              action='store',
-+                              help='path to a custom configuration')
-+    config_group.add_argument('-d', '--config-data', dest='config_data',
-+                              action='store',
-+                              help='custom configuration (as YAML source)')
-+    parser.add_argument('-f', '--format',
-+                        choices=('parsable', 'standard', 'colored', 'auto'),
-+                        default='auto', help='output format')
-+    parser.add_argument('-s', '--strict',
-+                        action='store_true',
-+                        help='return non-zero exit code on warnings '
-+                             'as well as errors')
-+    parser.add_argument('--no-warnings',
-+                        action='store_true',
-+                        help='output only error level problems')
-+    parser.add_argument('-v', '--version', action='version',
-+                        version='{} {}'.format(APP_NAME, APP_VERSION))
-+
-+    args = parser.parse_args(argv)
-+
-+    # User-global config is supposed to be in ~/.config/yamllint/config
-+    if 'XDG_CONFIG_HOME' in os.environ:
-+        user_global_config = os.path.join(
-+            os.environ['XDG_CONFIG_HOME'], 'yamllint', 'config')
-+    else:
-+        user_global_config = os.path.expanduser('~/.config/yamllint/config')
-+
-+    try:
-+        if args.config_data is not None:
-+            if args.config_data != '' and ':' not in args.config_data:
-+                args.config_data = 'extends: ' + args.config_data
-+            conf = YamlLintConfig(content=args.config_data)
-+        elif args.config_file is not None:
-+            conf = YamlLintConfig(file=args.config_file)
-+        elif os.path.isfile('.yamllint'):
-+            conf = YamlLintConfig(file='.yamllint')
-+        elif os.path.isfile('.yamllint.yaml'):
-+            conf = YamlLintConfig(file='.yamllint.yaml')
-+        elif os.path.isfile('.yamllint.yml'):
-+            conf = YamlLintConfig(file='.yamllint.yml')
-+        elif os.path.isfile(user_global_config):
-+            conf = YamlLintConfig(file=user_global_config)
-+        else:
-+            conf = YamlLintConfig('extends: default')
-+    except YamlLintConfigError as e:
-+        print(e, file=sys.stderr)
-+        sys.exit(-1)
-+
-+    max_level = 0
-+
-+    for file in find_files_recursively(args.files, conf):
-+        filepath = file[2:] if file.startswith('./') else file
-+        try:
-+            with open(file) as f:
-+                problems = linter.run(f, conf, filepath)
-+        except EnvironmentError as e:
-+            print(e, file=sys.stderr)
-+            sys.exit(-1)
-+        prob_level = show_problems(problems, file, args_format=args.format,
-+                                   no_warn=args.no_warnings)
-+        max_level = max(max_level, prob_level)
-+
-+    # read yaml from stdin
-+    if args.stdin:
-+        try:
-+            problems = linter.run(sys.stdin, conf, '')
-+        except EnvironmentError as e:
-+            print(e, file=sys.stderr)
-+            sys.exit(-1)
-+        prob_level = show_problems(problems, 'stdin', args_format=args.format,
-+                                   no_warn=args.no_warnings)
-+        max_level = max(max_level, prob_level)
-+
-+    if max_level == PROBLEM_LEVELS['error']:
-+        return_code = 1
-+    elif max_level == PROBLEM_LEVELS['warning']:
-+        return_code = 2 if args.strict else 0
-+    else:
-+        return_code = 0
-+
-+    sys.exit(return_code)
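
`run()` always terminates through `sys.exit()`, so programmatic callers must catch `SystemExit`; a sketch ('document.yaml' is a hypothetical path that must exist for a real run; a missing file makes the tool exit with -1):

    from yamllint import cli

    try:
        # '-d' supplies configuration inline.
        cli.run(['-d', 'extends: default', 'document.yaml'])
    except SystemExit as e:
        # 0: clean, 1: errors found, 2: warnings found with --strict
        print('yamllint exit code:', e.code)
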
-diff --git a/third_party/python/yamllint/yamllint/conf/default.yaml b/third_party/python/yamllint/yamllint/conf/default.yaml
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/conf/default.yaml
-@@ -0,0 +1,33 @@
-+---
-+
-+yaml-files:
-+  - '*.yaml'
-+  - '*.yml'
-+  - '.yamllint'
-+
-+rules:
-+  braces: enable
-+  brackets: enable
-+  colons: enable
-+  commas: enable
-+  comments:
-+    level: warning
-+  comments-indentation:
-+    level: warning
-+  document-end: disable
-+  document-start:
-+    level: warning
-+  empty-lines: enable
-+  empty-values: disable
-+  hyphens: enable
-+  indentation: enable
-+  key-duplicates: enable
-+  key-ordering: disable
-+  line-length: enable
-+  new-line-at-end-of-file: enable
-+  new-lines: enable
-+  octal-values: disable
-+  quoted-strings: disable
-+  trailing-spaces: enable
-+  truthy:
-+    level: warning
-diff --git a/third_party/python/yamllint/yamllint/conf/relaxed.yaml b/third_party/python/yamllint/yamllint/conf/relaxed.yaml
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/conf/relaxed.yaml
-@@ -0,0 +1,29 @@
-+---
-+
-+extends: default
-+
-+rules:
-+  braces:
-+    level: warning
-+    max-spaces-inside: 1
-+  brackets:
-+    level: warning
-+    max-spaces-inside: 1
-+  colons:
-+    level: warning
-+  commas:
-+    level: warning
-+  comments: disable
-+  comments-indentation: disable
-+  document-start: disable
-+  empty-lines:
-+    level: warning
-+  hyphens:
-+    level: warning
-+  indentation:
-+    level: warning
-+    indent-sequences: consistent
-+  line-length:
-+    level: warning
-+    allow-non-breakable-inline-mappings: true
-+  truthy: disable
-diff --git a/third_party/python/yamllint/yamllint/config.py b/third_party/python/yamllint/yamllint/config.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/config.py
-@@ -0,0 +1,198 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+import os.path
-+
-+import pathspec
-+import yaml
-+
-+import yamllint.rules
-+
-+
-+class YamlLintConfigError(Exception):
-+    pass
-+
-+
-+class YamlLintConfig(object):
-+    def __init__(self, content=None, file=None):
-+        assert (content is None) ^ (file is None)
-+
-+        self.ignore = None
-+
-+        self.yaml_files = pathspec.PathSpec.from_lines(
-+            'gitwildmatch', ['*.yaml', '*.yml', '.yamllint'])
-+
-+        if file is not None:
-+            with open(file) as f:
-+                content = f.read()
-+
-+        self.parse(content)
-+        self.validate()
-+
-+    def is_file_ignored(self, filepath):
-+        return self.ignore and self.ignore.match_file(filepath)
-+
-+    def is_yaml_file(self, filepath):
-+        return self.yaml_files.match_file(filepath)
-+
-+    def enabled_rules(self, filepath):
-+        return [yamllint.rules.get(id) for id, val in self.rules.items()
-+                if val is not False and (
-+                    filepath is None or 'ignore' not in val or
-+                    not val['ignore'].match_file(filepath))]
-+
-+    def extend(self, base_config):
-+        assert isinstance(base_config, YamlLintConfig)
-+
-+        for rule in self.rules:
-+            if (isinstance(self.rules[rule], dict) and
-+                    rule in base_config.rules and
-+                    base_config.rules[rule] is not False):
-+                base_config.rules[rule].update(self.rules[rule])
-+            else:
-+                base_config.rules[rule] = self.rules[rule]
-+
-+        self.rules = base_config.rules
-+
-+        if base_config.ignore is not None:
-+            self.ignore = base_config.ignore
-+
-+    def parse(self, raw_content):
-+        try:
-+            conf = yaml.safe_load(raw_content)
-+        except Exception as e:
-+            raise YamlLintConfigError('invalid config: %s' % e)
-+
-+        if not isinstance(conf, dict):
-+            raise YamlLintConfigError('invalid config: not a dict')
-+
-+        self.rules = conf.get('rules', {})
-+        for rule in self.rules:
-+            if self.rules[rule] == 'enable':
-+                self.rules[rule] = {}
-+            elif self.rules[rule] == 'disable':
-+                self.rules[rule] = False
-+
-+        # Does this conf override another conf that we need to load?
-+        if 'extends' in conf:
-+            path = get_extended_config_file(conf['extends'])
-+            base = YamlLintConfig(file=path)
-+            try:
-+                self.extend(base)
-+            except Exception as e:
-+                raise YamlLintConfigError('invalid config: %s' % e)
-+
-+        if 'ignore' in conf:
-+            if not isinstance(conf['ignore'], str):
-+                raise YamlLintConfigError(
-+                    'invalid config: ignore should contain file patterns')
-+            self.ignore = pathspec.PathSpec.from_lines(
-+                'gitwildmatch', conf['ignore'].splitlines())
-+
-+        if 'yaml-files' in conf:
-+            if not (isinstance(conf['yaml-files'], list)
-+                    and all(isinstance(i, str) for i in conf['yaml-files'])):
-+                raise YamlLintConfigError(
-+                    'invalid config: yaml-files '
-+                    'should be a list of file patterns')
-+            self.yaml_files = pathspec.PathSpec.from_lines('gitwildmatch',
-+                                                           conf['yaml-files'])
-+
-+    def validate(self):
-+        for id in self.rules:
-+            try:
-+                rule = yamllint.rules.get(id)
-+            except Exception as e:
-+                raise YamlLintConfigError('invalid config: %s' % e)
-+
-+            self.rules[id] = validate_rule_conf(rule, self.rules[id])
-+
-+
-+def validate_rule_conf(rule, conf):
-+    if conf is False:  # disable
-+        return False
-+
-+    if isinstance(conf, dict):
-+        if ('ignore' in conf and
-+                not isinstance(conf['ignore'], pathspec.pathspec.PathSpec)):
-+            if not isinstance(conf['ignore'], str):
-+                raise YamlLintConfigError(
-+                    'invalid config: ignore should contain file patterns')
-+            conf['ignore'] = pathspec.PathSpec.from_lines(
-+                'gitwildmatch', conf['ignore'].splitlines())
-+
-+        if 'level' not in conf:
-+            conf['level'] = 'error'
-+        elif conf['level'] not in ('error', 'warning'):
-+            raise YamlLintConfigError(
-+                'invalid config: level should be "error" or "warning"')
-+
-+        options = getattr(rule, 'CONF', {})
-+        options_default = getattr(rule, 'DEFAULT', {})
-+        for optkey in conf:
-+            if optkey in ('ignore', 'level'):
-+                continue
-+            if optkey not in options:
-+                raise YamlLintConfigError(
-+                    'invalid config: unknown option "%s" for rule "%s"' %
-+                    (optkey, rule.ID))
-+            # Example: CONF = {option: (bool, 'mixed')}
-+            #          → {option: true}         → {option: mixed}
-+            if isinstance(options[optkey], tuple):
-+                if (conf[optkey] not in options[optkey] and
-+                        type(conf[optkey]) not in options[optkey]):
-+                    raise YamlLintConfigError(
-+                        'invalid config: option "%s" of "%s" should be in %s'
-+                        % (optkey, rule.ID, options[optkey]))
-+            # Example: CONF = {option: ['flag1', 'flag2']}
-+            #          → {option: [flag1]}      → {option: [flag1, flag2]}
-+            elif isinstance(options[optkey], list):
-+                if (type(conf[optkey]) is not list or
-+                        any(flag not in options[optkey]
-+                            for flag in conf[optkey])):
-+                    raise YamlLintConfigError(
-+                        ('invalid config: option "%s" of "%s" should only '
-+                         'contain values in %s')
-+                        % (optkey, rule.ID, str(options[optkey])))
-+            # Example: CONF = {option: int}
-+            #          → {option: 42}
-+            else:
-+                if not isinstance(conf[optkey], options[optkey]):
-+                    raise YamlLintConfigError(
-+                        'invalid config: option "%s" of "%s" should be %s'
-+                        % (optkey, rule.ID, options[optkey].__name__))
-+        for optkey in options:
-+            if optkey not in conf:
-+                conf[optkey] = options_default[optkey]
-+    else:
-+        raise YamlLintConfigError(('invalid config: rule "%s": should be '
-+                                   'either "enable", "disable" or a dict')
-+                                  % rule.ID)
-+
-+    return conf
-+
-+
-+def get_extended_config_file(name):
-+    # Is it a standard conf shipped with yamllint...
-+    if '/' not in name:
-+        std_conf = os.path.join(os.path.dirname(os.path.realpath(__file__)),
-+                                'conf', name + '.yaml')
-+
-+        if os.path.isfile(std_conf):
-+            return std_conf
-+
-+    # or a custom conf on filesystem?
-+    return name
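
A sketch of building a configuration from inline YAML, extending the bundled `default` preset shown earlier (the `max` option of `line-length` is assumed from the upstream rule set):

    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('extends: default\n'
                          'rules:\n'
                          '  line-length:\n'
                          '    max: 120\n')
    assert conf.is_yaml_file('foo.yml')         # default yaml-files patterns
    assert not conf.is_file_ignored('foo.yml')  # no ignore patterns set
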
-diff --git a/third_party/python/yamllint/yamllint/linter.py b/third_party/python/yamllint/yamllint/linter.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/linter.py
-@@ -0,0 +1,240 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+import re
-+
-+import yaml
-+
-+from yamllint import parser
-+
-+
-+PROBLEM_LEVELS = {
-+    0: None,
-+    1: 'warning',
-+    2: 'error',
-+    None: 0,
-+    'warning': 1,
-+    'error': 2,
-+}
-+
-+
-+class LintProblem(object):
-+    """Represents a linting problem found by yamllint."""
-+    def __init__(self, line, column, desc='<no description>', rule=None):
-+        #: Line on which the problem was found (starting at 1)
-+        self.line = line
-+        #: Column on which the problem was found (starting at 1)
-+        self.column = column
-+        #: Human-readable description of the problem
-+        self.desc = desc
-+        #: Identifier of the rule that detected the problem
-+        self.rule = rule
-+        self.level = None
-+
-+    @property
-+    def message(self):
-+        if self.rule is not None:
-+            return '{} ({})'.format(self.desc, self.rule)
-+        return self.desc
-+
-+    def __eq__(self, other):
-+        return (self.line == other.line and
-+                self.column == other.column and
-+                self.rule == other.rule)
-+
-+    def __lt__(self, other):
-+        return (self.line < other.line or
-+                (self.line == other.line and self.column < other.column))
-+
-+    def __repr__(self):
-+        return '%d:%d: %s' % (self.line, self.column, self.message)
-+
-+
-+def get_cosmetic_problems(buffer, conf, filepath):
-+    rules = conf.enabled_rules(filepath)
-+
-+    # Split token rules from line rules
-+    token_rules = [r for r in rules if r.TYPE == 'token']
-+    comment_rules = [r for r in rules if r.TYPE == 'comment']
-+    line_rules = [r for r in rules if r.TYPE == 'line']
-+
-+    context = {}
-+    for rule in token_rules:
-+        context[rule.ID] = {}
-+
-+    class DisableDirective:
-+        def __init__(self):
-+            self.rules = set()
-+            self.all_rules = {r.ID for r in rules}
-+
-+        def process_comment(self, comment):
-+            try:
-+                comment = str(comment)
-+            except UnicodeError:
-+                return  # this certainly wasn't a yamllint directive comment
-+
-+            if re.match(r'^# yamllint disable( rule:\S+)*\s*$', comment):
-+                rules = [item[5:] for item in comment[18:].split(' ')][1:]
-+                if len(rules) == 0:
-+                    self.rules = self.all_rules.copy()
-+                else:
-+                    for id in rules:
-+                        if id in self.all_rules:
-+                            self.rules.add(id)
-+
-+            elif re.match(r'^# yamllint enable( rule:\S+)*\s*$', comment):
-+                rules = [item[5:] for item in comment[17:].split(' ')][1:]
-+                if len(rules) == 0:
-+                    self.rules.clear()
-+                else:
-+                    for id in rules:
-+                        self.rules.discard(id)
-+
-+        def is_disabled_by_directive(self, problem):
-+            return problem.rule in self.rules
-+
-+    class DisableLineDirective(DisableDirective):
-+        def process_comment(self, comment):
-+            try:
-+                comment = str(comment)
-+            except UnicodeError:
-+                return  # this certainly wasn't a yamllint directive comment
-+
-+            if re.match(r'^# yamllint disable-line( rule:\S+)*\s*$', comment):
-+                rules = [item[5:] for item in comment[23:].split(' ')][1:]
-+                if len(rules) == 0:
-+                    self.rules = self.all_rules.copy()
-+                else:
-+                    for id in rules:
-+                        if id in self.all_rules:
-+                            self.rules.add(id)
-+
-+    # Use a cache to store problems and flush it only when an end of line is
-+    # found. This allows the use of yamllint directives to disable some rules
-+    # on some lines.
-+    cache = []
-+    disabled = DisableDirective()
-+    disabled_for_line = DisableLineDirective()
-+    disabled_for_next_line = DisableLineDirective()
-+
-+    for elem in parser.token_or_comment_or_line_generator(buffer):
-+        if isinstance(elem, parser.Token):
-+            for rule in token_rules:
-+                rule_conf = conf.rules[rule.ID]
-+                for problem in rule.check(rule_conf,
-+                                          elem.curr, elem.prev, elem.next,
-+                                          elem.nextnext,
-+                                          context[rule.ID]):
-+                    problem.rule = rule.ID
-+                    problem.level = rule_conf['level']
-+                    cache.append(problem)
-+        elif isinstance(elem, parser.Comment):
-+            for rule in comment_rules:
-+                rule_conf = conf.rules[rule.ID]
-+                for problem in rule.check(rule_conf, elem):
-+                    problem.rule = rule.ID
-+                    problem.level = rule_conf['level']
-+                    cache.append(problem)
-+
-+            disabled.process_comment(elem)
-+            if elem.is_inline():
-+                disabled_for_line.process_comment(elem)
-+            else:
-+                disabled_for_next_line.process_comment(elem)
-+        elif isinstance(elem, parser.Line):
-+            for rule in line_rules:
-+                rule_conf = conf.rules[rule.ID]
-+                for problem in rule.check(rule_conf, elem):
-+                    problem.rule = rule.ID
-+                    problem.level = rule_conf['level']
-+                    cache.append(problem)
-+
-+            # This is the last token/comment/line of this line, so flush the
-+            # problems found (filtering them according to the directives)
-+            for problem in cache:
-+                if not (disabled_for_line.is_disabled_by_directive(problem) or
-+                        disabled.is_disabled_by_directive(problem)):
-+                    yield problem
-+
-+            disabled_for_line = disabled_for_next_line
-+            disabled_for_next_line = DisableLineDirective()
-+            cache = []
-+
-+
-+def get_syntax_error(buffer):
-+    try:
-+        list(yaml.parse(buffer, Loader=yaml.BaseLoader))
-+    except yaml.error.MarkedYAMLError as e:
-+        problem = LintProblem(e.problem_mark.line + 1,
-+                              e.problem_mark.column + 1,
-+                              'syntax error: ' + e.problem + ' (syntax)')
-+        problem.level = 'error'
-+        return problem
-+
-+
-+def _run(buffer, conf, filepath):
-+    assert hasattr(buffer, '__getitem__'), \
-+        '_run() argument must be a buffer, not a stream'
-+
-+    first_line = next(parser.line_generator(buffer)).content
-+    if re.match(r'^#\s*yamllint disable-file\s*$', first_line):
-+        return
-+
-+    # If the document contains a syntax error, save it and yield it at the
-+    # right line
-+    syntax_error = get_syntax_error(buffer)
-+
-+    for problem in get_cosmetic_problems(buffer, conf, filepath):
-+        # Insert the syntax error (if any) at the right place...
-+        if (syntax_error and syntax_error.line <= problem.line and
-+                syntax_error.column <= problem.column):
-+            yield syntax_error
-+
-+            # If there is already a yamllint error at the same place, discard
-+            # it as it is probably redundant (and maybe it's just a 'warning',
-+            # in which case the script won't even exit with a failure status).
-+            if (syntax_error.line == problem.line and
-+                    syntax_error.column == problem.column):
-+                syntax_error = None
-+                continue
-+
-+            syntax_error = None
-+
-+        yield problem
-+
-+    if syntax_error:
-+        yield syntax_error
-+
-+
-+def run(input, conf, filepath=None):
-+    """Lints a YAML source.
-+
-+    Returns a generator of LintProblem objects.
-+
-+    :param input: buffer, string or stream to read from
-+    :param conf: yamllint configuration object
-+    """
-+    if conf.is_file_ignored(filepath):
-+        return ()
-+
-+    if isinstance(input, (type(b''), type(u''))):  # compat with Python 2 & 3
-+        return _run(input, conf, filepath)
-+    elif hasattr(input, 'read'):  # Python 2's file or Python 3's io.IOBase
-+        # We need to have everything in memory to parse correctly
-+        content = input.read()
-+        return _run(content, conf, filepath)
-+    else:
-+        raise TypeError('input should be a string or a stream')
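
A sketch tying the linter together: `run()` yields `LintProblem` objects, and the directive comments handled above can silence individual lines (exact messages depend on the enabled rules):

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('extends: default')
    buf = ('---\n'
           'key: value\n'
           'key: other  # yamllint disable-line rule:key-duplicates\n')
    for problem in linter.run(buf, conf):
        # LintProblem.__repr__ prints "line:column: message (rule)"
        print(problem)
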
-diff --git a/third_party/python/yamllint/yamllint/parser.py b/third_party/python/yamllint/yamllint/parser.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/parser.py
-@@ -0,0 +1,161 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+import yaml
-+
-+
-+class Line(object):
-+    def __init__(self, line_no, buffer, start, end):
-+        self.line_no = line_no
-+        self.start = start
-+        self.end = end
-+        self.buffer = buffer
-+
-+    @property
-+    def content(self):
-+        return self.buffer[self.start:self.end]
-+
-+
-+class Token(object):
-+    def __init__(self, line_no, curr, prev, next, nextnext):
-+        self.line_no = line_no
-+        self.curr = curr
-+        self.prev = prev
-+        self.next = next
-+        self.nextnext = nextnext
-+
-+
-+class Comment(object):
-+    def __init__(self, line_no, column_no, buffer, pointer,
-+                 token_before=None, token_after=None, comment_before=None):
-+        self.line_no = line_no
-+        self.column_no = column_no
-+        self.buffer = buffer
-+        self.pointer = pointer
-+        self.token_before = token_before
-+        self.token_after = token_after
-+        self.comment_before = comment_before
-+
-+    def __str__(self):
-+        end = self.buffer.find('\n', self.pointer)
-+        if end == -1:
-+            end = self.buffer.find('\0', self.pointer)
-+        if end != -1:
-+            return self.buffer[self.pointer:end]
-+        return self.buffer[self.pointer:]
-+
-+    def __eq__(self, other):
-+        return (isinstance(other, Comment) and
-+                self.line_no == other.line_no and
-+                self.column_no == other.column_no and
-+                str(self) == str(other))
-+
-+    def is_inline(self):
-+        return (
-+            not isinstance(self.token_before, yaml.StreamStartToken) and
-+            self.line_no == self.token_before.end_mark.line + 1 and
-+            # sometimes token end marks are on the next line
-+            self.buffer[self.token_before.end_mark.pointer - 1] != '\n'
-+        )
-+
-+
-+def line_generator(buffer):
-+    line_no = 1
-+    cur = 0
-+    next = buffer.find('\n')
-+    while next != -1:
-+        if next > 0 and buffer[next - 1] == '\r':
-+            yield Line(line_no, buffer, start=cur, end=next - 1)
-+        else:
-+            yield Line(line_no, buffer, start=cur, end=next)
-+        cur = next + 1
-+        next = buffer.find('\n', cur)
-+        line_no += 1
-+
-+    yield Line(line_no, buffer, start=cur, end=len(buffer))
-+
-+
-+def comments_between_tokens(token1, token2):
-+    """Find all comments between two tokens"""
-+    if token2 is None:
-+        buf = token1.end_mark.buffer[token1.end_mark.pointer:]
-+    elif (token1.end_mark.line == token2.start_mark.line and
-+          not isinstance(token1, yaml.StreamStartToken) and
-+          not isinstance(token2, yaml.StreamEndToken)):
-+        return
-+    else:
-+        buf = token1.end_mark.buffer[token1.end_mark.pointer:
-+                                     token2.start_mark.pointer]
-+
-+    line_no = token1.end_mark.line + 1
-+    column_no = token1.end_mark.column + 1
-+    pointer = token1.end_mark.pointer
-+
-+    comment_before = None
-+    for line in buf.split('\n'):
-+        pos = line.find('#')
-+        if pos != -1:
-+            comment = Comment(line_no, column_no + pos,
-+                              token1.end_mark.buffer, pointer + pos,
-+                              token1, token2, comment_before)
-+            yield comment
-+
-+            comment_before = comment
-+
-+        pointer += len(line) + 1
-+        line_no += 1
-+        column_no = 1
-+
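-+# Illustrative example (not part of the vendored module): in the buffer
-+# 'key: 1  # inline\n# block\nother: 2', the gap between the scalar
-+# token for '1' and the key token for 'other' yields two Comment
-+# objects, the second chained to the first through comment_before.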
-+
-+def token_or_comment_generator(buffer):
-+    yaml_loader = yaml.BaseLoader(buffer)
-+
-+    try:
-+        prev = None
-+        curr = yaml_loader.get_token()
-+        while curr is not None:
-+            next = yaml_loader.get_token()
-+            nextnext = (yaml_loader.peek_token()
-+                        if yaml_loader.check_token() else None)
-+
-+            yield Token(curr.start_mark.line + 1, curr, prev, next, nextnext)
-+
-+            for comment in comments_between_tokens(curr, next):
-+                yield comment
-+
-+            prev = curr
-+            curr = next
-+
-+    except yaml.scanner.ScannerError:
-+        pass
-+
-+
-+def token_or_comment_or_line_generator(buffer):
-+    """Generator that mixes tokens and lines, ordering them by line number"""
-+    tok_or_com_gen = token_or_comment_generator(buffer)
-+    line_gen = line_generator(buffer)
-+
-+    tok_or_com = next(tok_or_com_gen, None)
-+    line = next(line_gen, None)
-+
-+    while tok_or_com is not None or line is not None:
-+        if tok_or_com is None or (line is not None and
-+                                  tok_or_com.line_no > line.line_no):
-+            yield line
-+            line = next(line_gen, None)
-+        else:
-+            yield tok_or_com
-+            tok_or_com = next(tok_or_com_gen, None)
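-+
-+
-+# Consumption sketch (illustrative, not part of the vendored module):
-+# yamllint's linter walks this merged stream and dispatches on element
-+# type, roughly:
-+#
-+#     for elem in token_or_comment_or_line_generator(buffer):
-+#         if isinstance(elem, Line):
-+#             ...  # apply 'line' rules
-+#         elif isinstance(elem, Comment):
-+#             ...  # apply 'comment' rules
-+#         else:
-+#             ...  # apply 'token' rules to the Token wrapper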
-diff --git a/third_party/python/yamllint/yamllint/rules/__init__.py b/third_party/python/yamllint/yamllint/rules/__init__.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/__init__.py
-@@ -0,0 +1,70 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+from yamllint.rules import (
-+    braces,
-+    brackets,
-+    colons,
-+    commas,
-+    comments,
-+    comments_indentation,
-+    document_end,
-+    document_start,
-+    empty_lines,
-+    empty_values,
-+    hyphens,
-+    indentation,
-+    key_duplicates,
-+    key_ordering,
-+    line_length,
-+    new_line_at_end_of_file,
-+    new_lines,
-+    octal_values,
-+    quoted_strings,
-+    trailing_spaces,
-+    truthy,
-+)
-+
-+_RULES = {
-+    braces.ID: braces,
-+    brackets.ID: brackets,
-+    colons.ID: colons,
-+    commas.ID: commas,
-+    comments.ID: comments,
-+    comments_indentation.ID: comments_indentation,
-+    document_end.ID: document_end,
-+    document_start.ID: document_start,
-+    empty_lines.ID: empty_lines,
-+    empty_values.ID: empty_values,
-+    hyphens.ID: hyphens,
-+    indentation.ID: indentation,
-+    key_duplicates.ID: key_duplicates,
-+    key_ordering.ID: key_ordering,
-+    line_length.ID: line_length,
-+    new_line_at_end_of_file.ID: new_line_at_end_of_file,
-+    new_lines.ID: new_lines,
-+    octal_values.ID: octal_values,
-+    quoted_strings.ID: quoted_strings,
-+    trailing_spaces.ID: trailing_spaces,
-+    truthy.ID: truthy,
-+}
-+
-+
-+def get(id):
-+    if id not in _RULES:
-+        raise ValueError('no such rule: "%s"' % id)
-+
-+    return _RULES[id]
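-+
-+
-+# Usage sketch (illustrative, not part of the vendored module):
-+#
-+#     rule = get('braces')   # returns the yamllint.rules.braces module
-+#     rule.check(conf, token, prev, next, nextnext, context)
-+#
-+# An unknown ID such as get('no-such-rule') raises ValueError.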
-diff --git a/third_party/python/yamllint/yamllint/rules/braces.py b/third_party/python/yamllint/yamllint/rules/braces.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/braces.py
-@@ -0,0 +1,143 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to control the number of spaces inside braces (``{`` and ``}``).
-+
-+.. rubric:: Options
-+
-+* ``min-spaces-inside`` defines the minimal number of spaces required inside
-+  braces.
-+* ``max-spaces-inside`` defines the maximal number of spaces allowed inside
-+  braces.
-+* ``min-spaces-inside-empty`` defines the minimal number of spaces required
-+  inside empty braces.
-+* ``max-spaces-inside-empty`` defines the maximal number of spaces allowed
-+  inside empty braces.
-+
-+.. rubric:: Examples
-+
-+#. With ``braces: {min-spaces-inside: 0, max-spaces-inside: 0}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object: {key1: 4, key2: 8}
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: { key1: 4, key2: 8 }
-+
-+#. With ``braces: {min-spaces-inside: 1, max-spaces-inside: 3}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object: { key1: 4, key2: 8 }
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object: { key1: 4, key2: 8   }
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: {    key1: 4, key2: 8   }
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: {key1: 4, key2: 8 }
-+
-+#. With ``braces: {min-spaces-inside-empty: 0, max-spaces-inside-empty: 0}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object: {}
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: { }
-+
-+#. With ``braces: {min-spaces-inside-empty: 1, max-spaces-inside-empty: -1}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object: {         }
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: {}
-+"""
-+
-+
-+import yaml
-+
-+from yamllint.rules.common import spaces_after, spaces_before
-+
-+
-+ID = 'braces'
-+TYPE = 'token'
-+CONF = {'min-spaces-inside': int,
-+        'max-spaces-inside': int,
-+        'min-spaces-inside-empty': int,
-+        'max-spaces-inside-empty': int}
-+DEFAULT = {'min-spaces-inside': 0,
-+           'max-spaces-inside': 0,
-+           'min-spaces-inside-empty': -1,
-+           'max-spaces-inside-empty': -1}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if (isinstance(token, yaml.FlowMappingStartToken) and
-+            isinstance(next, yaml.FlowMappingEndToken)):
-+        problem = spaces_after(token, prev, next,
-+                               min=(conf['min-spaces-inside-empty']
-+                                    if conf['min-spaces-inside-empty'] != -1
-+                                    else conf['min-spaces-inside']),
-+                               max=(conf['max-spaces-inside-empty']
-+                                    if conf['max-spaces-inside-empty'] != -1
-+                                    else conf['max-spaces-inside']),
-+                               min_desc='too few spaces inside empty braces',
-+                               max_desc='too many spaces inside empty braces')
-+        if problem is not None:
-+            yield problem
-+
-+    elif isinstance(token, yaml.FlowMappingStartToken):
-+        problem = spaces_after(token, prev, next,
-+                               min=conf['min-spaces-inside'],
-+                               max=conf['max-spaces-inside'],
-+                               min_desc='too few spaces inside braces',
-+                               max_desc='too many spaces inside braces')
-+        if problem is not None:
-+            yield problem
-+
-+    elif (isinstance(token, yaml.FlowMappingEndToken) and
-+            (prev is None or
-+             not isinstance(prev, yaml.FlowMappingStartToken))):
-+        problem = spaces_before(token, prev, next,
-+                                min=conf['min-spaces-inside'],
-+                                max=conf['max-spaces-inside'],
-+                                min_desc='too few spaces inside braces',
-+                                max_desc='too many spaces inside braces')
-+        if problem is not None:
-+            yield problem
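-+
-+
-+# Illustrative configuration (an assumption about typical use, not part
-+# of the vendored file): the rule is enabled from a .yamllint config,
-+# e.g.
-+#
-+#     rules:
-+#       braces:
-+#         min-spaces-inside: 0
-+#         max-spaces-inside: 1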
-diff --git a/third_party/python/yamllint/yamllint/rules/brackets.py b/third_party/python/yamllint/yamllint/rules/brackets.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/brackets.py
-@@ -0,0 +1,145 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to control the number of spaces inside brackets (``[`` and
-+``]``).
-+
-+.. rubric:: Options
-+
-+* ``min-spaces-inside`` defines the minimal number of spaces required inside
-+  brackets.
-+* ``max-spaces-inside`` defines the maximal number of spaces allowed inside
-+  brackets.
-+* ``min-spaces-inside-empty`` defines the minimal number of spaces required
-+  inside empty brackets.
-+* ``max-spaces-inside-empty`` defines the maximal number of spaces allowed
-+  inside empty brackets.
-+
-+.. rubric:: Examples
-+
-+#. With ``brackets: {min-spaces-inside: 0, max-spaces-inside: 0}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object: [1, 2, abc]
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: [ 1, 2, abc ]
-+
-+#. With ``brackets: {min-spaces-inside: 1, max-spaces-inside: 3}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object: [ 1, 2, abc ]
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object: [ 1, 2, abc   ]
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: [    1, 2, abc   ]
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: [1, 2, abc ]
-+
-+#. With ``brackets: {min-spaces-inside-empty: 0, max-spaces-inside-empty: 0}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object: []
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: [ ]
-+
-+#. With ``brackets: {min-spaces-inside-empty: 1, max-spaces-inside-empty: -1}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object: [         ]
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: []
-+"""
-+
-+
-+import yaml
-+
-+from yamllint.rules.common import spaces_after, spaces_before
-+
-+
-+ID = 'brackets'
-+TYPE = 'token'
-+CONF = {'min-spaces-inside': int,
-+        'max-spaces-inside': int,
-+        'min-spaces-inside-empty': int,
-+        'max-spaces-inside-empty': int}
-+DEFAULT = {'min-spaces-inside': 0,
-+           'max-spaces-inside': 0,
-+           'min-spaces-inside-empty': -1,
-+           'max-spaces-inside-empty': -1}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if (isinstance(token, yaml.FlowSequenceStartToken) and
-+            isinstance(next, yaml.FlowSequenceEndToken)):
-+        problem = spaces_after(token, prev, next,
-+                               min=(conf['min-spaces-inside-empty']
-+                                    if conf['min-spaces-inside-empty'] != -1
-+                                    else conf['min-spaces-inside']),
-+                               max=(conf['max-spaces-inside-empty']
-+                                    if conf['max-spaces-inside-empty'] != -1
-+                                    else conf['max-spaces-inside']),
-+                               min_desc='too few spaces inside empty brackets',
-+                               max_desc=('too many spaces inside empty '
-+                                         'brackets'))
-+        if problem is not None:
-+            yield problem
-+
-+    elif isinstance(token, yaml.FlowSequenceStartToken):
-+        problem = spaces_after(token, prev, next,
-+                               min=conf['min-spaces-inside'],
-+                               max=conf['max-spaces-inside'],
-+                               min_desc='too few spaces inside brackets',
-+                               max_desc='too many spaces inside brackets')
-+        if problem is not None:
-+            yield problem
-+
-+    elif (isinstance(token, yaml.FlowSequenceEndToken) and
-+            (prev is None or
-+             not isinstance(prev, yaml.FlowSequenceStartToken))):
-+        problem = spaces_before(token, prev, next,
-+                                min=conf['min-spaces-inside'],
-+                                max=conf['max-spaces-inside'],
-+                                min_desc='too few spaces inside brackets',
-+                                max_desc='too many spaces inside brackets')
-+        if problem is not None:
-+            yield problem
-diff --git a/third_party/python/yamllint/yamllint/rules/colons.py b/third_party/python/yamllint/yamllint/rules/colons.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/colons.py
-@@ -0,0 +1,105 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to control the number of spaces before and after colons (``:``).
-+
-+.. rubric:: Options
-+
-+* ``max-spaces-before`` defines the maximal number of spaces allowed before
-+  colons (use ``-1`` to disable).
-+* ``max-spaces-after`` defines the maximal number of spaces allowed after
-+  colons (use ``-1`` to disable).
-+
-+.. rubric:: Examples
-+
-+#. With ``colons: {max-spaces-before: 0, max-spaces-after: 1}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object:
-+      - a
-+      - b
-+    key: value
-+
-+#. With ``colons: {max-spaces-before: 1}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    object :
-+      - a
-+      - b
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object  :
-+      - a
-+      - b
-+
-+#. With ``colons: {max-spaces-after: 2}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    first:  1
-+    second: 2
-+    third:  3
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    first: 1
-+    2nd:   2
-+    third: 3
-+"""
-+
-+
-+import yaml
-+
-+from yamllint.rules.common import is_explicit_key, spaces_after, spaces_before
-+
-+
-+ID = 'colons'
-+TYPE = 'token'
-+CONF = {'max-spaces-before': int,
-+        'max-spaces-after': int}
-+DEFAULT = {'max-spaces-before': 0,
-+           'max-spaces-after': 1}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if isinstance(token, yaml.ValueToken):
-+        problem = spaces_before(token, prev, next,
-+                                max=conf['max-spaces-before'],
-+                                max_desc='too many spaces before colon')
-+        if problem is not None:
-+            yield problem
-+
-+        problem = spaces_after(token, prev, next,
-+                               max=conf['max-spaces-after'],
-+                               max_desc='too many spaces after colon')
-+        if problem is not None:
-+            yield problem
-+
-+    if isinstance(token, yaml.KeyToken) and is_explicit_key(token):
-+        problem = spaces_after(token, prev, next,
-+                               max=conf['max-spaces-after'],
-+                               max_desc='too many spaces after question mark')
-+        if problem is not None:
-+            yield problem
-diff --git a/third_party/python/yamllint/yamllint/rules/commas.py b/third_party/python/yamllint/yamllint/rules/commas.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/commas.py
-@@ -0,0 +1,131 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to control the number of spaces before and after commas (``,``).
-+
-+.. rubric:: Options
-+
-+* ``max-spaces-before`` defines the maximal number of spaces allowed before
-+  commas (use ``-1`` to disable).
-+* ``min-spaces-after`` defines the minimal number of spaces required after
-+  commas.
-+* ``max-spaces-after`` defines the maximal number of spaces allowed after
-+  commas (use ``-1`` to disable).
-+
-+.. rubric:: Examples
-+
-+#. With ``commas: {max-spaces-before: 0}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    strange var:
-+      [10, 20, 30, {x: 1, y: 2}]
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    strange var:
-+      [10, 20 , 30, {x: 1, y: 2}]
-+
-+#. With ``commas: {max-spaces-before: 2}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    strange var:
-+      [10  , 20 , 30,  {x: 1  , y: 2}]
-+
-+#. With ``commas: {max-spaces-before: -1}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    strange var:
-+      [10,
-+       20   , 30
-+       ,   {x: 1, y: 2}]
-+
-+#. With ``commas: {min-spaces-after: 1, max-spaces-after: 1}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    strange var:
-+      [10, 20,30, {x: 1, y: 2}]
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    strange var:
-+      [10, 20,30,   {x: 1,   y: 2}]
-+
-+#. With ``commas: {min-spaces-after: 1, max-spaces-after: 3}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    strange var:
-+      [10, 20,  30,  {x: 1,   y: 2}]
-+
-+#. With ``commas: {min-spaces-after: 0, max-spaces-after: 1}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    strange var:
-+      [10, 20,30, {x: 1, y: 2}]
-+"""
-+
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+from yamllint.rules.common import spaces_after, spaces_before
-+
-+
-+ID = 'commas'
-+TYPE = 'token'
-+CONF = {'max-spaces-before': int,
-+        'min-spaces-after': int,
-+        'max-spaces-after': int}
-+DEFAULT = {'max-spaces-before': 0,
-+           'min-spaces-after': 1,
-+           'max-spaces-after': 1}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if isinstance(token, yaml.FlowEntryToken):
-+        if (prev is not None and conf['max-spaces-before'] != -1 and
-+                prev.end_mark.line < token.start_mark.line):
-+            yield LintProblem(token.start_mark.line + 1,
-+                              max(1, token.start_mark.column),
-+                              'too many spaces before comma')
-+        else:
-+            problem = spaces_before(token, prev, next,
-+                                    max=conf['max-spaces-before'],
-+                                    max_desc='too many spaces before comma')
-+            if problem is not None:
-+                yield problem
-+
-+        problem = spaces_after(token, prev, next,
-+                               min=conf['min-spaces-after'],
-+                               max=conf['max-spaces-after'],
-+                               min_desc='too few spaces after comma',
-+                               max_desc='too many spaces after comma')
-+        if problem is not None:
-+            yield problem
-diff --git a/third_party/python/yamllint/yamllint/rules/comments.py b/third_party/python/yamllint/yamllint/rules/comments.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/comments.py
-@@ -0,0 +1,104 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to control the position and formatting of comments.
-+
-+.. rubric:: Options
-+
-+* Use ``require-starting-space`` to require a space character right after the
-+  ``#``. Set to ``true`` to enable, ``false`` to disable.
-+* Use ``ignore-shebangs`` to ignore a
-+  `shebang <https://en.wikipedia.org/wiki/Shebang_(Unix)>`_ at the beginning of
-+  the file when ``require-starting-space`` is set.
-+* ``min-spaces-from-content`` is used to visually separate inline comments from
-+  content. It defines the minimal required number of spaces between a comment
-+  and its preceding content.
-+
-+.. rubric:: Examples
-+
-+#. With ``comments: {require-starting-space: true}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    # This sentence
-+    # is a block comment
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    ##############################
-+    ## This is some documentation
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    #This sentence
-+    #is a block comment
-+
-+#. With ``comments: {min-spaces-from-content: 2}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    x = 2 ^ 127 - 1  # Mersenne prime number
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    x = 2 ^ 127 - 1 # Mersenne prime number
-+"""
-+
-+
-+import re
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'comments'
-+TYPE = 'comment'
-+CONF = {'require-starting-space': bool,
-+        'ignore-shebangs': bool,
-+        'min-spaces-from-content': int}
-+DEFAULT = {'require-starting-space': True,
-+           'ignore-shebangs': True,
-+           'min-spaces-from-content': 2}
-+
-+
-+def check(conf, comment):
-+    if (conf['min-spaces-from-content'] != -1 and comment.is_inline() and
-+            comment.pointer - comment.token_before.end_mark.pointer <
-+            conf['min-spaces-from-content']):
-+        yield LintProblem(comment.line_no, comment.column_no,
-+                          'too few spaces before comment')
-+
-+    if conf['require-starting-space']:
-+        text_start = comment.pointer + 1
-+        while (text_start < len(comment.buffer) and
-+               comment.buffer[text_start] == '#'):
-+            text_start += 1
-+        if text_start < len(comment.buffer):
-+            if (conf['ignore-shebangs'] and
-+                    comment.line_no == 1 and
-+                    comment.column_no == 1 and
-+                    re.match(r'^!\S', comment.buffer[text_start:])):
-+                return
-+            elif comment.buffer[text_start] not in (' ', '\n', '\0'):
-+                column = comment.column_no + text_start - comment.pointer
-+                yield LintProblem(comment.line_no,
-+                                  column,
-+                                  'missing starting space in comment')
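-+
-+
-+# Illustrative example (not part of the vendored module): with
-+# ignore-shebangs enabled, a first-line '#!/usr/bin/env foo' is exempt
-+# from the starting-space check because, after skipping the leading '#'
-+# run, the remaining text matches r'^!\S'.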
-diff --git a/third_party/python/yamllint/yamllint/rules/comments_indentation.py b/third_party/python/yamllint/yamllint/rules/comments_indentation.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/comments_indentation.py
-@@ -0,0 +1,139 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to force comments to be indented like content.
-+
-+.. rubric:: Examples
-+
-+#. With ``comments-indentation: {}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    # Fibonacci
-+    [0, 1, 1, 2, 3, 5]
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+      # Fibonacci
-+    [0, 1, 1, 2, 3, 5]
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    list:
-+        - 2
-+        - 3
-+        # - 4
-+        - 5
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    list:
-+        - 2
-+        - 3
-+    #    - 4
-+        - 5
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    # This is the first object
-+    obj1:
-+      - item A
-+      # - item B
-+    # This is the second object
-+    obj2: []
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    # This sentence
-+    # is a block comment
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    # This sentence
-+     # is a block comment
-+"""
-+
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+from yamllint.rules.common import get_line_indent
-+
-+
-+ID = 'comments-indentation'
-+TYPE = 'comment'
-+
-+
-+# Case A:
-+#
-+#     prev: line:
-+#       # commented line
-+#       current: line
-+#
-+# Case B:
-+#
-+#       prev: line
-+#       # commented line 1
-+#     # commented line 2
-+#     current: line
-+
-+def check(conf, comment):
-+    # Only check block comments
-+    if (not isinstance(comment.token_before, yaml.StreamStartToken) and
-+            comment.token_before.end_mark.line + 1 == comment.line_no):
-+        return
-+
-+    next_line_indent = comment.token_after.start_mark.column
-+    if isinstance(comment.token_after, yaml.StreamEndToken):
-+        next_line_indent = 0
-+
-+    if isinstance(comment.token_before, yaml.StreamStartToken):
-+        prev_line_indent = 0
-+    else:
-+        prev_line_indent = get_line_indent(comment.token_before)
-+
-+    # In the following case only the next line indent is valid:
-+    #     list:
-+    #         # comment
-+    #         - 1
-+    #         - 2
-+    if prev_line_indent <= next_line_indent:
-+        prev_line_indent = next_line_indent
-+
-+    # If two indents are valid but a previous comment went back to normal
-+    # indent, force the next ones to do the same. In other words, avoid this:
-+    #     list:
-+    #         - 1
-+    #     # comment on valid indent (0)
-+    #         # comment on valid indent (4)
-+    #     other-list:
-+    #         - 2
-+    if (comment.comment_before is not None and
-+            not comment.comment_before.is_inline()):
-+        prev_line_indent = comment.comment_before.column_no - 1
-+
-+    if (comment.column_no - 1 != prev_line_indent and
-+            comment.column_no - 1 != next_line_indent):
-+        yield LintProblem(comment.line_no, comment.column_no,
-+                          'comment not indented like content')
-diff --git a/third_party/python/yamllint/yamllint/rules/common.py b/third_party/python/yamllint/yamllint/rules/common.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/common.py
-@@ -0,0 +1,89 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+import string
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+
-+
-+def spaces_after(token, prev, next, min=-1, max=-1,
-+                 min_desc=None, max_desc=None):
-+    if next is not None and token.end_mark.line == next.start_mark.line:
-+        spaces = next.start_mark.pointer - token.end_mark.pointer
-+        if max != -1 and spaces > max:
-+            return LintProblem(token.start_mark.line + 1,
-+                               next.start_mark.column, max_desc)
-+        elif min != -1 and spaces < min:
-+            return LintProblem(token.start_mark.line + 1,
-+                               next.start_mark.column + 1, min_desc)
-+
-+
-+def spaces_before(token, prev, next, min=-1, max=-1,
-+                  min_desc=None, max_desc=None):
-+    if (prev is not None and prev.end_mark.line == token.start_mark.line and
-+            # Discard tokens (only scalars?) that end at the start of next line
-+            (prev.end_mark.pointer == 0 or
-+             prev.end_mark.buffer[prev.end_mark.pointer - 1] != '\n')):
-+        spaces = token.start_mark.pointer - prev.end_mark.pointer
-+        if max != -1 and spaces > max:
-+            return LintProblem(token.start_mark.line + 1,
-+                               token.start_mark.column, max_desc)
-+        elif min != -1 and spaces < min:
-+            return LintProblem(token.start_mark.line + 1,
-+                               token.start_mark.column + 1, min_desc)
-+
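-+# Illustrative example (not part of the vendored module): for the line
-+# 'key:   value', spaces_after on the ValueToken with max=1 returns a
-+# LintProblem carrying max_desc, since three spaces follow the colon;
-+# passing max=-1 disables the check, and None means no problem.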
-+
-+def get_line_indent(token):
-+    """Finds the indent of the line the token starts in."""
-+    start = token.start_mark.buffer.rfind('\n', 0,
-+                                          token.start_mark.pointer) + 1
-+    content = start
-+    while token.start_mark.buffer[content] == ' ':
-+        content += 1
-+    return content - start
-+
-+
-+def get_real_end_line(token):
-+    """Finds the line on which the token really ends.
-+
-+    With pyyaml, scalar tokens often end on a next line.
-+    """
-+    end_line = token.end_mark.line + 1
-+
-+    if not isinstance(token, yaml.ScalarToken):
-+        return end_line
-+
-+    pos = token.end_mark.pointer - 1
-+    while (pos >= token.start_mark.pointer - 1 and
-+           token.end_mark.buffer[pos] in string.whitespace):
-+        if token.end_mark.buffer[pos] == '\n':
-+            end_line -= 1
-+        pos -= 1
-+    return end_line
-+
-+
-+def is_explicit_key(token):
-+    # explicit key:
-+    #   ? key
-+    #   : v
-+    # or
-+    #   ?
-+    #     key
-+    #   : v
-+    return (token.start_mark.pointer < token.end_mark.pointer and
-+            token.start_mark.buffer[token.start_mark.pointer] == '?')
-diff --git a/third_party/python/yamllint/yamllint/rules/document_end.py b/third_party/python/yamllint/yamllint/rules/document_end.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/document_end.py
-@@ -0,0 +1,107 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to require or forbid the use of document end marker (``...``).
-+
-+.. rubric:: Options
-+
-+* Set ``present`` to ``true`` when the document end marker is required, or to
-+  ``false`` when it is forbidden.
-+
-+.. rubric:: Examples
-+
-+#. With ``document-end: {present: true}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    ---
-+    this:
-+      is: [a, document]
-+    ...
-+    ---
-+    - this
-+    - is: another one
-+    ...
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    ---
-+    this:
-+      is: [a, document]
-+    ---
-+    - this
-+    - is: another one
-+    ...
-+
-+#. With ``document-end: {present: false}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    ---
-+    this:
-+      is: [a, document]
-+    ---
-+    - this
-+    - is: another one
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    ---
-+    this:
-+      is: [a, document]
-+    ...
-+    ---
-+    - this
-+    - is: another one
-+"""
-+
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'document-end'
-+TYPE = 'token'
-+CONF = {'present': bool}
-+DEFAULT = {'present': True}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if conf['present']:
-+        is_stream_end = isinstance(token, yaml.StreamEndToken)
-+        is_start = isinstance(token, yaml.DocumentStartToken)
-+        prev_is_end_or_stream_start = isinstance(
-+            prev, (yaml.DocumentEndToken, yaml.StreamStartToken)
-+        )
-+
-+        if is_stream_end and not prev_is_end_or_stream_start:
-+            yield LintProblem(token.start_mark.line, 1,
-+                              'missing document end "..."')
-+        elif is_start and not prev_is_end_or_stream_start:
-+            yield LintProblem(token.start_mark.line + 1, 1,
-+                              'missing document end "..."')
-+
-+    else:
-+        if isinstance(token, yaml.DocumentEndToken):
-+            yield LintProblem(token.start_mark.line + 1,
-+                              token.start_mark.column + 1,
-+                              'found forbidden document end "..."')
-diff --git a/third_party/python/yamllint/yamllint/rules/document_start.py b/third_party/python/yamllint/yamllint/rules/document_start.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/document_start.py
-@@ -0,0 +1,93 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to require or forbid the use of document start marker (``---``).
-+
-+.. rubric:: Options
-+
-+* Set ``present`` to ``true`` when the document start marker is required, or to
-+  ``false`` when it is forbidden.
-+
-+.. rubric:: Examples
-+
-+#. With ``document-start: {present: true}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    ---
-+    this:
-+      is: [a, document]
-+    ---
-+    - this
-+    - is: another one
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    this:
-+      is: [a, document]
-+    ---
-+    - this
-+    - is: another one
-+
-+#. With ``document-start: {present: false}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    this:
-+      is: [a, document]
-+    ...
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    ---
-+    this:
-+      is: [a, document]
-+    ...
-+"""
-+
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'document-start'
-+TYPE = 'token'
-+CONF = {'present': bool}
-+DEFAULT = {'present': True}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if conf['present']:
-+        if (isinstance(prev, (yaml.StreamStartToken,
-+                              yaml.DocumentEndToken,
-+                              yaml.DirectiveToken)) and
-+            not isinstance(token, (yaml.DocumentStartToken,
-+                                   yaml.DirectiveToken,
-+                                   yaml.StreamEndToken))):
-+            yield LintProblem(token.start_mark.line + 1, 1,
-+                              'missing document start "---"')
-+
-+    else:
-+        if isinstance(token, yaml.DocumentStartToken):
-+            yield LintProblem(token.start_mark.line + 1,
-+                              token.start_mark.column + 1,
-+                              'found forbidden document start "---"')
-diff --git a/third_party/python/yamllint/yamllint/rules/empty_lines.py b/third_party/python/yamllint/yamllint/rules/empty_lines.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/empty_lines.py
-@@ -0,0 +1,108 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to set a maximal number of allowed consecutive blank lines.
-+
-+.. rubric:: Options
-+
-+* ``max`` defines the maximal number of empty lines allowed in the document.
-+* ``max-start`` defines the maximal number of empty lines allowed at the
-+  beginning of the file. This option takes precedence over ``max``.
-+* ``max-end`` defines the maximal number of empty lines allowed at the end of
-+  the file. This option takes precedence over ``max``.
-+
-+.. rubric:: Examples
-+
-+#. With ``empty-lines: {max: 1}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    - foo:
-+        - 1
-+        - 2
-+
-+    - bar: [3, 4]
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - foo:
-+        - 1
-+        - 2
-+
-+
-+    - bar: [3, 4]
-+"""
-+
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'empty-lines'
-+TYPE = 'line'
-+CONF = {'max': int,
-+        'max-start': int,
-+        'max-end': int}
-+DEFAULT = {'max': 2,
-+           'max-start': 0,
-+           'max-end': 0}
-+
-+
-+def check(conf, line):
-+    if line.start == line.end and line.end < len(line.buffer):
-+        # Only alert on the last blank line of a series
-+        if (line.end + 2 <= len(line.buffer) and
-+                line.buffer[line.end:line.end + 2] == '\n\n'):
-+            return
-+        elif (line.end + 4 <= len(line.buffer) and
-+              line.buffer[line.end:line.end + 4] == '\r\n\r\n'):
-+            return
-+
-+        blank_lines = 0
-+
-+        start = line.start
-+        while start >= 2 and line.buffer[start - 2:start] == '\r\n':
-+            blank_lines += 1
-+            start -= 2
-+        while start >= 1 and line.buffer[start - 1] == '\n':
-+            blank_lines += 1
-+            start -= 1
-+
-+        max = conf['max']
-+
-+        # Special case: start of document
-+        if start == 0:
-+            blank_lines += 1  # first line doesn't have a preceding \n
-+            max = conf['max-start']
-+
-+        # Special case: end of document
-+        # NOTE: The last line of a file is always supposed to end with a new
-+        # line character; see the POSIX definition of a line.
-+        if ((line.end == len(line.buffer) - 1 and
-+             line.buffer[line.end] == '\n') or
-+            (line.end == len(line.buffer) - 2 and
-+             line.buffer[line.end:line.end + 2] == '\r\n')):
-+            # Allow the exception of the one-byte file containing '\n'
-+            if line.end == 0:
-+                return
-+
-+            max = conf['max-end']
-+
-+        if blank_lines > max:
-+            yield LintProblem(line.line_no, 1, 'too many blank lines (%d > %d)'
-+                                               % (blank_lines, max))
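-+
-+
-+# Illustrative trace (not part of the vendored module): for the buffer
-+# 'a\n\n\n\nb\n' with max: 2, only the last blank line of the series
-+# (line 4) is reported, as 'too many blank lines (3 > 2)'.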
-diff --git a/third_party/python/yamllint/yamllint/rules/empty_values.py b/third_party/python/yamllint/yamllint/rules/empty_values.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/empty_values.py
-@@ -0,0 +1,96 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2017 Greg Dubicki
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to prevent nodes with empty content, which implicitly result in
-+``null`` values.
-+
-+.. rubric:: Options
-+
-+* Use ``forbid-in-block-mappings`` to prevent empty values in block mappings.
-+* Use ``forbid-in-flow-mappings`` to prevent empty values in flow mappings.
-+
-+.. rubric:: Examples
-+
-+#. With ``empty-values: {forbid-in-block-mappings: true}``
-+
-+   the following code snippets would **PASS**:
-+   ::
-+
-+    some-mapping:
-+      sub-element: correctly indented
-+
-+   ::
-+
-+    explicitly-null: null
-+
-+   the following code snippets would **FAIL**:
-+   ::
-+
-+    some-mapping:
-+    sub-element: incorrectly indented
-+
-+   ::
-+
-+    implicitly-null:
-+
-+#. With ``empty-values: {forbid-in-flow-mappings: true}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    {prop: null}
-+    {a: 1, b: 2, c: 3}
-+
-+   the following code snippets would **FAIL**:
-+   ::
-+
-+    {prop: }
-+
-+   ::
-+
-+    {a: 1, b:, c: 3}
-+
-+"""
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'empty-values'
-+TYPE = 'token'
-+CONF = {'forbid-in-block-mappings': bool,
-+        'forbid-in-flow-mappings': bool}
-+DEFAULT = {'forbid-in-block-mappings': True,
-+           'forbid-in-flow-mappings': True}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+
-+    if conf['forbid-in-block-mappings']:
-+        if isinstance(token, yaml.ValueToken) and isinstance(next, (
-+                yaml.KeyToken, yaml.BlockEndToken)):
-+            yield LintProblem(token.start_mark.line + 1,
-+                              token.end_mark.column + 1,
-+                              'empty value in block mapping')
-+
-+    if conf['forbid-in-flow-mappings']:
-+        if isinstance(token, yaml.ValueToken) and isinstance(next, (
-+                yaml.FlowEntryToken, yaml.FlowMappingEndToken)):
-+            yield LintProblem(token.start_mark.line + 1,
-+                              token.end_mark.column + 1,
-+                              'empty value in flow mapping')
-diff --git a/third_party/python/yamllint/yamllint/rules/hyphens.py b/third_party/python/yamllint/yamllint/rules/hyphens.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/hyphens.py
-@@ -0,0 +1,88 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to control the number of spaces after hyphens (``-``).
-+
-+.. rubric:: Options
-+
-+* ``max-spaces-after`` defines the maximal number of spaces allowed after
-+  hyphens.
-+
-+.. rubric:: Examples
-+
-+#. With ``hyphens: {max-spaces-after: 1}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    - first list:
-+        - a
-+        - b
-+    - - 1
-+      - 2
-+      - 3
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    -  first list:
-+         - a
-+         - b
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - - 1
-+      -  2
-+      - 3
-+
-+#. With ``hyphens: {max-spaces-after: 3}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    -   key
-+    -  key2
-+    - key42
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    -    key
-+    -   key2
-+    -  key42
-+"""
-+
-+
-+import yaml
-+
-+from yamllint.rules.common import spaces_after
-+
-+
-+ID = 'hyphens'
-+TYPE = 'token'
-+CONF = {'max-spaces-after': int}
-+DEFAULT = {'max-spaces-after': 1}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if isinstance(token, yaml.BlockEntryToken):
-+        problem = spaces_after(token, prev, next,
-+                               max=conf['max-spaces-after'],
-+                               max_desc='too many spaces after hyphen')
-+        if problem is not None:
-+            yield problem
-diff --git a/third_party/python/yamllint/yamllint/rules/indentation.py b/third_party/python/yamllint/yamllint/rules/indentation.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/indentation.py
-@@ -0,0 +1,575 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to control the indentation.
-+
-+.. rubric:: Options
-+
-+* ``spaces`` defines the indentation width, in spaces. Set either to an integer
-+  (e.g. ``2`` or ``4``, representing the number of spaces in an indentation
-+  level) or to ``consistent`` to allow any number, as long as it remains the
-+  same within the file.
-+* ``indent-sequences`` defines whether block sequences should be indented or
-+  not (when in a mapping, this indentation is not mandatory -- some people
-+  perceive the ``-`` as part of the indentation). Possible values: ``true``,
-+  ``false``, ``whatever`` and ``consistent``. ``consistent`` requires either
-+  all block sequences to be indented, or none to be. ``whatever`` means either
-+  indenting or not indenting individual block sequences is OK.
-+* ``check-multi-line-strings`` defines whether to lint indentation in
-+  multi-line strings. Set to ``true`` to enable, ``false`` to disable.
-+
-+.. rubric:: Examples
-+
-+#. With ``indentation: {spaces: 1}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    history:
-+     - name: Unix
-+       date: 1969
-+     - name: Linux
-+       date: 1991
-+    nest:
-+     recurse:
-+      - haystack:
-+         needle
-+
-+#. With ``indentation: {spaces: 4}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    history:
-+        - name: Unix
-+          date: 1969
-+        - name: Linux
-+          date: 1991
-+    nest:
-+        recurse:
-+            - haystack:
-+                  needle
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    history:
-+      - name: Unix
-+        date: 1969
-+      - name: Linux
-+        date: 1991
-+    nest:
-+      recurse:
-+        - haystack:
-+            needle
-+
-+#. With ``indentation: {spaces: consistent}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    history:
-+       - name: Unix
-+         date: 1969
-+       - name: Linux
-+         date: 1991
-+    nest:
-+       recurse:
-+          - haystack:
-+               needle
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    some:
-+      Russian:
-+          dolls
-+
-+#. With ``indentation: {spaces: 2, indent-sequences: false}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    list:
-+    - flying
-+    - spaghetti
-+    - monster
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    list:
-+      - flying
-+      - spaghetti
-+      - monster
-+
-+#. With ``indentation: {spaces: 2, indent-sequences: whatever}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    list:
-+    - flying:
-+      - spaghetti
-+      - monster
-+    - not flying:
-+        - spaghetti
-+        - sauce
-+
-+#. With ``indentation: {spaces: 2, indent-sequences: consistent}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    - flying:
-+      - spaghetti
-+      - monster
-+    - not flying:
-+      - spaghetti
-+      - sauce
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - flying:
-+        - spaghetti
-+        - monster
-+    - not flying:
-+      - spaghetti
-+      - sauce
-+
-+#. With ``indentation: {spaces: 4, check-multi-line-strings: true}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    Blaise Pascal:
-+        Je vous écris une longue lettre parce que
-+        je n'ai pas le temps d'en écrire une courte.
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    Blaise Pascal: Je vous écris une longue lettre parce que
-+                   je n'ai pas le temps d'en écrire une courte.
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    Blaise Pascal: Je vous écris une longue lettre parce que
-+      je n'ai pas le temps d'en écrire une courte.
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    C code:
-+        void main() {
-+            printf("foo");
-+        }
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    C code:
-+        void main() {
-+        printf("bar");
-+        }
-+"""
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+from yamllint.rules.common import get_real_end_line, is_explicit_key
-+
-+
-+ID = 'indentation'
-+TYPE = 'token'
-+CONF = {'spaces': (int, 'consistent'),
-+        'indent-sequences': (bool, 'whatever', 'consistent'),
-+        'check-multi-line-strings': bool}
-+DEFAULT = {'spaces': 'consistent',
-+           'indent-sequences': True,
-+           'check-multi-line-strings': False}
-+
-+ROOT, B_MAP, F_MAP, B_SEQ, F_SEQ, B_ENT, KEY, VAL = range(8)
-+labels = ('ROOT', 'B_MAP', 'F_MAP', 'B_SEQ', 'F_SEQ', 'B_ENT', 'KEY', 'VAL')
-+
-+
-+class Parent(object):
-+    def __init__(self, type, indent, line_indent=None):
-+        self.type = type
-+        self.indent = indent
-+        self.line_indent = line_indent
-+        self.explicit_key = False
-+        self.implicit_block_seq = False
-+
-+    def __repr__(self):
-+        return '%s:%d' % (labels[self.type], self.indent)
-+
-+
-+def check_scalar_indentation(conf, token, context):
-+    if token.start_mark.line == token.end_mark.line:
-+        return
-+
-+    def compute_expected_indent(found_indent):
-+        def detect_indent(base_indent):
-+            if not isinstance(context['spaces'], int):
-+                context['spaces'] = found_indent - base_indent
-+            return base_indent + context['spaces']
-+
-+        if token.plain:
-+            return token.start_mark.column
-+        elif token.style in ('"', "'"):
-+            return token.start_mark.column + 1
-+        elif token.style in ('>', '|'):
-+            if context['stack'][-1].type == B_ENT:
-+                # - >
-+                #     multi
-+                #     line
-+                return detect_indent(token.start_mark.column)
-+            elif context['stack'][-1].type == KEY:
-+                assert context['stack'][-1].explicit_key
-+                # - ? >
-+                #       multi-line
-+                #       key
-+                #   : >
-+                #       multi-line
-+                #       value
-+                return detect_indent(token.start_mark.column)
-+            elif context['stack'][-1].type == VAL:
-+                if token.start_mark.line + 1 > context['cur_line']:
-+                    # - key:
-+                    #     >
-+                    #       multi
-+                    #       line
-+                    return detect_indent(context['stack'][-1].indent)
-+                elif context['stack'][-2].explicit_key:
-+                    # - ? key
-+                    #   : >
-+                    #       multi-line
-+                    #       value
-+                    return detect_indent(token.start_mark.column)
-+                else:
-+                    # - key: >
-+                    #     multi
-+                    #     line
-+                    return detect_indent(context['stack'][-2].indent)
-+            else:
-+                return detect_indent(context['stack'][-1].indent)
-+
-+    expected_indent = None
-+
-+    line_no = token.start_mark.line + 1
-+
-+    line_start = token.start_mark.pointer
-+    while True:
-+        line_start = token.start_mark.buffer.find(
-+            '\n', line_start, token.end_mark.pointer - 1) + 1
-+        if line_start == 0:
-+            break
-+        line_no += 1
-+
-+        indent = 0
-+        while token.start_mark.buffer[line_start + indent] == ' ':
-+            indent += 1
-+        if token.start_mark.buffer[line_start + indent] == '\n':
-+            continue
-+
-+        if expected_indent is None:
-+            expected_indent = compute_expected_indent(indent)
-+
-+        if indent != expected_indent:
-+            yield LintProblem(line_no, indent + 1,
-+                              'wrong indentation: expected %d but found %d' %
-+                              (expected_indent, indent))
-+
-+
-+def _check(conf, token, prev, next, nextnext, context):
-+    if 'stack' not in context:
-+        context['stack'] = [Parent(ROOT, 0)]
-+        context['cur_line'] = -1
-+        context['spaces'] = conf['spaces']
-+        context['indent-sequences'] = conf['indent-sequences']
-+
-+    # Step 1: Lint
-+
-+    is_visible = (
-+        not isinstance(token, (yaml.StreamStartToken, yaml.StreamEndToken)) and
-+        not isinstance(token, yaml.BlockEndToken) and
-+        not (isinstance(token, yaml.ScalarToken) and token.value == ''))
-+    first_in_line = (is_visible and
-+                     token.start_mark.line + 1 > context['cur_line'])
-+
-+    def detect_indent(base_indent, next):
-+        if not isinstance(context['spaces'], int):
-+            context['spaces'] = next.start_mark.column - base_indent
-+        return base_indent + context['spaces']
-+
-+    if first_in_line:
-+        found_indentation = token.start_mark.column
-+        expected = context['stack'][-1].indent
-+
-+        if isinstance(token, (yaml.FlowMappingEndToken,
-+                              yaml.FlowSequenceEndToken)):
-+            expected = context['stack'][-1].line_indent
-+        elif (context['stack'][-1].type == KEY and
-+                context['stack'][-1].explicit_key and
-+                not isinstance(token, yaml.ValueToken)):
-+            expected = detect_indent(expected, token)
-+
-+        if found_indentation != expected:
-+            yield LintProblem(token.start_mark.line + 1, found_indentation + 1,
-+                              'wrong indentation: expected %d but found %d' %
-+                              (expected, found_indentation))
-+
-+    if (isinstance(token, yaml.ScalarToken) and
-+            conf['check-multi-line-strings']):
-+        for problem in check_scalar_indentation(conf, token, context):
-+            yield problem
-+
-+    # Step 2.a:
-+
-+    if is_visible:
-+        context['cur_line'] = get_real_end_line(token)
-+        if first_in_line:
-+            context['cur_line_indent'] = found_indentation
-+
-+    # Step 2.b: Update state
-+
-+    if isinstance(token, yaml.BlockMappingStartToken):
-+        #   - a: 1
-+        # or
-+        #   - ? a
-+        #     : 1
-+        # or
-+        #   - ?
-+        #       a
-+        #     : 1
-+        assert isinstance(next, yaml.KeyToken)
-+        assert next.start_mark.line == token.start_mark.line
-+
-+        indent = token.start_mark.column
-+
-+        context['stack'].append(Parent(B_MAP, indent))
-+
-+    elif isinstance(token, yaml.FlowMappingStartToken):
-+        if next.start_mark.line == token.start_mark.line:
-+            #   - {a: 1, b: 2}
-+            indent = next.start_mark.column
-+        else:
-+            #   - {
-+            #     a: 1, b: 2
-+            #   }
-+            indent = detect_indent(context['cur_line_indent'], next)
-+
-+        context['stack'].append(Parent(F_MAP, indent,
-+                                       line_indent=context['cur_line_indent']))
-+
-+    elif isinstance(token, yaml.BlockSequenceStartToken):
-+        #   - - a
-+        #     - b
-+        assert isinstance(next, yaml.BlockEntryToken)
-+        assert next.start_mark.line == token.start_mark.line
-+
-+        indent = token.start_mark.column
-+
-+        context['stack'].append(Parent(B_SEQ, indent))
-+
-+    elif (isinstance(token, yaml.BlockEntryToken) and
-+            # in case of an empty entry
-+            not isinstance(next, (yaml.BlockEntryToken, yaml.BlockEndToken))):
-+        # It looks like pyyaml doesn't issue BlockSequenceStartTokens when the
-+        # list is not indented. We need to compensate for that.
-+        if context['stack'][-1].type != B_SEQ:
-+            context['stack'].append(Parent(B_SEQ, token.start_mark.column))
-+            context['stack'][-1].implicit_block_seq = True
-+
-+        if next.start_mark.line == token.end_mark.line:
-+            #   - item 1
-+            #   - item 2
-+            indent = next.start_mark.column
-+        elif next.start_mark.column == token.start_mark.column:
-+            #   -
-+            #   key: value
-+            indent = next.start_mark.column
-+        else:
-+            #   -
-+            #     item 1
-+            #   -
-+            #     key:
-+            #       value
-+            indent = detect_indent(token.start_mark.column, next)
-+
-+        context['stack'].append(Parent(B_ENT, indent))
-+
-+    elif isinstance(token, yaml.FlowSequenceStartToken):
-+        if next.start_mark.line == token.start_mark.line:
-+            #   - [a, b]
-+            indent = next.start_mark.column
-+        else:
-+            #   - [
-+            #   a, b
-+            # ]
-+            indent = detect_indent(context['cur_line_indent'], next)
-+
-+        context['stack'].append(Parent(F_SEQ, indent,
-+                                       line_indent=context['cur_line_indent']))
-+
-+    elif isinstance(token, yaml.KeyToken):
-+        indent = context['stack'][-1].indent
-+
-+        context['stack'].append(Parent(KEY, indent))
-+
-+        context['stack'][-1].explicit_key = is_explicit_key(token)
-+
-+    elif isinstance(token, yaml.ValueToken):
-+        assert context['stack'][-1].type == KEY
-+
-+        # Special cases:
-+        #     key: &anchor
-+        #       value
-+        # and:
-+        #     key: !!tag
-+        #       value
-+        if isinstance(next, (yaml.AnchorToken, yaml.TagToken)):
-+            if (next.start_mark.line == prev.start_mark.line and
-+                    next.start_mark.line < nextnext.start_mark.line):
-+                next = nextnext
-+
-+        # Only if value is not empty
-+        if not isinstance(next, (yaml.BlockEndToken,
-+                                 yaml.FlowMappingEndToken,
-+                                 yaml.FlowSequenceEndToken,
-+                                 yaml.KeyToken)):
-+            if context['stack'][-1].explicit_key:
-+                #   ? k
-+                #   : value
-+                # or
-+                #   ? k
-+                #   :
-+                #     value
-+                indent = detect_indent(context['stack'][-1].indent, next)
-+            elif next.start_mark.line == prev.start_mark.line:
-+                #   k: value
-+                indent = next.start_mark.column
-+            elif isinstance(next, (yaml.BlockSequenceStartToken,
-+                                   yaml.BlockEntryToken)):
-+                # NOTE: We add BlockEntryToken in the test above because
-+                # sometimes BlockSequenceStartToken are not issued. Try
-+                # yaml.scan()ning this:
-+                #     '- lib:\n'
-+                #     '  - var\n'
-+                if context['indent-sequences'] is False:
-+                    indent = context['stack'][-1].indent
-+                elif context['indent-sequences'] is True:
-+                    if (context['spaces'] == 'consistent' and
-+                            next.start_mark.column -
-+                            context['stack'][-1].indent == 0):
-+                        # In this case, the block sequence item is not indented
-+                        # (while it should be), but we don't know yet the
-+                        # indentation it should have (because `spaces` is
-+                        # `consistent` and its value has not been computed yet
-+                        # -- this is probably the beginning of the document).
-+                        # So we choose an arbitrary value (2).
-+                        indent = 2
-+                    else:
-+                        indent = detect_indent(context['stack'][-1].indent,
-+                                               next)
-+                else:  # 'whatever' or 'consistent'
-+                    if next.start_mark.column == context['stack'][-1].indent:
-+                        #   key:
-+                        #   - e1
-+                        #   - e2
-+                        if context['indent-sequences'] == 'consistent':
-+                            context['indent-sequences'] = False
-+                        indent = context['stack'][-1].indent
-+                    else:
-+                        if context['indent-sequences'] == 'consistent':
-+                            context['indent-sequences'] = True
-+                        #   key:
-+                        #     - e1
-+                        #     - e2
-+                        indent = detect_indent(context['stack'][-1].indent,
-+                                               next)
-+            else:
-+                #   k:
-+                #     value
-+                indent = detect_indent(context['stack'][-1].indent, next)
-+
-+            context['stack'].append(Parent(VAL, indent))
-+
-+    consumed_current_token = False
-+    while True:
-+        if (context['stack'][-1].type == F_SEQ and
-+                isinstance(token, yaml.FlowSequenceEndToken) and
-+                not consumed_current_token):
-+            context['stack'].pop()
-+            consumed_current_token = True
-+
-+        elif (context['stack'][-1].type == F_MAP and
-+                isinstance(token, yaml.FlowMappingEndToken) and
-+                not consumed_current_token):
-+            context['stack'].pop()
-+            consumed_current_token = True
-+
-+        elif (context['stack'][-1].type in (B_MAP, B_SEQ) and
-+                isinstance(token, yaml.BlockEndToken) and
-+                not context['stack'][-1].implicit_block_seq and
-+                not consumed_current_token):
-+            context['stack'].pop()
-+            consumed_current_token = True
-+
-+        elif (context['stack'][-1].type == B_ENT and
-+                not isinstance(token, yaml.BlockEntryToken) and
-+                context['stack'][-2].implicit_block_seq and
-+                not isinstance(token, (yaml.AnchorToken, yaml.TagToken)) and
-+                not isinstance(next, yaml.BlockEntryToken)):
-+            context['stack'].pop()
-+            context['stack'].pop()
-+
-+        elif (context['stack'][-1].type == B_ENT and
-+                isinstance(next, (yaml.BlockEntryToken, yaml.BlockEndToken))):
-+            context['stack'].pop()
-+
-+        elif (context['stack'][-1].type == VAL and
-+                not isinstance(token, yaml.ValueToken) and
-+                not isinstance(token, (yaml.AnchorToken, yaml.TagToken))):
-+            assert context['stack'][-2].type == KEY
-+            context['stack'].pop()
-+            context['stack'].pop()
-+
-+        elif (context['stack'][-1].type == KEY and
-+                isinstance(next, (yaml.BlockEndToken,
-+                                  yaml.FlowMappingEndToken,
-+                                  yaml.FlowSequenceEndToken,
-+                                  yaml.KeyToken))):
-+            # A key without a value: it's part of a set. Let's drop this key
-+            # and leave room for the next one.
-+            context['stack'].pop()
-+
-+        else:
-+            break
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    try:
-+        for problem in _check(conf, token, prev, next, nextnext, context):
-+            yield problem
-+    except AssertionError:
-+        yield LintProblem(token.start_mark.line + 1,
-+                          token.start_mark.column + 1,
-+                          'cannot infer indentation: unexpected token')
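
The indentation rule above is entirely token-driven, so a concrete run helps.
A minimal sketch, assuming the vendored yamllint is importable with its usual
``linter.run``/``YamlLintConfig`` entry points (the output in comments is
indicative)::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    # Require 4-space indentation, then lint a document indented with 2.
    conf = YamlLintConfig('rules:\n  indentation:\n    spaces: 4\n')
    for problem in linter.run('key:\n  nested: value\n', conf):
        print(problem.line, problem.column, problem.desc)
    # 2 3 wrong indentation: expected 4 but found 2
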
-diff --git a/third_party/python/yamllint/yamllint/rules/key_duplicates.py b/third_party/python/yamllint/yamllint/rules/key_duplicates.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/key_duplicates.py
-@@ -0,0 +1,100 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to prevent multiple entries with the same key in mappings.
-+
-+.. rubric:: Examples
-+
-+#. With ``key-duplicates: {}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    - key 1: v
-+      key 2: val
-+      key 3: value
-+    - {a: 1, b: 2, c: 3}
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - key 1: v
-+      key 2: val
-+      key 1: value
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - {a: 1, b: 2, b: 3}
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    duplicated key: 1
-+    "duplicated key": 2
-+
-+    other duplication: 1
-+    ? >-
-+        other
-+        duplication
-+    : 2
-+"""
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'key-duplicates'
-+TYPE = 'token'
-+
-+MAP, SEQ = range(2)
-+
-+
-+class Parent(object):
-+    def __init__(self, type):
-+        self.type = type
-+        self.keys = []
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if 'stack' not in context:
-+        context['stack'] = []
-+
-+    if isinstance(token, (yaml.BlockMappingStartToken,
-+                          yaml.FlowMappingStartToken)):
-+        context['stack'].append(Parent(MAP))
-+    elif isinstance(token, (yaml.BlockSequenceStartToken,
-+                            yaml.FlowSequenceStartToken)):
-+        context['stack'].append(Parent(SEQ))
-+    elif isinstance(token, (yaml.BlockEndToken,
-+                            yaml.FlowMappingEndToken,
-+                            yaml.FlowSequenceEndToken)):
-+        context['stack'].pop()
-+    elif (isinstance(token, yaml.KeyToken) and
-+          isinstance(next, yaml.ScalarToken)):
-+        # This check is done because KeyTokens can be found inside flow
-+        # sequences... strange, but allowed.
-+        if len(context['stack']) > 0 and context['stack'][-1].type == MAP:
-+            if (next.value in context['stack'][-1].keys and
-+                    # `<<` is "merge key", see http://yaml.org/type/merge.html
-+                    next.value != '<<'):
-+                yield LintProblem(
-+                    next.start_mark.line + 1, next.start_mark.column + 1,
-+                    'duplication of key "%s" in mapping' % next.value)
-+            else:
-+                context['stack'][-1].keys.append(next.value)
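
The rule exists because plain YAML loaders accept duplicate keys and silently
keep the last value. A short sketch, assuming PyYAML and the vendored yamllint
are importable (output comments are indicative)::

    import yaml
    from yamllint import linter
    from yamllint.config import YamlLintConfig

    # PyYAML keeps the last duplicate silently -- the surprise this rule catches.
    print(yaml.safe_load('key 1: v\nkey 1: value\n'))  # {'key 1': 'value'}

    conf = YamlLintConfig('rules:\n  key-duplicates: enable\n')
    for p in linter.run('key 1: v\nkey 1: value\n', conf):
        print(p.line, p.column, p.desc)
    # 2 1 duplication of key "key 1" in mapping
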
-diff --git a/third_party/python/yamllint/yamllint/rules/key_ordering.py b/third_party/python/yamllint/yamllint/rules/key_ordering.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/key_ordering.py
-@@ -0,0 +1,109 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2017 Johannes F. Knauf
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to enforce alphabetical ordering of keys in mappings. The sorting
-+order uses the Unicode code point number. As a result, the ordering is
-+case-sensitive and not accent-friendly (see examples below).
-+
-+.. rubric:: Examples
-+
-+#. With ``key-ordering: {}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    - key 1: v
-+      key 2: val
-+      key 3: value
-+    - {a: 1, b: 2, c: 3}
-+    - T-shirt: 1
-+      T-shirts: 2
-+      t-shirt: 3
-+      t-shirts: 4
-+    - hair: true
-+      hais: true
-+      haïr: true
-+      haïssable: true
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - key 2: v
-+      key 1: val
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - {b: 1, a: 2}
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - T-shirt: 1
-+      t-shirt: 2
-+      T-shirts: 3
-+      t-shirts: 4
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - haïr: true
-+      hais: true
-+"""
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'key-ordering'
-+TYPE = 'token'
-+
-+MAP, SEQ = range(2)
-+
-+
-+class Parent(object):
-+    def __init__(self, type):
-+        self.type = type
-+        self.keys = []
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if 'stack' not in context:
-+        context['stack'] = []
-+
-+    if isinstance(token, (yaml.BlockMappingStartToken,
-+                          yaml.FlowMappingStartToken)):
-+        context['stack'].append(Parent(MAP))
-+    elif isinstance(token, (yaml.BlockSequenceStartToken,
-+                            yaml.FlowSequenceStartToken)):
-+        context['stack'].append(Parent(SEQ))
-+    elif isinstance(token, (yaml.BlockEndToken,
-+                            yaml.FlowMappingEndToken,
-+                            yaml.FlowSequenceEndToken)):
-+        context['stack'].pop()
-+    elif (isinstance(token, yaml.KeyToken) and
-+          isinstance(next, yaml.ScalarToken)):
-+        # This check is done because KeyTokens can be found inside flow
-+        # sequences... strange, but allowed.
-+        if len(context['stack']) > 0 and context['stack'][-1].type == MAP:
-+            if any(next.value < key for key in context['stack'][-1].keys):
-+                yield LintProblem(
-+                    next.start_mark.line + 1, next.start_mark.column + 1,
-+                    'wrong ordering of key "%s" in mapping' % next.value)
-+            else:
-+                context['stack'][-1].keys.append(next.value)
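
The ordering test above is a plain string comparison, i.e. by Unicode code
point, which is exactly why the ``T-shirt`` and ``haïr`` docstring examples
fail. A quick illustration in plain Python::

    # Mirrors `any(next.value < key for key in keys)` in the rule above.
    print('T-shirt' < 't-shirt')  # True:  'T' is U+0054, 't' is U+0074
    print('haïr' < 'hais')        # False: 'ï' (U+00EF) sorts after 's' (U+0073)
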
-diff --git a/third_party/python/yamllint/yamllint/rules/line_length.py b/third_party/python/yamllint/yamllint/rules/line_length.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/line_length.py
-@@ -0,0 +1,149 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to set a limit on line length.
-+
-+Note: with Python 2, the ``line-length`` rule may not work properly with
-+unicode characters because of the way strings are represented in bytes. We
-+recommend running yamllint with Python 3.
-+
-+.. rubric:: Options
-+
-+* ``max`` defines the maximal (inclusive) length of lines.
-+* ``allow-non-breakable-words`` is used to allow non-breakable words (without
-+  spaces inside) to overflow the limit. This is useful for long URLs, for
-+  instance. Use ``true`` to allow, ``false`` to forbid.
-+* ``allow-non-breakable-inline-mappings`` implies ``allow-non-breakable-words``
-+  and extends it to also allow non-breakable words in inline mappings.
-+
-+.. rubric:: Examples
-+
-+#. With ``line-length: {max: 70}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    long sentence:
-+      Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
-+      eiusmod tempor incididunt ut labore et dolore magna aliqua.
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    long sentence:
-+      Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
-+      tempor incididunt ut labore et dolore magna aliqua.
-+
-+#. With ``line-length: {max: 60, allow-non-breakable-words: true}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    this:
-+      is:
-+        - a:
-+            http://localhost/very/very/very/very/very/very/very/very/long/url
-+
-+    # this comment is too long,
-+    # but hard to split:
-+    # http://localhost/another/very/very/very/very/very/very/very/very/long/url
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - this line is waaaaaaaaaaaaaay too long but could be easily split...
-+
-+   and the following code snippet would also **FAIL**:
-+   ::
-+
-+    - foobar: http://localhost/very/very/very/very/very/very/very/very/long/url
-+
-+#. With ``line-length: {max: 60, allow-non-breakable-words: true,
-+   allow-non-breakable-inline-mappings: true}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    - foobar: http://localhost/very/very/very/very/very/very/very/very/long/url
-+
-+#. With ``line-length: {max: 60, allow-non-breakable-words: false}``
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    this:
-+      is:
-+        - a:
-+            http://localhost/very/very/very/very/very/very/very/very/long/url
-+"""
-+
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'line-length'
-+TYPE = 'line'
-+CONF = {'max': int,
-+        'allow-non-breakable-words': bool,
-+        'allow-non-breakable-inline-mappings': bool}
-+DEFAULT = {'max': 80,
-+           'allow-non-breakable-words': True,
-+           'allow-non-breakable-inline-mappings': False}
-+
-+
-+def check_inline_mapping(line):
-+    loader = yaml.SafeLoader(line.content)
-+    try:
-+        while loader.peek_token():
-+            if isinstance(loader.get_token(), yaml.BlockMappingStartToken):
-+                while loader.peek_token():
-+                    if isinstance(loader.get_token(), yaml.ValueToken):
-+                        t = loader.get_token()
-+                        if isinstance(t, yaml.ScalarToken):
-+                            return (
-+                                ' ' not in line.content[t.start_mark.column:])
-+    except yaml.scanner.ScannerError:
-+        pass
-+
-+    return False
-+
-+
-+def check(conf, line):
-+    if line.end - line.start > conf['max']:
-+        conf['allow-non-breakable-words'] |= \
-+            conf['allow-non-breakable-inline-mappings']
-+        if conf['allow-non-breakable-words']:
-+            start = line.start
-+            while start < line.end and line.buffer[start] == ' ':
-+                start += 1
-+
-+            if start != line.end:
-+                if line.buffer[start] in ('#', '-'):
-+                    start += 2
-+
-+                if line.buffer.find(' ', start, line.end) == -1:
-+                    return
-+
-+                if (conf['allow-non-breakable-inline-mappings'] and
-+                        check_inline_mapping(line)):
-+                    return
-+
-+        yield LintProblem(line.line_no, conf['max'] + 1,
-+                          'line too long (%d > %d characters)' %
-+                          (line.end - line.start, conf['max']))
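
A sketch of the ``allow-non-breakable-words`` behaviour, assuming the vendored
yamllint is importable (output comments are indicative)::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig(
        'rules:\n'
        '  line-length:\n'
        '    max: 40\n'
        '    allow-non-breakable-words: true\n'
    )
    url = '- http://localhost/very/very/very/long/url\n'
    prose = '- this line is long but could easily be split somewhere\n'
    print(list(linter.run(url, conf)))  # [] -- one unbreakable word is tolerated
    for p in linter.run(prose, conf):
        print(p.desc)                   # line too long (55 > 40 characters)
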
-diff --git a/third_party/python/yamllint/yamllint/rules/new_line_at_end_of_file.py b/third_party/python/yamllint/yamllint/rules/new_line_at_end_of_file.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/new_line_at_end_of_file.py
-@@ -0,0 +1,37 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to require a new line character (``\\n``) at the end of files.
-+
-+The POSIX standard `requires the last line to end with a new line character
-+<http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_206>`_.
-+All UNIX tools expect a new line at the end of files. Most text editors use
-+this convention too.
-+"""
-+
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'new-line-at-end-of-file'
-+TYPE = 'line'
-+
-+
-+def check(conf, line):
-+    if line.end == len(line.buffer) and line.end > line.start:
-+        yield LintProblem(line.line_no, line.end - line.start + 1,
-+                          'no new line character at the end of file')
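
The check above only fires when the buffer's last line is non-empty and runs to
the very end of the buffer, i.e. the file does not finish with ``\n``. A sketch,
assuming the vendored yamllint is importable::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules:\n  new-line-at-end-of-file: enable\n')
    print(list(linter.run('key: value\n', conf)))  # [] -- ends with a newline
    for p in linter.run('key: value', conf):       # missing final newline
        print(p.line, p.column, p.desc)
    # 1 11 no new line character at the end of file
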
-diff --git a/third_party/python/yamllint/yamllint/rules/new_lines.py b/third_party/python/yamllint/yamllint/rules/new_lines.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/new_lines.py
-@@ -0,0 +1,46 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to force the type of new line characters.
-+
-+.. rubric:: Options
-+
-+* Set ``type`` to ``unix`` to use UNIX-typed new line characters (``\\n``), or
-+  ``dos`` to use DOS-typed new line characters (``\\r\\n``).
-+"""
-+
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'new-lines'
-+TYPE = 'line'
-+CONF = {'type': ('unix', 'dos')}
-+DEFAULT = {'type': 'unix'}
-+
-+
-+def check(conf, line):
-+    if line.start == 0 and len(line.buffer) > line.end:
-+        if conf['type'] == 'dos':
-+            if (line.end + 2 > len(line.buffer) or
-+                    line.buffer[line.end:line.end + 2] != '\r\n'):
-+                yield LintProblem(1, line.end - line.start + 1,
-+                                  'wrong new line character: expected \\r\\n')
-+        else:
-+            if line.buffer[line.end] == '\r':
-+                yield LintProblem(1, line.end - line.start + 1,
-+                                  'wrong new line character: expected \\n')
-diff --git a/third_party/python/yamllint/yamllint/rules/octal_values.py b/third_party/python/yamllint/yamllint/rules/octal_values.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/octal_values.py
-@@ -0,0 +1,95 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2017 ScienJus
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to prevent values with octal numbers. In YAML, numbers that
-+start with ``0`` are interpreted as octal, but this is not always wanted.
-+For instance ``010`` is the city code of Beijing, and should not be
-+converted to ``8``.
-+
-+.. rubric:: Examples
-+
-+#. With ``octal-values: {forbid-implicit-octal: true}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    user:
-+      city-code: '010'
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    user:
-+      city-code: 010,021
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    user:
-+      city-code: 010
-+
-+#. With ``octal-values: {forbid-explicit-octal: true}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    user:
-+      city-code: '0o10'
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    user:
-+      city-code: 0o10
-+"""
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'octal-values'
-+TYPE = 'token'
-+CONF = {'forbid-implicit-octal': bool,
-+        'forbid-explicit-octal': bool}
-+DEFAULT = {'forbid-implicit-octal': True,
-+           'forbid-explicit-octal': True}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if prev and isinstance(prev, yaml.tokens.TagToken):
-+        return
-+
-+    if conf['forbid-implicit-octal']:
-+        if isinstance(token, yaml.tokens.ScalarToken):
-+            if not token.style:
-+                val = token.value
-+                if val.isdigit() and len(val) > 1 and val[0] == '0':
-+                    yield LintProblem(
-+                        token.start_mark.line + 1, token.end_mark.column + 1,
-+                        'forbidden implicit octal value "%s"' %
-+                        token.value)
-+
-+    if conf['forbid-explicit-octal']:
-+        if isinstance(token, yaml.tokens.ScalarToken):
-+            if not token.style:
-+                val = token.value
-+                if len(val) > 2 and val[:2] == '0o' and val[2:].isdigit():
-+                    yield LintProblem(
-+                        token.start_mark.line + 1, token.end_mark.column + 1,
-+                        'forbidden explicit octal value "%s"' %
-+                        token.value)
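
The surprise motivating the rule is easy to reproduce with PyYAML, which
implements YAML 1.1 octals (``0o10`` is the YAML 1.2 spelling targeted by
``forbid-explicit-octal``)::

    import yaml

    print(yaml.safe_load('city-code: 010'))    # {'city-code': 8} -- parsed as octal
    print(yaml.safe_load("city-code: '010'"))  # {'city-code': '010'} -- quoting is safe
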
-diff --git a/third_party/python/yamllint/yamllint/rules/quoted_strings.py b/third_party/python/yamllint/yamllint/rules/quoted_strings.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/quoted_strings.py
-@@ -0,0 +1,78 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2018 ClearScore
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to forbid any string values that are not quoted.
-+You can also enforce the type of quote used with the ``quote-type`` option
-+(``single``, ``double`` or ``any``).
-+
-+**Note**: Multi-line strings (with ``|`` or ``>``) will not be checked.
-+
-+.. rubric:: Examples
-+
-+#. With ``quoted-strings: {quote-type: any}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    foo: "bar"
-+    bar: 'foo'
-+    number: 123
-+    boolean: true
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    foo: bar
-+"""
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+
-+ID = 'quoted-strings'
-+TYPE = 'token'
-+CONF = {'quote-type': ('any', 'single', 'double')}
-+DEFAULT = {'quote-type': 'any'}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    quote_type = conf['quote-type']
-+
-+    if (isinstance(token, yaml.tokens.ScalarToken) and
-+            isinstance(prev, (yaml.ValueToken, yaml.TagToken))):
-+        # Ignore explicit types, e.g. !!str testtest or !!int 42
-+        if (prev and isinstance(prev, yaml.tokens.TagToken) and
-+                prev.value[0] == '!!'):
-+            return
-+
-+        # Ignore numbers, booleans, etc.
-+        resolver = yaml.resolver.Resolver()
-+        if resolver.resolve(yaml.nodes.ScalarNode, token.value,
-+                            (True, False)) != 'tag:yaml.org,2002:str':
-+            return
-+
-+        # Ignore multi-line strings
-+        if (not token.plain) and (token.style == "|" or token.style == ">"):
-+            return
-+
-+        if ((quote_type == 'single' and token.style != "'") or
-+                (quote_type == 'double' and token.style != '"') or
-+                (quote_type == 'any' and token.style is None)):
-+            yield LintProblem(
-+                token.start_mark.line + 1,
-+                token.start_mark.column + 1,
-+                "string value is not quoted with %s quotes" % (quote_type))
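
The resolver call in the rule above is what decides whether a plain scalar is
really a string. It can be probed directly (a sketch assuming PyYAML is
importable)::

    import yaml

    resolver = yaml.resolver.Resolver()

    def implicit_tag(value):
        # The same call the rule uses to skip values that are not strings.
        return resolver.resolve(yaml.nodes.ScalarNode, value, (True, False))

    print(implicit_tag('bar'))   # tag:yaml.org,2002:str  -> subject to the rule
    print(implicit_tag('123'))   # tag:yaml.org,2002:int  -> ignored
    print(implicit_tag('true'))  # tag:yaml.org,2002:bool -> ignored
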
-diff --git a/third_party/python/yamllint/yamllint/rules/trailing_spaces.py b/third_party/python/yamllint/yamllint/rules/trailing_spaces.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/trailing_spaces.py
-@@ -0,0 +1,62 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Adrien Vergé
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to forbid trailing spaces at the end of lines.
-+
-+.. rubric:: Examples
-+
-+#. With ``trailing-spaces: {}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    this document doesn't contain
-+    any trailing
-+    spaces
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    this document contains     """ """
-+    trailing spaces
-+    on lines 1 and 3         """ """
-+"""
-+
-+
-+import string
-+
-+from yamllint.linter import LintProblem
-+
-+
-+ID = 'trailing-spaces'
-+TYPE = 'line'
-+
-+
-+def check(conf, line):
-+    if line.end == 0:
-+        return
-+
-+    # YAML recognizes two white space characters: space and tab.
-+    # http://yaml.org/spec/1.2/spec.html#id2775170
-+
-+    pos = line.end
-+    while line.buffer[pos - 1] in string.whitespace and pos > line.start:
-+        pos -= 1
-+
-+    if pos != line.end and line.buffer[pos] in ' \t':
-+        yield LintProblem(line.line_no, pos - line.start + 1,
-+                          'trailing spaces')
-diff --git a/third_party/python/yamllint/yamllint/rules/truthy.py b/third_party/python/yamllint/yamllint/rules/truthy.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/yamllint/yamllint/rules/truthy.py
-@@ -0,0 +1,126 @@
-+# -*- coding: utf-8 -*-
-+# Copyright (C) 2016 Peter Ericson
-+#
-+# This program is free software: you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation, either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-+
-+"""
-+Use this rule to forbid non-explicitly typed truthy values other than the
-+allowed ones (by default: ``true`` and ``false``), for example ``YES`` or ``off``.
-+
-+This can be useful to prevent surprises from YAML parsers transforming
-+``[yes, FALSE, Off]`` into ``[true, false, false]`` or
-+``{y: 1, yes: 2, on: 3, true: 4, True: 5}`` into ``{y: 1, true: 5}``.
-+
-+.. rubric:: Options
-+
-+* ``allowed-values`` defines the list of truthy values which will be ignored
-+  during linting. The default is ``['true', 'false']``, but can be changed to
-+  any list containing: ``'TRUE'``, ``'True'``,  ``'true'``, ``'FALSE'``,
-+  ``'False'``, ``'false'``, ``'YES'``, ``'Yes'``, ``'yes'``, ``'NO'``,
-+  ``'No'``, ``'no'``, ``'ON'``, ``'On'``, ``'on'``, ``'OFF'``, ``'Off'``,
-+  ``'off'``.
-+
-+.. rubric:: Examples
-+
-+#. With ``truthy: {}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    boolean: true
-+
-+    object: {"True": 1, 1: "True"}
-+
-+    "yes":  1
-+    "on":   2
-+    "True": 3
-+
-+     explicit:
-+       string1: !!str True
-+       string2: !!str yes
-+       string3: !!str off
-+       encoded: !!binary |
-+                  True
-+                  OFF
-+                  pad==  # this decodes as 'N\xbb\x9e8Qii'
-+       boolean1: !!bool true
-+       boolean2: !!bool "false"
-+       boolean3: !!bool FALSE
-+       boolean4: !!bool True
-+       boolean5: !!bool off
-+       boolean6: !!bool NO
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    object: {True: 1, 1: True}
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    yes:  1
-+    on:   2
-+    True: 3
-+
-+#. With ``truthy: {allowed-values: ["yes", "no"]}``
-+
-+   the following code snippet would **PASS**:
-+   ::
-+
-+    - yes
-+    - no
-+    - "true"
-+    - 'false'
-+    - foo
-+    - bar
-+
-+   the following code snippet would **FAIL**:
-+   ::
-+
-+    - true
-+    - false
-+    - on
-+    - off
-+"""
-+
-+import yaml
-+
-+from yamllint.linter import LintProblem
-+
-+
-+TRUTHY = ['YES', 'Yes', 'yes',
-+          'NO', 'No', 'no',
-+          'TRUE', 'True', 'true',
-+          'FALSE', 'False', 'false',
-+          'ON', 'On', 'on',
-+          'OFF', 'Off', 'off']
-+
-+
-+ID = 'truthy'
-+TYPE = 'token'
-+CONF = {'allowed-values': list(TRUTHY)}
-+DEFAULT = {'allowed-values': ['true', 'false']}
-+
-+
-+def check(conf, token, prev, next, nextnext, context):
-+    if prev and isinstance(prev, yaml.tokens.TagToken):
-+        return
-+
-+    if isinstance(token, yaml.tokens.ScalarToken):
-+        if (token.value in (set(TRUTHY) - set(conf['allowed-values'])) and
-+                token.style is None):
-+            yield LintProblem(token.start_mark.line + 1,
-+                              token.start_mark.column + 1,
-+                              "truthy value should be one of [" +
-+                              ", ".join(sorted(conf['allowed-values'])) + "]")
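
The transformations the docstring warns about are real PyYAML (YAML 1.1)
behaviour and easy to reproduce::

    import yaml

    print(yaml.safe_load('[yes, FALSE, Off]'))        # [True, False, False]
    print(yaml.safe_load('{y: 1, yes: 2, true: 4}'))  # {'y': 1, True: 4}
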
-diff --git a/third_party/python/zipp/zipp.py b/third_party/python/zipp/zipp.py
-new file mode 100644
---- /dev/null
-+++ b/third_party/python/zipp/zipp.py
-@@ -0,0 +1,286 @@
-+# coding: utf-8
-+
-+from __future__ import division
-+
-+import io
-+import sys
-+import posixpath
-+import zipfile
-+import functools
-+import itertools
-+from collections import OrderedDict
-+
-+try:
-+    from contextlib import suppress
-+except ImportError:
-+    from contextlib2 import suppress
-+
-+__metaclass__ = type
-+
-+
-+def _parents(path):
-+    """
-+    Given a path with elements separated by
-+    posixpath.sep, generate all parents of that path.
-+
-+    >>> list(_parents('b/d'))
-+    ['b']
-+    >>> list(_parents('/b/d/'))
-+    ['/b']
-+    >>> list(_parents('b/d/f/'))
-+    ['b/d', 'b']
-+    >>> list(_parents('b'))
-+    []
-+    >>> list(_parents(''))
-+    []
-+    """
-+    return itertools.islice(_ancestry(path), 1, None)
-+
-+
-+def _ancestry(path):
-+    """
-+    Given a path with elements separated by
-+    posixpath.sep, generate all elements of that path
-+
-+    >>> list(_ancestry('b/d'))
-+    ['b/d', 'b']
-+    >>> list(_ancestry('/b/d/'))
-+    ['/b/d', '/b']
-+    >>> list(_ancestry('b/d/f/'))
-+    ['b/d/f', 'b/d', 'b']
-+    >>> list(_ancestry('b'))
-+    ['b']
-+    >>> list(_ancestry(''))
-+    []
-+    """
-+    path = path.rstrip(posixpath.sep)
-+    while path and path != posixpath.sep:
-+        yield path
-+        path, tail = posixpath.split(path)
-+
-+
-+class CompleteDirs(zipfile.ZipFile):
-+    """
-+    A ZipFile subclass that ensures that implied directories
-+    are always included in the namelist.
-+    """
-+
-+    @staticmethod
-+    def _implied_dirs(names):
-+        parents = itertools.chain.from_iterable(map(_parents, names))
-+        # Cast names to a set for O(1) lookups
-+        existing = set(names)
-+        # Deduplicate entries in original order
-+        implied_dirs = OrderedDict.fromkeys(
-+            p + posixpath.sep for p in parents
-+            if p + posixpath.sep not in existing
-+        )
-+        return implied_dirs
-+
-+    def namelist(self):
-+        names = super(CompleteDirs, self).namelist()
-+        return names + list(self._implied_dirs(names))
-+
-+    def _name_set(self):
-+        return set(self.namelist())
-+
-+    def resolve_dir(self, name):
-+        """
-+        If the name represents a directory, return that name
-+        as a directory (with the trailing slash).
-+        """
-+        names = self._name_set()
-+        dirname = name + '/'
-+        dir_match = name not in names and dirname in names
-+        return dirname if dir_match else name
-+
-+    @classmethod
-+    def make(cls, source):
-+        """
-+        Given a source (filename or zipfile), return an
-+        appropriate CompleteDirs subclass.
-+        """
-+        if isinstance(source, CompleteDirs):
-+            return source
-+
-+        if not isinstance(source, zipfile.ZipFile):
-+            return cls(_pathlib_compat(source))
-+
-+        # Only allow for FastPath when supplied zipfile is read-only
-+        if 'r' not in source.mode:
-+            cls = CompleteDirs
-+
-+        res = cls.__new__(cls)
-+        vars(res).update(vars(source))
-+        return res
-+
-+
-+class FastLookup(CompleteDirs):
-+    """
-+    ZipFile subclass to ensure implicit
-+    dirs exist and are resolved rapidly.
-+    """
-+    def namelist(self):
-+        with suppress(AttributeError):
-+            return self.__names
-+        self.__names = super(FastLookup, self).namelist()
-+        return self.__names
-+
-+    def _name_set(self):
-+        with suppress(AttributeError):
-+            return self.__lookup
-+        self.__lookup = super(FastLookup, self)._name_set()
-+        return self.__lookup
-+
-+
-+def _pathlib_compat(path):
-+    """
-+    For path-like objects, convert to a filename for compatibility
-+    on Python 3.6.1 and earlier.
-+    """
-+    try:
-+        return path.__fspath__()
-+    except AttributeError:
-+        return str(path)
-+
-+
-+class Path:
-+    """
-+    A pathlib-compatible interface for zip files.
-+
-+    Consider a zip file with this structure::
-+
-+        .
-+        ├── a.txt
-+        └── b
-+            ├── c.txt
-+            └── d
-+                └── e.txt
-+
-+    >>> data = io.BytesIO()
-+    >>> zf = zipfile.ZipFile(data, 'w')
-+    >>> zf.writestr('a.txt', 'content of a')
-+    >>> zf.writestr('b/c.txt', 'content of c')
-+    >>> zf.writestr('b/d/e.txt', 'content of e')
-+    >>> zf.filename = 'abcde.zip'
-+
-+    Path accepts the zipfile object itself or a filename
-+
-+    >>> root = Path(zf)
-+
-+    From there, several path operations are available.
-+
-+    Directory iteration (including the zip file itself):
-+
-+    >>> a, b = root.iterdir()
-+    >>> a
-+    Path('abcde.zip', 'a.txt')
-+    >>> b
-+    Path('abcde.zip', 'b/')
-+
-+    name property:
-+
-+    >>> b.name
-+    'b'
-+
-+    join with divide operator:
-+
-+    >>> c = b / 'c.txt'
-+    >>> c
-+    Path('abcde.zip', 'b/c.txt')
-+    >>> c.name
-+    'c.txt'
-+
-+    Read text:
-+
-+    >>> c.read_text()
-+    'content of c'
-+
-+    existence:
-+
-+    >>> c.exists()
-+    True
-+    >>> (b / 'missing.txt').exists()
-+    False
-+
-+    Coercion to string:
-+
-+    >>> str(c)
-+    'abcde.zip/b/c.txt'
-+    """
-+
-+    __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
-+
-+    def __init__(self, root, at=""):
-+        self.root = FastLookup.make(root)
-+        self.at = at
-+
-+    def open(self, mode='r', *args, **kwargs):
-+        """
-+        Open this entry as text or binary following the semantics
-+        of ``pathlib.Path.open()`` by passing arguments through
-+        to io.TextIOWrapper().
-+        """
-+        pwd = kwargs.pop('pwd', None)
-+        zip_mode = mode[0]
-+        stream = self.root.open(self.at, zip_mode, pwd=pwd)
-+        if 'b' in mode:
-+            if args or kwargs:
-+                raise ValueError("encoding args invalid for binary operation")
-+            return stream
-+        return io.TextIOWrapper(stream, *args, **kwargs)
-+
-+    @property
-+    def name(self):
-+        return posixpath.basename(self.at.rstrip("/"))
-+
-+    def read_text(self, *args, **kwargs):
-+        with self.open('r', *args, **kwargs) as strm:
-+            return strm.read()
-+
-+    def read_bytes(self):
-+        with self.open('rb') as strm:
-+            return strm.read()
-+
-+    def _is_child(self, path):
-+        return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
-+
-+    def _next(self, at):
-+        return Path(self.root, at)
-+
-+    def is_dir(self):
-+        return not self.at or self.at.endswith("/")
-+
-+    def is_file(self):
-+        return not self.is_dir()
-+
-+    def exists(self):
-+        return self.at in self.root._name_set()
-+
-+    def iterdir(self):
-+        if not self.is_dir():
-+            raise ValueError("Can't listdir a file")
-+        subs = map(self._next, self.root.namelist())
-+        return filter(self._is_child, subs)
-+
-+    def __str__(self):
-+        return posixpath.join(self.root.filename, self.at)
-+
-+    def __repr__(self):
-+        return self.__repr.format(self=self)
-+
-+    def joinpath(self, add):
-+        next = posixpath.join(self.at, _pathlib_compat(add))
-+        return self._next(self.root.resolve_dir(next))
-+
-+    __truediv__ = joinpath
-+
-+    @property
-+    def parent(self):
-+        parent_at = posixpath.dirname(self.at.rstrip('/'))
-+        if parent_at:
-+            parent_at += '/'
-+        return self._next(parent_at)
-+
-+    if sys.version_info < (3,):
-+        __div__ = __truediv__
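
To complement the doctest above, a small sketch showing implied directories
being resolved on traversal (assuming the vendored ``zipp.py`` is importable as
``zipp``)::

    import io
    import zipfile
    import zipp

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        zf.writestr('pkg/mod.py', 'print("hi")')

    root = zipp.Path(zipfile.ZipFile(buf))
    mod = root / 'pkg' / 'mod.py'   # joinpath via __truediv__
    print(mod.name)                 # mod.py
    print(mod.parent.is_dir())      # True -- 'pkg/' exists only as an implied dir
    print(mod.read_text())          # print("hi")
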

+ 5639 - 0
mozilla-release/patches/1602773-2no3-PARTIAL-75a1.patch

@@ -0,0 +1,5639 @@
+# HG changeset patch
+# User Alessio Placitelli <alessio.placitelli@gmail.com>
+# Date 1583163245 0
+# Node ID de7b6ed631c807e831743b64bd33fcbfc6811f4f
+# Parent  19ef6982286ee63b7f85a279c6aa06ece444b765
+Bug 1602773 - Vendor glean_parser and its dependencies. r=ahal CLOSED TREE
+
+Differential Revision: https://phabricator.services.mozilla.com/D64313
+
+diff --git a/build/virtualenv_packages.txt b/build/virtualenv_packages.txt
+--- a/build/virtualenv_packages.txt
++++ b/build/virtualenv_packages.txt
+@@ -1,16 +1,17 @@
+ mozilla.pth:python/mach
+ mozilla.pth:python/mozboot
+ mozilla.pth:python/mozbuild
+ mozilla.pth:python/mozlint
+ mozilla.pth:python/mozperftest
+ mozilla.pth:python/mozrelease
+ mozilla.pth:python/mozterm
+ mozilla.pth:python/mozversioncontrol
++mozilla.pth:third_party/python/appdirs
+ mozilla.pth:third_party/python/atomicwrites
+ mozilla.pth:third_party/python/attrs/src
+ python2:mozilla.pth:third_party/python/backports
+ mozilla.pth:third_party/python/biplist
+ mozilla.pth:third_party/python/blessings
+ mozilla.pth:third_party/python/Click
+ mozilla.pth:third_party/python/compare-locales
+ mozilla.pth:third_party/python/configobj
+@@ -22,16 +23,17 @@ mozilla.pth:third_party/python/ecdsa/src
+ python2:mozilla.pth:third_party/python/enum34
+ mozilla.pth:third_party/python/fluent.migrate
+ mozilla.pth:third_party/python/fluent.syntax
+ mozilla.pth:third_party/python/funcsigs
+ python2:mozilla.pth:third_party/python/futures
+ mozilla.pth:third_party/python/more-itertools
+ mozilla.pth:third_party/python/packaging
+ mozilla.pth:third_party/python/pathlib2
++mozilla.pth:third_party/python/pathspec
+ mozilla.pth:third_party/python/gyp/pylib
+ mozilla.pth:third_party/python/python-hglib
+ mozilla.pth:third_party/python/pluggy
+ mozilla.pth:third_party/python/jsmin
+ !windows:optional:setup.py:third_party/python/psutil:build_ext:--inplace
+ !windows:mozilla.pth:third_party/python/psutil
+ windows:mozilla.pth:third_party/python/psutil-cp27-none-win_amd64
+ mozilla.pth:third_party/python/pylru
+@@ -46,16 +48,17 @@ mozilla.pth:third_party/python/slugid
+ mozilla.pth:third_party/python/py
+ mozilla.pth:third_party/python/pytest/src
+ mozilla.pth:third_party/python/pytoml
+ mozilla.pth:third_party/python/redo
+ mozilla.pth:third_party/python/responses
+ mozilla.pth:third_party/python/six
+ mozilla.pth:third_party/python/voluptuous
+ mozilla.pth:third_party/python/json-e
++mozilla.pth:third_party/python/yamllint
+ mozilla.pth:build
+ mozilla.pth:build/pymake
+ mozilla.pth:config
+ mozilla.pth:config/mozunit
+ mozilla.pth:dom/bindings
+ mozilla.pth:dom/bindings/parser
+ mozilla.pth:layout/tools/reftest
+ mozilla.pth:third_party/python/ply
+diff --git a/third_party/python/appdirs/appdirs.py b/third_party/python/appdirs/appdirs.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/appdirs/appdirs.py
+@@ -0,0 +1,608 @@
++#!/usr/bin/env python
++# -*- coding: utf-8 -*-
++# Copyright (c) 2005-2010 ActiveState Software Inc.
++# Copyright (c) 2013 Eddy Petrișor
++
++"""Utilities for determining application-specific dirs.
++
++See <http://github.com/ActiveState/appdirs> for details and usage.
++"""
++# Dev Notes:
++# - MSDN on where to store app data files:
++#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
++# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
++# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
++
++__version_info__ = (1, 4, 3)
++__version__ = '.'.join(map(str, __version_info__))
++
++
++import sys
++import os
++
++PY3 = sys.version_info[0] == 3
++
++if PY3:
++    unicode = str
++
++if sys.platform.startswith('java'):
++    import platform
++    os_name = platform.java_ver()[3][0]
++    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
++        system = 'win32'
++    elif os_name.startswith('Mac'): # "Mac OS X", etc.
++        system = 'darwin'
++    else: # "Linux", "SunOS", "FreeBSD", etc.
++        # Setting this to "linux2" is not ideal, but only Windows or Mac
++        # are actually checked for and the rest of the module expects
++        # *sys.platform* style strings.
++        system = 'linux2'
++else:
++    system = sys.platform
++
++
++
++def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
++    r"""Return full path to the user-specific data dir for this application.
++
++        "appname" is the name of application.
++            If None, just the system directory is returned.
++        "appauthor" (only used on Windows) is the name of the
++            appauthor or distributing body for this application. Typically
++            it is the owning company name. This falls back to appname. You may
++            pass False to disable it.
++        "version" is an optional version path element to append to the
++            path. You might want to use this if you want multiple versions
++            of your app to be able to run independently. If used, this
++            would typically be "<major>.<minor>".
++            Only applied when appname is present.
++        "roaming" (boolean, default False) can be set True to use the Windows
++            roaming appdata directory. That means that for users on a Windows
++            network setup for roaming profiles, this user data will be
++            sync'd on login. See
++            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
++            for a discussion of issues.
++
++    Typical user data directories are:
++        Mac OS X:               ~/Library/Application Support/<AppName>
++        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
++        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
++        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
++        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
++        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
++
++    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
++    That means, by default "~/.local/share/<AppName>".
++    """
++    if system == "win32":
++        if appauthor is None:
++            appauthor = appname
++        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
++        path = os.path.normpath(_get_win_folder(const))
++        if appname:
++            if appauthor is not False:
++                path = os.path.join(path, appauthor, appname)
++            else:
++                path = os.path.join(path, appname)
++    elif system == 'darwin':
++        path = os.path.expanduser('~/Library/Application Support/')
++        if appname:
++            path = os.path.join(path, appname)
++    else:
++        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
++        if appname:
++            path = os.path.join(path, appname)
++    if appname and version:
++        path = os.path.join(path, version)
++    return path
++
++
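
For orientation between the per-platform tables above, a hedged usage sketch
(the exact path depends on OS and environment; the Linux value assumes XDG
defaults)::

    import appdirs

    # On Linux with XDG defaults: ~/.local/share/MyApp/1.0
    # On Windows 7+: C:\Users\<user>\AppData\Local\MyCompany\MyApp\1.0
    print(appdirs.user_data_dir('MyApp', 'MyCompany', version='1.0'))
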
++def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
++    r"""Return full path to the user-shared data dir for this application.
++
++        "appname" is the name of application.
++            If None, just the system directory is returned.
++        "appauthor" (only used on Windows) is the name of the
++            appauthor or distributing body for this application. Typically
++            it is the owning company name. This falls back to appname. You may
++            pass False to disable it.
++        "version" is an optional version path element to append to the
++            path. You might want to use this if you want multiple versions
++            of your app to be able to run independently. If used, this
++            would typically be "<major>.<minor>".
++            Only applied when appname is present.
++        "multipath" is an optional parameter only applicable to *nix
++            which indicates that the entire list of data dirs should be
++            returned. By default, the first item from XDG_DATA_DIRS is
++            returned, or '/usr/local/share/<AppName>',
++            if XDG_DATA_DIRS is not set
++
++    Typical site data directories are:
++        Mac OS X:   /Library/Application Support/<AppName>
++        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
++        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
++        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
++        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.
++
++    For Unix, this is using the $XDG_DATA_DIRS[0] default.
++
++    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
++    """
++    if system == "win32":
++        if appauthor is None:
++            appauthor = appname
++        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
++        if appname:
++            if appauthor is not False:
++                path = os.path.join(path, appauthor, appname)
++            else:
++                path = os.path.join(path, appname)
++    elif system == 'darwin':
++        path = os.path.expanduser('/Library/Application Support')
++        if appname:
++            path = os.path.join(path, appname)
++    else:
++        # XDG default for $XDG_DATA_DIRS
++        # only first, if multipath is False
++        path = os.getenv('XDG_DATA_DIRS',
++                         os.pathsep.join(['/usr/local/share', '/usr/share']))
++        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
++        if appname:
++            if version:
++                appname = os.path.join(appname, version)
++            pathlist = [os.sep.join([x, appname]) for x in pathlist]
++
++        if multipath:
++            path = os.pathsep.join(pathlist)
++        else:
++            path = pathlist[0]
++        return path
++
++    if appname and version:
++        path = os.path.join(path, version)
++    return path
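++
++# For example (illustrative): on a typical Linux system with XDG_DATA_DIRS
++# unset, site_data_dir("MyApp", multipath=True) returns
++# '/usr/local/share/MyApp:/usr/share/MyApp'.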
++
++
++def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
++    r"""Return full path to the user-specific config dir for this application.
++
++        "appname" is the name of application.
++            If None, just the system directory is returned.
++        "appauthor" (only used on Windows) is the name of the
++            appauthor or distributing body for this application. Typically
++            it is the owning company name. This falls back to appname. You may
++            pass False to disable it.
++        "version" is an optional version path element to append to the
++            path. You might want to use this if you want multiple versions
++            of your app to be able to run independently. If used, this
++            would typically be "<major>.<minor>".
++            Only applied when appname is present.
++        "roaming" (boolean, default False) can be set True to use the Windows
++            roaming appdata directory. That means that for users on a Windows
++            network setup for roaming profiles, this user data will be
++            sync'd on login. See
++            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
++            for a discussion of issues.
++
++    Typical user config directories are:
++        Mac OS X:               same as user_data_dir
++        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
++        Win *:                  same as user_data_dir
++
++    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
++    That means, by default "~/.config/<AppName>".
++    """
++    if system in ["win32", "darwin"]:
++        path = user_data_dir(appname, appauthor, None, roaming)
++    else:
++        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
++        if appname:
++            path = os.path.join(path, appname)
++    if appname and version:
++        path = os.path.join(path, version)
++    return path
++
++
++def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
++    r"""Return full path to the user-shared data dir for this application.
++
++        "appname" is the name of application.
++            If None, just the system directory is returned.
++        "appauthor" (only used on Windows) is the name of the
++            appauthor or distributing body for this application. Typically
++            it is the owning company name. This falls back to appname. You may
++            pass False to disable it.
++        "version" is an optional version path element to append to the
++            path. You might want to use this if you want multiple versions
++            of your app to be able to run independently. If used, this
++            would typically be "<major>.<minor>".
++            Only applied when appname is present.
++        "multipath" is an optional parameter only applicable to *nix
++            which indicates that the entire list of config dirs should be
++            returned. By default, the first item from XDG_CONFIG_DIRS is
++            returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
++
++    Typical site config directories are:
++        Mac OS X:   same as site_data_dir
++        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
++                    $XDG_CONFIG_DIRS
++        Win *:      same as site_data_dir
++        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
++
++    For Unix, this uses the first entry of $XDG_CONFIG_DIRS by default, if multipath=False
++
++    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
++    """
++    if system in ["win32", "darwin"]:
++        path = site_data_dir(appname, appauthor)
++        if appname and version:
++            path = os.path.join(path, version)
++    else:
++        # XDG default for $XDG_CONFIG_DIRS
++        # only first, if multipath is False
++        path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
++        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
++        if appname:
++            if version:
++                appname = os.path.join(appname, version)
++            pathlist = [os.sep.join([x, appname]) for x in pathlist]
++
++        if multipath:
++            path = os.pathsep.join(pathlist)
++        else:
++            path = pathlist[0]
++    return path
++
++
++def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
++    r"""Return full path to the user-specific cache dir for this application.
++
++        "appname" is the name of application.
++            If None, just the system directory is returned.
++        "appauthor" (only used on Windows) is the name of the
++            appauthor or distributing body for this application. Typically
++            it is the owning company name. This falls back to appname. You may
++            pass False to disable it.
++        "version" is an optional version path element to append to the
++            path. You might want to use this if you want multiple versions
++            of your app to be able to run independently. If used, this
++            would typically be "<major>.<minor>".
++            Only applied when appname is present.
++        "opinion" (boolean) can be False to disable the appending of
++            "Cache" to the base app data dir for Windows. See
++            discussion below.
++
++    Typical user cache directories are:
++        Mac OS X:   ~/Library/Caches/<AppName>
++        Unix:       ~/.cache/<AppName> (XDG default)
++        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
++        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
++
++    On Windows the only suggestion in the MSDN docs is that local settings go in
++    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
++    app data dir (the default returned by `user_data_dir` above). Apps typically
++    put cache data somewhere *under* the given dir here. Some examples:
++        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
++        ...\Acme\SuperApp\Cache\1.0
++    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
++    This can be disabled with the `opinion=False` option.
++    """
++    if system == "win32":
++        if appauthor is None:
++            appauthor = appname
++        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
++        if appname:
++            if appauthor is not False:
++                path = os.path.join(path, appauthor, appname)
++            else:
++                path = os.path.join(path, appname)
++            if opinion:
++                path = os.path.join(path, "Cache")
++    elif system == 'darwin':
++        path = os.path.expanduser('~/Library/Caches')
++        if appname:
++            path = os.path.join(path, appname)
++    else:
++        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
++        if appname:
++            path = os.path.join(path, appname)
++    if appname and version:
++        path = os.path.join(path, version)
++    return path
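++
++# For example (illustrative): user_cache_dir("MyApp", "MyCompany") resolves to
++# '~/.cache/MyApp' (expanded) on Linux, and to
++# 'C:\\Users\\<username>\\AppData\\Local\\MyCompany\\MyApp\\Cache' on Windows;
++# pass opinion=False to drop the trailing "Cache" component.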
++
++
++def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
++    r"""Return full path to the user-specific state dir for this application.
++
++        "appname" is the name of application.
++            If None, just the system directory is returned.
++        "appauthor" (only used on Windows) is the name of the
++            appauthor or distributing body for this application. Typically
++            it is the owning company name. This falls back to appname. You may
++            pass False to disable it.
++        "version" is an optional version path element to append to the
++            path. You might want to use this if you want multiple versions
++            of your app to be able to run independently. If used, this
++            would typically be "<major>.<minor>".
++            Only applied when appname is present.
++        "roaming" (boolean, default False) can be set True to use the Windows
++            roaming appdata directory. That means that for users on a Windows
++            network setup for roaming profiles, this user data will be
++            sync'd on login. See
++            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
++            for a discussion of issues.
++
++    Typical user state directories are:
++        Mac OS X:  same as user_data_dir
++        Unix:      ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
++        Win *:     same as user_data_dir
++
++    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
++    to extend the XDG spec and support $XDG_STATE_HOME.
++
++    That means, by default "~/.local/state/<AppName>".
++    """
++    if system in ["win32", "darwin"]:
++        path = user_data_dir(appname, appauthor, None, roaming)
++    else:
++        path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
++        if appname:
++            path = os.path.join(path, appname)
++    if appname and version:
++        path = os.path.join(path, version)
++    return path
++
++
++def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
++    r"""Return full path to the user-specific log dir for this application.
++
++        "appname" is the name of application.
++            If None, just the system directory is returned.
++        "appauthor" (only used on Windows) is the name of the
++            appauthor or distributing body for this application. Typically
++            it is the owning company name. This falls back to appname. You may
++            pass False to disable it.
++        "version" is an optional version path element to append to the
++            path. You might want to use this if you want multiple versions
++            of your app to be able to run independently. If used, this
++            would typically be "<major>.<minor>".
++            Only applied when appname is present.
++        "opinion" (boolean) can be False to disable the appending of
++            "Logs" to the base app data dir for Windows, and "log" to the
++            base cache dir for Unix. See discussion below.
++
++    Typical user log directories are:
++        Mac OS X:   ~/Library/Logs/<AppName>
++        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
++        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
++        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
++
++    On Windows the only suggestion in the MSDN docs is that local settings
++    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
++    examples of what some Windows apps use for a logs dir.)
++
++    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
++    value for Windows and appends "log" to the user cache dir for Unix.
++    This can be disabled with the `opinion=False` option.
++    """
++    if system == "darwin":
++        path = os.path.join(
++            os.path.expanduser('~/Library/Logs'),
++            appname)
++    elif system == "win32":
++        path = user_data_dir(appname, appauthor, version)
++        version = False
++        if opinion:
++            path = os.path.join(path, "Logs")
++    else:
++        path = user_cache_dir(appname, appauthor, version)
++        version = False
++        if opinion:
++            path = os.path.join(path, "log")
++    if appname and version:
++        path = os.path.join(path, version)
++    return path
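++
++# For example (illustrative): user_log_dir("MyApp", "MyCompany", version="1.0")
++# resolves to '~/.cache/MyApp/1.0/log' (expanded) on Linux. Note that *version*
++# is consumed by the user_cache_dir() call above and then disabled, so it is
++# not appended twice.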
++
++
++class AppDirs(object):
++    """Convenience wrapper for getting application dirs."""
++    def __init__(self, appname=None, appauthor=None, version=None,
++            roaming=False, multipath=False):
++        self.appname = appname
++        self.appauthor = appauthor
++        self.version = version
++        self.roaming = roaming
++        self.multipath = multipath
++
++    @property
++    def user_data_dir(self):
++        return user_data_dir(self.appname, self.appauthor,
++                             version=self.version, roaming=self.roaming)
++
++    @property
++    def site_data_dir(self):
++        return site_data_dir(self.appname, self.appauthor,
++                             version=self.version, multipath=self.multipath)
++
++    @property
++    def user_config_dir(self):
++        return user_config_dir(self.appname, self.appauthor,
++                               version=self.version, roaming=self.roaming)
++
++    @property
++    def site_config_dir(self):
++        return site_config_dir(self.appname, self.appauthor,
++                             version=self.version, multipath=self.multipath)
++
++    @property
++    def user_cache_dir(self):
++        return user_cache_dir(self.appname, self.appauthor,
++                              version=self.version)
++
++    @property
++    def user_state_dir(self):
++        return user_state_dir(self.appname, self.appauthor,
++                              version=self.version)
++
++    @property
++    def user_log_dir(self):
++        return user_log_dir(self.appname, self.appauthor,
++                            version=self.version)
++
++
++#---- internal support stuff
++
++def _get_win_folder_from_registry(csidl_name):
++    """This is a fallback technique at best. I'm not sure if using the
++    registry for this guarantees us the correct answer for all CSIDL_*
++    names.
++    """
++    if PY3:
++        import winreg as _winreg
++    else:
++        import _winreg
++
++    shell_folder_name = {
++        "CSIDL_APPDATA": "AppData",
++        "CSIDL_COMMON_APPDATA": "Common AppData",
++        "CSIDL_LOCAL_APPDATA": "Local AppData",
++    }[csidl_name]
++
++    key = _winreg.OpenKey(
++        _winreg.HKEY_CURRENT_USER,
++        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
++    )
++    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
++    return dir
++
++
++def _get_win_folder_with_pywin32(csidl_name):
++    from win32com.shell import shellcon, shell
++    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
++    # Try to make this a unicode path because SHGetFolderPath does
++    # not return unicode strings when there is unicode data in the
++    # path.
++    try:
++        dir = unicode(dir)
++
++        # Downgrade to short path name if it has high-bit chars. See
++        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
++        has_high_char = False
++        for c in dir:
++            if ord(c) > 255:
++                has_high_char = True
++                break
++        if has_high_char:
++            try:
++                import win32api
++                dir = win32api.GetShortPathName(dir)
++            except ImportError:
++                pass
++    except UnicodeError:
++        pass
++    return dir
++
++
++def _get_win_folder_with_ctypes(csidl_name):
++    import ctypes
++
++    csidl_const = {
++        "CSIDL_APPDATA": 26,
++        "CSIDL_COMMON_APPDATA": 35,
++        "CSIDL_LOCAL_APPDATA": 28,
++    }[csidl_name]
++
++    buf = ctypes.create_unicode_buffer(1024)
++    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
++
++    # Downgrade to short path name if it has high-bit chars. See
++    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
++    has_high_char = False
++    for c in buf:
++        if ord(c) > 255:
++            has_high_char = True
++            break
++    if has_high_char:
++        buf2 = ctypes.create_unicode_buffer(1024)
++        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
++            buf = buf2
++
++    return buf.value
++
++def _get_win_folder_with_jna(csidl_name):
++    import array
++    from com.sun import jna
++    from com.sun.jna.platform import win32
++
++    buf_size = win32.WinDef.MAX_PATH * 2
++    buf = array.zeros('c', buf_size)
++    shell = win32.Shell32.INSTANCE
++    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
++    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
++
++    # Downgrade to short path name if it has high-bit chars. See
++    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
++    has_high_char = False
++    for c in dir:
++        if ord(c) > 255:
++            has_high_char = True
++            break
++    if has_high_char:
++        buf = array.zeros('c', buf_size)
++        kernel = win32.Kernel32.INSTANCE
++        if kernel.GetShortPathName(dir, buf, buf_size):
++            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
++
++    return dir
++
++if system == "win32":
++    try:
++        import win32com.shell
++        _get_win_folder = _get_win_folder_with_pywin32
++    except ImportError:
++        try:
++            from ctypes import windll
++            _get_win_folder = _get_win_folder_with_ctypes
++        except ImportError:
++            try:
++                import com.sun.jna
++                _get_win_folder = _get_win_folder_with_jna
++            except ImportError:
++                _get_win_folder = _get_win_folder_from_registry
++
++
++#---- self test code
++
++if __name__ == "__main__":
++    appname = "MyApp"
++    appauthor = "MyCompany"
++
++    props = ("user_data_dir",
++             "user_config_dir",
++             "user_cache_dir",
++             "user_state_dir",
++             "user_log_dir",
++             "site_data_dir",
++             "site_config_dir")
++
++    print("-- app dirs %s --" % __version__)
++
++    print("-- app dirs (with optional 'version')")
++    dirs = AppDirs(appname, appauthor, version="1.0")
++    for prop in props:
++        print("%s: %s" % (prop, getattr(dirs, prop)))
++
++    print("\n-- app dirs (without optional 'version')")
++    dirs = AppDirs(appname, appauthor)
++    for prop in props:
++        print("%s: %s" % (prop, getattr(dirs, prop)))
++
++    print("\n-- app dirs (without optional 'appauthor')")
++    dirs = AppDirs(appname)
++    for prop in props:
++        print("%s: %s" % (prop, getattr(dirs, prop)))
++
++    print("\n-- app dirs (with disabled 'appauthor')")
++    dirs = AppDirs(appname, appauthor=False)
++    for prop in props:
++        print("%s: %s" % (prop, getattr(dirs, prop)))
+diff --git a/third_party/python/pathspec/pathspec/__init__.py b/third_party/python/pathspec/pathspec/__init__.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/pathspec/__init__.py
+@@ -0,0 +1,66 @@
++# encoding: utf-8
++"""
++The *pathspec* package provides pattern matching for file paths. So far
++this only includes Git's wildmatch pattern matching (the style used for
++".gitignore" files).
++
++The following classes are imported and made available from the root of
++the `pathspec` package:
++
++- :class:`pathspec.pathspec.PathSpec`
++
++- :class:`pathspec.pattern.Pattern`
++
++- :class:`pathspec.pattern.RegexPattern`
++
++- :class:`pathspec.util.RecursionError`
++
++The following functions are also imported:
++
++- :func:`pathspec.util.iter_tree`
++- :func:`pathspec.util.lookup_pattern`
++- :func:`pathspec.util.match_files`
++"""
++from __future__ import unicode_literals
++
++__author__ = "Caleb P. Burns"
++__copyright__ = "Copyright © 2013-2018 Caleb P. Burns"
++__created__ = "2013-10-12"
++__credits__ = [
++	"dahlia <https://github.com/dahlia>",
++	"highb <https://github.com/highb>",
++	"029xue <https://github.com/029xue>",
++	"mikexstudios <https://github.com/mikexstudios>",
++	"nhumrich <https://github.com/nhumrich>",
++	"davidfraser <https://github.com/davidfraser>",
++	"demurgos <https://github.com/demurgos>",
++	"ghickman <https://github.com/ghickman>",
++	"nvie <https://github.com/nvie>",
++	"adrienverge <https://github.com/adrienverge>",
++	"AndersBlomdell <https://github.com/AndersBlomdell>",
++	"thmxv <https://github.com/thmxv>",
++	"wimglenn <https://github.com/wimglenn>",
++	"hugovk <https://github.com/hugovk>",
++	"dcecile <https://github.com/dcecile>",
++	"mroutis <https://github.com/mroutis>",
++	"jdufresne <https://github.com/jdufresne>",
++	"groodt <https://github.com/groodt>",
++]
++__email__ = "cpburnz@gmail.com"
++__license__ = "MPL 2.0"
++__project__ = "pathspec"
++__status__ = "Development"
++__updated__ = "2019-12-27"
++__version__ = "0.7.0"
++
++from .pathspec import PathSpec
++from .pattern import Pattern, RegexPattern
++from .util import iter_tree, lookup_pattern, match_files, RecursionError
++
++# Load pattern implementations.
++from . import patterns
++
++# Expose `GitIgnorePattern` class in the root module for backward
++# compatibility with v0.4.
++from .patterns.gitwildmatch import GitIgnorePattern
+diff --git a/third_party/python/pathspec/pathspec/compat.py b/third_party/python/pathspec/pathspec/compat.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/pathspec/compat.py
+@@ -0,0 +1,36 @@
++# encoding: utf-8
++"""
++This module provides compatibility between Python 2 and 3. This project
++needs so little of it that including `six`_ would not be justified.
++
++.. _`six`: http://pythonhosted.org/six
++"""
++
++import sys
++
++if sys.version_info[0] < 3:
++	# Python 2.
++	unicode = unicode
++	string_types = (basestring,)
++
++	from itertools import izip_longest
++
++	def iterkeys(mapping):
++		return mapping.iterkeys()
++
++else:
++	# Python 3.
++	unicode = str
++	string_types = (unicode,)
++
++	from itertools import zip_longest as izip_longest
++
++	def iterkeys(mapping):
++		return mapping.keys()
++
++try:
++	# Python 3.6+.
++	from collections.abc import Collection as collection_type
++except ImportError:
++	# Python 2.7 - 3.5.
++	from collections import Container as collection_type
+diff --git a/third_party/python/pathspec/pathspec/pathspec.py b/third_party/python/pathspec/pathspec/pathspec.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/pathspec/pathspec.py
+@@ -0,0 +1,137 @@
++# encoding: utf-8
++"""
++This module provides an object-oriented interface for pattern matching
++of files.
++"""
++
++from . import util
++from .compat import collection_type, iterkeys, izip_longest, string_types, unicode
++
++
++class PathSpec(object):
++	"""
++	The :class:`PathSpec` class is a wrapper around a list of compiled
++	:class:`.Pattern` instances.
++	"""
++
++	def __init__(self, patterns):
++		"""
++		Initializes the :class:`PathSpec` instance.
++
++		*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)
++		yields each compiled pattern (:class:`.Pattern`).
++		"""
++
++		self.patterns = patterns if isinstance(patterns, collection_type) else list(patterns)
++		"""
++		*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)
++		contains the compiled patterns.
++		"""
++
++	def __eq__(self, other):
++		"""
++		Tests the equality of this path-spec with *other* (:class:`PathSpec`)
++		by comparing their :attr:`~PathSpec.patterns` attributes.
++		"""
++		if isinstance(other, PathSpec):
++			paired_patterns = izip_longest(self.patterns, other.patterns)
++			return all(a == b for a, b in paired_patterns)
++		else:
++			return NotImplemented
++
++	def __len__(self):
++		"""
++		Returns the number of compiled patterns this path-spec contains
++		(:class:`int`).
++		"""
++		return len(self.patterns)
++
++	@classmethod
++	def from_lines(cls, pattern_factory, lines):
++		"""
++		Compiles the pattern lines.
++
++		*pattern_factory* can be either the name of a registered pattern
++		factory (:class:`str`), or a :class:`~collections.abc.Callable` used
++		to compile patterns. It must accept an uncompiled pattern (:class:`str`)
++		and return the compiled pattern (:class:`.Pattern`).
++
++		*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
++		pattern (:class:`str`). This simply has to yield each line so it can
++		be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
++		or the result from :meth:`str.splitlines`.
++
++		Returns the :class:`PathSpec` instance.
++		"""
++		if isinstance(pattern_factory, string_types):
++			pattern_factory = util.lookup_pattern(pattern_factory)
++		if not callable(pattern_factory):
++			raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
++
++		if isinstance(lines, (bytes, unicode)):
++			raise TypeError("lines:{!r} is not an iterable.".format(lines))
++
++		lines = [pattern_factory(line) for line in lines if line]
++		return cls(lines)
++
++	def match_file(self, file, separators=None):
++		"""
++		Matches the file to this path-spec.
++
++		*file* (:class:`str`) is the file path to be matched against
++		:attr:`self.patterns <PathSpec.patterns>`.
++
++		*separators* (:class:`~collections.abc.Collection` of :class:`str`)
++		optionally contains the path separators to normalize. See
++		:func:`~pathspec.util.normalize_file` for more information.
++
++		Returns :data:`True` if *file* matched; otherwise, :data:`False`.
++		"""
++		norm_file = util.normalize_file(file, separators=separators)
++		return util.match_file(self.patterns, norm_file)
++
++	def match_files(self, files, separators=None):
++		"""
++		Matches the files to this path-spec.
++
++		*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
++		the file paths to be matched against :attr:`self.patterns
++		<PathSpec.patterns>`.
++
++		*separators* (:class:`~collections.abc.Collection` of :class:`str`;
++		or :data:`None`) optionally contains the path separators to
++		normalize. See :func:`~pathspec.util.normalize_file` for more
++		information.
++
++		Returns the matched files (:class:`~collections.abc.Iterable` of
++		:class:`str`).
++		"""
++		if isinstance(files, (bytes, unicode)):
++			raise TypeError("files:{!r} is not an iterable.".format(files))
++
++		file_map = util.normalize_files(files, separators=separators)
++		matched_files = util.match_files(self.patterns, iterkeys(file_map))
++		for path in matched_files:
++			yield file_map[path]
++
++	def match_tree(self, root, on_error=None, follow_links=None):
++		"""
++		Walks the specified root path for all files and matches them to this
++		path-spec.
++
++		*root* (:class:`str`) is the root directory to search for files.
++
++		*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
++		optionally is the error handler for file-system exceptions. See
++		:func:`~pathspec.util.iter_tree` for more information.
++
++		*follow_links* (:class:`bool` or :data:`None`) optionally is whether
++		to walk symbolic links that resolve to directories. See
++		:func:`~pathspec.util.iter_tree` for more information.
++
++		Returns the matched files (:class:`~collections.abc.Iterable` of
++		:class:`str`).
++		"""
++		files = util.iter_tree(root, on_error=on_error, follow_links=follow_links)
++		return self.match_files(files)
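++
++# A minimal usage sketch (patterns and paths are illustrative); the
++# 'gitwildmatch' factory is registered when the `pathspec` package itself is
++# imported:
++#
++#   >>> import pathspec
++#   >>> spec = pathspec.PathSpec.from_lines('gitwildmatch', ['*.pyc', 'build/'])
++#   >>> spec.match_file('module.pyc')
++#   True
++#   >>> sorted(spec.match_files(['a.py', 'a.pyc', 'build/out.txt']))
++#   ['a.pyc', 'build/out.txt']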
+diff --git a/third_party/python/pathspec/pathspec/pattern.py b/third_party/python/pathspec/pathspec/pattern.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/pathspec/pattern.py
+@@ -0,0 +1,146 @@
++# encoding: utf-8
++"""
++This module provides the base definition for patterns.
++"""
++
++import re
++
++from .compat import unicode
++
++
++class Pattern(object):
++	"""
++	The :class:`Pattern` class is the abstract definition of a pattern.
++	"""
++
++	# Make the class dict-less.
++	__slots__ = ('include',)
++
++	def __init__(self, include):
++		"""
++		Initializes the :class:`Pattern` instance.
++
++		*include* (:class:`bool` or :data:`None`) is whether the matched
++		files should be included (:data:`True`), excluded (:data:`False`),
++		or is a null-operation (:data:`None`).
++		"""
++
++		self.include = include
++		"""
++		*include* (:class:`bool` or :data:`None`) is whether the matched
++		files should be included (:data:`True`), excluded (:data:`False`),
++		or is a null-operation (:data:`None`).
++		"""
++
++	def match(self, files):
++		"""
++		Matches this pattern against the specified files.
++
++		*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
++		each file relative to the root directory (e.g., ``"relative/path/to/file"``).
++
++		Returns an :class:`~collections.abc.Iterable` yielding each matched
++		file path (:class:`str`).
++		"""
++		raise NotImplementedError("{}.{} must override match().".format(self.__class__.__module__, self.__class__.__name__))
++
++
++class RegexPattern(Pattern):
++	"""
++	The :class:`RegexPattern` class is an implementation of a pattern
++	using regular expressions.
++	"""
++
++	# Make the class dict-less.
++	__slots__ = ('regex',)
++
++	def __init__(self, pattern, include=None):
++		"""
++		Initializes the :class:`RegexPattern` instance.
++
++		*pattern* (:class:`unicode`, :class:`bytes`, :class:`re.RegexObject`,
++		or :data:`None`) is the pattern to compile into a regular
++		expression.
++
++		*include* (:class:`bool` or :data:`None`) must be :data:`None`
++		unless *pattern* is a precompiled regular expression (:class:`re.RegexObject`)
++		in which case it is whether matched files should be included
++		(:data:`True`), excluded (:data:`False`), or is a null operation
++		(:data:`None`).
++
++			.. NOTE:: Subclasses do not need to support the *include*
++			   parameter.
++		"""
++
++		self.regex = None
++		"""
++		*regex* (:class:`re.RegexObject`) is the regular expression for the
++		pattern.
++		"""
++
++		if isinstance(pattern, (unicode, bytes)):
++			assert include is None, "include:{!r} must be null when pattern:{!r} is a string.".format(include, pattern)
++			regex, include = self.pattern_to_regex(pattern)
++			# NOTE: Make sure to allow a null regular expression to be
++			# returned for a null-operation.
++			if include is not None:
++				regex = re.compile(regex)
++
++		elif pattern is not None and hasattr(pattern, 'match'):
++			# Assume pattern is a precompiled regular expression.
++			# - NOTE: Use the specified *include*.
++			regex = pattern
++
++		elif pattern is None:
++			# NOTE: Make sure to allow a null pattern to be passed for a
++			# null-operation.
++			assert include is None, "include:{!r} must be null when pattern:{!r} is null.".format(include, pattern)
++			# Keep *regex* null as well so the assignment below is always bound.
++			regex = pattern
++
++		else:
++			raise TypeError("pattern:{!r} is not a string, RegexObject, or None.".format(pattern))
++
++		super(RegexPattern, self).__init__(include)
++		self.regex = regex
++
++	def __eq__(self, other):
++		"""
++		Tests the equality of this regex pattern with *other* (:class:`RegexPattern`)
++		by comparing their :attr:`~Pattern.include` and :attr:`~RegexPattern.regex`
++		attributes.
++		"""
++		if isinstance(other, RegexPattern):
++			return self.include == other.include and self.regex == other.regex
++		else:
++			return NotImplemented
++
++	def match(self, files):
++		"""
++		Matches this pattern against the specified files.
++
++		*files* (:class:`~collections.abc.Iterable` of :class:`str`)
++		contains each file relative to the root directory (e.g., "relative/path/to/file").
++
++		Returns an :class:`~collections.abc.Iterable` yielding each matched
++		file path (:class:`str`).
++		"""
++		if self.include is not None:
++			for path in files:
++				if self.regex.match(path) is not None:
++					yield path
++
++	@classmethod
++	def pattern_to_regex(cls, pattern):
++		"""
++		Convert the pattern into an uncompiled regular expression.
++
++		*pattern* (:class:`str`) is the pattern to convert into a regular
++		expression.
++
++		Returns the uncompiled regular expression (:class:`str` or :data:`None`),
++		and whether matched files should be included (:data:`True`),
++		excluded (:data:`False`), or is a null-operation (:data:`None`).
++
++			.. NOTE:: The default implementation simply returns *pattern* and
++			   :data:`True`.
++		"""
++		return pattern, True
+diff --git a/third_party/python/pathspec/pathspec/patterns/__init__.py b/third_party/python/pathspec/pathspec/patterns/__init__.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/pathspec/patterns/__init__.py
+@@ -0,0 +1,8 @@
++# encoding: utf-8
++"""
++The *pathspec.patterns* package contains the pattern matching
++implementations.
++"""
++
++# Load pattern implementations.
++from .gitwildmatch import GitWildMatchPattern
+diff --git a/third_party/python/pathspec/pathspec/patterns/gitwildmatch.py b/third_party/python/pathspec/pathspec/patterns/gitwildmatch.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/pathspec/patterns/gitwildmatch.py
+@@ -0,0 +1,330 @@
++# encoding: utf-8
++"""
++This module implements Git's wildmatch pattern matching which itself is
++derived from Rsync's wildmatch. Git uses wildmatch for its ".gitignore"
++files.
++"""
++from __future__ import unicode_literals
++
++import re
++import warnings
++
++from .. import util
++from ..compat import unicode
++from ..pattern import RegexPattern
++
++#: The encoding to use when parsing a byte string pattern.
++_BYTES_ENCODING = 'latin1'
++
++
++class GitWildMatchPattern(RegexPattern):
++	"""
++	The :class:`GitWildMatchPattern` class represents a compiled Git
++	wildmatch pattern.
++	"""
++
++	# Keep the dict-less class hierarchy.
++	__slots__ = ()
++
++	@classmethod
++	def pattern_to_regex(cls, pattern):
++		"""
++		Convert the pattern into a regular expression.
++
++		*pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
++		convert into a regular expression.
++
++		Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`,
++		or :data:`None`), and whether matched files should be included
++		(:data:`True`), excluded (:data:`False`), or if it is a
++		null-operation (:data:`None`).
++		"""
++		if isinstance(pattern, unicode):
++			return_type = unicode
++		elif isinstance(pattern, bytes):
++			return_type = bytes
++			pattern = pattern.decode(_BYTES_ENCODING)
++		else:
++			raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern))
++
++		pattern = pattern.strip()
++
++		if pattern.startswith('#'):
++			# A pattern starting with a hash ('#') serves as a comment
++			# (neither includes nor excludes files). Escape the hash with a
++			# back-slash to match a literal hash (i.e., '\#').
++			regex = None
++			include = None
++
++		elif pattern == '/':
++			# EDGE CASE: According to `git check-ignore` (v2.4.1), a single
++			# '/' does not match any file.
++			regex = None
++			include = None
++
++		elif pattern:
++
++			if pattern.startswith('!'):
++				# A pattern starting with an exclamation mark ('!') negates the
++				# pattern (exclude instead of include). Escape the exclamation
++				# mark with a back-slash to match a literal exclamation mark
++				# (i.e., '\!').
++				include = False
++				# Remove leading exclamation mark.
++				pattern = pattern[1:]
++			else:
++				include = True
++
++			if pattern.startswith('\\'):
++				# Remove leading back-slash escape for escaped hash ('#') or
++				# exclamation mark ('!').
++				pattern = pattern[1:]
++
++			# Split pattern into segments.
++			pattern_segs = pattern.split('/')
++
++			# Normalize pattern to make processing easier.
++
++			if not pattern_segs[0]:
++				# A pattern beginning with a slash ('/') will only match paths
++				# directly on the root directory instead of any descendant
++				# paths. So, remove empty first segment to make pattern relative
++				# to root.
++				del pattern_segs[0]
++
++			elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):
++				# A single pattern without a beginning slash ('/') will match
++				# any descendant path. This is equivalent to "**/{pattern}". So,
++				# prepend with double-asterisks to make pattern relative to
++				# root.
++				# EDGE CASE: This also holds for a single pattern with a
++				# trailing slash (e.g. dir/).
++				if pattern_segs[0] != '**':
++					pattern_segs.insert(0, '**')
++
++			else:
++				# EDGE CASE: A pattern without a beginning slash ('/') but
++				# contains at least one prepended directory (e.g.
++				# "dir/{pattern}") should not match "**/dir/{pattern}",
++				# according to `git check-ignore` (v2.4.1).
++				pass
++
++			if not pattern_segs[-1] and len(pattern_segs) > 1:
++				# A pattern ending with a slash ('/') will match all descendant
++				# paths if it is a directory but not if it is a regular file.
++				# This is equivalent to "{pattern}/**". So, set last segment to
++				# double asterisks to include all descendants.
++				pattern_segs[-1] = '**'
++
++			# Build regular expression from pattern.
++			output = ['^']
++			need_slash = False
++			end = len(pattern_segs) - 1
++			for i, seg in enumerate(pattern_segs):
++				if seg == '**':
++					if i == 0 and i == end:
++						# A pattern consisting solely of double-asterisks ('**')
++						# will match every path.
++						output.append('.+')
++					elif i == 0:
++						# A normalized pattern beginning with double-asterisks
++						# ('**') will match any leading path segments.
++						output.append('(?:.+/)?')
++						need_slash = False
++					elif i == end:
++						# A normalized pattern ending with double-asterisks ('**')
++						# will match any trailing path segments.
++						output.append('/.*')
++					else:
++						# A pattern with inner double-asterisks ('**') will match
++						# multiple (or zero) inner path segments.
++						output.append('(?:/.+)?')
++						need_slash = True
++				elif seg == '*':
++					# Match single path segment.
++					if need_slash:
++						output.append('/')
++					output.append('[^/]+')
++					need_slash = True
++				else:
++					# Match segment glob pattern.
++					if need_slash:
++						output.append('/')
++					output.append(cls._translate_segment_glob(seg))
++					if i == end and include is True:
++						# A pattern ending without a slash ('/') will match a file
++						# or a directory (with paths underneath it). E.g., "foo"
++						# matches "foo", "foo/bar", "foo/bar/baz", etc.
++						# EDGE CASE: However, this does not hold for exclusion cases
++						# according to `git check-ignore` (v2.4.1).
++						output.append('(?:/.*)?')
++					need_slash = True
++			output.append('$')
++			regex = ''.join(output)
++
++		else:
++			# A blank pattern is a null-operation (neither includes nor
++			# excludes files).
++			regex = None
++			include = None
++
++		if regex is not None and return_type is bytes:
++			regex = regex.encode(_BYTES_ENCODING)
++
++		return regex, include
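++
++	# For example (illustrative):
++	#
++	#   >>> GitWildMatchPattern.pattern_to_regex('*.pyc')
++	#   ('^(?:.+/)?[^/]*\\.pyc(?:/.*)?$', True)
++	#   >>> GitWildMatchPattern.pattern_to_regex('!build/')
++	#   ('^(?:.+/)?build/.*$', False)
++	#   >>> GitWildMatchPattern.pattern_to_regex('# comment')
++	#   (None, None)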
++
++	@staticmethod
++	def _translate_segment_glob(pattern):
++		"""
++		Translates the glob pattern to a regular expression. This is used in
++		the constructor to translate a path segment glob pattern to its
++		corresponding regular expression.
++
++		*pattern* (:class:`str`) is the glob pattern.
++
++		Returns the regular expression (:class:`str`).
++		"""
++		# NOTE: This is derived from `fnmatch.translate()` and is similar to
++		# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
++
++		escape = False
++		regex = ''
++		i, end = 0, len(pattern)
++		while i < end:
++			# Get next character.
++			char = pattern[i]
++			i += 1
++
++			if escape:
++				# Escape the character.
++				escape = False
++				regex += re.escape(char)
++
++			elif char == '\\':
++				# Escape character, escape next character.
++				escape = True
++
++			elif char == '*':
++				# Multi-character wildcard. Match any string (except slashes),
++				# including an empty string.
++				regex += '[^/]*'
++
++			elif char == '?':
++				# Single-character wildcard. Match any single character (except
++				# a slash).
++				regex += '[^/]'
++
++			elif char == '[':
++				# Bracket expression wildcard. Except for the beginning
++				# exclamation mark, the whole bracket expression can be used
++				# directly as regex but we have to find where the expression
++				# ends.
++				# - "[][!]" matches ']', '[' and '!'.
++				# - "[]-]" matches ']' and '-'.
++				# - "[!]a-]" matches any character except ']', 'a' and '-'.
++				j = i
++				# Pass the bracket expression negation.
++				if j < end and pattern[j] == '!':
++					j += 1
++				# Pass the first closing bracket if it is at the beginning of
++				# the expression.
++				if j < end and pattern[j] == ']':
++					j += 1
++				# Find the closing bracket. Stop once we reach the end or find it.
++				while j < end and pattern[j] != ']':
++					j += 1
++
++				if j < end:
++					# Found the end of the bracket expression. Increment j to be
++					# one past the closing bracket:
++					#
++					#  [...]
++					#   ^   ^
++					#   i   j
++					#
++					j += 1
++					expr = '['
++
++					if pattern[i] == '!':
++						# Bracket expression needs to be negated.
++						expr += '^'
++						i += 1
++					elif pattern[i] == '^':
++						# POSIX declares that the regex bracket expression negation
++						# "[^...]" is undefined in a glob pattern. Python's
++						# `fnmatch.translate()` escapes the caret ('^') as a
++						# literal. To maintain consistency with undefined behavior,
++						# I am escaping the '^' as well.
++						expr += '\\^'
++						i += 1
++
++					# Build regex bracket expression. Escape slashes so they are
++					# treated as literal slashes by regex as defined by POSIX.
++					expr += pattern[i:j].replace('\\', '\\\\')
++
++					# Add regex bracket expression to regex result.
++					regex += expr
++
++					# Set i to one past the closing bracket.
++					i = j
++
++				else:
++					# Failed to find the closing bracket, so treat the opening
++					# bracket as a literal instead of as an expression.
++					regex += '\\['
++
++			else:
++				# Regular character, escape it for regex.
++				regex += re.escape(char)
++
++		return regex
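++
++	# For example (illustrative): _translate_segment_glob('a[0-9]?*.txt')
++	# produces the regex a[0-9][^/][^/]*\.txt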
++
++	@staticmethod
++	def escape(s):
++		"""
++		Escape special characters in the given string.
++
++		*s* (:class:`unicode` or :class:`bytes`) a filename or a string
++		that you want to escape, usually before adding it to a `.gitignore`
++
++		Returns the escaped string (:class:`unicode`, :class:`bytes`)
++		"""
++		# Reference: https://git-scm.com/docs/gitignore#_pattern_format
++		meta_characters = r"[]!*#?"
++
++		return "".join("\\" + x if x in meta_characters else x for x in s)
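++
++	# For example (illustrative): GitWildMatchPattern.escape('file[1].txt')
++	# returns the string file\[1\].txt (each bracket escaped with a backslash).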
++
++util.register_pattern('gitwildmatch', GitWildMatchPattern)
++
++
++class GitIgnorePattern(GitWildMatchPattern):
++	"""
++	The :class:`GitIgnorePattern` class is deprecated by :class:`GitWildMatchPattern`.
++	This class only exists to maintain compatibility with v0.4.
++	"""
++
++	def __init__(self, *args, **kw):
++		"""
++		Warn about deprecation.
++		"""
++		self._deprecated()
++		return super(GitIgnorePattern, self).__init__(*args, **kw)
++
++	@staticmethod
++	def _deprecated():
++		"""
++		Warn about deprecation.
++		"""
++		warnings.warn("GitIgnorePattern ('gitignore') is deprecated. Use GitWildMatchPattern ('gitwildmatch') instead.", DeprecationWarning, stacklevel=3)
++
++	@classmethod
++	def pattern_to_regex(cls, *args, **kw):
++		"""
++		Warn about deprecation.
++		"""
++		cls._deprecated()
++		return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw)
++
++# Register `GitIgnorePattern` as "gitignore" for backward compatibility
++# with v0.4.
++util.register_pattern('gitignore', GitIgnorePattern)
+diff --git a/third_party/python/pathspec/pathspec/util.py b/third_party/python/pathspec/pathspec/util.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/pathspec/util.py
+@@ -0,0 +1,350 @@
++# encoding: utf-8
++"""
++This module provides utility methods for dealing with path-specs.
++"""
++
++import os
++import os.path
++import posixpath
++import stat
++
++from .compat import collection_type, string_types
++
++NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
++"""
++*NORMALIZE_PATH_SEPS* (:class:`list` of :class:`str`) contains the path
++separators that need to be normalized to the POSIX separator for the
++current operating system. The separators are determined by examining
++:data:`os.sep` and :data:`os.altsep`.
++"""
++
++_registered_patterns = {}
++"""
++*_registered_patterns* (``dict``) maps a name (``str``) to the
++registered pattern factory (``callable``).
++"""
++
++def iter_tree(root, on_error=None, follow_links=None):
++	"""
++	Walks the specified directory for all files.
++
++	*root* (:class:`str`) is the root directory to search for files.
++
++	*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
++	optionally is the error handler for file-system exceptions. It will be
++	called with the exception (:exc:`OSError`). Reraise the exception to
++	abort the walk. Default is :data:`None` to ignore file-system
++	exceptions.
++
++	*follow_links* (:class:`bool` or :data:`None`) optionally is whether
++	to walk symbolic links that resolve to directories. Default is
++	:data:`None` for :data:`True`.
++
++	Raises :exc:`RecursionError` if recursion is detected.
++
++	Returns an :class:`~collections.abc.Iterable` yielding the path to
++	each file (:class:`str`) relative to *root*.
++	"""
++	if on_error is not None and not callable(on_error):
++		raise TypeError("on_error:{!r} is not callable.".format(on_error))
++
++	if follow_links is None:
++		follow_links = True
++
++	for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
++		yield file_rel
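++
++# For example (illustrative): list(iter_tree('src')) might return
++# ['main.py', 'pkg/util.py'], with each path relative to 'src'.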
++
++def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
++	"""
++	Scan the directory for all descendant files.
++
++	*root_full* (:class:`str`) is the absolute path to the root directory.
++
++	*dir_rel* (:class:`str`) is the path to the directory to scan relative to
++	*root_full*.
++
++	*memo* (:class:`dict`) keeps track of ancestor directories
++	encountered. Maps each ancestor real path (:class:`str`) to relative
++	path (:class:`str`).
++
++	*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
++	optionally is the error handler for file-system exceptions.
++
++	*follow_links* (:class:`bool`) is whether to walk symbolic links that
++	resolve to directories.
++	"""
++	dir_full = os.path.join(root_full, dir_rel)
++	dir_real = os.path.realpath(dir_full)
++
++	# Remember each encountered ancestor directory and its canonical
++	# (real) path. If a canonical path is encountered more than once,
++	# recursion has occurred.
++	if dir_real not in memo:
++		memo[dir_real] = dir_rel
++	else:
++		raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
++
++	for node in os.listdir(dir_full):
++		node_rel = os.path.join(dir_rel, node)
++		node_full = os.path.join(root_full, node_rel)
++
++		# Inspect child node.
++		try:
++			node_stat = os.lstat(node_full)
++		except OSError as e:
++			if on_error is not None:
++				on_error(e)
++			continue
++
++		if stat.S_ISLNK(node_stat.st_mode):
++			# Child node is a link, inspect the target node.
++			is_link = True
++			try:
++				node_stat = os.stat(node_full)
++			except OSError as e:
++				if on_error is not None:
++					on_error(e)
++				continue
++		else:
++			is_link = False
++
++		if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
++			# Child node is a directory, recurse into it and yield its
++			# descendant files.
++			for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
++				yield file_rel
++
++		elif stat.S_ISREG(node_stat.st_mode):
++			# Child node is a file, yield it.
++			yield node_rel
++
++	# NOTE: Make sure to remove the canonical (real) path of the directory
++	# from the ancestors memo once we are done with it. This allows the
++	# same directory to appear multiple times. If this is not done, the
++	# second occurrence of the directory will be incorrectly interpreted as
++	# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
++	del memo[dir_real]
++
++def lookup_pattern(name):
++	Looks up a registered pattern factory by name.
++	Lookups a registered pattern factory by name.
++
++	*name* (:class:`str`) is the name of the pattern factory.
++
++	Returns the registered pattern factory (:class:`~collections.abc.Callable`).
++	If no pattern factory is registered, raises :exc:`KeyError`.
++	"""
++	return _registered_patterns[name]
++
++def match_file(patterns, file):
++	"""
++	Matches the file to the patterns.
++
++	*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
++	contains the patterns to use.
++
++	*file* (:class:`str`) is the normalized file path to be matched
++	against *patterns*.
++
++	Returns :data:`True` if *file* matched; otherwise, :data:`False`.
++	"""
++	matched = False
++	for pattern in patterns:
++		if pattern.include is not None:
++			if file in pattern.match((file,)):
++				matched = pattern.include
++	return matched
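++
++# Later patterns take precedence, mirroring ".gitignore" semantics. For
++# patterns compiled from ['*.py', '!keep.py'] (illustrative),
++# match_file(patterns, 'keep.py') returns False because the negated pattern
++# matches last.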
++
++def match_files(patterns, files):
++	"""
++	Matches the files to the patterns.
++
++	*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
++	contains the patterns to use.
++
++	*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
++	the normalized file paths to be matched against *patterns*.
++
++	Returns the matched files (:class:`set` of :class:`str`).
++	"""
++	all_files = files if isinstance(files, collection_type) else list(files)
++	return_files = set()
++	for pattern in patterns:
++		if pattern.include is not None:
++			result_files = pattern.match(all_files)
++			if pattern.include:
++				return_files.update(result_files)
++			else:
++				return_files.difference_update(result_files)
++	return return_files
++
++def normalize_file(file, separators=None):
++	"""
++	Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
++
++	*file* (:class:`str`) is the file path.
++
++	*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
++	:data:`None`) optionally contains the path separators to normalize.
++	This does not need to include the POSIX path separator (``'/'``), but
++	including it will not affect the results. Default is :data:`None` for
++	:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
++	container (e.g., an empty tuple ``()``).
++
++	Returns the normalized file path (:class:`str`).
++	"""
++	# Normalize path separators.
++	if separators is None:
++		separators = NORMALIZE_PATH_SEPS
++	norm_file = file
++	for sep in separators:
++		norm_file = norm_file.replace(sep, posixpath.sep)
++
++	# Remove current directory prefix.
++	if norm_file.startswith('./'):
++		norm_file = norm_file[2:]
++
++	return norm_file
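++
++# For example (illustrative): on Windows, where '\\' is in
++# NORMALIZE_PATH_SEPS, normalize_file('.\\foo\\bar.txt') returns
++# 'foo/bar.txt'.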
++
++def normalize_files(files, separators=None):
++	"""
++	Normalizes the file paths to use the POSIX path separator.
++
++	*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
++	the file paths to be normalized.
++
++	*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
++	:data:`None`) optionally contains the path separators to normalize.
++	See :func:`normalize_file` for more information.
++
++	Returns a :class:`dict` mapping each normalized file path (:class:`str`)
++	to the original file path (:class:`str`).
++	"""
++	norm_files = {}
++	for path in files:
++		norm_files[normalize_file(path, separators=separators)] = path
++	return norm_files
++
++def register_pattern(name, pattern_factory, override=None):
++	"""
++	Registers the specified pattern factory.
++
++	*name* (:class:`str`) is the name to register the pattern factory
++	under.
++
++	*pattern_factory* (:class:`~collections.abc.Callable`) is used to
++	compile patterns. It must accept an uncompiled pattern (:class:`str`)
++	and return the compiled pattern (:class:`.Pattern`).
++
++	*override* (:class:`bool` or :data:`None`) optionally is whether to
++	allow overriding an already registered pattern under the same name
++	(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
++	(:data:`False`). Default is :data:`None` for :data:`False`.
++	"""
++	if not isinstance(name, string_types):
++		raise TypeError("name:{!r} is not a string.".format(name))
++	if not callable(pattern_factory):
++		raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
++	if name in _registered_patterns and not override:
++		raise AlreadyRegisteredError(name, _registered_patterns[name])
++	_registered_patterns[name] = pattern_factory
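++
++# For example (illustrative): register_pattern('myregex', RegexPattern) makes
++# lookup_pattern('myregex'), and therefore PathSpec.from_lines('myregex', ...),
++# compile patterns with pathspec.pattern.RegexPattern.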
++
++
++class AlreadyRegisteredError(Exception):
++	"""
++	The :exc:`AlreadyRegisteredError` exception is raised when a pattern
++	factory is registered under a name already in use.
++	"""
++
++	def __init__(self, name, pattern_factory):
++		"""
++		Initializes the :exc:`AlreadyRegisteredError` instance.
++
++		*name* (:class:`str`) is the name of the registered pattern.
++
++		*pattern_factory* (:class:`~collections.abc.Callable`) is the
++		registered pattern factory.
++		"""
++		super(AlreadyRegisteredError, self).__init__(name, pattern_factory)
++
++	@property
++	def message(self):
++		"""
++		*message* (:class:`str`) is the error message.
++		"""
++		return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
++			name=self.name,
++			pattern_factory=self.pattern_factory,
++		)
++
++	@property
++	def name(self):
++		"""
++		*name* (:class:`str`) is the name of the registered pattern.
++		"""
++		return self.args[0]
++
++	@property
++	def pattern_factory(self):
++		"""
++		*pattern_factory* (:class:`~collections.abc.Callable`) is the
++		registered pattern factory.
++		"""
++		return self.args[1]
++
++
++class RecursionError(Exception):
++	"""
++	The :exc:`RecursionError` exception is raised when recursion is
++	detected.
++	"""
++
++	def __init__(self, real_path, first_path, second_path):
++		"""
++		Initializes the :exc:`RecursionError` instance.
++
++		*real_path* (:class:`str`) is the real path that recursion was
++		encountered on.
++
++		*first_path* (:class:`str`) is the first path encountered for
++		*real_path*.
++
++		*second_path* (:class:`str`) is the second path encountered for
++		*real_path*.
++		"""
++		super(RecursionError, self).__init__(real_path, first_path, second_path)
++
++	@property
++	def first_path(self):
++		"""
++		*first_path* (:class:`str`) is the first path encountered for
++		:attr:`self.real_path <RecursionError.real_path>`.
++		"""
++		return self.args[1]
++
++	@property
++	def message(self):
++		"""
++		*message* (:class:`str`) is the error message.
++		"""
++		return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
++			real=self.real_path,
++			first=self.first_path,
++			second=self.second_path,
++		)
++
++	@property
++	def real_path(self):
++		"""
++		*real_path* (:class:`str`) is the real path that recursion was
++		encountered on.
++		"""
++		return self.args[0]
++
++	@property
++	def second_path(self):
++		"""
++		*second_path* (:class:`str`) is the second path encountered for
++		:attr:`self.real_path <RecursionError.real_path>`.
++		"""
++		return self.args[2]
+diff --git a/third_party/python/yamllint/yamllint/__init__.py b/third_party/python/yamllint/yamllint/__init__.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/__init__.py
+@@ -0,0 +1,31 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""A linter for YAML files.
++
++yamllint checks not only for syntax validity, but also for weirdnesses like
++key repetition and cosmetic problems such as line length, trailing spaces,
++indentation, etc."""
++
++
++APP_NAME = 'yamllint'
++APP_VERSION = '1.20.0'
++APP_DESCRIPTION = __doc__
++
++__author__ = u'Adrien Vergé'
++__copyright__ = u'Copyright 2016, Adrien Vergé'
++__license__ = 'GPLv3'
++__version__ = APP_VERSION
+diff --git a/third_party/python/yamllint/yamllint/__main__.py b/third_party/python/yamllint/yamllint/__main__.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/__main__.py
+@@ -0,0 +1,4 @@
++from yamllint.cli import run
++
++if __name__ == '__main__':
++    run()
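++
++# This makes the package runnable as a module; a usage sketch:
++#
++#     python -m yamllint some-file.yaml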
+diff --git a/third_party/python/yamllint/yamllint/cli.py b/third_party/python/yamllint/yamllint/cli.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/cli.py
+@@ -0,0 +1,206 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++from __future__ import print_function
++
++import argparse
++import os
++import platform
++import sys
++
++from yamllint import APP_DESCRIPTION, APP_NAME, APP_VERSION
++from yamllint import linter
++from yamllint.config import YamlLintConfig, YamlLintConfigError
++from yamllint.linter import PROBLEM_LEVELS
++
++
++def find_files_recursively(items, conf):
++    for item in items:
++        if os.path.isdir(item):
++            for root, dirnames, filenames in os.walk(item):
++                for f in filenames:
++                    filepath = os.path.join(root, f)
++                    if conf.is_yaml_file(filepath):
++                        yield filepath
++        else:
++            yield item
++
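++# A usage sketch (assumes a config built from the bundled default preset):
++#
++#     conf = YamlLintConfig('extends: default')
++#     for path in find_files_recursively(['.'], conf):
++#         ...  # yields each *.yaml/*.yml/.yamllint file under the tree;
++#              # non-directory arguments are passed through unchanged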
++
++def supports_color():
++    supported_platform = not (platform.system() == 'Windows' and not
++                              ('ANSICON' in os.environ or
++                               ('TERM' in os.environ and
++                                os.environ['TERM'] == 'ANSI')))
++    return (supported_platform and
++            hasattr(sys.stdout, 'isatty') and sys.stdout.isatty())
++
++
++class Format(object):
++    @staticmethod
++    def parsable(problem, filename):
++        return ('%(file)s:%(line)s:%(column)s: [%(level)s] %(message)s' %
++                {'file': filename,
++                 'line': problem.line,
++                 'column': problem.column,
++                 'level': problem.level,
++                 'message': problem.message})
++
++    @staticmethod
++    def standard(problem, filename):
++        line = '  %d:%d' % (problem.line, problem.column)
++        line += max(12 - len(line), 0) * ' '
++        line += problem.level
++        line += max(21 - len(line), 0) * ' '
++        line += problem.desc
++        if problem.rule:
++            line += '  (%s)' % problem.rule
++        return line
++
++    @staticmethod
++    def standard_color(problem, filename):
++        line = '  \033[2m%d:%d\033[0m' % (problem.line, problem.column)
++        line += max(20 - len(line), 0) * ' '
++        if problem.level == 'warning':
++            line += '\033[33m%s\033[0m' % problem.level
++        else:
++            line += '\033[31m%s\033[0m' % problem.level
++        line += max(38 - len(line), 0) * ' '
++        line += problem.desc
++        if problem.rule:
++            line += '  \033[2m(%s)\033[0m' % problem.rule
++        return line
++
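++# For one and the same problem the formatters above emit, roughly
++# (illustrative values):
++#
++#     parsable:  file.yml:3:4: [error] trailing spaces (trailing-spaces)
++#     standard:    3:4       error    trailing spaces  (trailing-spaces)
++#
++# standard_color wraps the same fields in ANSI escape sequences.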
++
++def show_problems(problems, file, args_format, no_warn):
++    max_level = 0
++    first = True
++
++    for problem in problems:
++        max_level = max(max_level, PROBLEM_LEVELS[problem.level])
++        if no_warn and (problem.level != 'error'):
++            continue
++        if args_format == 'parsable':
++            print(Format.parsable(problem, file))
++        elif args_format == 'colored' or \
++                (args_format == 'auto' and supports_color()):
++            if first:
++                print('\033[4m%s\033[0m' % file)
++                first = False
++            print(Format.standard_color(problem, file))
++        else:
++            if first:
++                print(file)
++                first = False
++            print(Format.standard(problem, file))
++
++    if not first and args_format != 'parsable':
++        print('')
++
++    return max_level
++
++
++def run(argv=None):
++    parser = argparse.ArgumentParser(prog=APP_NAME,
++                                     description=APP_DESCRIPTION)
++    files_group = parser.add_mutually_exclusive_group(required=True)
++    files_group.add_argument('files', metavar='FILE_OR_DIR', nargs='*',
++                             default=(),
++                             help='files to check')
++    files_group.add_argument('-', action='store_true', dest='stdin',
++                             help='read from standard input')
++    config_group = parser.add_mutually_exclusive_group()
++    config_group.add_argument('-c', '--config-file', dest='config_file',
++                              action='store',
++                              help='path to a custom configuration')
++    config_group.add_argument('-d', '--config-data', dest='config_data',
++                              action='store',
++                              help='custom configuration (as YAML source)')
++    parser.add_argument('-f', '--format',
++                        choices=('parsable', 'standard', 'colored', 'auto'),
++                        default='auto', help='output format')
++    parser.add_argument('-s', '--strict',
++                        action='store_true',
++                        help='return non-zero exit code on warnings '
++                             'as well as errors')
++    parser.add_argument('--no-warnings',
++                        action='store_true',
++                        help='output only error level problems')
++    parser.add_argument('-v', '--version', action='version',
++                        version='{} {}'.format(APP_NAME, APP_VERSION))
++
++    args = parser.parse_args(argv)
++
++    # User-global config is supposed to be in ~/.config/yamllint/config
++    if 'XDG_CONFIG_HOME' in os.environ:
++        user_global_config = os.path.join(
++            os.environ['XDG_CONFIG_HOME'], 'yamllint', 'config')
++    else:
++        user_global_config = os.path.expanduser('~/.config/yamllint/config')
++
++    try:
++        if args.config_data is not None:
++            if args.config_data != '' and ':' not in args.config_data:
++                args.config_data = 'extends: ' + args.config_data
++            conf = YamlLintConfig(content=args.config_data)
++        elif args.config_file is not None:
++            conf = YamlLintConfig(file=args.config_file)
++        elif os.path.isfile('.yamllint'):
++            conf = YamlLintConfig(file='.yamllint')
++        elif os.path.isfile('.yamllint.yaml'):
++            conf = YamlLintConfig(file='.yamllint.yaml')
++        elif os.path.isfile('.yamllint.yml'):
++            conf = YamlLintConfig(file='.yamllint.yml')
++        elif os.path.isfile(user_global_config):
++            conf = YamlLintConfig(file=user_global_config)
++        else:
++            conf = YamlLintConfig('extends: default')
++    except YamlLintConfigError as e:
++        print(e, file=sys.stderr)
++        sys.exit(-1)
++
++    max_level = 0
++
++    for file in find_files_recursively(args.files, conf):
++        filepath = file[2:] if file.startswith('./') else file
++        try:
++            with open(file) as f:
++                problems = linter.run(f, conf, filepath)
++        except EnvironmentError as e:
++            print(e, file=sys.stderr)
++            sys.exit(-1)
++        prob_level = show_problems(problems, file, args_format=args.format,
++                                   no_warn=args.no_warnings)
++        max_level = max(max_level, prob_level)
++
++    # read yaml from stdin
++    if args.stdin:
++        try:
++            problems = linter.run(sys.stdin, conf, '')
++        except EnvironmentError as e:
++            print(e, file=sys.stderr)
++            sys.exit(-1)
++        prob_level = show_problems(problems, 'stdin', args_format=args.format,
++                                   no_warn=args.no_warnings)
++        max_level = max(max_level, prob_level)
++
++    if max_level == PROBLEM_LEVELS['error']:
++        return_code = 1
++    elif max_level == PROBLEM_LEVELS['warning']:
++        return_code = 2 if args.strict else 0
++    else:
++        return_code = 0
++
++    sys.exit(return_code)
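++
++# Exit status sketch: 0 on success (and for warnings without --strict),
++# 1 if any error-level problem was found, 2 for warnings under --strict,
++# and -1 on configuration or I/O failures. For example:
++#
++#     $ yamllint --strict file-with-warnings.yml; echo $?
++#     ...
++#     2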
+diff --git a/third_party/python/yamllint/yamllint/conf/default.yaml b/third_party/python/yamllint/yamllint/conf/default.yaml
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/conf/default.yaml
+@@ -0,0 +1,33 @@
++---
++
++yaml-files:
++  - '*.yaml'
++  - '*.yml'
++  - '.yamllint'
++
++rules:
++  braces: enable
++  brackets: enable
++  colons: enable
++  commas: enable
++  comments:
++    level: warning
++  comments-indentation:
++    level: warning
++  document-end: disable
++  document-start:
++    level: warning
++  empty-lines: enable
++  empty-values: disable
++  hyphens: enable
++  indentation: enable
++  key-duplicates: enable
++  key-ordering: disable
++  line-length: enable
++  new-line-at-end-of-file: enable
++  new-lines: enable
++  octal-values: disable
++  quoted-strings: disable
++  trailing-spaces: enable
++  truthy:
++    level: warning
+diff --git a/third_party/python/yamllint/yamllint/conf/relaxed.yaml b/third_party/python/yamllint/yamllint/conf/relaxed.yaml
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/conf/relaxed.yaml
+@@ -0,0 +1,29 @@
++---
++
++extends: default
++
++rules:
++  braces:
++    level: warning
++    max-spaces-inside: 1
++  brackets:
++    level: warning
++    max-spaces-inside: 1
++  colons:
++    level: warning
++  commas:
++    level: warning
++  comments: disable
++  comments-indentation: disable
++  document-start: disable
++  empty-lines:
++    level: warning
++  hyphens:
++    level: warning
++  indentation:
++    level: warning
++    indent-sequences: consistent
++  line-length:
++    level: warning
++    allow-non-breakable-inline-mappings: true
++  truthy: disable
+diff --git a/third_party/python/yamllint/yamllint/config.py b/third_party/python/yamllint/yamllint/config.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/config.py
+@@ -0,0 +1,198 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++import os.path
++
++import pathspec
++import yaml
++
++import yamllint.rules
++
++
++class YamlLintConfigError(Exception):
++    pass
++
++
++class YamlLintConfig(object):
++    def __init__(self, content=None, file=None):
++        assert (content is None) ^ (file is None)
++
++        self.ignore = None
++
++        self.yaml_files = pathspec.PathSpec.from_lines(
++            'gitwildmatch', ['*.yaml', '*.yml', '.yamllint'])
++
++        if file is not None:
++            with open(file) as f:
++                content = f.read()
++
++        self.parse(content)
++        self.validate()
++
++    def is_file_ignored(self, filepath):
++        return self.ignore and self.ignore.match_file(filepath)
++
++    def is_yaml_file(self, filepath):
++        return self.yaml_files.match_file(filepath)
++
++    def enabled_rules(self, filepath):
++        return [yamllint.rules.get(id) for id, val in self.rules.items()
++                if val is not False and (
++                    filepath is None or 'ignore' not in val or
++                    not val['ignore'].match_file(filepath))]
++
++    def extend(self, base_config):
++        assert isinstance(base_config, YamlLintConfig)
++
++        for rule in self.rules:
++            if (isinstance(self.rules[rule], dict) and
++                    rule in base_config.rules and
++                    base_config.rules[rule] is not False):
++                base_config.rules[rule].update(self.rules[rule])
++            else:
++                base_config.rules[rule] = self.rules[rule]
++
++        self.rules = base_config.rules
++
++        if base_config.ignore is not None:
++            self.ignore = base_config.ignore
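++
++    # Merge sketch: rules set in the extending config win over the base;
++    # when both sides define a rule as a dict, the base dict is updated in
++    # place, so options left unspecified here keep the base's values.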
++
++    def parse(self, raw_content):
++        try:
++            conf = yaml.safe_load(raw_content)
++        except Exception as e:
++            raise YamlLintConfigError('invalid config: %s' % e)
++
++        if not isinstance(conf, dict):
++            raise YamlLintConfigError('invalid config: not a dict')
++
++        self.rules = conf.get('rules', {})
++        for rule in self.rules:
++            if self.rules[rule] == 'enable':
++                self.rules[rule] = {}
++            elif self.rules[rule] == 'disable':
++                self.rules[rule] = False
++
++        # Does this conf override another conf that we need to load?
++        if 'extends' in conf:
++            path = get_extended_config_file(conf['extends'])
++            base = YamlLintConfig(file=path)
++            try:
++                self.extend(base)
++            except Exception as e:
++                raise YamlLintConfigError('invalid config: %s' % e)
++
++        if 'ignore' in conf:
++            if not isinstance(conf['ignore'], str):
++                raise YamlLintConfigError(
++                    'invalid config: ignore should contain file patterns')
++            self.ignore = pathspec.PathSpec.from_lines(
++                'gitwildmatch', conf['ignore'].splitlines())
++
++        if 'yaml-files' in conf:
++            if not (isinstance(conf['yaml-files'], list)
++                    and all(isinstance(i, str) for i in conf['yaml-files'])):
++                raise YamlLintConfigError(
++                    'invalid config: yaml-files '
++                    'should be a list of file patterns')
++            self.yaml_files = pathspec.PathSpec.from_lines('gitwildmatch',
++                                                           conf['yaml-files'])
++
++    def validate(self):
++        for id in self.rules:
++            try:
++                rule = yamllint.rules.get(id)
++            except Exception as e:
++                raise YamlLintConfigError('invalid config: %s' % e)
++
++            self.rules[id] = validate_rule_conf(rule, self.rules[id])
++
++
++def validate_rule_conf(rule, conf):
++    if conf is False:  # disable
++        return False
++
++    if isinstance(conf, dict):
++        if ('ignore' in conf and
++                not isinstance(conf['ignore'], pathspec.pathspec.PathSpec)):
++            if not isinstance(conf['ignore'], str):
++                raise YamlLintConfigError(
++                    'invalid config: ignore should contain file patterns')
++            conf['ignore'] = pathspec.PathSpec.from_lines(
++                'gitwildmatch', conf['ignore'].splitlines())
++
++        if 'level' not in conf:
++            conf['level'] = 'error'
++        elif conf['level'] not in ('error', 'warning'):
++            raise YamlLintConfigError(
++                'invalid config: level should be "error" or "warning"')
++
++        options = getattr(rule, 'CONF', {})
++        options_default = getattr(rule, 'DEFAULT', {})
++        for optkey in conf:
++            if optkey in ('ignore', 'level'):
++                continue
++            if optkey not in options:
++                raise YamlLintConfigError(
++                    'invalid config: unknown option "%s" for rule "%s"' %
++                    (optkey, rule.ID))
++            # Example: CONF = {option: (bool, 'mixed')}
++            #          → {option: true}         → {option: mixed}
++            if isinstance(options[optkey], tuple):
++                if (conf[optkey] not in options[optkey] and
++                        type(conf[optkey]) not in options[optkey]):
++                    raise YamlLintConfigError(
++                        'invalid config: option "%s" of "%s" should be in %s'
++                        % (optkey, rule.ID, options[optkey]))
++            # Example: CONF = {option: ['flag1', 'flag2']}
++            #          → {option: [flag1]}      → {option: [flag1, flag2]}
++            elif isinstance(options[optkey], list):
++                if (type(conf[optkey]) is not list or
++                        any(flag not in options[optkey]
++                            for flag in conf[optkey])):
++                    raise YamlLintConfigError(
++                        ('invalid config: option "%s" of "%s" should only '
++                         'contain values in %s')
++                        % (optkey, rule.ID, str(options[optkey])))
++            # Example: CONF = {option: int}
++            #          → {option: 42}
++            else:
++                if not isinstance(conf[optkey], options[optkey]):
++                    raise YamlLintConfigError(
++                        'invalid config: option "%s" of "%s" should be %s'
++                        % (optkey, rule.ID, options[optkey].__name__))
++        for optkey in options:
++            if optkey not in conf:
++                conf[optkey] = options_default[optkey]
++    else:
++        raise YamlLintConfigError(('invalid config: rule "%s": should be '
++                                   'either "enable", "disable" or a dict')
++                                  % rule.ID)
++
++    return conf
++
++
++def get_extended_config_file(name):
++    # Is it a standard conf shipped with yamllint...
++    if '/' not in name:
++        std_conf = os.path.join(os.path.dirname(os.path.realpath(__file__)),
++                                'conf', name + '.yaml')
++
++        if os.path.isfile(std_conf):
++            return std_conf
++
++    # or a custom conf on filesystem?
++    return name
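++
++# Resolution sketch: get_extended_config_file('default') resolves to the
++# bundled conf/default.yaml next to this module, while any name containing
++# a '/' (e.g. './my-conf.yaml') is returned unchanged as a filesystem path.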
+diff --git a/third_party/python/yamllint/yamllint/linter.py b/third_party/python/yamllint/yamllint/linter.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/linter.py
+@@ -0,0 +1,240 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++import re
++
++import yaml
++
++from yamllint import parser
++
++
++PROBLEM_LEVELS = {
++    0: None,
++    1: 'warning',
++    2: 'error',
++    None: 0,
++    'warning': 1,
++    'error': 2,
++}
++
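++# The mapping is deliberately bidirectional, so callers can translate in
++# either direction, e.g. PROBLEM_LEVELS['warning'] == 1 and
++# PROBLEM_LEVELS[1] == 'warning'.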
++
++class LintProblem(object):
++    """Represents a linting problem found by yamllint."""
++    def __init__(self, line, column, desc='<no description>', rule=None):
++        #: Line on which the problem was found (starting at 1)
++        self.line = line
++        #: Column on which the problem was found (starting at 1)
++        self.column = column
++        #: Human-readable description of the problem
++        self.desc = desc
++        #: Identifier of the rule that detected the problem
++        self.rule = rule
++        self.level = None
++
++    @property
++    def message(self):
++        if self.rule is not None:
++            return '{} ({})'.format(self.desc, self.rule)
++        return self.desc
++
++    def __eq__(self, other):
++        return (self.line == other.line and
++                self.column == other.column and
++                self.rule == other.rule)
++
++    def __lt__(self, other):
++        return (self.line < other.line or
++                (self.line == other.line and self.column < other.column))
++
++    def __repr__(self):
++        return '%d:%d: %s' % (self.line, self.column, self.message)
++
++
++def get_cosmetic_problems(buffer, conf, filepath):
++    rules = conf.enabled_rules(filepath)
++
++    # Split token rules from line rules
++    token_rules = [r for r in rules if r.TYPE == 'token']
++    comment_rules = [r for r in rules if r.TYPE == 'comment']
++    line_rules = [r for r in rules if r.TYPE == 'line']
++
++    context = {}
++    for rule in token_rules:
++        context[rule.ID] = {}
++
++    class DisableDirective:
++        def __init__(self):
++            self.rules = set()
++            self.all_rules = {r.ID for r in rules}
++
++        def process_comment(self, comment):
++            try:
++                comment = str(comment)
++            except UnicodeError:
++                return  # this certainly wasn't a yamllint directive comment
++
++            if re.match(r'^# yamllint disable( rule:\S+)*\s*$', comment):
++                rules = [item[5:] for item in comment[18:].split(' ')][1:]
++                if len(rules) == 0:
++                    self.rules = self.all_rules.copy()
++                else:
++                    for id in rules:
++                        if id in self.all_rules:
++                            self.rules.add(id)
++
++            elif re.match(r'^# yamllint enable( rule:\S+)*\s*$', comment):
++                rules = [item[5:] for item in comment[17:].split(' ')][1:]
++                if len(rules) == 0:
++                    self.rules.clear()
++                else:
++                    for id in rules:
++                        self.rules.discard(id)
++
++        def is_disabled_by_directive(self, problem):
++            return problem.rule in self.rules
++
++    class DisableLineDirective(DisableDirective):
++        def process_comment(self, comment):
++            try:
++                comment = str(comment)
++            except UnicodeError:
++                return  # this certainly wasn't a yamllint directive comment
++
++            if re.match(r'^# yamllint disable-line( rule:\S+)*\s*$', comment):
++                rules = [item[5:] for item in comment[23:].split(' ')][1:]
++                if len(rules) == 0:
++                    self.rules = self.all_rules.copy()
++                else:
++                    for id in rules:
++                        if id in self.all_rules:
++                            self.rules.add(id)
++
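++    # Directive comments look like, for example:
++    #
++    #     # yamllint disable
++    #     # yamllint disable rule:colons rule:indentation
++    #     # yamllint enable rule:colons
++    #     # yamllint disable-line rule:commas
++    #
++    # The slicing above relies on fixed prefixes: '# yamllint disable' is
++    # 18 characters, '# yamllint enable' is 17, '# yamllint disable-line'
++    # is 23, and each ' rule:xxx' item loses its 5-character 'rule:' prefix.
++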
++    # Use a cache to store problems and flush it only when an end of line is
++    # found. This allows the use of yamllint directives to disable some rules
++    # on some lines.
++    cache = []
++    disabled = DisableDirective()
++    disabled_for_line = DisableLineDirective()
++    disabled_for_next_line = DisableLineDirective()
++
++    for elem in parser.token_or_comment_or_line_generator(buffer):
++        if isinstance(elem, parser.Token):
++            for rule in token_rules:
++                rule_conf = conf.rules[rule.ID]
++                for problem in rule.check(rule_conf,
++                                          elem.curr, elem.prev, elem.next,
++                                          elem.nextnext,
++                                          context[rule.ID]):
++                    problem.rule = rule.ID
++                    problem.level = rule_conf['level']
++                    cache.append(problem)
++        elif isinstance(elem, parser.Comment):
++            for rule in comment_rules:
++                rule_conf = conf.rules[rule.ID]
++                for problem in rule.check(rule_conf, elem):
++                    problem.rule = rule.ID
++                    problem.level = rule_conf['level']
++                    cache.append(problem)
++
++            disabled.process_comment(elem)
++            if elem.is_inline():
++                disabled_for_line.process_comment(elem)
++            else:
++                disabled_for_next_line.process_comment(elem)
++        elif isinstance(elem, parser.Line):
++            for rule in line_rules:
++                rule_conf = conf.rules[rule.ID]
++                for problem in rule.check(rule_conf, elem):
++                    problem.rule = rule.ID
++                    problem.level = rule_conf['level']
++                    cache.append(problem)
++
++            # This is the last token/comment/line of this line: flush the
++            # problems found so far (but filter them according to the
++            # directives)
++            for problem in cache:
++                if not (disabled_for_line.is_disabled_by_directive(problem) or
++                        disabled.is_disabled_by_directive(problem)):
++                    yield problem
++
++            disabled_for_line = disabled_for_next_line
++            disabled_for_next_line = DisableLineDirective()
++            cache = []
++
++
++def get_syntax_error(buffer):
++    try:
++        list(yaml.parse(buffer, Loader=yaml.BaseLoader))
++    except yaml.error.MarkedYAMLError as e:
++        problem = LintProblem(e.problem_mark.line + 1,
++                              e.problem_mark.column + 1,
++                              'syntax error: ' + e.problem + ' (syntax)')
++        problem.level = 'error'
++        return problem
++
++
++def _run(buffer, conf, filepath):
++    assert hasattr(buffer, '__getitem__'), \
++        '_run() argument must be a buffer, not a stream'
++
++    first_line = next(parser.line_generator(buffer)).content
++    if re.match(r'^#\s*yamllint disable-file\s*$', first_line):
++        return
++
++    # If the document contains a syntax error, save it and yield it at the
++    # right line
++    syntax_error = get_syntax_error(buffer)
++
++    for problem in get_cosmetic_problems(buffer, conf, filepath):
++        # Insert the syntax error (if any) at the right place...
++        if (syntax_error and syntax_error.line <= problem.line and
++                syntax_error.column <= problem.column):
++            yield syntax_error
++
++            # If there is already a yamllint error at the same place, discard
++            # it as it is probably redundant (and maybe it's just a 'warning',
++            # in which case the script won't even exit with a failure status).
++            if (syntax_error.line == problem.line and
++                    syntax_error.column == problem.column):
++                syntax_error = None
++                continue
++
++            syntax_error = None
++
++        yield problem
++
++    if syntax_error:
++        yield syntax_error
++
++
++def run(input, conf, filepath=None):
++    """Lints a YAML source.
++
++    Returns a generator of LintProblem objects.
++
++    :param input: buffer, string or stream to read from
++    :param conf: yamllint configuration object
++    :param filepath: optional path of the linted file, matched against the
++                     configuration's ignore patterns
++    """
++    if conf.is_file_ignored(filepath):
++        return ()
++
++    if isinstance(input, (type(b''), type(u''))):  # compat with Python 2 & 3
++        return _run(input, conf, filepath)
++    elif hasattr(input, 'read'):  # Python 2's file or Python 3's io.IOBase
++        # We need to have everything in memory to parse correctly
++        content = input.read()
++        return _run(content, conf, filepath)
++    else:
++        raise TypeError('input should be a string or a stream')
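++
++# A minimal usage sketch (assumes the bundled 'default' preset):
++#
++#     from yamllint.config import YamlLintConfig
++#     from yamllint import linter
++#
++#     conf = YamlLintConfig('extends: default')
++#     for problem in linter.run('key: value\nkey: other\n', conf):
++#         print(problem)  # LintProblem reprs like '2:1: ... (key-duplicates)'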
+diff --git a/third_party/python/yamllint/yamllint/parser.py b/third_party/python/yamllint/yamllint/parser.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/parser.py
+@@ -0,0 +1,161 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++import yaml
++
++
++class Line(object):
++    def __init__(self, line_no, buffer, start, end):
++        self.line_no = line_no
++        self.start = start
++        self.end = end
++        self.buffer = buffer
++
++    @property
++    def content(self):
++        return self.buffer[self.start:self.end]
++
++
++class Token(object):
++    def __init__(self, line_no, curr, prev, next, nextnext):
++        self.line_no = line_no
++        self.curr = curr
++        self.prev = prev
++        self.next = next
++        self.nextnext = nextnext
++
++
++class Comment(object):
++    def __init__(self, line_no, column_no, buffer, pointer,
++                 token_before=None, token_after=None, comment_before=None):
++        self.line_no = line_no
++        self.column_no = column_no
++        self.buffer = buffer
++        self.pointer = pointer
++        self.token_before = token_before
++        self.token_after = token_after
++        self.comment_before = comment_before
++
++    def __str__(self):
++        end = self.buffer.find('\n', self.pointer)
++        if end == -1:
++            end = self.buffer.find('\0', self.pointer)
++        if end != -1:
++            return self.buffer[self.pointer:end]
++        return self.buffer[self.pointer:]
++
++    def __eq__(self, other):
++        return (isinstance(other, Comment) and
++                self.line_no == other.line_no and
++                self.column_no == other.column_no and
++                str(self) == str(other))
++
++    def is_inline(self):
++        return (
++            not isinstance(self.token_before, yaml.StreamStartToken) and
++            self.line_no == self.token_before.end_mark.line + 1 and
++            # sometimes token end marks are on the next line
++            self.buffer[self.token_before.end_mark.pointer - 1] != '\n'
++        )
++
++
++def line_generator(buffer):
++    line_no = 1
++    cur = 0
++    next = buffer.find('\n')
++    while next != -1:
++        if next > 0 and buffer[next - 1] == '\r':
++            yield Line(line_no, buffer, start=cur, end=next - 1)
++        else:
++            yield Line(line_no, buffer, start=cur, end=next)
++        cur = next + 1
++        next = buffer.find('\n', cur)
++        line_no += 1
++
++    yield Line(line_no, buffer, start=cur, end=len(buffer))
++
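++# For example, line_generator('a\nb') yields Line(1, ..., start=0, end=1)
++# and Line(2, ..., start=2, end=3); a '\r' preceding the '\n' is excluded
++# from the line content, so CRLF input behaves like LF input.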
++
++def comments_between_tokens(token1, token2):
++    """Find all comments between two tokens"""
++    if token2 is None:
++        buf = token1.end_mark.buffer[token1.end_mark.pointer:]
++    elif (token1.end_mark.line == token2.start_mark.line and
++          not isinstance(token1, yaml.StreamStartToken) and
++          not isinstance(token2, yaml.StreamEndToken)):
++        return
++    else:
++        buf = token1.end_mark.buffer[token1.end_mark.pointer:
++                                     token2.start_mark.pointer]
++
++    line_no = token1.end_mark.line + 1
++    column_no = token1.end_mark.column + 1
++    pointer = token1.end_mark.pointer
++
++    comment_before = None
++    for line in buf.split('\n'):
++        pos = line.find('#')
++        if pos != -1:
++            comment = Comment(line_no, column_no + pos,
++                              token1.end_mark.buffer, pointer + pos,
++                              token1, token2, comment_before)
++            yield comment
++
++            comment_before = comment
++
++        pointer += len(line) + 1
++        line_no += 1
++        column_no = 1
++
++
++def token_or_comment_generator(buffer):
++    yaml_loader = yaml.BaseLoader(buffer)
++
++    try:
++        prev = None
++        curr = yaml_loader.get_token()
++        while curr is not None:
++            next = yaml_loader.get_token()
++            nextnext = (yaml_loader.peek_token()
++                        if yaml_loader.check_token() else None)
++
++            yield Token(curr.start_mark.line + 1, curr, prev, next, nextnext)
++
++            for comment in comments_between_tokens(curr, next):
++                yield comment
++
++            prev = curr
++            curr = next
++
++    except yaml.scanner.ScannerError:
++        pass
++
++
++def token_or_comment_or_line_generator(buffer):
++    """Generator that mixes tokens and lines, ordering them by line number"""
++    tok_or_com_gen = token_or_comment_generator(buffer)
++    line_gen = line_generator(buffer)
++
++    tok_or_com = next(tok_or_com_gen, None)
++    line = next(line_gen, None)
++
++    while tok_or_com is not None or line is not None:
++        if tok_or_com is None or (line is not None and
++                                  tok_or_com.line_no > line.line_no):
++            yield line
++            line = next(line_gen, None)
++        else:
++            yield tok_or_com
++            tok_or_com = next(tok_or_com_gen, None)
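++
++# Ordering sketch: tokens and comments carrying a given line number are
++# yielded before the Line object with that number (ties go to the
++# token/comment side), so rules always see a line's tokens first.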
+diff --git a/third_party/python/yamllint/yamllint/rules/__init__.py b/third_party/python/yamllint/yamllint/rules/__init__.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/__init__.py
+@@ -0,0 +1,70 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++from yamllint.rules import (
++    braces,
++    brackets,
++    colons,
++    commas,
++    comments,
++    comments_indentation,
++    document_end,
++    document_start,
++    empty_lines,
++    empty_values,
++    hyphens,
++    indentation,
++    key_duplicates,
++    key_ordering,
++    line_length,
++    new_line_at_end_of_file,
++    new_lines,
++    octal_values,
++    quoted_strings,
++    trailing_spaces,
++    truthy,
++)
++
++_RULES = {
++    braces.ID: braces,
++    brackets.ID: brackets,
++    colons.ID: colons,
++    commas.ID: commas,
++    comments.ID: comments,
++    comments_indentation.ID: comments_indentation,
++    document_end.ID: document_end,
++    document_start.ID: document_start,
++    empty_lines.ID: empty_lines,
++    empty_values.ID: empty_values,
++    hyphens.ID: hyphens,
++    indentation.ID: indentation,
++    key_duplicates.ID: key_duplicates,
++    key_ordering.ID: key_ordering,
++    line_length.ID: line_length,
++    new_line_at_end_of_file.ID: new_line_at_end_of_file,
++    new_lines.ID: new_lines,
++    octal_values.ID: octal_values,
++    quoted_strings.ID: quoted_strings,
++    trailing_spaces.ID: trailing_spaces,
++    truthy.ID: truthy,
++}
++
++
++def get(id):
++    if id not in _RULES:
++        raise ValueError('no such rule: "%s"' % id)
++
++    return _RULES[id]
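++
++# For example, get('braces') returns the yamllint.rules.braces module,
++# whose ID, TYPE, check() and optional CONF/DEFAULT attributes drive the
++# linter; an unknown id raises ValueError.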
+diff --git a/third_party/python/yamllint/yamllint/rules/braces.py b/third_party/python/yamllint/yamllint/rules/braces.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/braces.py
+@@ -0,0 +1,143 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to control the number of spaces inside braces (``{`` and ``}``).
++
++.. rubric:: Options
++
++* ``min-spaces-inside`` defines the minimal number of spaces required inside
++  braces.
++* ``max-spaces-inside`` defines the maximal number of spaces allowed inside
++  braces.
++* ``min-spaces-inside-empty`` defines the minimal number of spaces required
++  inside empty braces.
++* ``max-spaces-inside-empty`` defines the maximal number of spaces allowed
++  inside empty braces.
++
++.. rubric:: Examples
++
++#. With ``braces: {min-spaces-inside: 0, max-spaces-inside: 0}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    object: {key1: 4, key2: 8}
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: { key1: 4, key2: 8 }
++
++#. With ``braces: {min-spaces-inside: 1, max-spaces-inside: 3}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    object: { key1: 4, key2: 8 }
++
++   the following code snippet would **PASS**:
++   ::
++
++    object: { key1: 4, key2: 8   }
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: {    key1: 4, key2: 8   }
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: {key1: 4, key2: 8 }
++
++#. With ``braces: {min-spaces-inside-empty: 0, max-spaces-inside-empty: 0}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    object: {}
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: { }
++
++#. With ``braces: {min-spaces-inside-empty: 1, max-spaces-inside-empty: -1}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    object: {         }
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: {}
++"""
++
++
++import yaml
++
++from yamllint.rules.common import spaces_after, spaces_before
++
++
++ID = 'braces'
++TYPE = 'token'
++CONF = {'min-spaces-inside': int,
++        'max-spaces-inside': int,
++        'min-spaces-inside-empty': int,
++        'max-spaces-inside-empty': int}
++DEFAULT = {'min-spaces-inside': 0,
++           'max-spaces-inside': 0,
++           'min-spaces-inside-empty': -1,
++           'max-spaces-inside-empty': -1}
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if (isinstance(token, yaml.FlowMappingStartToken) and
++            isinstance(next, yaml.FlowMappingEndToken)):
++        problem = spaces_after(token, prev, next,
++                               min=(conf['min-spaces-inside-empty']
++                                    if conf['min-spaces-inside-empty'] != -1
++                                    else conf['min-spaces-inside']),
++                               max=(conf['max-spaces-inside-empty']
++                                    if conf['max-spaces-inside-empty'] != -1
++                                    else conf['max-spaces-inside']),
++                               min_desc='too few spaces inside empty braces',
++                               max_desc='too many spaces inside empty braces')
++        if problem is not None:
++            yield problem
++
++    elif isinstance(token, yaml.FlowMappingStartToken):
++        problem = spaces_after(token, prev, next,
++                               min=conf['min-spaces-inside'],
++                               max=conf['max-spaces-inside'],
++                               min_desc='too few spaces inside braces',
++                               max_desc='too many spaces inside braces')
++        if problem is not None:
++            yield problem
++
++    elif (isinstance(token, yaml.FlowMappingEndToken) and
++            (prev is None or
++             not isinstance(prev, yaml.FlowMappingStartToken))):
++        problem = spaces_before(token, prev, next,
++                                min=conf['min-spaces-inside'],
++                                max=conf['max-spaces-inside'],
++                                min_desc='too few spaces inside braces',
++                                max_desc='too many spaces inside braces')
++        if problem is not None:
++            yield problem
+diff --git a/third_party/python/yamllint/yamllint/rules/brackets.py b/third_party/python/yamllint/yamllint/rules/brackets.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/brackets.py
+@@ -0,0 +1,145 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to control the number of spaces inside brackets (``[`` and
++``]``).
++
++.. rubric:: Options
++
++* ``min-spaces-inside`` defines the minimal number of spaces required inside
++  brackets.
++* ``max-spaces-inside`` defines the maximal number of spaces allowed inside
++  brackets.
++* ``min-spaces-inside-empty`` defines the minimal number of spaces required
++  inside empty brackets.
++* ``max-spaces-inside-empty`` defines the maximal number of spaces allowed
++  inside empty brackets.
++
++.. rubric:: Examples
++
++#. With ``brackets: {min-spaces-inside: 0, max-spaces-inside: 0}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    object: [1, 2, abc]
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: [ 1, 2, abc ]
++
++#. With ``brackets: {min-spaces-inside: 1, max-spaces-inside: 3}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    object: [ 1, 2, abc ]
++
++   the following code snippet would **PASS**:
++   ::
++
++    object: [ 1, 2, abc   ]
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: [    1, 2, abc   ]
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: [1, 2, abc ]
++
++#. With ``brackets: {min-spaces-inside-empty: 0, max-spaces-inside-empty: 0}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    object: []
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: [ ]
++
++#. With ``brackets: {min-spaces-inside-empty: 1, max-spaces-inside-empty: -1}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    object: [         ]
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: []
++"""
++
++
++import yaml
++
++from yamllint.rules.common import spaces_after, spaces_before
++
++
++ID = 'brackets'
++TYPE = 'token'
++CONF = {'min-spaces-inside': int,
++        'max-spaces-inside': int,
++        'min-spaces-inside-empty': int,
++        'max-spaces-inside-empty': int}
++DEFAULT = {'min-spaces-inside': 0,
++           'max-spaces-inside': 0,
++           'min-spaces-inside-empty': -1,
++           'max-spaces-inside-empty': -1}
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if (isinstance(token, yaml.FlowSequenceStartToken) and
++            isinstance(next, yaml.FlowSequenceEndToken)):
++        problem = spaces_after(token, prev, next,
++                               min=(conf['min-spaces-inside-empty']
++                                    if conf['min-spaces-inside-empty'] != -1
++                                    else conf['min-spaces-inside']),
++                               max=(conf['max-spaces-inside-empty']
++                                    if conf['max-spaces-inside-empty'] != -1
++                                    else conf['max-spaces-inside']),
++                               min_desc='too few spaces inside empty brackets',
++                               max_desc=('too many spaces inside empty '
++                                         'brackets'))
++        if problem is not None:
++            yield problem
++
++    elif isinstance(token, yaml.FlowSequenceStartToken):
++        problem = spaces_after(token, prev, next,
++                               min=conf['min-spaces-inside'],
++                               max=conf['max-spaces-inside'],
++                               min_desc='too few spaces inside brackets',
++                               max_desc='too many spaces inside brackets')
++        if problem is not None:
++            yield problem
++
++    elif (isinstance(token, yaml.FlowSequenceEndToken) and
++            (prev is None or
++             not isinstance(prev, yaml.FlowSequenceStartToken))):
++        problem = spaces_before(token, prev, next,
++                                min=conf['min-spaces-inside'],
++                                max=conf['max-spaces-inside'],
++                                min_desc='too few spaces inside brackets',
++                                max_desc='too many spaces inside brackets')
++        if problem is not None:
++            yield problem
+diff --git a/third_party/python/yamllint/yamllint/rules/colons.py b/third_party/python/yamllint/yamllint/rules/colons.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/colons.py
+@@ -0,0 +1,105 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to control the number of spaces before and after colons (``:``).
++
++.. rubric:: Options
++
++* ``max-spaces-before`` defines the maximal number of spaces allowed before
++  colons (use ``-1`` to disable).
++* ``max-spaces-after`` defines the maximal number of spaces allowed after
++  colons (use ``-1`` to disable).
++
++.. rubric:: Examples
++
++#. With ``colons: {max-spaces-before: 0, max-spaces-after: 1}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    object:
++      - a
++      - b
++    key: value
++
++#. With ``colons: {max-spaces-before: 1}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    object :
++      - a
++      - b
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object  :
++      - a
++      - b
++
++#. With ``colons: {max-spaces-after: 2}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    first:  1
++    second: 2
++    third:  3
++
++   the following code snippet would **FAIL**:
++   ::
++
++    first: 1
++    2nd:   2
++    third: 3
++"""
++
++
++import yaml
++
++from yamllint.rules.common import is_explicit_key, spaces_after, spaces_before
++
++
++ID = 'colons'
++TYPE = 'token'
++CONF = {'max-spaces-before': int,
++        'max-spaces-after': int}
++DEFAULT = {'max-spaces-before': 0,
++           'max-spaces-after': 1}
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if isinstance(token, yaml.ValueToken):
++        problem = spaces_before(token, prev, next,
++                                max=conf['max-spaces-before'],
++                                max_desc='too many spaces before colon')
++        if problem is not None:
++            yield problem
++
++        problem = spaces_after(token, prev, next,
++                               max=conf['max-spaces-after'],
++                               max_desc='too many spaces after colon')
++        if problem is not None:
++            yield problem
++
++    if isinstance(token, yaml.KeyToken) and is_explicit_key(token):
++        problem = spaces_after(token, prev, next,
++                               max=conf['max-spaces-after'],
++                               max_desc='too many spaces after question mark')
++        if problem is not None:
++            yield problem
+diff --git a/third_party/python/yamllint/yamllint/rules/commas.py b/third_party/python/yamllint/yamllint/rules/commas.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/commas.py
+@@ -0,0 +1,131 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to control the number of spaces before and after commas (``,``).
++
++.. rubric:: Options
++
++* ``max-spaces-before`` defines the maximal number of spaces allowed before
++  commas (use ``-1`` to disable).
++* ``min-spaces-after`` defines the minimal number of spaces required after
++  commas.
++* ``max-spaces-after`` defines the maximal number of spaces allowed after
++  commas (use ``-1`` to disable).
++
++.. rubric:: Examples
++
++#. With ``commas: {max-spaces-before: 0}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    strange var:
++      [10, 20, 30, {x: 1, y: 2}]
++
++   the following code snippet would **FAIL**:
++   ::
++
++    strange var:
++      [10, 20 , 30, {x: 1, y: 2}]
++
++#. With ``commas: {max-spaces-before: 2}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    strange var:
++      [10  , 20 , 30,  {x: 1  , y: 2}]
++
++#. With ``commas: {max-spaces-before: -1}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    strange var:
++      [10,
++       20   , 30
++       ,   {x: 1, y: 2}]
++
++#. With ``commas: {min-spaces-after: 1, max-spaces-after: 1}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    strange var:
++      [10, 20,30, {x: 1, y: 2}]
++
++   the following code snippet would **FAIL**:
++   ::
++
++    strange var:
++      [10, 20,30,   {x: 1,   y: 2}]
++
++#. With ``commas: {min-spaces-after: 1, max-spaces-after: 3}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    strange var:
++      [10, 20,  30,  {x: 1,   y: 2}]
++
++#. With ``commas: {min-spaces-after: 0, max-spaces-after: 1}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    strange var:
++      [10, 20,30, {x: 1, y: 2}]
++"""
++
++
++import yaml
++
++from yamllint.linter import LintProblem
++from yamllint.rules.common import spaces_after, spaces_before
++
++
++ID = 'commas'
++TYPE = 'token'
++CONF = {'max-spaces-before': int,
++        'min-spaces-after': int,
++        'max-spaces-after': int}
++DEFAULT = {'max-spaces-before': 0,
++           'min-spaces-after': 1,
++           'max-spaces-after': 1}
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if isinstance(token, yaml.FlowEntryToken):
++        if (prev is not None and conf['max-spaces-before'] != -1 and
++                prev.end_mark.line < token.start_mark.line):
++            yield LintProblem(token.start_mark.line + 1,
++                              max(1, token.start_mark.column),
++                              'too many spaces before comma')
++        else:
++            problem = spaces_before(token, prev, next,
++                                    max=conf['max-spaces-before'],
++                                    max_desc='too many spaces before comma')
++            if problem is not None:
++                yield problem
++
++        problem = spaces_after(token, prev, next,
++                               min=conf['min-spaces-after'],
++                               max=conf['max-spaces-after'],
++                               min_desc='too few spaces after comma',
++                               max_desc='too many spaces after comma')
++        if problem is not None:
++            yield problem
+diff --git a/third_party/python/yamllint/yamllint/rules/comments.py b/third_party/python/yamllint/yamllint/rules/comments.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/comments.py
+@@ -0,0 +1,104 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to control the position and formatting of comments.
++
++.. rubric:: Options
++
++* Use ``require-starting-space`` to require a space character right after the
++  ``#``. Set to ``true`` to enable, ``false`` to disable.
++* Use ``ignore-shebangs`` to ignore a
++  `shebang <https://en.wikipedia.org/wiki/Shebang_(Unix)>`_ at the beginning of
++  the file when ``require-starting-space`` is set.
++* ``min-spaces-from-content`` is used to visually separate inline comments from
++  content. It defines the minimal required number of spaces between a comment
++  and its preceding content.
++
++.. rubric:: Examples
++
++#. With ``comments: {require-starting-space: true}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    # This sentence
++    # is a block comment
++
++   the following code snippet would **PASS**:
++   ::
++
++    ##############################
++    ## This is some documentation
++
++   the following code snippet would **FAIL**:
++   ::
++
++    #This sentence
++    #is a block comment
++
++#. With ``comments: {min-spaces-from-content: 2}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    x = 2 ^ 127 - 1  # Mersenne prime number
++
++   the following code snippet would **FAIL**:
++   ::
++
++    x = 2 ^ 127 - 1 # Mersenne prime number
++"""
++
++
++import re
++
++from yamllint.linter import LintProblem
++
++
++ID = 'comments'
++TYPE = 'comment'
++CONF = {'require-starting-space': bool,
++        'ignore-shebangs': bool,
++        'min-spaces-from-content': int}
++DEFAULT = {'require-starting-space': True,
++           'ignore-shebangs': True,
++           'min-spaces-from-content': 2}
++
++
++def check(conf, comment):
++    if (conf['min-spaces-from-content'] != -1 and comment.is_inline() and
++            comment.pointer - comment.token_before.end_mark.pointer <
++            conf['min-spaces-from-content']):
++        yield LintProblem(comment.line_no, comment.column_no,
++                          'too few spaces before comment')
++
++    if conf['require-starting-space']:
++        text_start = comment.pointer + 1
++        while (text_start < len(comment.buffer) and
++               comment.buffer[text_start] == '#'):
++            text_start += 1
++        if text_start < len(comment.buffer):
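++            # text_start is now just past the run of '#' characters; a
++            # first-line, first-column comment of the form '#!...' is
++            # treated as a shebang when ignore-shebangs is set.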
++            if (conf['ignore-shebangs'] and
++                    comment.line_no == 1 and
++                    comment.column_no == 1 and
++                    re.match(r'^!\S', comment.buffer[text_start:])):
++                return
++            elif comment.buffer[text_start] not in (' ', '\n', '\0'):
++                column = comment.column_no + text_start - comment.pointer
++                yield LintProblem(comment.line_no,
++                                  column,
++                                  'missing starting space in comment')
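++
++
++if __name__ == '__main__':
++    # Minimal usage sketch (illustrative, not upstream code): exercise this
++    # rule through yamllint's public linter API, assuming the vendored
++    # yamllint package is importable.
++    from yamllint import linter
++    from yamllint.config import YamlLintConfig
++
++    demo_conf = YamlLintConfig('rules:\n  comments: enable\n')
++    demo_problems = list(linter.run('key: value  #no space\n', demo_conf))
++    assert any(p.desc == 'missing starting space in comment'
++               for p in demo_problems)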
+diff --git a/third_party/python/yamllint/yamllint/rules/comments_indentation.py b/third_party/python/yamllint/yamllint/rules/comments_indentation.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/comments_indentation.py
+@@ -0,0 +1,139 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to force comments to be indented like content.
++
++.. rubric:: Examples
++
++#. With ``comments-indentation: {}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    # Fibonacci
++    [0, 1, 1, 2, 3, 5]
++
++   the following code snippet would **FAIL**:
++   ::
++
++      # Fibonacci
++    [0, 1, 1, 2, 3, 5]
++
++   the following code snippet would **PASS**:
++   ::
++
++    list:
++        - 2
++        - 3
++        # - 4
++        - 5
++
++   the following code snippet would **FAIL**:
++   ::
++
++    list:
++        - 2
++        - 3
++    #    - 4
++        - 5
++
++   the following code snippet would **PASS**:
++   ::
++
++    # This is the first object
++    obj1:
++      - item A
++      # - item B
++    # This is the second object
++    obj2: []
++
++   the following code snippet would **PASS**:
++   ::
++
++    # This sentence
++    # is a block comment
++
++   the following code snippet would **FAIL**:
++   ::
++
++    # This sentence
++     # is a block comment
++"""
++
++
++import yaml
++
++from yamllint.linter import LintProblem
++from yamllint.rules.common import get_line_indent
++
++
++ID = 'comments-indentation'
++TYPE = 'comment'
++
++
++# Case A:
++#
++#     prev: line:
++#       # commented line
++#       current: line
++#
++# Case B:
++#
++#       prev: line
++#       # commented line 1
++#     # commented line 2
++#     current: line
++
++def check(conf, comment):
++    # Only check block comments: an inline comment shares its line with the
++    # token before it, so skip it here.
++    if (not isinstance(comment.token_before, yaml.StreamStartToken) and
++            comment.token_before.end_mark.line + 1 == comment.line_no):
++        return
++
++    next_line_indent = comment.token_after.start_mark.column
++    if isinstance(comment.token_after, yaml.StreamEndToken):
++        next_line_indent = 0
++
++    if isinstance(comment.token_before, yaml.StreamStartToken):
++        prev_line_indent = 0
++    else:
++        prev_line_indent = get_line_indent(comment.token_before)
++
++    # In the following case only the next line indent is valid:
++    #     list:
++    #         # comment
++    #         - 1
++    #         - 2
++    if prev_line_indent <= next_line_indent:
++        prev_line_indent = next_line_indent
++
++    # If two indents are valid but a previous comment went back to the
++    # normal indent, expect the next ones to do the same. In other words,
++    # avoid this:
++    #     list:
++    #         - 1
++    #     # comment on valid indent (0)
++    #         # comment on valid indent (4)
++    #     other-list:
++    #         - 2
++    if (comment.comment_before is not None and
++            not comment.comment_before.is_inline()):
++        prev_line_indent = comment.comment_before.column_no - 1
++
++    if (comment.column_no - 1 != prev_line_indent and
++            comment.column_no - 1 != next_line_indent):
++        yield LintProblem(comment.line_no, comment.column_no,
++                          'comment not indented like content')
+diff --git a/third_party/python/yamllint/yamllint/rules/common.py b/third_party/python/yamllint/yamllint/rules/common.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/common.py
+@@ -0,0 +1,89 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++import string
++
++import yaml
++
++from yamllint.linter import LintProblem
++
++
++def spaces_after(token, prev, next, min=-1, max=-1,
++                 min_desc=None, max_desc=None):
++    if next is not None and token.end_mark.line == next.start_mark.line:
++        spaces = next.start_mark.pointer - token.end_mark.pointer
++        if max != -1 and spaces > max:
++            return LintProblem(token.start_mark.line + 1,
++                               next.start_mark.column, max_desc)
++        elif min != -1 and spaces < min:
++            return LintProblem(token.start_mark.line + 1,
++                               next.start_mark.column + 1, min_desc)
++
++
++def spaces_before(token, prev, next, min=-1, max=-1,
++                  min_desc=None, max_desc=None):
++    if (prev is not None and prev.end_mark.line == token.start_mark.line and
++            # Discard tokens (only scalars?) that end at the start of next line
++            (prev.end_mark.pointer == 0 or
++             prev.end_mark.buffer[prev.end_mark.pointer - 1] != '\n')):
++        spaces = token.start_mark.pointer - prev.end_mark.pointer
++        if max != -1 and spaces > max:
++            return LintProblem(token.start_mark.line + 1,
++                               token.start_mark.column, max_desc)
++        elif min != -1 and spaces < min:
++            return LintProblem(token.start_mark.line + 1,
++                               token.start_mark.column + 1, min_desc)
++
++
++def get_line_indent(token):
++    """Finds the indent of the line the token starts in."""
++    start = token.start_mark.buffer.rfind('\n', 0,
++                                          token.start_mark.pointer) + 1
++    content = start
++    while token.start_mark.buffer[content] == ' ':
++        content += 1
++    return content - start
++
++
++def get_real_end_line(token):
++    """Finds the line on which the token really ends.
++
++    With pyyaml, scalar tokens often end on a next line.
++    """
++    end_line = token.end_mark.line + 1
++
++    if not isinstance(token, yaml.ScalarToken):
++        return end_line
++
++    pos = token.end_mark.pointer - 1
++    while (pos >= token.start_mark.pointer - 1 and
++           token.end_mark.buffer[pos] in string.whitespace):
++        if token.end_mark.buffer[pos] == '\n':
++            end_line -= 1
++        pos -= 1
++    return end_line
++
++
++def is_explicit_key(token):
++    # explicit key:
++    #   ? key
++    #   : v
++    # or
++    #   ?
++    #     key
++    #   : v
++    return (token.start_mark.pointer < token.end_mark.pointer and
++            token.start_mark.buffer[token.start_mark.pointer] == '?')
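++
++
++if __name__ == '__main__':
++    # Illustrative self-check (not upstream code) of two helpers above,
++    # driven directly by pyyaml's token stream.
++    explicit = [t for t in yaml.scan('? key\n: val\n')
++                if isinstance(t, yaml.KeyToken)]
++    implicit = [t for t in yaml.scan('key: val\n')
++                if isinstance(t, yaml.KeyToken)]
++    assert is_explicit_key(explicit[0])
++    assert not is_explicit_key(implicit[0])
++
++    scalar = next(t for t in yaml.scan('  foo: bar\n')
++                  if isinstance(t, yaml.ScalarToken))
++    assert get_line_indent(scalar) == 2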
+diff --git a/third_party/python/yamllint/yamllint/rules/document_end.py b/third_party/python/yamllint/yamllint/rules/document_end.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/document_end.py
+@@ -0,0 +1,107 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to require or forbid the use of the document end marker (``...``).
++
++.. rubric:: Options
++
++* Set ``present`` to ``true`` when the document end marker is required, or to
++  ``false`` when it is forbidden.
++
++.. rubric:: Examples
++
++#. With ``document-end: {present: true}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    ---
++    this:
++      is: [a, document]
++    ...
++    ---
++    - this
++    - is: another one
++    ...
++
++   the following code snippet would **FAIL**:
++   ::
++
++    ---
++    this:
++      is: [a, document]
++    ---
++    - this
++    - is: another one
++    ...
++
++#. With ``document-end: {present: false}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    ---
++    this:
++      is: [a, document]
++    ---
++    - this
++    - is: another one
++
++   the following code snippet would **FAIL**:
++   ::
++
++    ---
++    this:
++      is: [a, document]
++    ...
++    ---
++    - this
++    - is: another one
++"""
++
++
++import yaml
++
++from yamllint.linter import LintProblem
++
++
++ID = 'document-end'
++TYPE = 'token'
++CONF = {'present': bool}
++DEFAULT = {'present': True}
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if conf['present']:
++        is_stream_end = isinstance(token, yaml.StreamEndToken)
++        is_start = isinstance(token, yaml.DocumentStartToken)
++        prev_is_end_or_stream_start = isinstance(
++            prev, (yaml.DocumentEndToken, yaml.StreamStartToken)
++        )
++
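++        # The stream-end mark sits just past the final newline (when the
++        # file ends with one), so its 0-based line number already names the
++        # last line in 1-based terms; hence no '+ 1' in the branch below.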
++        if is_stream_end and not prev_is_end_or_stream_start:
++            yield LintProblem(token.start_mark.line, 1,
++                              'missing document end "..."')
++        elif is_start and not prev_is_end_or_stream_start:
++            yield LintProblem(token.start_mark.line + 1, 1,
++                              'missing document end "..."')
++
++    else:
++        if isinstance(token, yaml.DocumentEndToken):
++            yield LintProblem(token.start_mark.line + 1,
++                              token.start_mark.column + 1,
++                              'found forbidden document end "..."')
+diff --git a/third_party/python/yamllint/yamllint/rules/document_start.py b/third_party/python/yamllint/yamllint/rules/document_start.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/document_start.py
+@@ -0,0 +1,93 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to require or forbid the use of the document start marker (``---``).
++
++.. rubric:: Options
++
++* Set ``present`` to ``true`` when the document start marker is required, or to
++  ``false`` when it is forbidden.
++
++.. rubric:: Examples
++
++#. With ``document-start: {present: true}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    ---
++    this:
++      is: [a, document]
++    ---
++    - this
++    - is: another one
++
++   the following code snippet would **FAIL**:
++   ::
++
++    this:
++      is: [a, document]
++    ---
++    - this
++    - is: another one
++
++#. With ``document-start: {present: false}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    this:
++      is: [a, document]
++    ...
++
++   the following code snippet would **FAIL**:
++   ::
++
++    ---
++    this:
++      is: [a, document]
++    ...
++"""
++
++
++import yaml
++
++from yamllint.linter import LintProblem
++
++
++ID = 'document-start'
++TYPE = 'token'
++CONF = {'present': bool}
++DEFAULT = {'present': True}
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if conf['present']:
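++        # A %YAML/%TAG directive legitimately precedes the '---' marker, so
++        # DirectiveToken is excluded on both sides of the test below.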
++        if (isinstance(prev, (yaml.StreamStartToken,
++                              yaml.DocumentEndToken,
++                              yaml.DirectiveToken)) and
++            not isinstance(token, (yaml.DocumentStartToken,
++                                   yaml.DirectiveToken,
++                                   yaml.StreamEndToken))):
++            yield LintProblem(token.start_mark.line + 1, 1,
++                              'missing document start "---"')
++
++    else:
++        if isinstance(token, yaml.DocumentStartToken):
++            yield LintProblem(token.start_mark.line + 1,
++                              token.start_mark.column + 1,
++                              'found forbidden document start "---"')
+diff --git a/third_party/python/yamllint/yamllint/rules/empty_lines.py b/third_party/python/yamllint/yamllint/rules/empty_lines.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/empty_lines.py
+@@ -0,0 +1,108 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to set a maximal number of allowed consecutive blank lines.
++
++.. rubric:: Options
++
++* ``max`` defines the maximal number of empty lines allowed in the document.
++* ``max-start`` defines the maximal number of empty lines allowed at the
++  beginning of the file. This option takes precedence over ``max``.
++* ``max-end`` defines the maximal number of empty lines allowed at the end of
++  the file. This option takes precedence over ``max``.
++
++.. rubric:: Examples
++
++#. With ``empty-lines: {max: 1}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    - foo:
++        - 1
++        - 2
++
++    - bar: [3, 4]
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - foo:
++        - 1
++        - 2
++
++
++    - bar: [3, 4]
++"""
++
++
++from yamllint.linter import LintProblem
++
++
++ID = 'empty-lines'
++TYPE = 'line'
++CONF = {'max': int,
++        'max-start': int,
++        'max-end': int}
++DEFAULT = {'max': 2,
++           'max-start': 0,
++           'max-end': 0}
++
++
++def check(conf, line):
++    if line.start == line.end and line.end < len(line.buffer):
++        # Only alert on the last blank line of a series
++        if (line.end + 2 <= len(line.buffer) and
++                line.buffer[line.end:line.end + 2] == '\n\n'):
++            return
++        elif (line.end + 4 <= len(line.buffer) and
++              line.buffer[line.end:line.end + 4] == '\r\n\r\n'):
++            return
++
++        blank_lines = 0
++
++        start = line.start
++        while start >= 2 and line.buffer[start - 2:start] == '\r\n':
++            blank_lines += 1
++            start -= 2
++        while start >= 1 and line.buffer[start - 1] == '\n':
++            blank_lines += 1
++            start -= 1
++
++        max = conf['max']
++
++        # Special case: start of document
++        if start == 0:
++            blank_lines += 1  # first line doesn't have a preceding \n
++            max = conf['max-start']
++
++        # Special case: end of document
++        # NOTE: The last line of a file is always supposed to end with a new
++        # line. See the POSIX definition of a line at:
++        # http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_206
++        if ((line.end == len(line.buffer) - 1 and
++             line.buffer[line.end] == '\n') or
++            (line.end == len(line.buffer) - 2 and
++             line.buffer[line.end:line.end + 2] == '\r\n')):
++            # Allow the exception of the one-byte file containing '\n'
++            if line.end == 0:
++                return
++
++            max = conf['max-end']
++
++        if blank_lines > max:
++            yield LintProblem(line.line_no, 1, 'too many blank lines (%d > %d)'
++                                               % (blank_lines, max))
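++
++# Worked example (illustrative): in the buffer 'a\n\n\nb\n', lines 2 and 3
++# are blank; only line 3 (the last of the series) is checked, its backward
++# scan yields blank_lines == 2, and the default max of 2 is not exceeded.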
+diff --git a/third_party/python/yamllint/yamllint/rules/empty_values.py b/third_party/python/yamllint/yamllint/rules/empty_values.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/empty_values.py
+@@ -0,0 +1,96 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2017 Greg Dubicki
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to prevent nodes with empty content, which implicitly result in
++``null`` values.
++
++.. rubric:: Options
++
++* Use ``forbid-in-block-mappings`` to prevent empty values in block mappings.
++* Use ``forbid-in-flow-mappings`` to prevent empty values in flow mappings.
++
++.. rubric:: Examples
++
++#. With ``empty-values: {forbid-in-block-mappings: true}``
++
++   the following code snippets would **PASS**:
++   ::
++
++    some-mapping:
++      sub-element: correctly indented
++
++   ::
++
++    explicitly-null: null
++
++   the following code snippets would **FAIL**:
++   ::
++
++    some-mapping:
++    sub-element: incorrectly indented
++
++   ::
++
++    implicitly-null:
++
++#. With ``empty-values: {forbid-in-flow-mappings: true}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    {prop: null}
++    {a: 1, b: 2, c: 3}
++
++   the following code snippets would **FAIL**:
++   ::
++
++    {prop: }
++
++   ::
++
++    {a: 1, b:, c: 3}
++
++"""
++
++import yaml
++
++from yamllint.linter import LintProblem
++
++
++ID = 'empty-values'
++TYPE = 'token'
++CONF = {'forbid-in-block-mappings': bool,
++        'forbid-in-flow-mappings': bool}
++DEFAULT = {'forbid-in-block-mappings': True,
++           'forbid-in-flow-mappings': True}
++
++
++def check(conf, token, prev, next, nextnext, context):
++
++    if conf['forbid-in-block-mappings']:
++        if isinstance(token, yaml.ValueToken) and isinstance(next, (
++                yaml.KeyToken, yaml.BlockEndToken)):
++            yield LintProblem(token.start_mark.line + 1,
++                              token.end_mark.column + 1,
++                              'empty value in block mapping')
++
++    if conf['forbid-in-flow-mappings']:
++        if isinstance(token, yaml.ValueToken) and isinstance(next, (
++                yaml.FlowEntryToken, yaml.FlowMappingEndToken)):
++            yield LintProblem(token.start_mark.line + 1,
++                              token.end_mark.column + 1,
++                              'empty value in flow mapping')
+diff --git a/third_party/python/yamllint/yamllint/rules/hyphens.py b/third_party/python/yamllint/yamllint/rules/hyphens.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/hyphens.py
+@@ -0,0 +1,88 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to control the number of spaces after hyphens (``-``).
++
++.. rubric:: Options
++
++* ``max-spaces-after`` defines the maximal number of spaces allowed after
++  hyphens.
++
++.. rubric:: Examples
++
++#. With ``hyphens: {max-spaces-after: 1}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    - first list:
++        - a
++        - b
++    - - 1
++      - 2
++      - 3
++
++   the following code snippet would **FAIL**:
++   ::
++
++    -  first list:
++         - a
++         - b
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - - 1
++      -  2
++      - 3
++
++#. With ``hyphens: {max-spaces-after: 3}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    -   key
++    -  key2
++    - key42
++
++   the following code snippet would **FAIL**:
++   ::
++
++    -    key
++    -   key2
++    -  key42
++"""
++
++
++import yaml
++
++from yamllint.rules.common import spaces_after
++
++
++ID = 'hyphens'
++TYPE = 'token'
++CONF = {'max-spaces-after': int}
++DEFAULT = {'max-spaces-after': 1}
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if isinstance(token, yaml.BlockEntryToken):
++        problem = spaces_after(token, prev, next,
++                               max=conf['max-spaces-after'],
++                               max_desc='too many spaces after hyphen')
++        if problem is not None:
++            yield problem
+diff --git a/third_party/python/yamllint/yamllint/rules/indentation.py b/third_party/python/yamllint/yamllint/rules/indentation.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/indentation.py
+@@ -0,0 +1,575 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to control the indentation.
++
++.. rubric:: Options
++
++* ``spaces`` defines the indentation width, in spaces. Set either to an integer
++  (e.g. ``2`` or ``4``, representing the number of spaces in an indentation
++  level) or to ``consistent`` to allow any number, as long as it remains the
++  same within the file.
++* ``indent-sequences`` defines whether block sequences should be indented or
++  not (when in a mapping, this indentation is not mandatory -- some people
++  perceive the ``-`` as part of the indentation). Possible values: ``true``,
++  ``false``, ``whatever`` and ``consistent``. ``consistent`` requires either
++  all block sequences to be indented, or none to be. ``whatever`` means either
++  indenting or not indenting individual block sequences is OK.
++* ``check-multi-line-strings`` defines whether to lint indentation in
++  multi-line strings. Set to ``true`` to enable, ``false`` to disable.
++
++.. rubric:: Examples
++
++#. With ``indentation: {spaces: 1}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    history:
++     - name: Unix
++       date: 1969
++     - name: Linux
++       date: 1991
++    nest:
++     recurse:
++      - haystack:
++         needle
++
++#. With ``indentation: {spaces: 4}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    history:
++        - name: Unix
++          date: 1969
++        - name: Linux
++          date: 1991
++    nest:
++        recurse:
++            - haystack:
++                  needle
++
++   the following code snippet would **FAIL**:
++   ::
++
++    history:
++      - name: Unix
++        date: 1969
++      - name: Linux
++        date: 1991
++    nest:
++      recurse:
++        - haystack:
++            needle
++
++#. With ``indentation: {spaces: consistent}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    history:
++       - name: Unix
++         date: 1969
++       - name: Linux
++         date: 1991
++    nest:
++       recurse:
++          - haystack:
++               needle
++
++   the following code snippet would **FAIL**:
++   ::
++
++    some:
++      Russian:
++          dolls
++
++#. With ``indentation: {spaces: 2, indent-sequences: false}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    list:
++    - flying
++    - spaghetti
++    - monster
++
++   the following code snippet would **FAIL**:
++   ::
++
++    list:
++      - flying
++      - spaghetti
++      - monster
++
++#. With ``indentation: {spaces: 2, indent-sequences: whatever}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    list:
++    - flying:
++      - spaghetti
++      - monster
++    - not flying:
++        - spaghetti
++        - sauce
++
++#. With ``indentation: {spaces: 2, indent-sequences: consistent}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    - flying:
++      - spaghetti
++      - monster
++    - not flying:
++      - spaghetti
++      - sauce
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - flying:
++        - spaghetti
++        - monster
++    - not flying:
++      - spaghetti
++      - sauce
++
++#. With ``indentation: {spaces: 4, check-multi-line-strings: true}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    Blaise Pascal:
++        Je vous écris une longue lettre parce que
++        je n'ai pas le temps d'en écrire une courte.
++
++   the following code snippet would **PASS**:
++   ::
++
++    Blaise Pascal: Je vous écris une longue lettre parce que
++                   je n'ai pas le temps d'en écrire une courte.
++
++   the following code snippet would **FAIL**:
++   ::
++
++    Blaise Pascal: Je vous écris une longue lettre parce que
++      je n'ai pas le temps d'en écrire une courte.
++
++   the following code snippet would **FAIL**:
++   ::
++
++    C code:
++        void main() {
++            printf("foo");
++        }
++
++   the following code snippet would **PASS**:
++   ::
++
++    C code:
++        void main() {
++        printf("bar");
++        }
++"""
++
++import yaml
++
++from yamllint.linter import LintProblem
++from yamllint.rules.common import get_real_end_line, is_explicit_key
++
++
++ID = 'indentation'
++TYPE = 'token'
++CONF = {'spaces': (int, 'consistent'),
++        'indent-sequences': (bool, 'whatever', 'consistent'),
++        'check-multi-line-strings': bool}
++DEFAULT = {'spaces': 'consistent',
++           'indent-sequences': True,
++           'check-multi-line-strings': False}
++
++ROOT, B_MAP, F_MAP, B_SEQ, F_SEQ, B_ENT, KEY, VAL = range(8)
++labels = ('ROOT', 'B_MAP', 'F_MAP', 'B_SEQ', 'F_SEQ', 'B_ENT', 'KEY', 'VAL')
++
++
++class Parent(object):
++    def __init__(self, type, indent, line_indent=None):
++        self.type = type
++        self.indent = indent
++        self.line_indent = line_indent
++        self.explicit_key = False
++        self.implicit_block_seq = False
++
++    def __repr__(self):
++        return '%s:%d' % (labels[self.type], self.indent)
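++
++# For example (illustrative, simplified): while scanning
++#     key:
++#       - item
++# the stack grows from [ROOT:0] to [ROOT:0, B_MAP:0, KEY:0, VAL:2], then
++# gains B_SEQ:2 and B_ENT:4 for the block sequence, and unwinds at the end.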
++
++
++def check_scalar_indentation(conf, token, context):
++    if token.start_mark.line == token.end_mark.line:
++        return
++
++    def compute_expected_indent(found_indent):
++        def detect_indent(base_indent):
++            if not isinstance(context['spaces'], int):
++                context['spaces'] = found_indent - base_indent
++            return base_indent + context['spaces']
++
++        if token.plain:
++            return token.start_mark.column
++        elif token.style in ('"', "'"):
++            return token.start_mark.column + 1
++        elif token.style in ('>', '|'):
++            if context['stack'][-1].type == B_ENT:
++                # - >
++                #     multi
++                #     line
++                return detect_indent(token.start_mark.column)
++            elif context['stack'][-1].type == KEY:
++                assert context['stack'][-1].explicit_key
++                # - ? >
++                #       multi-line
++                #       key
++                #   : >
++                #       multi-line
++                #       value
++                return detect_indent(token.start_mark.column)
++            elif context['stack'][-1].type == VAL:
++                if token.start_mark.line + 1 > context['cur_line']:
++                    # - key:
++                    #     >
++                    #       multi
++                    #       line
++                    return detect_indent(context['stack'][-1].indent)
++                elif context['stack'][-2].explicit_key:
++                    # - ? key
++                    #   : >
++                    #       multi-line
++                    #       value
++                    return detect_indent(token.start_mark.column)
++                else:
++                    # - key: >
++                    #     multi
++                    #     line
++                    return detect_indent(context['stack'][-2].indent)
++            else:
++                return detect_indent(context['stack'][-1].indent)
++
++    expected_indent = None
++
++    line_no = token.start_mark.line + 1
++
++    line_start = token.start_mark.pointer
++    while True:
++        line_start = token.start_mark.buffer.find(
++            '\n', line_start, token.end_mark.pointer - 1) + 1
++        if line_start == 0:
++            break
++        line_no += 1
++
++        indent = 0
++        while token.start_mark.buffer[line_start + indent] == ' ':
++            indent += 1
++        if token.start_mark.buffer[line_start + indent] == '\n':
++            continue
++
++        if expected_indent is None:
++            expected_indent = compute_expected_indent(indent)
++
++        if indent != expected_indent:
++            yield LintProblem(line_no, indent + 1,
++                              'wrong indentation: expected %d but found %d' %
++                              (expected_indent, indent))
++
++
++def _check(conf, token, prev, next, nextnext, context):
++    if 'stack' not in context:
++        context['stack'] = [Parent(ROOT, 0)]
++        context['cur_line'] = -1
++        context['spaces'] = conf['spaces']
++        context['indent-sequences'] = conf['indent-sequences']
++
++    # Step 1: Lint
++
++    is_visible = (
++        not isinstance(token, (yaml.StreamStartToken, yaml.StreamEndToken)) and
++        not isinstance(token, yaml.BlockEndToken) and
++        not (isinstance(token, yaml.ScalarToken) and token.value == ''))
++    first_in_line = (is_visible and
++                     token.start_mark.line + 1 > context['cur_line'])
++
++    def detect_indent(base_indent, next):
++        if not isinstance(context['spaces'], int):
++            context['spaces'] = next.start_mark.column - base_indent
++        return base_indent + context['spaces']
++
++    if first_in_line:
++        found_indentation = token.start_mark.column
++        expected = context['stack'][-1].indent
++
++        if isinstance(token, (yaml.FlowMappingEndToken,
++                              yaml.FlowSequenceEndToken)):
++            expected = context['stack'][-1].line_indent
++        elif (context['stack'][-1].type == KEY and
++                context['stack'][-1].explicit_key and
++                not isinstance(token, yaml.ValueToken)):
++            expected = detect_indent(expected, token)
++
++        if found_indentation != expected:
++            yield LintProblem(token.start_mark.line + 1, found_indentation + 1,
++                              'wrong indentation: expected %d but found %d' %
++                              (expected, found_indentation))
++
++    if (isinstance(token, yaml.ScalarToken) and
++            conf['check-multi-line-strings']):
++        for problem in check_scalar_indentation(conf, token, context):
++            yield problem
++
++    # Step 2.a:
++
++    if is_visible:
++        context['cur_line'] = get_real_end_line(token)
++        if first_in_line:
++            context['cur_line_indent'] = found_indentation
++
++    # Step 2.b: Update state
++
++    if isinstance(token, yaml.BlockMappingStartToken):
++        #   - a: 1
++        # or
++        #   - ? a
++        #     : 1
++        # or
++        #   - ?
++        #       a
++        #     : 1
++        assert isinstance(next, yaml.KeyToken)
++        assert next.start_mark.line == token.start_mark.line
++
++        indent = token.start_mark.column
++
++        context['stack'].append(Parent(B_MAP, indent))
++
++    elif isinstance(token, yaml.FlowMappingStartToken):
++        if next.start_mark.line == token.start_mark.line:
++            #   - {a: 1, b: 2}
++            indent = next.start_mark.column
++        else:
++            #   - {
++            #     a: 1, b: 2
++            #   }
++            indent = detect_indent(context['cur_line_indent'], next)
++
++        context['stack'].append(Parent(F_MAP, indent,
++                                       line_indent=context['cur_line_indent']))
++
++    elif isinstance(token, yaml.BlockSequenceStartToken):
++        #   - - a
++        #     - b
++        assert isinstance(next, yaml.BlockEntryToken)
++        assert next.start_mark.line == token.start_mark.line
++
++        indent = token.start_mark.column
++
++        context['stack'].append(Parent(B_SEQ, indent))
++
++    elif (isinstance(token, yaml.BlockEntryToken) and
++            # in case of an empty entry
++            not isinstance(next, (yaml.BlockEntryToken, yaml.BlockEndToken))):
++        # It looks like pyyaml doesn't issue BlockSequenceStartTokens when the
++        # list is not indented. We need to compensate that.
++        if context['stack'][-1].type != B_SEQ:
++            context['stack'].append(Parent(B_SEQ, token.start_mark.column))
++            context['stack'][-1].implicit_block_seq = True
++
++        if next.start_mark.line == token.end_mark.line:
++            #   - item 1
++            #   - item 2
++            indent = next.start_mark.column
++        elif next.start_mark.column == token.start_mark.column:
++            #   -
++            #   key: value
++            indent = next.start_mark.column
++        else:
++            #   -
++            #     item 1
++            #   -
++            #     key:
++            #       value
++            indent = detect_indent(token.start_mark.column, next)
++
++        context['stack'].append(Parent(B_ENT, indent))
++
++    elif isinstance(token, yaml.FlowSequenceStartToken):
++        if next.start_mark.line == token.start_mark.line:
++            #   - [a, b]
++            indent = next.start_mark.column
++        else:
++            #   - [
++            #   a, b
++            # ]
++            indent = detect_indent(context['cur_line_indent'], next)
++
++        context['stack'].append(Parent(F_SEQ, indent,
++                                       line_indent=context['cur_line_indent']))
++
++    elif isinstance(token, yaml.KeyToken):
++        indent = context['stack'][-1].indent
++
++        context['stack'].append(Parent(KEY, indent))
++
++        context['stack'][-1].explicit_key = is_explicit_key(token)
++
++    elif isinstance(token, yaml.ValueToken):
++        assert context['stack'][-1].type == KEY
++
++        # Special cases:
++        #     key: &anchor
++        #       value
++        # and:
++        #     key: !!tag
++        #       value
++        if isinstance(next, (yaml.AnchorToken, yaml.TagToken)):
++            if (next.start_mark.line == prev.start_mark.line and
++                    next.start_mark.line < nextnext.start_mark.line):
++                next = nextnext
++
++        # Only if value is not empty
++        if not isinstance(next, (yaml.BlockEndToken,
++                                 yaml.FlowMappingEndToken,
++                                 yaml.FlowSequenceEndToken,
++                                 yaml.KeyToken)):
++            if context['stack'][-1].explicit_key:
++                #   ? k
++                #   : value
++                # or
++                #   ? k
++                #   :
++                #     value
++                indent = detect_indent(context['stack'][-1].indent, next)
++            elif next.start_mark.line == prev.start_mark.line:
++                #   k: value
++                indent = next.start_mark.column
++            elif isinstance(next, (yaml.BlockSequenceStartToken,
++                                   yaml.BlockEntryToken)):
++                # NOTE: We add BlockEntryToken in the test above because
++                # sometimes BlockSequenceStartToken are not issued. Try
++                # yaml.scan()ning this:
++                #     '- lib:\n'
++                #     '  - var\n'
++                if context['indent-sequences'] is False:
++                    indent = context['stack'][-1].indent
++                elif context['indent-sequences'] is True:
++                    if (context['spaces'] == 'consistent' and
++                            next.start_mark.column -
++                            context['stack'][-1].indent == 0):
++                        # In this case, the block sequence item is not indented
++                        # (while it should be), but we don't know yet the
++                        # indentation it should have (because `spaces` is
++                        # `consistent` and its value has not been computed yet
++                        # -- this is probably the beginning of the document).
++                        # So we choose an arbitrary value (2).
++                        indent = 2
++                    else:
++                        indent = detect_indent(context['stack'][-1].indent,
++                                               next)
++                else:  # 'whatever' or 'consistent'
++                    if next.start_mark.column == context['stack'][-1].indent:
++                        #   key:
++                        #   - e1
++                        #   - e2
++                        if context['indent-sequences'] == 'consistent':
++                            context['indent-sequences'] = False
++                        indent = context['stack'][-1].indent
++                    else:
++                        if context['indent-sequences'] == 'consistent':
++                            context['indent-sequences'] = True
++                        #   key:
++                        #     - e1
++                        #     - e2
++                        indent = detect_indent(context['stack'][-1].indent,
++                                               next)
++            else:
++                #   k:
++                #     value
++                indent = detect_indent(context['stack'][-1].indent, next)
++
++            context['stack'].append(Parent(VAL, indent))
++
++    consumed_current_token = False
++    while True:
++        if (context['stack'][-1].type == F_SEQ and
++                isinstance(token, yaml.FlowSequenceEndToken) and
++                not consumed_current_token):
++            context['stack'].pop()
++            consumed_current_token = True
++
++        elif (context['stack'][-1].type == F_MAP and
++                isinstance(token, yaml.FlowMappingEndToken) and
++                not consumed_current_token):
++            context['stack'].pop()
++            consumed_current_token = True
++
++        elif (context['stack'][-1].type in (B_MAP, B_SEQ) and
++                isinstance(token, yaml.BlockEndToken) and
++                not context['stack'][-1].implicit_block_seq and
++                not consumed_current_token):
++            context['stack'].pop()
++            consumed_current_token = True
++
++        elif (context['stack'][-1].type == B_ENT and
++                not isinstance(token, yaml.BlockEntryToken) and
++                context['stack'][-2].implicit_block_seq and
++                not isinstance(token, (yaml.AnchorToken, yaml.TagToken)) and
++                not isinstance(next, yaml.BlockEntryToken)):
++            context['stack'].pop()
++            context['stack'].pop()
++
++        elif (context['stack'][-1].type == B_ENT and
++                isinstance(next, (yaml.BlockEntryToken, yaml.BlockEndToken))):
++            context['stack'].pop()
++
++        elif (context['stack'][-1].type == VAL and
++                not isinstance(token, yaml.ValueToken) and
++                not isinstance(token, (yaml.AnchorToken, yaml.TagToken))):
++            assert context['stack'][-2].type == KEY
++            context['stack'].pop()
++            context['stack'].pop()
++
++        elif (context['stack'][-1].type == KEY and
++                isinstance(next, (yaml.BlockEndToken,
++                                  yaml.FlowMappingEndToken,
++                                  yaml.FlowSequenceEndToken,
++                                  yaml.KeyToken))):
++            # A key without a value: it's part of a set. Let's drop this key
++            # and leave room for the next one.
++            context['stack'].pop()
++
++        else:
++            break
++
++
++def check(conf, token, prev, next, nextnext, context):
++    try:
++        for problem in _check(conf, token, prev, next, nextnext, context):
++            yield problem
++    except AssertionError:
++        yield LintProblem(token.start_mark.line + 1,
++                          token.start_mark.column + 1,
++                          'cannot infer indentation: unexpected token')
+diff --git a/third_party/python/yamllint/yamllint/rules/key_duplicates.py b/third_party/python/yamllint/yamllint/rules/key_duplicates.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/key_duplicates.py
+@@ -0,0 +1,100 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to prevent multiple entries with the same key in mappings.
++
++.. rubric:: Examples
++
++#. With ``key-duplicates: {}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    - key 1: v
++      key 2: val
++      key 3: value
++    - {a: 1, b: 2, c: 3}
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - key 1: v
++      key 2: val
++      key 1: value
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - {a: 1, b: 2, b: 3}
++
++   the following code snippet would **FAIL**:
++   ::
++
++    duplicated key: 1
++    "duplicated key": 2
++
++    other duplication: 1
++    ? >-
++        other
++        duplication
++    : 2
++"""
++
++import yaml
++
++from yamllint.linter import LintProblem
++
++
++ID = 'key-duplicates'
++TYPE = 'token'
++
++MAP, SEQ = range(2)
++
++
++class Parent(object):
++    def __init__(self, type):
++        self.type = type
++        self.keys = []
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if 'stack' not in context:
++        context['stack'] = []
++
++    if isinstance(token, (yaml.BlockMappingStartToken,
++                          yaml.FlowMappingStartToken)):
++        context['stack'].append(Parent(MAP))
++    elif isinstance(token, (yaml.BlockSequenceStartToken,
++                            yaml.FlowSequenceStartToken)):
++        context['stack'].append(Parent(SEQ))
++    elif isinstance(token, (yaml.BlockEndToken,
++                            yaml.FlowMappingEndToken,
++                            yaml.FlowSequenceEndToken)):
++        context['stack'].pop()
++    elif (isinstance(token, yaml.KeyToken) and
++          isinstance(next, yaml.ScalarToken)):
++        # This check is done because KeyTokens can be found inside flow
++        # sequences... strange, but allowed.
++        if len(context['stack']) > 0 and context['stack'][-1].type == MAP:
++            if (next.value in context['stack'][-1].keys and
++                    # `<<` is "merge key", see http://yaml.org/type/merge.html
++                    next.value != '<<'):
++                yield LintProblem(
++                    next.start_mark.line + 1, next.start_mark.column + 1,
++                    'duplication of key "%s" in mapping' % next.value)
++            else:
++                context['stack'][-1].keys.append(next.value)
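++
++# Illustrative note: thanks to the '<<' exemption above, repeating the merge
++# key in one mapping is not reported, e.g.:
++#     base: &b {a: 1}
++#     extra: &e {c: 2}
++#     merged:
++#       <<: *b
++#       <<: *e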
+diff --git a/third_party/python/yamllint/yamllint/rules/key_ordering.py b/third_party/python/yamllint/yamllint/rules/key_ordering.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/key_ordering.py
+@@ -0,0 +1,109 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2017 Johannes F. Knauf
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to enforce alphabetical ordering of keys in mappings. The sorting
++order uses the Unicode code point number. As a result, the ordering is
++case-sensitive and not accent-friendly (see examples below).
++
++.. rubric:: Examples
++
++#. With ``key-ordering: {}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    - key 1: v
++      key 2: val
++      key 3: value
++    - {a: 1, b: 2, c: 3}
++    - T-shirt: 1
++      T-shirts: 2
++      t-shirt: 3
++      t-shirts: 4
++    - hair: true
++      hais: true
++      haïr: true
++      haïssable: true
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - key 2: v
++      key 1: val
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - {b: 1, a: 2}
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - T-shirt: 1
++      t-shirt: 2
++      T-shirts: 3
++      t-shirts: 4
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - haïr: true
++      hais: true
++"""
++
++import yaml
++
++from yamllint.linter import LintProblem
++
++
++ID = 'key-ordering'
++TYPE = 'token'
++
++MAP, SEQ = range(2)
++
++
++class Parent(object):
++    def __init__(self, type):
++        self.type = type
++        self.keys = []
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if 'stack' not in context:
++        context['stack'] = []
++
++    if isinstance(token, (yaml.BlockMappingStartToken,
++                          yaml.FlowMappingStartToken)):
++        context['stack'].append(Parent(MAP))
++    elif isinstance(token, (yaml.BlockSequenceStartToken,
++                            yaml.FlowSequenceStartToken)):
++        context['stack'].append(Parent(SEQ))
++    elif isinstance(token, (yaml.BlockEndToken,
++                            yaml.FlowMappingEndToken,
++                            yaml.FlowSequenceEndToken)):
++        context['stack'].pop()
++    elif (isinstance(token, yaml.KeyToken) and
++          isinstance(next, yaml.ScalarToken)):
++        # This check is done because KeyTokens can be found inside flow
++        # sequences... strange, but allowed.
++        if len(context['stack']) > 0 and context['stack'][-1].type == MAP:
++            if any(next.value < key for key in context['stack'][-1].keys):
++                yield LintProblem(
++                    next.start_mark.line + 1, next.start_mark.column + 1,
++                    'wrong ordering of key "%s" in mapping' % next.value)
++            else:
++                context['stack'][-1].keys.append(next.value)
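++
++# The docstring examples follow from plain code-point arithmetic
++# (illustrative): ord('T') == 84 < ord('t') == 116, so 'T-shirt' sorts before
++# 't-shirt'; ord('ï') == 239 > ord('i') == 105, so 'haïr' sorts after 'hais'.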
+diff --git a/third_party/python/yamllint/yamllint/rules/line_length.py b/third_party/python/yamllint/yamllint/rules/line_length.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/line_length.py
+@@ -0,0 +1,149 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to set a limit on line length.
++
++Note: with Python 2, the ``line-length`` rule may not work properly with
++Unicode characters because of the way strings are represented in bytes. We
++recommend running yamllint with Python 3.
++
++.. rubric:: Options
++
++* ``max`` defines the maximal (inclusive) length of lines.
++* ``allow-non-breakable-words`` is used to allow non-breakable words (without
++  spaces inside) to overflow the limit. This is useful for long URLs, for
++  instance. Use ``true`` to allow, ``false`` to forbid.
++* ``allow-non-breakable-inline-mappings`` implies ``allow-non-breakable-words``
++  and extends it to also allow non-breakable words in inline mappings.
++
++.. rubric:: Examples
++
++#. With ``line-length: {max: 70}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    long sentence:
++      Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
++      eiusmod tempor incididunt ut labore et dolore magna aliqua.
++
++   the following code snippet would **FAIL**:
++   ::
++
++    long sentence:
++      Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
++      tempor incididunt ut labore et dolore magna aliqua.
++
++#. With ``line-length: {max: 60, allow-non-breakable-words: true}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    this:
++      is:
++        - a:
++            http://localhost/very/very/very/very/very/very/very/very/long/url
++
++    # this comment is too long,
++    # but hard to split:
++    # http://localhost/another/very/very/very/very/very/very/very/very/long/url
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - this line is waaaaaaaaaaaaaay too long but could be easily split...
++
++   and the following code snippet would also **FAIL**:
++   ::
++
++    - foobar: http://localhost/very/very/very/very/very/very/very/very/long/url
++
++#. With ``line-length: {max: 60, allow-non-breakable-words: true,
++   allow-non-breakable-inline-mappings: true}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    - foobar: http://localhost/very/very/very/very/very/very/very/very/long/url
++
++#. With ``line-length: {max: 60, allow-non-breakable-words: false}``
++
++   the following code snippet would **FAIL**:
++   ::
++
++    this:
++      is:
++        - a:
++            http://localhost/very/very/very/very/very/very/very/very/long/url
++"""
++
++
++import yaml
++
++from yamllint.linter import LintProblem
++
++
++ID = 'line-length'
++TYPE = 'line'
++CONF = {'max': int,
++        'allow-non-breakable-words': bool,
++        'allow-non-breakable-inline-mappings': bool}
++DEFAULT = {'max': 80,
++           'allow-non-breakable-words': True,
++           'allow-non-breakable-inline-mappings': False}
++
++
++def check_inline_mapping(line):
++    loader = yaml.SafeLoader(line.content)
++    try:
++        while loader.peek_token():
++            if isinstance(loader.get_token(), yaml.BlockMappingStartToken):
++                while loader.peek_token():
++                    if isinstance(loader.get_token(), yaml.ValueToken):
++                        t = loader.get_token()
++                        if isinstance(t, yaml.ScalarToken):
++                            return (
++                                ' ' not in line.content[t.start_mark.column:])
++    except yaml.scanner.ScannerError:
++        pass
++
++    return False
++
++
++def check(conf, line):
++    if line.end - line.start > conf['max']:
++        conf['allow-non-breakable-words'] |= \
++            conf['allow-non-breakable-inline-mappings']
++        if conf['allow-non-breakable-words']:
++            start = line.start
++            while start < line.end and line.buffer[start] == ' ':
++                start += 1
++
++            if start != line.end:
++                if line.buffer[start] in ('#', '-'):
++                    start += 2
++
++                if line.buffer.find(' ', start, line.end) == -1:
++                    return
++
++                if (conf['allow-non-breakable-inline-mappings'] and
++                        check_inline_mapping(line)):
++                    return
++
++        yield LintProblem(line.line_no, conf['max'] + 1,
++                          'line too long (%d > %d characters)' %
++                          (line.end - line.start, conf['max']))
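++
++# Illustrative summary: with allow-non-breakable-words enabled, an over-long
++# line is exempted only when the text after an optional leading '# ' or '- '
++# contains no space at all, i.e. a single unbreakable token such as a URL.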
+diff --git a/third_party/python/yamllint/yamllint/rules/new_line_at_end_of_file.py b/third_party/python/yamllint/yamllint/rules/new_line_at_end_of_file.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/new_line_at_end_of_file.py
+@@ -0,0 +1,37 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to require a new line character (``\\n``) at the end of files.
++
++The POSIX standard `requires the last line to end with a new line character
++<http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_206>`_.
++All UNIX tools expect a new line at the end of files. Most text editors use
++this convention too.
++"""
++
++
++from yamllint.linter import LintProblem
++
++
++ID = 'new-line-at-end-of-file'
++TYPE = 'line'
++
++
++def check(conf, line):
++    if line.end == len(line.buffer) and line.end > line.start:
++        yield LintProblem(line.line_no, line.end - line.start + 1,
++                          'no new line character at the end of file')
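To see this rule fire, lint a buffer whose last line lacks the terminating newline. A minimal sketch under the same import assumptions as the previous example::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules: {new-line-at-end-of-file: enable}')
    for problem in linter.run('key: value', conf):  # buffer has no trailing '\n'
        print(problem.line, problem.column, problem.desc)
    # -> 1 11 no new line character at the end of file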
+diff --git a/third_party/python/yamllint/yamllint/rules/new_lines.py b/third_party/python/yamllint/yamllint/rules/new_lines.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/new_lines.py
+@@ -0,0 +1,46 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to force the type of new line characters.
++
++.. rubric:: Options
++
++* Set ``type`` to ``unix`` to use UNIX-typed new line characters (``\\n``), or
++  ``dos`` to use DOS-typed new line characters (``\\r\\n``).
++"""
++
++
++from yamllint.linter import LintProblem
++
++
++ID = 'new-lines'
++TYPE = 'line'
++CONF = {'type': ('unix', 'dos')}
++DEFAULT = {'type': 'unix'}
++
++
++def check(conf, line):
++    if line.start == 0 and len(line.buffer) > line.end:
++        if conf['type'] == 'dos':
++            if (line.end + 2 > len(line.buffer) or
++                    line.buffer[line.end:line.end + 2] != '\r\n'):
++                yield LintProblem(1, line.end - line.start + 1,
++                                  'wrong new line character: expected \\r\\n')
++        else:
++            if line.buffer[line.end] == '\r':
++                yield LintProblem(1, line.end - line.start + 1,
++                                  'wrong new line character: expected \\n')
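Note the ``line.start == 0`` guard above: only the first line of the buffer is inspected, presumably on the assumption that a file does not mix line-ending conventions. A minimal sketch of the rule rejecting DOS line endings, assuming the vendored yamllint is importable::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules: {new-lines: {type: unix}}')
    for problem in linter.run('a: 1\r\nb: 2\r\n', conf):
        print(problem.line, problem.column, problem.desc)
    # -> 1 5 wrong new line character: expected \n   (reported once, on line 1)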
+diff --git a/third_party/python/yamllint/yamllint/rules/octal_values.py b/third_party/python/yamllint/yamllint/rules/octal_values.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/octal_values.py
+@@ -0,0 +1,95 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2017 ScienJus
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to prevent values with octal numbers. In YAML, numbers that
++start with ``0`` are interpreted as octal, but this is not always wanted.
++For instance ``010`` is the city code of Beijing, and should not be
++converted to ``8``.
++
++.. rubric:: Examples
++
++#. With ``octal-values: {forbid-implicit-octal: true}``
++
++   the following code snippets would **PASS**:
++   ::
++
++    user:
++      city-code: '010'
++
++   the following code snippets would **PASS**:
++   ::
++
++    user:
++      city-code: 010,021
++
++   the following code snippets would **FAIL**:
++   ::
++
++    user:
++      city-code: 010
++
++#. With ``octal-values: {forbid-explicit-octal: true}``
++
++   the following code snippets would **PASS**:
++   ::
++
++    user:
++      city-code: '0o10'
++
++   the following code snippets would **FAIL**:
++   ::
++
++    user:
++      city-code: 0o10
++"""
++
++import yaml
++
++from yamllint.linter import LintProblem
++
++
++ID = 'octal-values'
++TYPE = 'token'
++CONF = {'forbid-implicit-octal': bool,
++        'forbid-explicit-octal': bool}
++DEFAULT = {'forbid-implicit-octal': True,
++           'forbid-explicit-octal': True}
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if prev and isinstance(prev, yaml.tokens.TagToken):
++        return
++
++    if conf['forbid-implicit-octal']:
++        if isinstance(token, yaml.tokens.ScalarToken):
++            if not token.style:
++                val = token.value
++                if val.isdigit() and len(val) > 1 and val[0] == '0':
++                    yield LintProblem(
++                        token.start_mark.line + 1, token.end_mark.column + 1,
++                        'forbidden implicit octal value "%s"' %
++                        token.value)
++
++    if conf['forbid-explicit-octal']:
++        if isinstance(token, yaml.tokens.ScalarToken):
++            if not token.style:
++                val = token.value
++                if len(val) > 2 and val[:2] == '0o' and val[2:].isdigit():
++                    yield LintProblem(
++                        token.start_mark.line + 1, token.end_mark.column + 1,
++                        'forbidden explicit octal value "%s"' %
++                        token.value)
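The motivation is easy to reproduce with PyYAML itself, which implements YAML 1.1 implicit typing; only the unquoted all-digit scalar is coerced (a sketch)::

    import yaml

    yaml.safe_load('city-code: 010')      # {'city-code': 8} -- implicit octal
    yaml.safe_load("city-code: '010'")    # {'city-code': '010'} -- quoting keeps the string
    yaml.safe_load('city-code: 010,021')  # {'city-code': '010,021'} -- not a number at all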
+diff --git a/third_party/python/yamllint/yamllint/rules/quoted_strings.py b/third_party/python/yamllint/yamllint/rules/quoted_strings.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/quoted_strings.py
+@@ -0,0 +1,78 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2018 ClearScore
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to forbid any string values that are not quoted.
++You can also enforce the type of the quote used using the ``quote-type`` option
++(``single``, ``double`` or ``any``).
++
++**Note**: Multi-line strings (with ``|`` or ``>``) will not be checked.
++
++.. rubric:: Examples
++
++#. With ``quoted-strings: {quote-type: any}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    foo: "bar"
++    bar: 'foo'
++    number: 123
++    boolean: true
++
++   the following code snippet would **FAIL**:
++   ::
++
++    foo: bar
++"""
++
++import yaml
++
++from yamllint.linter import LintProblem
++
++ID = 'quoted-strings'
++TYPE = 'token'
++CONF = {'quote-type': ('any', 'single', 'double')}
++DEFAULT = {'quote-type': 'any'}
++
++
++def check(conf, token, prev, next, nextnext, context):
++    quote_type = conf['quote-type']
++
++    if (isinstance(token, yaml.tokens.ScalarToken) and
++            isinstance(prev, (yaml.ValueToken, yaml.TagToken))):
++        # Ignore explicit types, e.g. !!str testtest or !!int 42
++        if (prev and isinstance(prev, yaml.tokens.TagToken) and
++                prev.value[0] == '!!'):
++            return
++
++        # Ignore numbers, booleans, etc.
++        resolver = yaml.resolver.Resolver()
++        if resolver.resolve(yaml.nodes.ScalarNode, token.value,
++                            (True, False)) != 'tag:yaml.org,2002:str':
++            return
++
++        # Ignore multi-line strings
++        if (not token.plain) and (token.style == "|" or token.style == ">"):
++            return
++
++        if ((quote_type == 'single' and token.style != "'") or
++                (quote_type == 'double' and token.style != '"') or
++                (quote_type == 'any' and token.style is None)):
++            yield LintProblem(
++                token.start_mark.line + 1,
++                token.start_mark.column + 1,
++                "string value is not quoted with %s quotes" % (quote_type))
+diff --git a/third_party/python/yamllint/yamllint/rules/trailing_spaces.py b/third_party/python/yamllint/yamllint/rules/trailing_spaces.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/trailing_spaces.py
+@@ -0,0 +1,62 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to forbid trailing spaces at the end of lines.
++
++.. rubric:: Examples
++
++#. With ``trailing-spaces: {}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    this document doesn't contain
++    any trailing
++    spaces
++
++   the following code snippet would **FAIL**:
++   ::
++
++    this document contains     
++    trailing spaces
++    on lines 1 and 3         
++"""
++
++
++import string
++
++from yamllint.linter import LintProblem
++
++
++ID = 'trailing-spaces'
++TYPE = 'line'
++
++
++def check(conf, line):
++    if line.end == 0:
++        return
++
++    # YAML recognizes two white space characters: space and tab.
++    # http://yaml.org/spec/1.2/spec.html#id2775170
++
++    pos = line.end
++    while line.buffer[pos - 1] in string.whitespace and pos > line.start:
++        pos -= 1
++
++    if pos != line.end and line.buffer[pos] in ' \t':
++        yield LintProblem(line.line_no, pos - line.start + 1,
++                          'trailing spaces')
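A minimal sketch of the rule in action, under the same import assumptions as the earlier examples; the reported column points at the first trailing space::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules: {trailing-spaces: enable}')
    for problem in linter.run('key: value   \n', conf):
        print(problem.line, problem.column, problem.desc)
    # -> 1 11 trailing spaces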
+diff --git a/third_party/python/yamllint/yamllint/rules/truthy.py b/third_party/python/yamllint/yamllint/rules/truthy.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/yamllint/rules/truthy.py
+@@ -0,0 +1,126 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Peter Ericson
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++"""
++Use this rule to forbid non-explicitly typed truthy values other than allowed
++ones (by default: ``true`` and ``false``), for example ``YES`` or ``off``.
++
++This can be useful to prevent surprises from YAML parsers transforming
++``[yes, FALSE, Off]`` into ``[true, false, false]`` or
++``{y: 1, yes: 2, on: 3, true: 4, True: 5}`` into ``{y: 1, true: 5}``.
++
++.. rubric:: Options
++
++* ``allowed-values`` defines the list of truthy values which will be ignored
++  during linting. The default is ``['true', 'false']``, but can be changed to
++  any list containing: ``'TRUE'``, ``'True'``,  ``'true'``, ``'FALSE'``,
++  ``'False'``, ``'false'``, ``'YES'``, ``'Yes'``, ``'yes'``, ``'NO'``,
++  ``'No'``, ``'no'``, ``'ON'``, ``'On'``, ``'on'``, ``'OFF'``, ``'Off'``,
++  ``'off'``.
++
++.. rubric:: Examples
++
++#. With ``truthy: {}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    boolean: true
++
++    object: {"True": 1, 1: "True"}
++
++    "yes":  1
++    "on":   2
++    "True": 3
++
++    explicit:
++      string1: !!str True
++      string2: !!str yes
++      string3: !!str off
++      encoded: !!binary |
++                 True
++                 OFF
++                 pad==  # this decodes as 'N\xbb\x9e8Qii'
++      boolean1: !!bool true
++      boolean2: !!bool "false"
++      boolean3: !!bool FALSE
++      boolean4: !!bool True
++      boolean5: !!bool off
++      boolean6: !!bool NO
++
++   the following code snippet would **FAIL**:
++   ::
++
++    object: {True: 1, 1: True}
++
++   the following code snippet would **FAIL**:
++   ::
++
++    yes:  1
++    on:   2
++    True: 3
++
++#. With ``truthy: {allowed-values: ["yes", "no"]}``
++
++   the following code snippet would **PASS**:
++   ::
++
++    - yes
++    - no
++    - "true"
++    - 'false'
++    - foo
++    - bar
++
++   the following code snippet would **FAIL**:
++   ::
++
++    - true
++    - false
++    - on
++    - off
++"""
++
++import yaml
++
++from yamllint.linter import LintProblem
++
++
++TRUTHY = ['YES', 'Yes', 'yes',
++          'NO', 'No', 'no',
++          'TRUE', 'True', 'true',
++          'FALSE', 'False', 'false',
++          'ON', 'On', 'on',
++          'OFF', 'Off', 'off']
++
++
++ID = 'truthy'
++TYPE = 'token'
++CONF = {'allowed-values': list(TRUTHY)}
++DEFAULT = {'allowed-values': ['true', 'false']}
++
++
++def check(conf, token, prev, next, nextnext, context):
++    if prev and isinstance(prev, yaml.tokens.TagToken):
++        return
++
++    if isinstance(token, yaml.tokens.ScalarToken):
++        if (token.value in (set(TRUTHY) - set(conf['allowed-values'])) and
++                token.style is None):
++            yield LintProblem(token.start_mark.line + 1,
++                              token.start_mark.column + 1,
++                              "truthy value should be one of [" +
++                              ", ".join(sorted(conf['allowed-values'])) + "]")

File diff suppressed because it is too large
+ 0 - 1141
mozilla-release/patches/1619555-76a1.patch


+ 2739 - 0
mozilla-release/patches/1619555-partial-76a1.patch

@@ -0,0 +1,2739 @@
+# HG changeset patch
+# User Alessio Placitelli <alessio.placitelli@gmail.com>
+# Date 1583940441 0
+# Node ID 795632849f9f37c1ca0caf0e1807d3fafa7b395b
+# Parent  73887fc2caf82f8c4caf7eb6ae6bab8892f7aeae
+Bug 1619555 - Add the missing sources to vendored python libs. r=ahal
+
+The vendored packages were fetched using vanilla "pip download", which downloads
+wheel files by default (missing LICENSE, setup.py, etc.). This changeset was
+created by downloading the "glean_parser" dependencies using "pip download
+--no-binary :all:", which is the same thing that "mach vendor python" does.
+It additionally removes all the /tests and /docs directories of the vendored
+dependencies.
+
+Differential Revision: https://phabricator.services.mozilla.com/D65171
+
+diff --git a/third_party/python/appdirs/CHANGES.rst b/third_party/python/appdirs/CHANGES.rst
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/appdirs/CHANGES.rst
+@@ -0,0 +1,84 @@
++appdirs Changelog
++=================
++
++appdirs 1.4.3
++-------------
++- [PR #76] Python 3.6 invalid escape sequence deprecation fixes
++- Fix for Python 3.6 support
++
++appdirs 1.4.2
++-------------
++- [PR #84] Allow installing without setuptools
++- [PR #86] Fix string delimiters in setup.py description
++- Add Python 3.6 support
++
++appdirs 1.4.1
++-------------
++- [issue #38] Fix _winreg import on Windows Py3
++- [issue #55] Make appname optional
++
++appdirs 1.4.0
++-------------
++- [PR #42] AppAuthor is now optional on Windows
++- [issue 41] Support Jython on Windows, Mac, and Unix-like platforms. Windows
++  support requires `JNA <https://github.com/twall/jna>`_.
++- [PR #44] Fix incorrect behaviour of the site_config_dir method
++
++appdirs 1.3.0
++-------------
++- [Unix, issue 16] Conform to XDG standard, instead of breaking it for
++  everybody
++- [Unix] Removes gratuitous case mangling, since \*nix-es are
++  usually case sensitive, so mangling is not wise
++- [Unix] Fixes the utterly wrong behaviour in ``site_data_dir``, return result
++  based on XDG_DATA_DIRS and make room for respecting the standard which
++  specifies XDG_DATA_DIRS is a multiple-value variable
++- [Issue 6] Add ``*_config_dir`` which are distinct on nix-es, according to
++  XDG specs; on Windows and Mac return the corresponding ``*_data_dir``
++
++appdirs 1.2.0
++-------------
++
++- [Unix] Put ``user_log_dir`` under the *cache* dir on Unix. Seems to be more
++  typical.
++- [issue 9] Make ``unicode`` work on py3k.
++
++appdirs 1.1.0
++-------------
++
++- [issue 4] Add ``AppDirs.user_log_dir``.
++- [Unix, issue 2, issue 7] appdirs now conforms to `XDG base directory spec
++  <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
++- [Mac, issue 5] Fix ``site_data_dir()`` on Mac.
++- [Mac] Drop use of 'Carbon' module in favour of hardcoded paths; supports
++  Python3 now.
++- [Windows] Append "Cache" to ``user_cache_dir`` on Windows by default. Use
++  ``opinion=False`` option to disable this.
++- Add ``appdirs.AppDirs`` convenience class. Usage:
++
++        >>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
++        >>> dirs.user_data_dir
++        '/Users/trentm/Library/Application Support/SuperApp/1.0'
++
++- [Windows] Cherry-pick Komodo's change to downgrade paths to the Windows short
++  paths if there are high bit chars.
++- [Linux] Change default ``user_cache_dir()`` on Linux to be singular, e.g.
++  "~/.superapp/cache".
++- [Windows] Add ``roaming`` option to ``user_data_dir()`` (for use on Windows only)
++  and change the default ``user_data_dir`` behaviour to use a *non*-roaming
++  profile dir (``CSIDL_LOCAL_APPDATA`` instead of ``CSIDL_APPDATA``). Why? Because
++  a large roaming profile can cause login speed issues. The "only syncs on
++  logout" behaviour can cause surprises in appdata info.
++
++
++appdirs 1.0.1 (never released)
++------------------------------
++
++Started this changelog 27 July 2010. Before that this module originated in the
++`Komodo <http://www.activestate.com/komodo>`_ product as ``applib.py`` and then
++as `applib/location.py
++<http://github.com/ActiveState/applib/blob/master/applib/location.py>`_ (used by
++`PyPM <http://code.activestate.com/pypm/>`_ in `ActivePython
++<http://www.activestate.com/activepython>`_). This is basically a fork of
++applib.py 1.0.1 and applib/location.py 1.0.1.
++
+diff --git a/third_party/python/appdirs/LICENSE.txt b/third_party/python/appdirs/LICENSE.txt
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/appdirs/LICENSE.txt
+@@ -0,0 +1,23 @@
++# This is the MIT license
++
++Copyright (c) 2010 ActiveState Software Inc.
++
++Permission is hereby granted, free of charge, to any person obtaining a
++copy of this software and associated documentation files (the
++"Software"), to deal in the Software without restriction, including
++without limitation the rights to use, copy, modify, merge, publish,
++distribute, sublicense, and/or sell copies of the Software, and to
++permit persons to whom the Software is furnished to do so, subject to
++the following conditions:
++
++The above copyright notice and this permission notice shall be included
++in all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++
+diff --git a/third_party/python/appdirs/MANIFEST.in b/third_party/python/appdirs/MANIFEST.in
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/appdirs/MANIFEST.in
+@@ -0,0 +1,5 @@
++include README.rst
++include CHANGES.rst
++include LICENSE.txt
++include *.py
++include test/*.py
+diff --git a/third_party/python/appdirs/PKG-INFO b/third_party/python/appdirs/PKG-INFO
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/appdirs/PKG-INFO
+@@ -0,0 +1,252 @@
++Metadata-Version: 1.1
++Name: appdirs
++Version: 1.4.3
++Summary: A small Python module for determining appropriate platform-specific dirs, e.g. a "user data dir".
++Home-page: http://github.com/ActiveState/appdirs
++Author: Trent Mick; Sridhar Ratnakumar; Jeff Rouse
++Author-email: trentm@gmail.com; github@srid.name; jr@its.to
++License: MIT
++Description: 
++        .. image:: https://secure.travis-ci.org/ActiveState/appdirs.png
++            :target: http://travis-ci.org/ActiveState/appdirs
++        
++        the problem
++        ===========
++        
++        What directory should your app use for storing user data? If running on Mac OS X, you
++        should use::
++        
++            ~/Library/Application Support/<AppName>
++        
++        If on Windows (at least English Win XP) that should be::
++        
++            C:\Documents and Settings\<User>\Application Data\Local Settings\<AppAuthor>\<AppName>
++        
++        or possibly::
++        
++            C:\Documents and Settings\<User>\Application Data\<AppAuthor>\<AppName>
++        
++        for `roaming profiles <http://bit.ly/9yl3b6>`_ but that is another story.
++        
++        On Linux (and other Unices) the dir, according to the `XDG
++        spec <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_, is::
++        
++            ~/.local/share/<AppName>
++        
++        
++        ``appdirs`` to the rescue
++        =========================
++        
++        This kind of thing is what the ``appdirs`` module is for. ``appdirs`` will
++        help you choose an appropriate:
++        
++        - user data dir (``user_data_dir``)
++        - user config dir (``user_config_dir``)
++        - user cache dir (``user_cache_dir``)
++        - site data dir (``site_data_dir``)
++        - site config dir (``site_config_dir``)
++        - user log dir (``user_log_dir``)
++        
++        and also:
++        
++        - is a single module so other Python packages can include their own private copy
++        - is slightly opinionated on the directory names used. Look for "OPINION" in
++          documentation and code for when an opinion is being applied.
++        
++        
++        some example output
++        ===================
++        
++        On Mac OS X::
++        
++            >>> from appdirs import *
++            >>> appname = "SuperApp"
++            >>> appauthor = "Acme"
++            >>> user_data_dir(appname, appauthor)
++            '/Users/trentm/Library/Application Support/SuperApp'
++            >>> site_data_dir(appname, appauthor)
++            '/Library/Application Support/SuperApp'
++            >>> user_cache_dir(appname, appauthor)
++            '/Users/trentm/Library/Caches/SuperApp'
++            >>> user_log_dir(appname, appauthor)
++            '/Users/trentm/Library/Logs/SuperApp'
++        
++        On Windows 7::
++        
++            >>> from appdirs import *
++            >>> appname = "SuperApp"
++            >>> appauthor = "Acme"
++            >>> user_data_dir(appname, appauthor)
++            'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp'
++            >>> user_data_dir(appname, appauthor, roaming=True)
++            'C:\\Users\\trentm\\AppData\\Roaming\\Acme\\SuperApp'
++            >>> user_cache_dir(appname, appauthor)
++            'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Cache'
++            >>> user_log_dir(appname, appauthor)
++            'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Logs'
++        
++        On Linux::
++        
++            >>> from appdirs import *
++            >>> appname = "SuperApp"
++            >>> appauthor = "Acme"
++            >>> user_data_dir(appname, appauthor)
++            '/home/trentm/.local/share/SuperApp'
++            >>> site_data_dir(appname, appauthor)
++            '/usr/local/share/SuperApp'
++            >>> site_data_dir(appname, appauthor, multipath=True)
++            '/usr/local/share/SuperApp:/usr/share/SuperApp'
++            >>> user_cache_dir(appname, appauthor)
++            '/home/trentm/.cache/SuperApp'
++            >>> user_log_dir(appname, appauthor)
++            '/home/trentm/.cache/SuperApp/log'
++            >>> user_config_dir(appname)
++            '/home/trentm/.config/SuperApp'
++            >>> site_config_dir(appname)
++            '/etc/xdg/SuperApp'
++            >>> os.environ['XDG_CONFIG_DIRS'] = '/etc:/usr/local/etc'
++            >>> site_config_dir(appname, multipath=True)
++            '/etc/SuperApp:/usr/local/etc/SuperApp'
++        
++        
++        ``AppDirs`` for convenience
++        ===========================
++        
++        ::
++        
++            >>> from appdirs import AppDirs
++            >>> dirs = AppDirs("SuperApp", "Acme")
++            >>> dirs.user_data_dir
++            '/Users/trentm/Library/Application Support/SuperApp'
++            >>> dirs.site_data_dir
++            '/Library/Application Support/SuperApp'
++            >>> dirs.user_cache_dir
++            '/Users/trentm/Library/Caches/SuperApp'
++            >>> dirs.user_log_dir
++            '/Users/trentm/Library/Logs/SuperApp'
++        
++        
++            
++        Per-version isolation
++        =====================
++        
++        If you have multiple versions of your app in use that you want to be
++        able to run side-by-side, then you may want version-isolation for these
++        dirs::
++        
++            >>> from appdirs import AppDirs
++            >>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
++            >>> dirs.user_data_dir
++            '/Users/trentm/Library/Application Support/SuperApp/1.0'
++            >>> dirs.site_data_dir
++            '/Library/Application Support/SuperApp/1.0'
++            >>> dirs.user_cache_dir
++            '/Users/trentm/Library/Caches/SuperApp/1.0'
++            >>> dirs.user_log_dir
++            '/Users/trentm/Library/Logs/SuperApp/1.0'
++        
++        
++        
++        appdirs Changelog
++        =================
++        
++        appdirs 1.4.3
++        -------------
++        - [PR #76] Python 3.6 invalid escape sequence deprecation fixes
++        - Fix for Python 3.6 support
++        
++        appdirs 1.4.2
++        -------------
++        - [PR #84] Allow installing without setuptools
++        - [PR #86] Fix string delimiters in setup.py description
++        - Add Python 3.6 support
++        
++        appdirs 1.4.1
++        -------------
++        - [issue #38] Fix _winreg import on Windows Py3
++        - [issue #55] Make appname optional
++        
++        appdirs 1.4.0
++        -------------
++        - [PR #42] AppAuthor is now optional on Windows
++        - [issue 41] Support Jython on Windows, Mac, and Unix-like platforms. Windows
++          support requires `JNA <https://github.com/twall/jna>`_.
++        - [PR #44] Fix incorrect behaviour of the site_config_dir method
++        
++        appdirs 1.3.0
++        -------------
++        - [Unix, issue 16] Conform to XDG standard, instead of breaking it for
++          everybody
++        - [Unix] Removes gratuitous case mangling, since \*nix-es are
++          usually case sensitive, so mangling is not wise
++        - [Unix] Fixes the utterly wrong behaviour in ``site_data_dir``, return result
++          based on XDG_DATA_DIRS and make room for respecting the standard which
++          specifies XDG_DATA_DIRS is a multiple-value variable
++        - [Issue 6] Add ``*_config_dir`` which are distinct on nix-es, according to
++          XDG specs; on Windows and Mac return the corresponding ``*_data_dir``
++        
++        appdirs 1.2.0
++        -------------
++        
++        - [Unix] Put ``user_log_dir`` under the *cache* dir on Unix. Seems to be more
++          typical.
++        - [issue 9] Make ``unicode`` work on py3k.
++        
++        appdirs 1.1.0
++        -------------
++        
++        - [issue 4] Add ``AppDirs.user_log_dir``.
++        - [Unix, issue 2, issue 7] appdirs now conforms to `XDG base directory spec
++          <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
++        - [Mac, issue 5] Fix ``site_data_dir()`` on Mac.
++        - [Mac] Drop use of 'Carbon' module in favour of hardcoded paths; supports
++          Python3 now.
++        - [Windows] Append "Cache" to ``user_cache_dir`` on Windows by default. Use
++          ``opinion=False`` option to disable this.
++        - Add ``appdirs.AppDirs`` convenience class. Usage:
++        
++                >>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
++                >>> dirs.user_data_dir
++                '/Users/trentm/Library/Application Support/SuperApp/1.0'
++        
++        - [Windows] Cherry-pick Komodo's change to downgrade paths to the Windows short
++          paths if there are high bit chars.
++        - [Linux] Change default ``user_cache_dir()`` on Linux to be singular, e.g.
++          "~/.superapp/cache".
++        - [Windows] Add ``roaming`` option to ``user_data_dir()`` (for use on Windows only)
++          and change the default ``user_data_dir`` behaviour to use a *non*-roaming
++          profile dir (``CSIDL_LOCAL_APPDATA`` instead of ``CSIDL_APPDATA``). Why? Because
++          a large roaming profile can cause login speed issues. The "only syncs on
++          logout" behaviour can cause surprises in appdata info.
++        
++        
++        appdirs 1.0.1 (never released)
++        ------------------------------
++        
++        Started this changelog 27 July 2010. Before that this module originated in the
++        `Komodo <http://www.activestate.com/komodo>`_ product as ``applib.py`` and then
++        as `applib/location.py
++        <http://github.com/ActiveState/applib/blob/master/applib/location.py>`_ (used by
++        `PyPM <http://code.activestate.com/pypm/>`_ in `ActivePython
++        <http://www.activestate.com/activepython>`_). This is basically a fork of
++        applib.py 1.0.1 and applib/location.py 1.0.1.
++        
++        
++Keywords: application directory log cache user
++Platform: UNKNOWN
++Classifier: Development Status :: 4 - Beta
++Classifier: Intended Audience :: Developers
++Classifier: License :: OSI Approved :: MIT License
++Classifier: Operating System :: OS Independent
++Classifier: Programming Language :: Python :: 2
++Classifier: Programming Language :: Python :: 2.6
++Classifier: Programming Language :: Python :: 2.7
++Classifier: Programming Language :: Python :: 3
++Classifier: Programming Language :: Python :: 3.2
++Classifier: Programming Language :: Python :: 3.3
++Classifier: Programming Language :: Python :: 3.4
++Classifier: Programming Language :: Python :: 3.5
++Classifier: Programming Language :: Python :: 3.6
++Classifier: Programming Language :: Python :: Implementation :: PyPy
++Classifier: Programming Language :: Python :: Implementation :: CPython
++Classifier: Topic :: Software Development :: Libraries :: Python Modules
+diff --git a/third_party/python/appdirs/README.rst b/third_party/python/appdirs/README.rst
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/appdirs/README.rst
+@@ -0,0 +1,138 @@
++.. image:: https://secure.travis-ci.org/ActiveState/appdirs.png
++    :target: http://travis-ci.org/ActiveState/appdirs
++
++the problem
++===========
++
++What directory should your app use for storing user data? If running on Mac OS X, you
++should use::
++
++    ~/Library/Application Support/<AppName>
++
++If on Windows (at least English Win XP) that should be::
++
++    C:\Documents and Settings\<User>\Application Data\Local Settings\<AppAuthor>\<AppName>
++
++or possibly::
++
++    C:\Documents and Settings\<User>\Application Data\<AppAuthor>\<AppName>
++
++for `roaming profiles <http://bit.ly/9yl3b6>`_ but that is another story.
++
++On Linux (and other Unices) the dir, according to the `XDG
++spec <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_, is::
++
++    ~/.local/share/<AppName>
++
++
++``appdirs`` to the rescue
++=========================
++
++This kind of thing is what the ``appdirs`` module is for. ``appdirs`` will
++help you choose an appropriate:
++
++- user data dir (``user_data_dir``)
++- user config dir (``user_config_dir``)
++- user cache dir (``user_cache_dir``)
++- site data dir (``site_data_dir``)
++- site config dir (``site_config_dir``)
++- user log dir (``user_log_dir``)
++
++and also:
++
++- is a single module so other Python packages can include their own private copy
++- is slightly opinionated on the directory names used. Look for "OPINION" in
++  documentation and code for when an opinion is being applied.
++
++
++some example output
++===================
++
++On Mac OS X::
++
++    >>> from appdirs import *
++    >>> appname = "SuperApp"
++    >>> appauthor = "Acme"
++    >>> user_data_dir(appname, appauthor)
++    '/Users/trentm/Library/Application Support/SuperApp'
++    >>> site_data_dir(appname, appauthor)
++    '/Library/Application Support/SuperApp'
++    >>> user_cache_dir(appname, appauthor)
++    '/Users/trentm/Library/Caches/SuperApp'
++    >>> user_log_dir(appname, appauthor)
++    '/Users/trentm/Library/Logs/SuperApp'
++
++On Windows 7::
++
++    >>> from appdirs import *
++    >>> appname = "SuperApp"
++    >>> appauthor = "Acme"
++    >>> user_data_dir(appname, appauthor)
++    'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp'
++    >>> user_data_dir(appname, appauthor, roaming=True)
++    'C:\\Users\\trentm\\AppData\\Roaming\\Acme\\SuperApp'
++    >>> user_cache_dir(appname, appauthor)
++    'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Cache'
++    >>> user_log_dir(appname, appauthor)
++    'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Logs'
++
++On Linux::
++
++    >>> from appdirs import *
++    >>> appname = "SuperApp"
++    >>> appauthor = "Acme"
++    >>> user_data_dir(appname, appauthor)
++    '/home/trentm/.local/share/SuperApp'
++    >>> site_data_dir(appname, appauthor)
++    '/usr/local/share/SuperApp'
++    >>> site_data_dir(appname, appauthor, multipath=True)
++    '/usr/local/share/SuperApp:/usr/share/SuperApp'
++    >>> user_cache_dir(appname, appauthor)
++    '/home/trentm/.cache/SuperApp'
++    >>> user_log_dir(appname, appauthor)
++    '/home/trentm/.cache/SuperApp/log'
++    >>> user_config_dir(appname)
++    '/home/trentm/.config/SuperApp'
++    >>> site_config_dir(appname)
++    '/etc/xdg/SuperApp'
++    >>> os.environ['XDG_CONFIG_DIRS'] = '/etc:/usr/local/etc'
++    >>> site_config_dir(appname, multipath=True)
++    '/etc/SuperApp:/usr/local/etc/SuperApp'
++
++
++``AppDirs`` for convenience
++===========================
++
++::
++
++    >>> from appdirs import AppDirs
++    >>> dirs = AppDirs("SuperApp", "Acme")
++    >>> dirs.user_data_dir
++    '/Users/trentm/Library/Application Support/SuperApp'
++    >>> dirs.site_data_dir
++    '/Library/Application Support/SuperApp'
++    >>> dirs.user_cache_dir
++    '/Users/trentm/Library/Caches/SuperApp'
++    >>> dirs.user_log_dir
++    '/Users/trentm/Library/Logs/SuperApp'
++
++
++    
++Per-version isolation
++=====================
++
++If you have multiple versions of your app in use that you want to be
++able to run side-by-side, then you may want version-isolation for these
++dirs::
++
++    >>> from appdirs import AppDirs
++    >>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
++    >>> dirs.user_data_dir
++    '/Users/trentm/Library/Application Support/SuperApp/1.0'
++    >>> dirs.site_data_dir
++    '/Library/Application Support/SuperApp/1.0'
++    >>> dirs.user_cache_dir
++    '/Users/trentm/Library/Caches/SuperApp/1.0'
++    >>> dirs.user_log_dir
++    '/Users/trentm/Library/Logs/SuperApp/1.0'
++
+diff --git a/third_party/python/appdirs/setup.cfg b/third_party/python/appdirs/setup.cfg
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/appdirs/setup.cfg
+@@ -0,0 +1,7 @@
++[wheel]
++universal = 1
++
++[egg_info]
++tag_build = 
++tag_date = 0
++
+diff --git a/third_party/python/appdirs/setup.py b/third_party/python/appdirs/setup.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/appdirs/setup.py
+@@ -0,0 +1,58 @@
++#!/usr/bin/env python
++import sys
++import os
++import os.path
++# appdirs is a dependency of setuptools, so allow installing without it.
++try:
++    from setuptools import setup
++except ImportError:
++    from distutils.core import setup
++import appdirs
++
++tests_require = []
++if sys.version_info < (2, 7):
++    tests_require.append("unittest2")
++
++
++def read(fname):
++    inf = open(os.path.join(os.path.dirname(__file__), fname))
++    out = "\n" + inf.read().replace("\r\n", "\n")
++    inf.close()
++    return out
++
++
++setup(
++    name='appdirs',
++    version=appdirs.__version__,
++    description='A small Python module for determining appropriate ' + \
++        'platform-specific dirs, e.g. a "user data dir".',
++    long_description=read('README.rst') + '\n' + read('CHANGES.rst'),
++    classifiers=[c.strip() for c in """
++        Development Status :: 4 - Beta
++        Intended Audience :: Developers
++        License :: OSI Approved :: MIT License
++        Operating System :: OS Independent
++        Programming Language :: Python :: 2
++        Programming Language :: Python :: 2.6
++        Programming Language :: Python :: 2.7
++        Programming Language :: Python :: 3
++        Programming Language :: Python :: 3.2
++        Programming Language :: Python :: 3.3
++        Programming Language :: Python :: 3.4
++        Programming Language :: Python :: 3.5
++        Programming Language :: Python :: 3.6
++        Programming Language :: Python :: Implementation :: PyPy
++        Programming Language :: Python :: Implementation :: CPython
++        Topic :: Software Development :: Libraries :: Python Modules
++        """.split('\n') if c.strip()],
++    test_suite='test.test_api',
++    tests_require=tests_require,
++    keywords='application directory log cache user',
++    author='Trent Mick',
++    author_email='trentm@gmail.com',
++    maintainer='Trent Mick; Sridhar Ratnakumar; Jeff Rouse',
++    maintainer_email='trentm@gmail.com; github@srid.name; jr@its.to',
++    url='http://github.com/ActiveState/appdirs',
++    license='MIT',
++    py_modules=["appdirs"],
++)
+diff --git a/third_party/python/pathspec/CHANGES.rst b/third_party/python/pathspec/CHANGES.rst
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/CHANGES.rst
+@@ -0,0 +1,189 @@
++
++Change History
++==============
++
++
++0.7.0 (2019-12-27)
++------------------
++
++- `Issue #28`_: Add support for Python 3.8, and drop Python 3.4.
++- `Issue #29`_: Publish bdist wheel.
++
++.. _`Issue #28`: https://github.com/cpburnz/python-path-specification/pull/28
++.. _`Issue #29`: https://github.com/cpburnz/python-path-specification/pull/29
++
++
++0.6.0 (2019-10-03)
++------------------
++
++- `Issue #24`_: Drop support for Python 2.6, 3.2, and 3.3.
++- `Issue #25`_: Update README.rst.
++- `Issue #26`_: Method to escape gitwildmatch.
++
++.. _`Issue #24`: https://github.com/cpburnz/python-path-specification/pull/24
++.. _`Issue #25`: https://github.com/cpburnz/python-path-specification/pull/25
++.. _`Issue #26`: https://github.com/cpburnz/python-path-specification/pull/26
++
++
++0.5.9 (2018-09-15)
++------------------
++
++- Fixed file system error handling.
++
++
++0.5.8 (2018-09-15)
++------------------
++
++- Improved type checking.
++- Created scripts to test Python 2.6 because Tox removed support for it.
++- Improved byte string handling in Python 3.
++- `Issue #22`_: Handle dangling symlinks.
++
++.. _`Issue #22`: https://github.com/cpburnz/python-path-specification/issues/22
++
++
++0.5.7 (2018-08-14)
++------------------
++
++- `Issue #21`_: Fix collections deprecation warning.
++
++.. _`Issue #21`: https://github.com/cpburnz/python-path-specification/issues/21
++
++
++0.5.6 (2018-04-06)
++------------------
++
++- Improved unit tests.
++- Improved type checking.
++- `Issue #20`_: Support current directory prefix.
++
++.. _`Issue #20`: https://github.com/cpburnz/python-path-specification/issues/20
++
++
++0.5.5 (2017-09-09)
++------------------
++
++- Add documentation link to README.
++
++
++0.5.4 (2017-09-09)
++------------------
++
++- `Issue #17`_: Add link to Ruby implementation of *pathspec*.
++- Add sphinx documentation.
++
++.. _`Issue #17`: https://github.com/cpburnz/python-path-specification/pull/17
++
++
++0.5.3 (2017-07-01)
++------------------
++
++- `Issue #14`_: Fix byte strings for Python 3.
++- `Issue #15`_: Include "LICENSE" in source package.
++- `Issue #16`_: Support Python 2.6.
++
++.. _`Issue #14`: https://github.com/cpburnz/python-path-specification/issues/14
++.. _`Issue #15`: https://github.com/cpburnz/python-path-specification/pull/15
++.. _`Issue #16`: https://github.com/cpburnz/python-path-specification/issues/16
++
++
++0.5.2 (2017-04-04)
++------------------
++
++- Fixed change log.
++
++
++0.5.1 (2017-04-04)
++------------------
++
++- `Issue #13`_: Add equality methods to `PathSpec` and `RegexPattern`.
++
++.. _`Issue #13`: https://github.com/cpburnz/python-path-specification/pull/13
++
++
++0.5.0 (2016-08-22)
++------------------
++
++- `Issue #12`_: Add `PathSpec.match_file()`.
++- Renamed `gitignore.GitIgnorePattern` to `patterns.gitwildmatch.GitWildMatchPattern`.
++- Deprecated `gitignore.GitIgnorePattern`.
++
++.. _`Issue #12`: https://github.com/cpburnz/python-path-specification/issues/12
++
++
++0.4.0 (2016-07-15)
++------------------
++
++- `Issue #11`_: Support converting patterns into regular expressions without compiling them.
++- API change: Subclasses of `RegexPattern` should implement `pattern_to_regex()`.
++
++.. _`Issue #11`: https://github.com/cpburnz/python-path-specification/issues/11
++
++
++0.3.4 (2015-08-24)
++------------------
++
++- `Issue #7`_: Fixed non-recursive links.
++- `Issue #8`_: Fixed edge cases in gitignore patterns.
++- `Issue #9`_: Fixed minor usage documentation.
++- Fixed recursion detection.
++- Fixed trivial incompatibility with Python 3.2.
++
++.. _`Issue #7`: https://github.com/cpburnz/python-path-specification/pull/7
++.. _`Issue #8`: https://github.com/cpburnz/python-path-specification/pull/8
++.. _`Issue #9`: https://github.com/cpburnz/python-path-specification/pull/9
++
++
++0.3.3 (2014-11-21)
++------------------
++
++- Improved documentation.
++
++
++0.3.2 (2014-11-08)
++------------------
++
++- `Issue #5`_: Use tox for testing.
++- `Issue #6`_: Fixed matching Windows paths.
++- Improved documentation.
++- API change: `spec.match_tree()` and `spec.match_files()` now return iterators instead of sets.
++
++.. _`Issue #5`: https://github.com/cpburnz/python-path-specification/pull/5
++.. _`Issue #6`: https://github.com/cpburnz/python-path-specification/issues/6
++
++
++0.3.1 (2014-09-17)
++------------------
++
++- Updated README.
++
++
++0.3.0 (2014-09-17)
++------------------
++
++- `Issue #3`_: Fixed trailing slash in gitignore patterns.
++- `Issue #4`_: Fixed test for trailing slash in gitignore patterns.
++- Added registered patterns.
++
++.. _`Issue #3`: https://github.com/cpburnz/python-path-specification/pull/3
++.. _`Issue #4`: https://github.com/cpburnz/python-path-specification/pull/4
++
++
++0.2.2 (2013-12-17)
++------------------
++
++- Fixed setup.py.
++
++
++0.2.1 (2013-12-17)
++------------------
++
++- Added tests.
++- Fixed comment gitignore patterns.
++- Fixed relative path gitignore patterns.
++
++
++0.2.0 (2013-12-07)
++------------------
++
++- Initial release.
+diff --git a/third_party/python/pathspec/LICENSE b/third_party/python/pathspec/LICENSE
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/LICENSE
+@@ -0,0 +1,373 @@
++Mozilla Public License Version 2.0
++==================================
++
++1. Definitions
++--------------
++
++1.1. "Contributor"
++    means each individual or legal entity that creates, contributes to
++    the creation of, or owns Covered Software.
++
++1.2. "Contributor Version"
++    means the combination of the Contributions of others (if any) used
++    by a Contributor and that particular Contributor's Contribution.
++
++1.3. "Contribution"
++    means Covered Software of a particular Contributor.
++
++1.4. "Covered Software"
++    means Source Code Form to which the initial Contributor has attached
++    the notice in Exhibit A, the Executable Form of such Source Code
++    Form, and Modifications of such Source Code Form, in each case
++    including portions thereof.
++
++1.5. "Incompatible With Secondary Licenses"
++    means
++
++    (a) that the initial Contributor has attached the notice described
++        in Exhibit B to the Covered Software; or
++
++    (b) that the Covered Software was made available under the terms of
++        version 1.1 or earlier of the License, but not also under the
++        terms of a Secondary License.
++
++1.6. "Executable Form"
++    means any form of the work other than Source Code Form.
++
++1.7. "Larger Work"
++    means a work that combines Covered Software with other material, in 
++    a separate file or files, that is not Covered Software.
++
++1.8. "License"
++    means this document.
++
++1.9. "Licensable"
++    means having the right to grant, to the maximum extent possible,
++    whether at the time of the initial grant or subsequently, any and
++    all of the rights conveyed by this License.
++
++1.10. "Modifications"
++    means any of the following:
++
++    (a) any file in Source Code Form that results from an addition to,
++        deletion from, or modification of the contents of Covered
++        Software; or
++
++    (b) any new file in Source Code Form that contains any Covered
++        Software.
++
++1.11. "Patent Claims" of a Contributor
++    means any patent claim(s), including without limitation, method,
++    process, and apparatus claims, in any patent Licensable by such
++    Contributor that would be infringed, but for the grant of the
++    License, by the making, using, selling, offering for sale, having
++    made, import, or transfer of either its Contributions or its
++    Contributor Version.
++
++1.12. "Secondary License"
++    means either the GNU General Public License, Version 2.0, the GNU
++    Lesser General Public License, Version 2.1, the GNU Affero General
++    Public License, Version 3.0, or any later versions of those
++    licenses.
++
++1.13. "Source Code Form"
++    means the form of the work preferred for making modifications.
++
++1.14. "You" (or "Your")
++    means an individual or a legal entity exercising rights under this
++    License. For legal entities, "You" includes any entity that
++    controls, is controlled by, or is under common control with You. For
++    purposes of this definition, "control" means (a) the power, direct
++    or indirect, to cause the direction or management of such entity,
++    whether by contract or otherwise, or (b) ownership of more than
++    fifty percent (50%) of the outstanding shares or beneficial
++    ownership of such entity.
++
++2. License Grants and Conditions
++--------------------------------
++
++2.1. Grants
++
++Each Contributor hereby grants You a world-wide, royalty-free,
++non-exclusive license:
++
++(a) under intellectual property rights (other than patent or trademark)
++    Licensable by such Contributor to use, reproduce, make available,
++    modify, display, perform, distribute, and otherwise exploit its
++    Contributions, either on an unmodified basis, with Modifications, or
++    as part of a Larger Work; and
++
++(b) under Patent Claims of such Contributor to make, use, sell, offer
++    for sale, have made, import, and otherwise transfer either its
++    Contributions or its Contributor Version.
++
++2.2. Effective Date
++
++The licenses granted in Section 2.1 with respect to any Contribution
++become effective for each Contribution on the date the Contributor first
++distributes such Contribution.
++
++2.3. Limitations on Grant Scope
++
++The licenses granted in this Section 2 are the only rights granted under
++this License. No additional rights or licenses will be implied from the
++distribution or licensing of Covered Software under this License.
++Notwithstanding Section 2.1(b) above, no patent license is granted by a
++Contributor:
++
++(a) for any code that a Contributor has removed from Covered Software;
++    or
++
++(b) for infringements caused by: (i) Your and any other third party's
++    modifications of Covered Software, or (ii) the combination of its
++    Contributions with other software (except as part of its Contributor
++    Version); or
++
++(c) under Patent Claims infringed by Covered Software in the absence of
++    its Contributions.
++
++This License does not grant any rights in the trademarks, service marks,
++or logos of any Contributor (except as may be necessary to comply with
++the notice requirements in Section 3.4).
++
++2.4. Subsequent Licenses
++
++No Contributor makes additional grants as a result of Your choice to
++distribute the Covered Software under a subsequent version of this
++License (see Section 10.2) or under the terms of a Secondary License (if
++permitted under the terms of Section 3.3).
++
++2.5. Representation
++
++Each Contributor represents that the Contributor believes its
++Contributions are its original creation(s) or it has sufficient rights
++to grant the rights to its Contributions conveyed by this License.
++
++2.6. Fair Use
++
++This License is not intended to limit any rights You have under
++applicable copyright doctrines of fair use, fair dealing, or other
++equivalents.
++
++2.7. Conditions
++
++Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
++in Section 2.1.
++
++3. Responsibilities
++-------------------
++
++3.1. Distribution of Source Form
++
++All distribution of Covered Software in Source Code Form, including any
++Modifications that You create or to which You contribute, must be under
++the terms of this License. You must inform recipients that the Source
++Code Form of the Covered Software is governed by the terms of this
++License, and how they can obtain a copy of this License. You may not
++attempt to alter or restrict the recipients' rights in the Source Code
++Form.
++
++3.2. Distribution of Executable Form
++
++If You distribute Covered Software in Executable Form then:
++
++(a) such Covered Software must also be made available in Source Code
++    Form, as described in Section 3.1, and You must inform recipients of
++    the Executable Form how they can obtain a copy of such Source Code
++    Form by reasonable means in a timely manner, at a charge no more
++    than the cost of distribution to the recipient; and
++
++(b) You may distribute such Executable Form under the terms of this
++    License, or sublicense it under different terms, provided that the
++    license for the Executable Form does not attempt to limit or alter
++    the recipients' rights in the Source Code Form under this License.
++
++3.3. Distribution of a Larger Work
++
++You may create and distribute a Larger Work under terms of Your choice,
++provided that You also comply with the requirements of this License for
++the Covered Software. If the Larger Work is a combination of Covered
++Software with a work governed by one or more Secondary Licenses, and the
++Covered Software is not Incompatible With Secondary Licenses, this
++License permits You to additionally distribute such Covered Software
++under the terms of such Secondary License(s), so that the recipient of
++the Larger Work may, at their option, further distribute the Covered
++Software under the terms of either this License or such Secondary
++License(s).
++
++3.4. Notices
++
++You may not remove or alter the substance of any license notices
++(including copyright notices, patent notices, disclaimers of warranty,
++or limitations of liability) contained within the Source Code Form of
++the Covered Software, except that You may alter any license notices to
++the extent required to remedy known factual inaccuracies.
++
++3.5. Application of Additional Terms
++
++You may choose to offer, and to charge a fee for, warranty, support,
++indemnity or liability obligations to one or more recipients of Covered
++Software. However, You may do so only on Your own behalf, and not on
++behalf of any Contributor. You must make it absolutely clear that any
++such warranty, support, indemnity, or liability obligation is offered by
++You alone, and You hereby agree to indemnify every Contributor for any
++liability incurred by such Contributor as a result of warranty, support,
++indemnity or liability terms You offer. You may include additional
++disclaimers of warranty and limitations of liability specific to any
++jurisdiction.
++
++4. Inability to Comply Due to Statute or Regulation
++---------------------------------------------------
++
++If it is impossible for You to comply with any of the terms of this
++License with respect to some or all of the Covered Software due to
++statute, judicial order, or regulation then You must: (a) comply with
++the terms of this License to the maximum extent possible; and (b)
++describe the limitations and the code they affect. Such description must
++be placed in a text file included with all distributions of the Covered
++Software under this License. Except to the extent prohibited by statute
++or regulation, such description must be sufficiently detailed for a
++recipient of ordinary skill to be able to understand it.
++
++5. Termination
++--------------
++
++5.1. The rights granted under this License will terminate automatically
++if You fail to comply with any of its terms. However, if You become
++compliant, then the rights granted under this License from a particular
++Contributor are reinstated (a) provisionally, unless and until such
++Contributor explicitly and finally terminates Your grants, and (b) on an
++ongoing basis, if such Contributor fails to notify You of the
++non-compliance by some reasonable means prior to 60 days after You have
++come back into compliance. Moreover, Your grants from a particular
++Contributor are reinstated on an ongoing basis if such Contributor
++notifies You of the non-compliance by some reasonable means, this is the
++first time You have received notice of non-compliance with this License
++from such Contributor, and You become compliant prior to 30 days after
++Your receipt of the notice.
++
++5.2. If You initiate litigation against any entity by asserting a patent
++infringement claim (excluding declaratory judgment actions,
++counter-claims, and cross-claims) alleging that a Contributor Version
++directly or indirectly infringes any patent, then the rights granted to
++You by any and all Contributors for the Covered Software under Section
++2.1 of this License shall terminate.
++
++5.3. In the event of termination under Sections 5.1 or 5.2 above, all
++end user license agreements (excluding distributors and resellers) which
++have been validly granted by You or Your distributors under this License
++prior to termination shall survive termination.
++
++************************************************************************
++*                                                                      *
++*  6. Disclaimer of Warranty                                           *
++*  -------------------------                                           *
++*                                                                      *
++*  Covered Software is provided under this License on an "as is"       *
++*  basis, without warranty of any kind, either expressed, implied, or  *
++*  statutory, including, without limitation, warranties that the       *
++*  Covered Software is free of defects, merchantable, fit for a        *
++*  particular purpose or non-infringing. The entire risk as to the     *
++*  quality and performance of the Covered Software is with You.        *
++*  Should any Covered Software prove defective in any respect, You     *
++*  (not any Contributor) assume the cost of any necessary servicing,   *
++*  repair, or correction. This disclaimer of warranty constitutes an   *
++*  essential part of this License. No use of any Covered Software is   *
++*  authorized under this License except under this disclaimer.         *
++*                                                                      *
++************************************************************************
++
++************************************************************************
++*                                                                      *
++*  7. Limitation of Liability                                          *
++*  --------------------------                                          *
++*                                                                      *
++*  Under no circumstances and under no legal theory, whether tort      *
++*  (including negligence), contract, or otherwise, shall any           *
++*  Contributor, or anyone who distributes Covered Software as          *
++*  permitted above, be liable to You for any direct, indirect,         *
++*  special, incidental, or consequential damages of any character      *
++*  including, without limitation, damages for lost profits, loss of    *
++*  goodwill, work stoppage, computer failure or malfunction, or any    *
++*  and all other commercial damages or losses, even if such party      *
++*  shall have been informed of the possibility of such damages. This   *
++*  limitation of liability shall not apply to liability for death or   *
++*  personal injury resulting from such party's negligence to the       *
++*  extent applicable law prohibits such limitation. Some               *
++*  jurisdictions do not allow the exclusion or limitation of           *
++*  incidental or consequential damages, so this exclusion and          *
++*  limitation may not apply to You.                                    *
++*                                                                      *
++************************************************************************
++
++8. Litigation
++-------------
++
++Any litigation relating to this License may be brought only in the
++courts of a jurisdiction where the defendant maintains its principal
++place of business and such litigation shall be governed by laws of that
++jurisdiction, without reference to its conflict-of-law provisions.
++Nothing in this Section shall prevent a party's ability to bring
++cross-claims or counter-claims.
++
++9. Miscellaneous
++----------------
++
++This License represents the complete agreement concerning the subject
++matter hereof. If any provision of this License is held to be
++unenforceable, such provision shall be reformed only to the extent
++necessary to make it enforceable. Any law or regulation which provides
++that the language of a contract shall be construed against the drafter
++shall not be used to construe this License against a Contributor.
++
++10. Versions of the License
++---------------------------
++
++10.1. New Versions
++
++Mozilla Foundation is the license steward. Except as provided in Section
++10.3, no one other than the license steward has the right to modify or
++publish new versions of this License. Each version will be given a
++distinguishing version number.
++
++10.2. Effect of New Versions
++
++You may distribute the Covered Software under the terms of the version
++of the License under which You originally received the Covered Software,
++or under the terms of any subsequent version published by the license
++steward.
++
++10.3. Modified Versions
++
++If you create software not governed by this License, and you want to
++create a new license for such software, you may create and use a
++modified version of this License if you rename the license and remove
++any references to the name of the license steward (except to note that
++such modified license differs from this License).
++
++10.4. Distributing Source Code Form that is Incompatible With Secondary
++Licenses
++
++If You choose to distribute Source Code Form that is Incompatible With
++Secondary Licenses under the terms of this version of the License, the
++notice described in Exhibit B of this License must be attached.
++
++Exhibit A - Source Code Form License Notice
++-------------------------------------------
++
++  This Source Code Form is subject to the terms of the Mozilla Public
++  License, v. 2.0. If a copy of the MPL was not distributed with this
++  file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++If it is not possible or desirable to put the notice in a particular
++file, then You may include the notice in a location (such as a LICENSE
++file in a relevant directory) where a recipient would be likely to look
++for such a notice.
++
++You may add additional accurate notices of copyright ownership.
++
++Exhibit B - "Incompatible With Secondary Licenses" Notice
++---------------------------------------------------------
++
++  This Source Code Form is "Incompatible With Secondary Licenses", as
++  defined by the Mozilla Public License, v. 2.0.
+diff --git a/third_party/python/pathspec/MANIFEST.in b/third_party/python/pathspec/MANIFEST.in
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/MANIFEST.in
+@@ -0,0 +1,2 @@
++include *.rst
++include LICENSE
+diff --git a/third_party/python/pathspec/PKG-INFO b/third_party/python/pathspec/PKG-INFO
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/PKG-INFO
+@@ -0,0 +1,367 @@
++Metadata-Version: 1.2
++Name: pathspec
++Version: 0.7.0
++Summary: Utility library for gitignore style pattern matching of file paths.
++Home-page: https://github.com/cpburnz/python-path-specification
++Author: Caleb P. Burns
++Author-email: cpburnz@gmail.com
++License: MPL 2.0
++Description: *pathspec*: Path Specification
++        ==============================
++        
++        *pathspec* is a utility library for pattern matching of file paths. So
++        far this only includes Git's wildmatch pattern matching which itself is
++        derived from Rsync's wildmatch. Git uses wildmatch for its `gitignore`_
++        files.
++        
++        .. _`gitignore`: http://git-scm.com/docs/gitignore
++        
++        
++        Tutorial
++        --------
++        
++        Say you have a "Projects" directory that you want to back up, but you only
++        want certain files, ignoring others depending on various conditions::
++        
++        	>>> import pathspec
++        	>>> # The gitignore-style patterns for files to select, but we're including
++        	>>> # instead of ignoring.
++        	>>> spec = """
++        	...
++        	... # This is a comment because the line begins with a hash: "#"
++        	...
++        	... # Include several project directories (and all descendants) relative to
++        	... # the current directory. To reference a directory you must end with a
++        	... # slash: "/"
++        	... /project-a/
++        	... /project-b/
++        	... /project-c/
++        	...
++        	... # Patterns can be negated by prefixing with an exclamation mark: "!"
++        	...
++        	... # Ignore temporary files beginning or ending with "~" and ending with
++        	... # ".swp".
++        	... !~*
++        	... !*~
++        	... !*.swp
++        	...
++        	... # These are Python projects, so ignore compiled Python files from
++        	... # testing.
++        	... !*.pyc
++        	...
++        	... # Ignore the build directories but only directly under the project
++        	... # directories.
++        	... !/*/build/
++        	...
++        	... """
++        
++        We want to use the ``GitWildMatchPattern`` class to compile our patterns. The
++        ``PathSpec`` class provides an interface around pattern implementations::
++        
++        	>>> spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, spec.splitlines())
++        
++        That may be a mouthful, but it allows additional pattern types to be
++        implemented in the future without having to deal with anything but matching
++        the paths sent to them. ``GitWildMatchPattern`` is the implementation of the
++        actual pattern, which internally gets converted into a regular expression.
++        ``PathSpec`` is a simple wrapper around a list of compiled patterns.
++        
++        To make things simpler, we can use the registered name for a pattern class
++        instead of always having to provide a reference to the class itself. The
++        ``GitWildMatchPattern`` class is registered as **gitwildmatch**::
++        
++        	>>> spec = pathspec.PathSpec.from_lines('gitwildmatch', spec.splitlines())
++        
++        If we want to compile the patterns manually, we can do the following::
++        
++        	>>> patterns = map(pathspec.patterns.GitWildMatchPattern, spec.splitlines())
++        	>>> spec = pathspec.PathSpec(patterns)
++        
++        ``PathSpec.from_lines()`` is simply a class method which does just that.
++        
++        If you want to load the patterns from a file, you can pass the file instance
++        directly as well::
++        
++        	>>> with open('patterns.list', 'r') as fh:
++        	...     spec = pathspec.PathSpec.from_lines('gitwildmatch', fh)
++        
++        You can perform matching on a whole directory tree with::
++        
++        	>>> matches = spec.match_tree('path/to/directory')
++        
++        Or you can perform matching on a specific set of file paths with::
++        
++        	>>> matches = spec.match_files(file_paths)
++        
++        Or check to see if an individual file matches::
++        
++        	>>> is_matched = spec.match_file(file_path)
++        
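++        As a minimal end-to-end sketch (the patterns and the directory path here
++        are only examples), the pieces above can be combined like so::
++        
++        	>>> import pathspec
++        	>>> spec = pathspec.PathSpec.from_lines('gitwildmatch', ['*.pyc', 'build/'])
++        	>>> matches = list(spec.match_tree('path/to/directory'))
++        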
++        
++        License
++        -------
++        
++        *pathspec* is licensed under the `Mozilla Public License Version 2.0`_. See
++        `LICENSE`_ or the `FAQ`_ for more information.
++        
++        In summary, you may use *pathspec* with any closed or open source project
++        without affecting the license of the larger work so long as you:
++        
++        - give credit where credit is due,
++        
++        - and release any custom changes made to *pathspec*.
++        
++        .. _`Mozilla Public License Version 2.0`: http://www.mozilla.org/MPL/2.0
++        .. _`LICENSE`: LICENSE
++        .. _`FAQ`: http://www.mozilla.org/MPL/2.0/FAQ.html
++        
++        
++        Source
++        ------
++        
++        The source code for *pathspec* is available from the GitHub repo
++        `cpburnz/python-path-specification`_.
++        
++        .. _`cpburnz/python-path-specification`: https://github.com/cpburnz/python-path-specification
++        
++        
++        Installation
++        ------------
++        
++        *pathspec* requires the following packages:
++        
++        - `setuptools`_
++        
++        *pathspec* can be installed from source with::
++        
++        	python setup.py install
++        
++        *pathspec* is also available for install through `PyPI`_::
++        
++        	pip install pathspec
++        
++        .. _`setuptools`: https://pypi.python.org/pypi/setuptools
++        .. _`PyPI`: http://pypi.python.org/pypi/pathspec
++        
++        
++        Documentation
++        -------------
++        
++        Documentation for *pathspec* is available on `Read the Docs`_.
++        
++        .. _`Read the Docs`: http://python-path-specification.readthedocs.io
++        
++        
++        Other Languages
++        ---------------
++        
++        *pathspec* is also available as a `Ruby gem`_.
++        
++        .. _`Ruby gem`: https://github.com/highb/pathspec-ruby
++        
++        Change History
++        ==============
++        
++        
++        0.7.0 (2019-12-27)
++        ------------------
++        
++        - `Issue #28`_: Add support for Python 3.8, and drop Python 3.4.
++        - `Issue #29`_: Publish bdist wheel.
++        
++        .. _`Issue #28`: https://github.com/cpburnz/python-path-specification/pull/28
++        .. _`Issue #29`: https://github.com/cpburnz/python-path-specification/pull/29
++        
++        
++        0.6.0 (2019-10-03)
++        ------------------
++        
++        - `Issue #24`_: Drop support for Python 2.6, 3.2, and 3.3.
++        - `Issue #25`_: Update README.rst.
++        - `Issue #26`_: Method to escape gitwildmatch.
++        
++        .. _`Issue #24`: https://github.com/cpburnz/python-path-specification/pull/24
++        .. _`Issue #25`: https://github.com/cpburnz/python-path-specification/pull/25
++        .. _`Issue #26`: https://github.com/cpburnz/python-path-specification/pull/26
++        
++        
++        0.5.9 (2018-09-15)
++        ------------------
++        
++        - Fixed file system error handling.
++        
++        
++        0.5.8 (2018-09-15)
++        ------------------
++        
++        - Improved type checking.
++        - Created scripts to test Python 2.6 because Tox removed support for it.
++        - Improved byte string handling in Python 3.
++        - `Issue #22`_: Handle dangling symlinks.
++        
++        .. _`Issue #22`: https://github.com/cpburnz/python-path-specification/issues/22
++        
++        
++        0.5.7 (2018-08-14)
++        ------------------
++        
++        - `Issue #21`_: Fix collections deprecation warning.
++        
++        .. _`Issue #21`: https://github.com/cpburnz/python-path-specification/issues/21
++        
++        
++        0.5.6 (2018-04-06)
++        ------------------
++        
++        - Improved unit tests.
++        - Improved type checking.
++        - `Issue #20`_: Support current directory prefix.
++        
++        .. _`Issue #20`: https://github.com/cpburnz/python-path-specification/issues/20
++        
++        
++        0.5.5 (2017-09-09)
++        ------------------
++        
++        - Add documentation link to README.
++        
++        
++        0.5.4 (2017-09-09)
++        ------------------
++        
++        - `Issue #17`_: Add link to Ruby implementation of *pathspec*.
++        - Add sphinx documentation.
++        
++        .. _`Issue #17`: https://github.com/cpburnz/python-path-specification/pull/17
++        
++        
++        0.5.3 (2017-07-01)
++        ------------------
++        
++        - `Issue #14`_: Fix byte strings for Python 3.
++        - `Issue #15`_: Include "LICENSE" in source package.
++        - `Issue #16`_: Support Python 2.6.
++        
++        .. _`Issue #14`: https://github.com/cpburnz/python-path-specification/issues/14
++        .. _`Issue #15`: https://github.com/cpburnz/python-path-specification/pull/15
++        .. _`Issue #16`: https://github.com/cpburnz/python-path-specification/issues/16
++        
++        
++        0.5.2 (2017-04-04)
++        ------------------
++        
++        - Fixed change log.
++        
++        
++        0.5.1 (2017-04-04)
++        ------------------
++        
++        - `Issue #13`_: Add equality methods to `PathSpec` and `RegexPattern`.
++        
++        .. _`Issue #13`: https://github.com/cpburnz/python-path-specification/pull/13
++        
++        
++        0.5.0 (2016-08-22)
++        ------------------
++        
++        - `Issue #12`_: Add `PathSpec.match_file()`.
++        - Renamed `gitignore.GitIgnorePattern` to `patterns.gitwildmatch.GitWildMatchPattern`.
++        - Deprecated `gitignore.GitIgnorePattern`.
++        
++        .. _`Issue #12`: https://github.com/cpburnz/python-path-specification/issues/12
++        
++        
++        0.4.0 (2016-07-15)
++        ------------------
++        
++        - `Issue #11`_: Support converting patterns into regular expressions without compiling them.
++        - API change: Subclasses of `RegexPattern` should implement `pattern_to_regex()`.
++        
++        .. _`Issue #11`: https://github.com/cpburnz/python-path-specification/issues/11
++        
++        
++        0.3.4 (2015-08-24)
++        ------------------
++        
++        - `Issue #7`_: Fixed non-recursive links.
++        - `Issue #8`_: Fixed edge cases in gitignore patterns.
++        - `Issue #9`_: Fixed minor usage documentation.
++        - Fixed recursion detection.
++        - Fixed trivial incompatibility with Python 3.2.
++        
++        .. _`Issue #7`: https://github.com/cpburnz/python-path-specification/pull/7
++        .. _`Issue #8`: https://github.com/cpburnz/python-path-specification/pull/8
++        .. _`Issue #9`: https://github.com/cpburnz/python-path-specification/pull/9
++        
++        
++        0.3.3 (2014-11-21)
++        ------------------
++        
++        - Improved documentation.
++        
++        
++        0.3.2 (2014-11-08)
++        ------------------
++        
++        - `Issue #5`_: Use tox for testing.
++        - `Issue #6`_: Fixed matching Windows paths.
++        - Improved documentation.
++        - API change: `spec.match_tree()` and `spec.match_files()` now return iterators instead of sets.
++        
++        .. _`Issue #5`: https://github.com/cpburnz/python-path-specification/pull/5
++        .. _`Issue #6`: https://github.com/cpburnz/python-path-specification/issues/6
++        
++        
++        0.3.1 (2014-09-17)
++        ------------------
++        
++        - Updated README.
++        
++        
++        0.3.0 (2014-09-17)
++        ------------------
++        
++        - `Issue #3`_: Fixed trailing slash in gitignore patterns.
++        - `Issue #4`_: Fixed test for trailing slash in gitignore patterns.
++        - Added registered patterns.
++        
++        .. _`Issue #3`: https://github.com/cpburnz/python-path-specification/pull/3
++        .. _`Issue #4`: https://github.com/cpburnz/python-path-specification/pull/4
++        
++        
++        0.2.2 (2013-12-17)
++        ------------------
++        
++        - Fixed setup.py.
++        
++        
++        0.2.1 (2013-12-17)
++        ------------------
++        
++        - Added tests.
++        - Fixed comment gitignore patterns.
++        - Fixed relative path gitignore patterns.
++        
++        
++        0.2.0 (2013-12-07)
++        ------------------
++        
++        - Initial release.
++Platform: UNKNOWN
++Classifier: Development Status :: 4 - Beta
++Classifier: Intended Audience :: Developers
++Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
++Classifier: Operating System :: OS Independent
++Classifier: Programming Language :: Python
++Classifier: Programming Language :: Python :: 2
++Classifier: Programming Language :: Python :: 2.7
++Classifier: Programming Language :: Python :: 3
++Classifier: Programming Language :: Python :: 3.5
++Classifier: Programming Language :: Python :: 3.6
++Classifier: Programming Language :: Python :: 3.7
++Classifier: Programming Language :: Python :: 3.8
++Classifier: Programming Language :: Python :: Implementation :: CPython
++Classifier: Programming Language :: Python :: Implementation :: PyPy
++Classifier: Topic :: Software Development :: Libraries :: Python Modules
++Classifier: Topic :: Utilities
++Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+diff --git a/third_party/python/pathspec/README.rst b/third_party/python/pathspec/README.rst
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/README.rst
+@@ -0,0 +1,153 @@
++
++*pathspec*: Path Specification
++==============================
++
++*pathspec* is a utility library for pattern matching of file paths. So
++far this only includes Git's wildmatch pattern matching which itself is
++derived from Rsync's wildmatch. Git uses wildmatch for its `gitignore`_
++files.
++
++.. _`gitignore`: http://git-scm.com/docs/gitignore
++
++
++Tutorial
++--------
++
++Say you have a "Projects" directory that you want to back up, but you only
++want certain files, ignoring others depending on various conditions::
++
++	>>> import pathspec
++	>>> # The gitignore-style patterns for files to select, but we're including
++	>>> # instead of ignoring.
++	>>> spec = """
++	...
++	... # This is a comment because the line begins with a hash: "#"
++	...
++	... # Include several project directories (and all descendants) relative to
++	... # the current directory. To reference a directory you must end with a
++	... # slash: "/"
++	... /project-a/
++	... /project-b/
++	... /project-c/
++	...
++	... # Patterns can be negated by prefixing with an exclamation mark: "!"
++	...
++	... # Ignore temporary files beginning or ending with "~" and ending with
++	... # ".swp".
++	... !~*
++	... !*~
++	... !*.swp
++	...
++	... # These are Python projects, so ignore compiled Python files from
++	... # testing.
++	... !*.pyc
++	...
++	... # Ignore the build directories but only directly under the project
++	... # directories.
++	... !/*/build/
++	...
++	... """
++
++We want to use the ``GitWildMatchPattern`` class to compile our patterns. The
++``PathSpec`` class provides an interface around pattern implementations::
++
++	>>> spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, spec.splitlines())
++
++That may be a mouthful, but it allows additional pattern types to be
++implemented in the future without having to deal with anything but matching
++the paths sent to them. ``GitWildMatchPattern`` is the implementation of the
++actual pattern, which internally gets converted into a regular expression.
++``PathSpec`` is a simple wrapper around a list of compiled patterns.
++
++To make things simpler, we can use the registered name for a pattern class
++instead of always having to provide a reference to the class itself. The
++``GitWildMatchPattern`` class is registered as **gitwildmatch**::
++
++	>>> spec = pathspec.PathSpec.from_lines('gitwildmatch', spec.splitlines())
++
++If we want to compile the patterns manually, we can do the following::
++
++	>>> patterns = map(pathspec.patterns.GitWildMatchPattern, spec.splitlines())
++	>>> spec = pathspec.PathSpec(patterns)
++
++``PathSpec.from_lines()`` is simply a class method which does just that.
++
++If you want to load the patterns from a file, you can pass the file instance
++directly as well::
++
++	>>> with open('patterns.list', 'r') as fh:
++	...     spec = pathspec.PathSpec.from_lines('gitwildmatch', fh)
++
++You can perform matching on a whole directory tree with::
++
++	>>> matches = spec.match_tree('path/to/directory')
++
++Or you can perform matching on a specific set of file paths with::
++
++	>>> matches = spec.match_files(file_paths)
++
++Or check to see if an individual file matches::
++
++	>>> is_matched = spec.match_file(file_path)
++
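++As a minimal sketch (the pattern and file names here are only examples),
++matching a specific set of paths looks like this::
++
++	>>> import pathspec
++	>>> spec = pathspec.PathSpec.from_lines('gitwildmatch', ['*.pyc'])
++	>>> list(spec.match_files(['foo.py', 'foo.pyc']))
++	['foo.pyc']
++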
++
++License
++-------
++
++*pathspec* is licensed under the `Mozilla Public License Version 2.0`_. See
++`LICENSE`_ or the `FAQ`_ for more information.
++
++In summary, you may use *pathspec* with any closed or open source project
++without affecting the license of the larger work so long as you:
++
++- give credit where credit is due,
++
++- and release any custom changes made to *pathspec*.
++
++.. _`Mozilla Public License Version 2.0`: http://www.mozilla.org/MPL/2.0
++.. _`LICENSE`: LICENSE
++.. _`FAQ`: http://www.mozilla.org/MPL/2.0/FAQ.html
++
++
++Source
++------
++
++The source code for *pathspec* is available from the GitHub repo
++`cpburnz/python-path-specification`_.
++
++.. _`cpburnz/python-path-specification`: https://github.com/cpburnz/python-path-specification
++
++
++Installation
++------------
++
++*pathspec* requires the following packages:
++
++- `setuptools`_
++
++*pathspec* can be installed from source with::
++
++	python setup.py install
++
++*pathspec* is also available for install through `PyPI`_::
++
++	pip install pathspec
++
++.. _`setuptools`: https://pypi.python.org/pypi/setuptools
++.. _`PyPI`: http://pypi.python.org/pypi/pathspec
++
++
++Documentation
++-------------
++
++Documentation for *pathspec* is available on `Read the Docs`_.
++
++.. _`Read the Docs`: http://python-path-specification.readthedocs.io
++
++
++Other Languages
++---------------
++
++*pathspec* is also available as a `Ruby gem`_.
++
++.. _`Ruby gem`: https://github.com/highb/pathspec-ruby
+diff --git a/third_party/python/pathspec/setup.cfg b/third_party/python/pathspec/setup.cfg
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/setup.cfg
+@@ -0,0 +1,7 @@
++[bdist_wheel]
++universal = 1
++
++[egg_info]
++tag_build = 
++tag_date = 0
++
+diff --git a/third_party/python/pathspec/setup.py b/third_party/python/pathspec/setup.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/pathspec/setup.py
+@@ -0,0 +1,44 @@
++# encoding: utf-8
++
++import io
++from setuptools import setup, find_packages
++
++from pathspec import __author__, __email__, __license__, __project__, __version__
++
++# Read the readme and changes files.
++with io.open("README.rst", mode='r', encoding='UTF-8') as fh:
++	readme = fh.read().strip()
++with io.open("CHANGES.rst", mode='r', encoding='UTF-8') as fh:
++	changes = fh.read().strip()
++
++setup(
++	name=__project__,
++	version=__version__,
++	author=__author__,
++	author_email=__email__,
++	url="https://github.com/cpburnz/python-path-specification",
++	description="Utility library for gitignore style pattern matching of file paths.",
++	long_description=readme + "\n\n" + changes,
++	python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
++	classifiers=[
++		"Development Status :: 4 - Beta",
++		"Intended Audience :: Developers",
++		"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
++		"Operating System :: OS Independent",
++		"Programming Language :: Python",
++		"Programming Language :: Python :: 2",
++		"Programming Language :: Python :: 2.7",
++		"Programming Language :: Python :: 3",
++		"Programming Language :: Python :: 3.5",
++		"Programming Language :: Python :: 3.6",
++		"Programming Language :: Python :: 3.7",
++		"Programming Language :: Python :: 3.8",
++		"Programming Language :: Python :: Implementation :: CPython",
++		"Programming Language :: Python :: Implementation :: PyPy",
++		"Topic :: Software Development :: Libraries :: Python Modules",
++		"Topic :: Utilities",
++	],
++	license=__license__,
++	packages=find_packages(),
++	test_suite='pathspec.tests',
++)
+diff --git a/third_party/python/yamllint/LICENSE b/third_party/python/yamllint/LICENSE
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/LICENSE
+@@ -0,0 +1,674 @@
++                    GNU GENERAL PUBLIC LICENSE
++                       Version 3, 29 June 2007
++
++ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
++ Everyone is permitted to copy and distribute verbatim copies
++ of this license document, but changing it is not allowed.
++
++                            Preamble
++
++  The GNU General Public License is a free, copyleft license for
++software and other kinds of works.
++
++  The licenses for most software and other practical works are designed
++to take away your freedom to share and change the works.  By contrast,
++the GNU General Public License is intended to guarantee your freedom to
++share and change all versions of a program--to make sure it remains free
++software for all its users.  We, the Free Software Foundation, use the
++GNU General Public License for most of our software; it applies also to
++any other work released this way by its authors.  You can apply it to
++your programs, too.
++
++  When we speak of free software, we are referring to freedom, not
++price.  Our General Public Licenses are designed to make sure that you
++have the freedom to distribute copies of free software (and charge for
++them if you wish), that you receive source code or can get it if you
++want it, that you can change the software or use pieces of it in new
++free programs, and that you know you can do these things.
++
++  To protect your rights, we need to prevent others from denying you
++these rights or asking you to surrender the rights.  Therefore, you have
++certain responsibilities if you distribute copies of the software, or if
++you modify it: responsibilities to respect the freedom of others.
++
++  For example, if you distribute copies of such a program, whether
++gratis or for a fee, you must pass on to the recipients the same
++freedoms that you received.  You must make sure that they, too, receive
++or can get the source code.  And you must show them these terms so they
++know their rights.
++
++  Developers that use the GNU GPL protect your rights with two steps:
++(1) assert copyright on the software, and (2) offer you this License
++giving you legal permission to copy, distribute and/or modify it.
++
++  For the developers' and authors' protection, the GPL clearly explains
++that there is no warranty for this free software.  For both users' and
++authors' sake, the GPL requires that modified versions be marked as
++changed, so that their problems will not be attributed erroneously to
++authors of previous versions.
++
++  Some devices are designed to deny users access to install or run
++modified versions of the software inside them, although the manufacturer
++can do so.  This is fundamentally incompatible with the aim of
++protecting users' freedom to change the software.  The systematic
++pattern of such abuse occurs in the area of products for individuals to
++use, which is precisely where it is most unacceptable.  Therefore, we
++have designed this version of the GPL to prohibit the practice for those
++products.  If such problems arise substantially in other domains, we
++stand ready to extend this provision to those domains in future versions
++of the GPL, as needed to protect the freedom of users.
++
++  Finally, every program is threatened constantly by software patents.
++States should not allow patents to restrict development and use of
++software on general-purpose computers, but in those that do, we wish to
++avoid the special danger that patents applied to a free program could
++make it effectively proprietary.  To prevent this, the GPL assures that
++patents cannot be used to render the program non-free.
++
++  The precise terms and conditions for copying, distribution and
++modification follow.
++
++                       TERMS AND CONDITIONS
++
++  0. Definitions.
++
++  "This License" refers to version 3 of the GNU General Public License.
++
++  "Copyright" also means copyright-like laws that apply to other kinds of
++works, such as semiconductor masks.
++
++  "The Program" refers to any copyrightable work licensed under this
++License.  Each licensee is addressed as "you".  "Licensees" and
++"recipients" may be individuals or organizations.
++
++  To "modify" a work means to copy from or adapt all or part of the work
++in a fashion requiring copyright permission, other than the making of an
++exact copy.  The resulting work is called a "modified version" of the
++earlier work or a work "based on" the earlier work.
++
++  A "covered work" means either the unmodified Program or a work based
++on the Program.
++
++  To "propagate" a work means to do anything with it that, without
++permission, would make you directly or secondarily liable for
++infringement under applicable copyright law, except executing it on a
++computer or modifying a private copy.  Propagation includes copying,
++distribution (with or without modification), making available to the
++public, and in some countries other activities as well.
++
++  To "convey" a work means any kind of propagation that enables other
++parties to make or receive copies.  Mere interaction with a user through
++a computer network, with no transfer of a copy, is not conveying.
++
++  An interactive user interface displays "Appropriate Legal Notices"
++to the extent that it includes a convenient and prominently visible
++feature that (1) displays an appropriate copyright notice, and (2)
++tells the user that there is no warranty for the work (except to the
++extent that warranties are provided), that licensees may convey the
++work under this License, and how to view a copy of this License.  If
++the interface presents a list of user commands or options, such as a
++menu, a prominent item in the list meets this criterion.
++
++  1. Source Code.
++
++  The "source code" for a work means the preferred form of the work
++for making modifications to it.  "Object code" means any non-source
++form of a work.
++
++  A "Standard Interface" means an interface that either is an official
++standard defined by a recognized standards body, or, in the case of
++interfaces specified for a particular programming language, one that
++is widely used among developers working in that language.
++
++  The "System Libraries" of an executable work include anything, other
++than the work as a whole, that (a) is included in the normal form of
++packaging a Major Component, but which is not part of that Major
++Component, and (b) serves only to enable use of the work with that
++Major Component, or to implement a Standard Interface for which an
++implementation is available to the public in source code form.  A
++"Major Component", in this context, means a major essential component
++(kernel, window system, and so on) of the specific operating system
++(if any) on which the executable work runs, or a compiler used to
++produce the work, or an object code interpreter used to run it.
++
++  The "Corresponding Source" for a work in object code form means all
++the source code needed to generate, install, and (for an executable
++work) run the object code and to modify the work, including scripts to
++control those activities.  However, it does not include the work's
++System Libraries, or general-purpose tools or generally available free
++programs which are used unmodified in performing those activities but
++which are not part of the work.  For example, Corresponding Source
++includes interface definition files associated with source files for
++the work, and the source code for shared libraries and dynamically
++linked subprograms that the work is specifically designed to require,
++such as by intimate data communication or control flow between those
++subprograms and other parts of the work.
++
++  The Corresponding Source need not include anything that users
++can regenerate automatically from other parts of the Corresponding
++Source.
++
++  The Corresponding Source for a work in source code form is that
++same work.
++
++  2. Basic Permissions.
++
++  All rights granted under this License are granted for the term of
++copyright on the Program, and are irrevocable provided the stated
++conditions are met.  This License explicitly affirms your unlimited
++permission to run the unmodified Program.  The output from running a
++covered work is covered by this License only if the output, given its
++content, constitutes a covered work.  This License acknowledges your
++rights of fair use or other equivalent, as provided by copyright law.
++
++  You may make, run and propagate covered works that you do not
++convey, without conditions so long as your license otherwise remains
++in force.  You may convey covered works to others for the sole purpose
++of having them make modifications exclusively for you, or provide you
++with facilities for running those works, provided that you comply with
++the terms of this License in conveying all material for which you do
++not control copyright.  Those thus making or running the covered works
++for you must do so exclusively on your behalf, under your direction
++and control, on terms that prohibit them from making any copies of
++your copyrighted material outside their relationship with you.
++
++  Conveying under any other circumstances is permitted solely under
++the conditions stated below.  Sublicensing is not allowed; section 10
++makes it unnecessary.
++
++  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
++
++  No covered work shall be deemed part of an effective technological
++measure under any applicable law fulfilling obligations under article
++11 of the WIPO copyright treaty adopted on 20 December 1996, or
++similar laws prohibiting or restricting circumvention of such
++measures.
++
++  When you convey a covered work, you waive any legal power to forbid
++circumvention of technological measures to the extent such circumvention
++is effected by exercising rights under this License with respect to
++the covered work, and you disclaim any intention to limit operation or
++modification of the work as a means of enforcing, against the work's
++users, your or third parties' legal rights to forbid circumvention of
++technological measures.
++
++  4. Conveying Verbatim Copies.
++
++  You may convey verbatim copies of the Program's source code as you
++receive it, in any medium, provided that you conspicuously and
++appropriately publish on each copy an appropriate copyright notice;
++keep intact all notices stating that this License and any
++non-permissive terms added in accord with section 7 apply to the code;
++keep intact all notices of the absence of any warranty; and give all
++recipients a copy of this License along with the Program.
++
++  You may charge any price or no price for each copy that you convey,
++and you may offer support or warranty protection for a fee.
++
++  5. Conveying Modified Source Versions.
++
++  You may convey a work based on the Program, or the modifications to
++produce it from the Program, in the form of source code under the
++terms of section 4, provided that you also meet all of these conditions:
++
++    a) The work must carry prominent notices stating that you modified
++    it, and giving a relevant date.
++
++    b) The work must carry prominent notices stating that it is
++    released under this License and any conditions added under section
++    7.  This requirement modifies the requirement in section 4 to
++    "keep intact all notices".
++
++    c) You must license the entire work, as a whole, under this
++    License to anyone who comes into possession of a copy.  This
++    License will therefore apply, along with any applicable section 7
++    additional terms, to the whole of the work, and all its parts,
++    regardless of how they are packaged.  This License gives no
++    permission to license the work in any other way, but it does not
++    invalidate such permission if you have separately received it.
++
++    d) If the work has interactive user interfaces, each must display
++    Appropriate Legal Notices; however, if the Program has interactive
++    interfaces that do not display Appropriate Legal Notices, your
++    work need not make them do so.
++
++  A compilation of a covered work with other separate and independent
++works, which are not by their nature extensions of the covered work,
++and which are not combined with it such as to form a larger program,
++in or on a volume of a storage or distribution medium, is called an
++"aggregate" if the compilation and its resulting copyright are not
++used to limit the access or legal rights of the compilation's users
++beyond what the individual works permit.  Inclusion of a covered work
++in an aggregate does not cause this License to apply to the other
++parts of the aggregate.
++
++  6. Conveying Non-Source Forms.
++
++  You may convey a covered work in object code form under the terms
++of sections 4 and 5, provided that you also convey the
++machine-readable Corresponding Source under the terms of this License,
++in one of these ways:
++
++    a) Convey the object code in, or embodied in, a physical product
++    (including a physical distribution medium), accompanied by the
++    Corresponding Source fixed on a durable physical medium
++    customarily used for software interchange.
++
++    b) Convey the object code in, or embodied in, a physical product
++    (including a physical distribution medium), accompanied by a
++    written offer, valid for at least three years and valid for as
++    long as you offer spare parts or customer support for that product
++    model, to give anyone who possesses the object code either (1) a
++    copy of the Corresponding Source for all the software in the
++    product that is covered by this License, on a durable physical
++    medium customarily used for software interchange, for a price no
++    more than your reasonable cost of physically performing this
++    conveying of source, or (2) access to copy the
++    Corresponding Source from a network server at no charge.
++
++    c) Convey individual copies of the object code with a copy of the
++    written offer to provide the Corresponding Source.  This
++    alternative is allowed only occasionally and noncommercially, and
++    only if you received the object code with such an offer, in accord
++    with subsection 6b.
++
++    d) Convey the object code by offering access from a designated
++    place (gratis or for a charge), and offer equivalent access to the
++    Corresponding Source in the same way through the same place at no
++    further charge.  You need not require recipients to copy the
++    Corresponding Source along with the object code.  If the place to
++    copy the object code is a network server, the Corresponding Source
++    may be on a different server (operated by you or a third party)
++    that supports equivalent copying facilities, provided you maintain
++    clear directions next to the object code saying where to find the
++    Corresponding Source.  Regardless of what server hosts the
++    Corresponding Source, you remain obligated to ensure that it is
++    available for as long as needed to satisfy these requirements.
++
++    e) Convey the object code using peer-to-peer transmission, provided
++    you inform other peers where the object code and Corresponding
++    Source of the work are being offered to the general public at no
++    charge under subsection 6d.
++
++  A separable portion of the object code, whose source code is excluded
++from the Corresponding Source as a System Library, need not be
++included in conveying the object code work.
++
++  A "User Product" is either (1) a "consumer product", which means any
++tangible personal property which is normally used for personal, family,
++or household purposes, or (2) anything designed or sold for incorporation
++into a dwelling.  In determining whether a product is a consumer product,
++doubtful cases shall be resolved in favor of coverage.  For a particular
++product received by a particular user, "normally used" refers to a
++typical or common use of that class of product, regardless of the status
++of the particular user or of the way in which the particular user
++actually uses, or expects or is expected to use, the product.  A product
++is a consumer product regardless of whether the product has substantial
++commercial, industrial or non-consumer uses, unless such uses represent
++the only significant mode of use of the product.
++
++  "Installation Information" for a User Product means any methods,
++procedures, authorization keys, or other information required to install
++and execute modified versions of a covered work in that User Product from
++a modified version of its Corresponding Source.  The information must
++suffice to ensure that the continued functioning of the modified object
++code is in no case prevented or interfered with solely because
++modification has been made.
++
++  If you convey an object code work under this section in, or with, or
++specifically for use in, a User Product, and the conveying occurs as
++part of a transaction in which the right of possession and use of the
++User Product is transferred to the recipient in perpetuity or for a
++fixed term (regardless of how the transaction is characterized), the
++Corresponding Source conveyed under this section must be accompanied
++by the Installation Information.  But this requirement does not apply
++if neither you nor any third party retains the ability to install
++modified object code on the User Product (for example, the work has
++been installed in ROM).
++
++  The requirement to provide Installation Information does not include a
++requirement to continue to provide support service, warranty, or updates
++for a work that has been modified or installed by the recipient, or for
++the User Product in which it has been modified or installed.  Access to a
++network may be denied when the modification itself materially and
++adversely affects the operation of the network or violates the rules and
++protocols for communication across the network.
++
++  Corresponding Source conveyed, and Installation Information provided,
++in accord with this section must be in a format that is publicly
++documented (and with an implementation available to the public in
++source code form), and must require no special password or key for
++unpacking, reading or copying.
++
++  7. Additional Terms.
++
++  "Additional permissions" are terms that supplement the terms of this
++License by making exceptions from one or more of its conditions.
++Additional permissions that are applicable to the entire Program shall
++be treated as though they were included in this License, to the extent
++that they are valid under applicable law.  If additional permissions
++apply only to part of the Program, that part may be used separately
++under those permissions, but the entire Program remains governed by
++this License without regard to the additional permissions.
++
++  When you convey a copy of a covered work, you may at your option
++remove any additional permissions from that copy, or from any part of
++it.  (Additional permissions may be written to require their own
++removal in certain cases when you modify the work.)  You may place
++additional permissions on material, added by you to a covered work,
++for which you have or can give appropriate copyright permission.
++
++  Notwithstanding any other provision of this License, for material you
++add to a covered work, you may (if authorized by the copyright holders of
++that material) supplement the terms of this License with terms:
++
++    a) Disclaiming warranty or limiting liability differently from the
++    terms of sections 15 and 16 of this License; or
++
++    b) Requiring preservation of specified reasonable legal notices or
++    author attributions in that material or in the Appropriate Legal
++    Notices displayed by works containing it; or
++
++    c) Prohibiting misrepresentation of the origin of that material, or
++    requiring that modified versions of such material be marked in
++    reasonable ways as different from the original version; or
++
++    d) Limiting the use for publicity purposes of names of licensors or
++    authors of the material; or
++
++    e) Declining to grant rights under trademark law for use of some
++    trade names, trademarks, or service marks; or
++
++    f) Requiring indemnification of licensors and authors of that
++    material by anyone who conveys the material (or modified versions of
++    it) with contractual assumptions of liability to the recipient, for
++    any liability that these contractual assumptions directly impose on
++    those licensors and authors.
++
++  All other non-permissive additional terms are considered "further
++restrictions" within the meaning of section 10.  If the Program as you
++received it, or any part of it, contains a notice stating that it is
++governed by this License along with a term that is a further
++restriction, you may remove that term.  If a license document contains
++a further restriction but permits relicensing or conveying under this
++License, you may add to a covered work material governed by the terms
++of that license document, provided that the further restriction does
++not survive such relicensing or conveying.
++
++  If you add terms to a covered work in accord with this section, you
++must place, in the relevant source files, a statement of the
++additional terms that apply to those files, or a notice indicating
++where to find the applicable terms.
++
++  Additional terms, permissive or non-permissive, may be stated in the
++form of a separately written license, or stated as exceptions;
++the above requirements apply either way.
++
++  8. Termination.
++
++  You may not propagate or modify a covered work except as expressly
++provided under this License.  Any attempt otherwise to propagate or
++modify it is void, and will automatically terminate your rights under
++this License (including any patent licenses granted under the third
++paragraph of section 11).
++
++  However, if you cease all violation of this License, then your
++license from a particular copyright holder is reinstated (a)
++provisionally, unless and until the copyright holder explicitly and
++finally terminates your license, and (b) permanently, if the copyright
++holder fails to notify you of the violation by some reasonable means
++prior to 60 days after the cessation.
++
++  Moreover, your license from a particular copyright holder is
++reinstated permanently if the copyright holder notifies you of the
++violation by some reasonable means, this is the first time you have
++received notice of violation of this License (for any work) from that
++copyright holder, and you cure the violation prior to 30 days after
++your receipt of the notice.
++
++  Termination of your rights under this section does not terminate the
++licenses of parties who have received copies or rights from you under
++this License.  If your rights have been terminated and not permanently
++reinstated, you do not qualify to receive new licenses for the same
++material under section 10.
++
++  9. Acceptance Not Required for Having Copies.
++
++  You are not required to accept this License in order to receive or
++run a copy of the Program.  Ancillary propagation of a covered work
++occurring solely as a consequence of using peer-to-peer transmission
++to receive a copy likewise does not require acceptance.  However,
++nothing other than this License grants you permission to propagate or
++modify any covered work.  These actions infringe copyright if you do
++not accept this License.  Therefore, by modifying or propagating a
++covered work, you indicate your acceptance of this License to do so.
++
++  10. Automatic Licensing of Downstream Recipients.
++
++  Each time you convey a covered work, the recipient automatically
++receives a license from the original licensors, to run, modify and
++propagate that work, subject to this License.  You are not responsible
++for enforcing compliance by third parties with this License.
++
++  An "entity transaction" is a transaction transferring control of an
++organization, or substantially all assets of one, or subdividing an
++organization, or merging organizations.  If propagation of a covered
++work results from an entity transaction, each party to that
++transaction who receives a copy of the work also receives whatever
++licenses to the work the party's predecessor in interest had or could
++give under the previous paragraph, plus a right to possession of the
++Corresponding Source of the work from the predecessor in interest, if
++the predecessor has it or can get it with reasonable efforts.
++
++  You may not impose any further restrictions on the exercise of the
++rights granted or affirmed under this License.  For example, you may
++not impose a license fee, royalty, or other charge for exercise of
++rights granted under this License, and you may not initiate litigation
++(including a cross-claim or counterclaim in a lawsuit) alleging that
++any patent claim is infringed by making, using, selling, offering for
++sale, or importing the Program or any portion of it.
++
++  11. Patents.
++
++  A "contributor" is a copyright holder who authorizes use under this
++License of the Program or a work on which the Program is based.  The
++work thus licensed is called the contributor's "contributor version".
++
++  A contributor's "essential patent claims" are all patent claims
++owned or controlled by the contributor, whether already acquired or
++hereafter acquired, that would be infringed by some manner, permitted
++by this License, of making, using, or selling its contributor version,
++but do not include claims that would be infringed only as a
++consequence of further modification of the contributor version.  For
++purposes of this definition, "control" includes the right to grant
++patent sublicenses in a manner consistent with the requirements of
++this License.
++
++  Each contributor grants you a non-exclusive, worldwide, royalty-free
++patent license under the contributor's essential patent claims, to
++make, use, sell, offer for sale, import and otherwise run, modify and
++propagate the contents of its contributor version.
++
++  In the following three paragraphs, a "patent license" is any express
++agreement or commitment, however denominated, not to enforce a patent
++(such as an express permission to practice a patent or covenant not to
++sue for patent infringement).  To "grant" such a patent license to a
++party means to make such an agreement or commitment not to enforce a
++patent against the party.
++
++  If you convey a covered work, knowingly relying on a patent license,
++and the Corresponding Source of the work is not available for anyone
++to copy, free of charge and under the terms of this License, through a
++publicly available network server or other readily accessible means,
++then you must either (1) cause the Corresponding Source to be so
++available, or (2) arrange to deprive yourself of the benefit of the
++patent license for this particular work, or (3) arrange, in a manner
++consistent with the requirements of this License, to extend the patent
++license to downstream recipients.  "Knowingly relying" means you have
++actual knowledge that, but for the patent license, your conveying the
++covered work in a country, or your recipient's use of the covered work
++in a country, would infringe one or more identifiable patents in that
++country that you have reason to believe are valid.
++
++  If, pursuant to or in connection with a single transaction or
++arrangement, you convey, or propagate by procuring conveyance of, a
++covered work, and grant a patent license to some of the parties
++receiving the covered work authorizing them to use, propagate, modify
++or convey a specific copy of the covered work, then the patent license
++you grant is automatically extended to all recipients of the covered
++work and works based on it.
++
++  A patent license is "discriminatory" if it does not include within
++the scope of its coverage, prohibits the exercise of, or is
++conditioned on the non-exercise of one or more of the rights that are
++specifically granted under this License.  You may not convey a covered
++work if you are a party to an arrangement with a third party that is
++in the business of distributing software, under which you make payment
++to the third party based on the extent of your activity of conveying
++the work, and under which the third party grants, to any of the
++parties who would receive the covered work from you, a discriminatory
++patent license (a) in connection with copies of the covered work
++conveyed by you (or copies made from those copies), or (b) primarily
++for and in connection with specific products or compilations that
++contain the covered work, unless you entered into that arrangement,
++or that patent license was granted, prior to 28 March 2007.
++
++  Nothing in this License shall be construed as excluding or limiting
++any implied license or other defenses to infringement that may
++otherwise be available to you under applicable patent law.
++
++  12. No Surrender of Others' Freedom.
++
++  If conditions are imposed on you (whether by court order, agreement or
++otherwise) that contradict the conditions of this License, they do not
++excuse you from the conditions of this License.  If you cannot convey a
++covered work so as to satisfy simultaneously your obligations under this
++License and any other pertinent obligations, then as a consequence you may
++not convey it at all.  For example, if you agree to terms that obligate you
++to collect a royalty for further conveying from those to whom you convey
++the Program, the only way you could satisfy both those terms and this
++License would be to refrain entirely from conveying the Program.
++
++  13. Use with the GNU Affero General Public License.
++
++  Notwithstanding any other provision of this License, you have
++permission to link or combine any covered work with a work licensed
++under version 3 of the GNU Affero General Public License into a single
++combined work, and to convey the resulting work.  The terms of this
++License will continue to apply to the part which is the covered work,
++but the special requirements of the GNU Affero General Public License,
++section 13, concerning interaction through a network will apply to the
++combination as such.
++
++  14. Revised Versions of this License.
++
++  The Free Software Foundation may publish revised and/or new versions of
++the GNU General Public License from time to time.  Such new versions will
++be similar in spirit to the present version, but may differ in detail to
++address new problems or concerns.
++
++  Each version is given a distinguishing version number.  If the
++Program specifies that a certain numbered version of the GNU General
++Public License "or any later version" applies to it, you have the
++option of following the terms and conditions either of that numbered
++version or of any later version published by the Free Software
++Foundation.  If the Program does not specify a version number of the
++GNU General Public License, you may choose any version ever published
++by the Free Software Foundation.
++
++  If the Program specifies that a proxy can decide which future
++versions of the GNU General Public License can be used, that proxy's
++public statement of acceptance of a version permanently authorizes you
++to choose that version for the Program.
++
++  Later license versions may give you additional or different
++permissions.  However, no additional obligations are imposed on any
++author or copyright holder as a result of your choosing to follow a
++later version.
++
++  15. Disclaimer of Warranty.
++
++  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
++APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
++HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
++OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
++THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
++PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
++IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
++ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
++
++  16. Limitation of Liability.
++
++  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
++WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
++THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
++GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
++USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
++DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
++PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
++EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
++SUCH DAMAGES.
++
++  17. Interpretation of Sections 15 and 16.
++
++  If the disclaimer of warranty and limitation of liability provided
++above cannot be given local legal effect according to their terms,
++reviewing courts shall apply local law that most closely approximates
++an absolute waiver of all civil liability in connection with the
++Program, unless a warranty or assumption of liability accompanies a
++copy of the Program in return for a fee.
++
++                     END OF TERMS AND CONDITIONS
++
++            How to Apply These Terms to Your New Programs
++
++  If you develop a new program, and you want it to be of the greatest
++possible use to the public, the best way to achieve this is to make it
++free software which everyone can redistribute and change under these terms.
++
++  To do so, attach the following notices to the program.  It is safest
++to attach them to the start of each source file to most effectively
++state the exclusion of warranty; and each file should have at least
++the "copyright" line and a pointer to where the full notice is found.
++
++    <one line to give the program's name and a brief idea of what it does.>
++    Copyright (C) <year>  <name of author>
++
++    This program is free software: you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation, either version 3 of the License, or
++    (at your option) any later version.
++
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
++
++    You should have received a copy of the GNU General Public License
++    along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++Also add information on how to contact you by electronic and paper mail.
++
++  If the program does terminal interaction, make it output a short
++notice like this when it starts in an interactive mode:
++
++    <program>  Copyright (C) <year>  <name of author>
++    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
++    This is free software, and you are welcome to redistribute it
++    under certain conditions; type `show c' for details.
++
++The hypothetical commands `show w' and `show c' should show the appropriate
++parts of the General Public License.  Of course, your program's commands
++might be different; for a GUI interface, you would use an "about box".
++
++  You should also get your employer (if you work as a programmer) or school,
++if any, to sign a "copyright disclaimer" for the program, if necessary.
++For more information on this, and how to apply and follow the GNU GPL, see
++<http://www.gnu.org/licenses/>.
++
++  The GNU General Public License does not permit incorporating your program
++into proprietary programs.  If your program is a subroutine library, you
++may consider it more useful to permit linking proprietary applications with
++the library.  If this is what you want to do, use the GNU Lesser General
++Public License instead of this License.  But first, please read
++<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+diff --git a/third_party/python/yamllint/MANIFEST.in b/third_party/python/yamllint/MANIFEST.in
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/MANIFEST.in
+@@ -0,0 +1,4 @@
++include LICENSE
++include README.rst
++include docs/*
++include tests/*.py tests/rules/*.py tests/yaml-1.2-spec-examples/*
+diff --git a/third_party/python/yamllint/PKG-INFO b/third_party/python/yamllint/PKG-INFO
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/PKG-INFO
+@@ -0,0 +1,30 @@
++Metadata-Version: 1.2
++Name: yamllint
++Version: 1.20.0
++Summary: A linter for YAML files.
++Home-page: https://github.com/adrienverge/yamllint
++Author: Adrien Vergé
++License: GPLv3
++Description: A linter for YAML files.
++        
++        yamllint does not only check for syntax validity, but for weirdnesses like key
++        repetition and cosmetic problems such as lines length, trailing spaces,
++        indentation, etc.
++Keywords: yaml,lint,linter,syntax,checker
++Platform: UNKNOWN
++Classifier: Development Status :: 5 - Production/Stable
++Classifier: Environment :: Console
++Classifier: Intended Audience :: Developers
++Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
++Classifier: Programming Language :: Python :: 2
++Classifier: Programming Language :: Python :: 2.7
++Classifier: Programming Language :: Python :: 3
++Classifier: Programming Language :: Python :: 3.4
++Classifier: Programming Language :: Python :: 3.5
++Classifier: Programming Language :: Python :: 3.6
++Classifier: Programming Language :: Python :: 3.7
++Classifier: Topic :: Software Development
++Classifier: Topic :: Software Development :: Debuggers
++Classifier: Topic :: Software Development :: Quality Assurance
++Classifier: Topic :: Software Development :: Testing
++Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+diff --git a/third_party/python/yamllint/README.rst b/third_party/python/yamllint/README.rst
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/README.rst
+@@ -0,0 +1,140 @@
++yamllint
++========
++
++A linter for YAML files.
++
++yamllint does not only check for syntax validity, but for weirdnesses like key
++repetition and cosmetic problems such as lines length, trailing spaces,
++indentation, etc.
++
++.. image::
++   https://travis-ci.org/adrienverge/yamllint.svg?branch=master
++   :target: https://travis-ci.org/adrienverge/yamllint
++   :alt: CI tests status
++.. image::
++   https://coveralls.io/repos/github/adrienverge/yamllint/badge.svg?branch=master
++   :target: https://coveralls.io/github/adrienverge/yamllint?branch=master
++   :alt: Code coverage status
++.. image:: https://readthedocs.org/projects/yamllint/badge/?version=latest
++   :target: https://yamllint.readthedocs.io/en/latest/?badge=latest
++   :alt: Documentation status
++
++Written in Python (compatible with Python 2 & 3).
++
++Documentation
++-------------
++
++https://yamllint.readthedocs.io/
++
++Overview
++--------
++
++Screenshot
++^^^^^^^^^^
++
++.. image:: docs/screenshot.png
++   :alt: yamllint screenshot
++
++Installation
++^^^^^^^^^^^^
++
++Using pip, the Python package manager:
++
++.. code:: bash
++
++ pip install --user yamllint
++
++yamllint is also packaged for all major operating systems, see installation
++examples (``dnf``, ``apt-get``...) `in the documentation
++<https://yamllint.readthedocs.io/en/stable/quickstart.html>`_.
++
++Usage
++^^^^^
++
++.. code:: bash
++
++ # Lint one or more files
++ yamllint my_file.yml my_other_file.yaml ...
++
++.. code:: bash
++
++ # Lint all YAML files in a directory
++ yamllint .
++
++.. code:: bash
++
++ # Use a pre-defined lint configuration
++ yamllint -d relaxed file.yaml
++
++ # Use a custom lint configuration
++ yamllint -c /path/to/myconfig file-to-lint.yaml
++
++.. code:: bash
++
++ # Output a parsable format (for syntax checking in editors like Vim, emacs...)
++ yamllint -f parsable file.yaml
++
++`Read more in the complete documentation! <https://yamllint.readthedocs.io/>`_
++
++Features
++^^^^^^^^
++
++Here is a yamllint configuration file example:
++
++.. code:: yaml
++
++ extends: default
++
++ rules:
++   # 80 chars should be enough, but don't fail if a line is longer
++   line-length:
++     max: 80
++     level: warning
++
++   # don't bother me with this rule
++   indentation: disable
++
++Within a YAML file, special comments can be used to disable checks for a single
++line:
++
++.. code:: yaml
++
++ This line is waaaaaaaaaay too long  # yamllint disable-line
++
++or for a whole block:
++
++.. code:: yaml
++
++ # yamllint disable rule:colons
++ - Lorem       : ipsum
++   dolor       : sit amet,
++   consectetur : adipiscing elit
++ # yamllint enable
++
++Specific files can be ignored (totally or for some rules only) using a
++``.gitignore``-style pattern:
++
++.. code:: yaml
++
++ # For all rules
++ ignore: |
++   *.dont-lint-me.yaml
++   /bin/
++   !/bin/*.lint-me-anyway.yaml
++
++ rules:
++   key-duplicates:
++     ignore: |
++       generated
++       *.template.yaml
++   trailing-spaces:
++     ignore: |
++       *.ignore-trailing-spaces.yaml
++       /ascii-art/*
++
++`Read more in the complete documentation! <https://yamllint.readthedocs.io/>`_
++
++License
++-------
++
++`GPL version 3 <LICENSE>`_
+diff --git a/third_party/python/yamllint/setup.cfg b/third_party/python/yamllint/setup.cfg
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/setup.cfg
+@@ -0,0 +1,17 @@
++[bdist_wheel]
++universal = 1
++
++[flake8]
++import-order-style = pep8
++application-import-names = yamllint
++
++[build_sphinx]
++all-files = 1
++source-dir = docs
++build-dir = docs/_build
++warning-is-error = 1
++
++[egg_info]
++tag_build = 
++tag_date = 0
++
+diff --git a/third_party/python/yamllint/setup.py b/third_party/python/yamllint/setup.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/yamllint/setup.py
+@@ -0,0 +1,56 @@
++# -*- coding: utf-8 -*-
++# Copyright (C) 2016 Adrien Vergé
++#
++# This program is free software: you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see <http://www.gnu.org/licenses/>.
++
++from setuptools import find_packages, setup
++
++from yamllint import (__author__, __license__,
++                      APP_NAME, APP_VERSION, APP_DESCRIPTION)
++
++
++setup(
++    name=APP_NAME,
++    version=APP_VERSION,
++    author=__author__,
++    description=APP_DESCRIPTION.split('\n')[0],
++    long_description=APP_DESCRIPTION,
++    license=__license__,
++    keywords=['yaml', 'lint', 'linter', 'syntax', 'checker'],
++    url='https://github.com/adrienverge/yamllint',
++    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
++    classifiers=[
++        'Development Status :: 5 - Production/Stable',
++        'Environment :: Console',
++        'Intended Audience :: Developers',
++        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
++        'Programming Language :: Python :: 2',
++        'Programming Language :: Python :: 2.7',
++        'Programming Language :: Python :: 3',
++        'Programming Language :: Python :: 3.4',
++        'Programming Language :: Python :: 3.5',
++        'Programming Language :: Python :: 3.6',
++        'Programming Language :: Python :: 3.7',
++        'Topic :: Software Development',
++        'Topic :: Software Development :: Debuggers',
++        'Topic :: Software Development :: Quality Assurance',
++        'Topic :: Software Development :: Testing',
++    ],
++
++    packages=find_packages(exclude=['tests', 'tests.*']),
++    entry_points={'console_scripts': ['yamllint=yamllint.cli:run']},
++    package_data={'yamllint': ['conf/*.yaml']},
++    install_requires=['pathspec >=0.5.3', 'pyyaml'],
++    test_suite='tests',
++)
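
For context on the newly vendored package: beyond the `yamllint` CLI shown in the README above, yamllint also exposes a small documented module interface, which is how an in-tree consumer could lint YAML in-process instead of spawning a subprocess. A minimal sketch against that interface (the input file name is illustrative):

    # Lint a YAML file in-process with the vendored yamllint.
    from yamllint import linter
    from yamllint.config import YamlLintConfig

    config = YamlLintConfig("extends: default\n"
                            "rules:\n"
                            "  line-length: {max: 80}")
    with open("metrics.yaml") as f:  # hypothetical input file
        for problem in linter.run(f, config):
            # linter.run() yields LintProblem objects carrying the position,
            # the rule id and a human-readable description.
            print("%d:%d [%s] %s" % (problem.line, problem.column,
                                     problem.rule, problem.desc))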

+ 3 - 3
mozilla-release/patches/1636251-1-PARTIAL-78a1.patch

@@ -2,7 +2,7 @@
 # User Mitchell Hentges <mhentges@mozilla.com>
 # Date 1589314065 0
 # Node ID 21b555288b06d4a352ee9f1475b82075bcbf8f2b
-# Parent  79bf90e8c2f1e104cb56abaca0f152ae9f0526c3
+# Parent  66687da42369c7926d7fa99bdf5d40b6c17cac7b
 Bug 1636251: vendor sentry python package r=rstewart
 
 urllib3 is needed by Sentry for its HTTP communication.
@@ -12,7 +12,7 @@ Differential Revision: https://phabricator.services.mozilla.com/D74737
 diff --git a/build/virtualenv_packages.txt b/build/virtualenv_packages.txt
 --- a/build/virtualenv_packages.txt
 +++ b/build/virtualenv_packages.txt
-@@ -55,16 +55,17 @@ mozilla.pth:third_party/python/requests-
+@@ -46,16 +46,17 @@ mozilla.pth:third_party/python/requests-
  python2:mozilla.pth:third_party/python/scandir
  mozilla.pth:third_party/python/slugid
  mozilla.pth:third_party/python/py
@@ -25,11 +25,11 @@ diff --git a/build/virtualenv_packages.txt b/build/virtualenv_packages.txt
  mozilla.pth:third_party/python/voluptuous
  mozilla.pth:third_party/python/json-e
  mozilla.pth:third_party/python/yamllint
- mozilla.pth:third_party/python/zipp
  mozilla.pth:build
  mozilla.pth:build/pymake
  mozilla.pth:config
  mozilla.pth:config/mozunit
+ mozilla.pth:dom/bindings
 diff --git a/third_party/python/requirements.txt b/third_party/python/requirements.txt
 --- a/third_party/python/requirements.txt
 +++ b/third_party/python/requirements.txt

+ 0 - 81
mozilla-release/patches/1654663-80a1.patch

@@ -1,81 +0,0 @@
-# HG changeset patch
-# User Mitchell Hentges <mhentges@mozilla.com>
-# Date 1595606749 0
-# Node ID aae7491870ddb91b333d2a90b3dce3c60d84c3e9
-# Parent  55a7d1ffde71dcf1068670ee035d2cf06daad7da
-Bug 1654663: Removes glean_parser from virtualenv_packages.txt r=firefox-build-system-reviewers,rstewart
-
-In preparation for Glean telemetry, we scope the availability of the out-of-date vendored
-"glean_parser" library to its one usage: "run_glean_parser.py".
-
-This allows Glean telemetry to load its modern "glean_parser" dependency from the
-"--user" package environment.
-
-Differential Revision: https://phabricator.services.mozilla.com/D84610
-
-diff --git a/build/virtualenv_packages.txt b/build/virtualenv_packages.txt
---- a/build/virtualenv_packages.txt
-+++ b/build/virtualenv_packages.txt
-@@ -21,17 +21,16 @@ mozilla.pth:third_party/python/diskcache
- mozilla.pth:third_party/python/distro
- mozilla.pth:third_party/python/dlmanager
- mozilla.pth:third_party/python/ecdsa/src
- python2:mozilla.pth:third_party/python/enum34
- mozilla.pth:third_party/python/fluent.migrate
- mozilla.pth:third_party/python/fluent.syntax
- mozilla.pth:third_party/python/funcsigs
- python2:mozilla.pth:third_party/python/futures
--python3:mozilla.pth:third_party/python/glean_parser
- mozilla.pth:third_party/python/importlib_metadata
- mozilla.pth:third_party/python/iso8601
- mozilla.pth:third_party/python/Jinja2/src
- mozilla.pth:third_party/python/jsonschema
- mozilla.pth:third_party/python/MarkupSafe/src
- mozilla.pth:third_party/python/more-itertools
- mozilla.pth:third_party/python/packaging
- mozilla.pth:third_party/python/pathlib2
-diff --git a/toolkit/components/telemetry/build_scripts/run_glean_parser.py.1654663.later b/toolkit/components/telemetry/build_scripts/run_glean_parser.py.1654663.later
-new file mode 100644
---- /dev/null
-+++ b/toolkit/components/telemetry/build_scripts/run_glean_parser.py.1654663.later
-@@ -0,0 +1,40 @@
-+--- run_glean_parser.py
-++++ run_glean_parser.py
-+@@ -1,17 +1,35 @@
-+ # This Source Code Form is subject to the terms of the Mozilla Public
-+ # License, v. 2.0. If a copy of the MPL was not distributed with this
-+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+ 
-+ import sys
-+-
-+-from glean_parser import lint
-+ from pathlib import Path
-+ 
-+ 
-+ def main(output, *filenames):
-++    # Unlike most vendored packages, we don't want "glean_parser" in
-++    # "build/virtualenv_packages.txt" because it interferes with mach's usage of Glean.
-++    # This is because Glean ("glean_sdk"):
-++    # * Has native code (so, can't be vendored)
-++    # * Also depends on "glean_parser"
-++    # Since this file has different version constraints than mach on "glean_parser",
-++    # we want the two different consumers to own their own separate "glean_parser"
-++    # packages.
-++    #
-++    # This is solved by:
-++    # * Having mach import Glean (and transitively "glean_parser") via the "--user"
-++    #   package environment. To accomplish this, the vendored "glean_parser" is removed
-++    #   from "virtualenv_packages.txt".
-++    # * Having this script import "glean_parser" from the vendored location. This is
-++    #   done by manually adding it to the pythonpath.
-++
-++    srcdir = Path(__file__).joinpath('../../../../../')
-++    glean_parser_path = srcdir.joinpath('third_party/python/glean_parser')
-++    sys.path.insert(0, str(glean_parser_path.resolve()))
-++    from glean_parser import lint
-+     if lint.glinter([Path(x) for x in filenames], {"allow_reserved": False}):
-+         sys.exit(1)
-+ 
-+ 
-+ if __name__ == '__main__':
-+     main(sys.stdout, *sys.argv[1:])
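
The patch removed above documents the interim workaround in full: rather than exposing the vendored `glean_parser` to every mach command, `run_glean_parser.py` spliced the vendored directory onto `sys.path` immediately before the import. Reduced to its essentials, the pattern looks like this (the `parents[...]` depth depends on where the script sits in the tree):

    import sys
    from pathlib import Path

    # Walk up from this file to the source root (four levels for a script in
    # toolkit/components/telemetry/build_scripts/), then put the vendored
    # package first on sys.path so the import resolves to the in-tree copy.
    srcdir = Path(__file__).resolve().parents[4]
    sys.path.insert(0, str(srcdir / "third_party" / "python" / "glean_parser"))

    from glean_parser import lint  # the vendored copy, not a --user install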

+ 1 - 67
mozilla-release/patches/1655781-81a1.patch → mozilla-release/patches/1655781-PARTIAL-81a1.patch

@@ -2,7 +2,7 @@
 # User Ricky Stewart <rstewart@mozilla.com>
 # Date 1595960995 0
 # Node ID 3cf9282868b948b34759eb4924cffd18ba1ec7e7
-# Parent  11d1b0cf2db06a56d821b7d35d196b37b66af384
+# Parent  baaf91dbe0929e4b6aa6941034a4068e5bd93932
 Bug 1655781- Allow specifying that certain packages should only be included in a `virtualenv` when parsing `virtualenv_packages.txt` r=chutten,nalexander
 
 This solves the same problem we attempted to solve in bug 1654663. That was a low-cost, sensible solution when there was only one in-build reference to `glean_parser`, but with project FOG we're about to drastically increase the in-build reliance on the library, so the ad-hoc `sys.path` manipulation is an increasingly insensible solution. Here we address this in a first-class way by specifying that `glean_parser` should be imported in `virtualenv`s, but NOT by top-level `mach` commands that run outside of an in-`objdir` `virtualenv`.
@@ -32,27 +32,6 @@ diff --git a/build/mach_bootstrap.py b/build/mach_bootstrap.py
                  pass
  
          if package[0] in ('windows', '!windows'):
-diff --git a/build/virtualenv_packages.txt b/build/virtualenv_packages.txt
---- a/build/virtualenv_packages.txt
-+++ b/build/virtualenv_packages.txt
-@@ -21,16 +21,17 @@ mozilla.pth:third_party/python/diskcache
- mozilla.pth:third_party/python/distro
- mozilla.pth:third_party/python/dlmanager
- mozilla.pth:third_party/python/ecdsa/src
- python2:mozilla.pth:third_party/python/enum34
- mozilla.pth:third_party/python/fluent.migrate
- mozilla.pth:third_party/python/fluent.syntax
- mozilla.pth:third_party/python/funcsigs
- python2:mozilla.pth:third_party/python/futures
-+in-virtualenv:python3:mozilla.pth:third_party/python/glean_parser
- mozilla.pth:third_party/python/importlib_metadata
- mozilla.pth:third_party/python/iso8601
- mozilla.pth:third_party/python/Jinja2/src
- mozilla.pth:third_party/python/jsonschema
- mozilla.pth:third_party/python/MarkupSafe/src
- mozilla.pth:third_party/python/more-itertools
- mozilla.pth:third_party/python/packaging
- mozilla.pth:third_party/python/pathlib2
 diff --git a/python/mozbuild/mozbuild/virtualenv.py b/python/mozbuild/mozbuild/virtualenv.py
 --- a/python/mozbuild/mozbuild/virtualenv.py
 +++ b/python/mozbuild/mozbuild/virtualenv.py
@@ -93,48 +72,3 @@ diff --git a/python/mozbuild/mozbuild/virtualenv.py b/python/mozbuild/mozbuild/v
  
                  return True
  
-diff --git a/toolkit/components/telemetry/build_scripts/run_glean_parser.py.1655781.later b/toolkit/components/telemetry/build_scripts/run_glean_parser.py.1655781.later
-new file mode 100644
---- /dev/null
-+++ b/toolkit/components/telemetry/build_scripts/run_glean_parser.py.1655781.later
-@@ -0,0 +1,40 @@
-+--- run_glean_parser.py
-++++ run_glean_parser.py
-+@@ -1,35 +1,17 @@
-+ # This Source Code Form is subject to the terms of the Mozilla Public
-+ # License, v. 2.0. If a copy of the MPL was not distributed with this
-+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
-+ 
-+ import sys
-++
-++from glean_parser import lint
-+ from pathlib import Path
-+ 
-+ 
-+ def main(output, *filenames):
-+-    # Unlike most vendored packages, we don't want "glean_parser" in
-+-    # "build/virtualenv_packages.txt" because it interferes with mach's usage of Glean.
-+-    # This is because Glean ("glean_sdk"):
-+-    # * Has native code (so, can't be vendored)
-+-    # * Also depends on "glean_parser"
-+-    # Since this file has different version constraints than mach on "glean_parser",
-+-    # we want the two different consumers to own their own separate "glean_parser"
-+-    # packages.
-+-    #
-+-    # This is solved by:
-+-    # * Having mach import Glean (and transitively "glean_parser") via the "--user"
-+-    #   package environment. To accomplish this, the vendored "glean_parser" is removed
-+-    #   from "virtualenv_packages.txt".
-+-    # * Having this script import "glean_parser" from the vendored location. This is
-+-    #   done by manually adding it to the pythonpath.
-+-
-+-    srcdir = Path(__file__).joinpath('../../../../../')
-+-    glean_parser_path = srcdir.joinpath('third_party/python/glean_parser')
-+-    sys.path.insert(0, str(glean_parser_path.resolve()))
-+-    from glean_parser import lint
-+     if lint.glinter([Path(x) for x in filenames], {"allow_reserved": False}):
-+         sys.exit(1)
-+ 
-+ 
-+ if __name__ == '__main__':
-+     main(sys.stdout, *sys.argv[1:])
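
The `in-virtualenv:` marker added by this patch rides on the colon-separated qualifier scheme that `virtualenv_packages.txt` already uses (`python2:`, `python3:`, `windows:`, `!windows:`, `optional:`). A simplified sketch of the filtering those qualifiers imply; the real logic lives in `build/mach_bootstrap.py` and `python/mozbuild/mozbuild/virtualenv.py` and handles more cases than shown here:

    QUALIFIERS = {"in-virtualenv", "python2", "python3",
                  "windows", "!windows", "optional"}

    def resolve_entry(line, python_major=3, in_virtualenv=True, on_windows=False):
        """Strip qualifier prefixes from one packages.txt line; return the
        remaining 'action:path' part, or None if the entry does not apply."""
        while True:
            prefix, sep, rest = line.partition(":")
            if not sep or prefix not in QUALIFIERS:
                return line
            if prefix == "in-virtualenv" and not in_virtualenv:
                return None
            if prefix in ("python2", "python3") and prefix != "python%d" % python_major:
                return None
            if prefix == "windows" and not on_windows:
                return None
            if prefix == "!windows" and on_windows:
                return None
            # "optional" entries are attempted but allowed to fail; treated
            # as unconditional in this sketch.
            line = rest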

+ 26 - 21
mozilla-release/patches/1656993-81a1.patch

@@ -2,7 +2,7 @@
 # User Ricky Stewart <rstewart@mozilla.com>
 # Date 1597684862 0
 # Node ID eff0a199fae6727caebd03b687824a398fe132ba
-# Parent  ee519f4d4b740592a195bd0b0bec9cd6cb6a0bf5
+# Parent  2888589f7c57497cb2ea0b24fdd9a39d81c50444
 Bug 1656993: Create and require by default global `virtualenv`s in `~/.mozbuild` for `mach` r=mhentges,ahal
 
 In two different places we've been encountering issues regarding 1) how we configure the system Python environment and 2) how the system Python environment relates to the `virtualenv`s that we use for building, testing, and other dev tasks. Specifically:
@@ -47,26 +47,31 @@ new file mode 100644
 diff --git a/build/virtualenv_packages.txt b/build/common_virtualenv_packages.txt
 rename from build/virtualenv_packages.txt
 rename to build/common_virtualenv_packages.txt
---- a/build/virtualenv_packages.txt
-+++ b/build/common_virtualenv_packages.txt
-@@ -21,17 +21,16 @@ mozilla.pth:third_party/python/diskcache
- mozilla.pth:third_party/python/distro
- mozilla.pth:third_party/python/dlmanager
- mozilla.pth:third_party/python/ecdsa/src
- python2:mozilla.pth:third_party/python/enum34
- mozilla.pth:third_party/python/fluent.migrate
- mozilla.pth:third_party/python/fluent.syntax
- mozilla.pth:third_party/python/funcsigs
- python2:mozilla.pth:third_party/python/futures
--in-virtualenv:python3:mozilla.pth:third_party/python/glean_parser
- mozilla.pth:third_party/python/importlib_metadata
- mozilla.pth:third_party/python/iso8601
- mozilla.pth:third_party/python/Jinja2/src
- mozilla.pth:third_party/python/jsonschema
- mozilla.pth:third_party/python/MarkupSafe/src
- mozilla.pth:third_party/python/more-itertools
- mozilla.pth:third_party/python/packaging
- mozilla.pth:third_party/python/pathlib2
+diff --git a/build/common_virtualenv_packages.txt.1656993.later b/build/common_virtualenv_packages.txt.1656993.later
+new file mode 100644
+--- /dev/null
++++ b/build/common_virtualenv_packages.txt.1656993.later
+@@ -0,0 +1,20 @@
++--- common_virtualenv_packages.txt
+++++ common_virtualenv_packages.txt
++@@ -21,17 +21,16 @@ mozilla.pth:third_party/python/diskcache
++ mozilla.pth:third_party/python/distro
++ mozilla.pth:third_party/python/dlmanager
++ mozilla.pth:third_party/python/ecdsa/src
++ python2:mozilla.pth:third_party/python/enum34
++ mozilla.pth:third_party/python/fluent.migrate
++ mozilla.pth:third_party/python/fluent.syntax
++ mozilla.pth:third_party/python/funcsigs
++ python2:mozilla.pth:third_party/python/futures
++-in-virtualenv:python3:mozilla.pth:third_party/python/glean_parser
++ mozilla.pth:third_party/python/importlib_metadata
++ mozilla.pth:third_party/python/iso8601
++ mozilla.pth:third_party/python/Jinja2/src
++ mozilla.pth:third_party/python/jsonschema
++ mozilla.pth:third_party/python/MarkupSafe/src
++ mozilla.pth:third_party/python/more-itertools
++ mozilla.pth:third_party/python/packaging
++ mozilla.pth:third_party/python/pathlib2
 diff --git a/build/mach_bootstrap.py b/build/mach_bootstrap.py
 --- a/build/mach_bootstrap.py
 +++ b/build/mach_bootstrap.py

+ 4 - 4
mozilla-release/patches/1661624-85a1.patch

@@ -2,7 +2,7 @@
 # User Ricky Stewart <rstewart@mozilla.com>
 # Date 1606511100 0
 # Node ID 4058e147401f15cfca916f32156381725b4d2137
-# Parent  c99b23794af45a5011b0051700757f9e5869d40c
+# Parent  bec83f43b7c1979b32babd5c988f861699bb8452
 Bug 1661624 - Include `psutil` in the `mach` `virtualenv`s r=firefox-build-system-reviewers,rstewart
 
 Install `psutil` when setting up the `mach` `virtualenv`s and stop importing the in-tree version in the build.
@@ -16,12 +16,12 @@ Differential Revision: https://phabricator.services.mozilla.com/D90914
 diff --git a/build/common_virtualenv_packages.txt b/build/common_virtualenv_packages.txt
 --- a/build/common_virtualenv_packages.txt
 +++ b/build/common_virtualenv_packages.txt
-@@ -36,19 +36,16 @@ mozilla.pth:third_party/python/packaging
+@@ -28,19 +28,16 @@ python2:mozilla.pth:third_party/python/f
+ mozilla.pth:third_party/python/more-itertools
+ mozilla.pth:third_party/python/packaging
  mozilla.pth:third_party/python/pathlib2
  mozilla.pth:third_party/python/pathspec
- mozilla.pth:third_party/python/pep487/lib
  mozilla.pth:third_party/python/gyp/pylib
- mozilla.pth:third_party/python/pyrsistent
  mozilla.pth:third_party/python/python-hglib
  mozilla.pth:third_party/python/pluggy
  mozilla.pth:third_party/python/jsmin

+ 3 - 3
mozilla-release/patches/1692280-87a1.patch

@@ -2,7 +2,7 @@
 # User Mitchell Hentges <mhentges@mozilla.com>
 # Date 1613497632 0
 # Node ID 1acac7b48ef7dd5602ecf2ed64770171ef3eae58
-# Parent  c2152932cf397206efcf776137d93b5fb4b70c42
+# Parent  c24701a5209d337da985468ee7b7a4e75e0e05b4
 Bug 1692280: Remove unused vendored packages r=sheehan,firefox-build-system-reviewers,andi,glandium
 
 `pyflakes` is only used as a pip-downloaded package
@@ -30,12 +30,12 @@ diff --git a/build/common_virtualenv_packages.txt b/build/common_virtualenv_pack
 -mozilla.pth:third_party/python/configobj
  mozilla.pth:third_party/python/cookies
  mozilla.pth:third_party/python/cram
- mozilla.pth:third_party/python/diskcache
  mozilla.pth:third_party/python/distro
  mozilla.pth:third_party/python/dlmanager
  mozilla.pth:third_party/python/ecdsa/src
  python2:mozilla.pth:third_party/python/enum34
  mozilla.pth:third_party/python/fluent.migrate
+ mozilla.pth:third_party/python/fluent.syntax
 diff --git a/third_party/python/pyflakes/AUTHORS b/third_party/python/pyflakes/AUTHORS
 deleted file mode 100644
 --- a/third_party/python/pyflakes/AUTHORS
@@ -10705,7 +10705,7 @@ diff --git a/third_party/python/requirements.in b/third_party/python/requirement
 diff --git a/third_party/python/requirements.txt b/third_party/python/requirements.txt
 --- a/third_party/python/requirements.txt
 +++ b/third_party/python/requirements.txt
-@@ -122,20 +122,16 @@ ply==3.10 \
+@@ -123,20 +123,16 @@ ply==3.10 \
  py==1.5.4 \
      --hash=sha256:3fd59af7435864e1a243790d322d763925431213b6b8529c6ca71081ace3bbf7 \
      --hash=sha256:e31fb2767eb657cbde86c454f02e99cb846d3cd9d61b318525140214fdc0e98e

+ 3 - 4
mozilla-release/patches/series

@@ -6836,11 +6836,11 @@ NOBUG-nukemozlinker-25319.patch
 1605879-74a1.patch
 1605850-74a1.patch
 1602773-1-75a1.patch
-1602773-2no3-75a1.patch
+1602773-2no3-PARTIAL-75a1.patch
 1602773-4-75a1.patch
 1620860-76a1.patch
 1623433-76a1.patch
-1619555-76a1.patch
+1619555-partial-76a1.patch
 1619956-76a1.patch
 1626190-76a1.patch
 1626640-76a1.patch
@@ -6901,9 +6901,8 @@ NOBUG-nukemozlinker-25319.patch
 1650057-80a1.patch
 1606475-80a1.patch
 1654795-80a1.patch
-1654663-80a1.patch
 1482675-80a1.patch
-1655781-81a1.patch
+1655781-PARTIAL-81a1.patch
 1655701-81a1.patch
 1653560-81a1.patch
 985141-1-81a1.patch
