Browse Source

third_party python backports

Ian Neal 4 months ago
parent
commit
db02218127

+ 32 - 0
mozilla-release/patches/1627484-81a1.patch

@@ -0,0 +1,32 @@
+# HG changeset patch
+# User Andi-Bogdan Postelnicu <bpostelnicu@mozilla.com>
+# Date 1596700449 0
+# Node ID 80ac94ba702fafdb4baac1f46eb5dd04429d4536
+# Parent  013c6284c5830e5c6353170c78b769a56755be3a
+Bug 1627484 - clang cannot understanding the mixing of double and single quotes. r=botond
+
+Differential Revision: https://phabricator.services.mozilla.com/D70300
+
+diff --git a/python/mozbuild/mozbuild/code_analysis/mach_commands.py b/python/mozbuild/mozbuild/code_analysis/mach_commands.py
+--- a/python/mozbuild/mozbuild/code_analysis/mach_commands.py
++++ b/python/mozbuild/mozbuild/code_analysis/mach_commands.py
+@@ -2134,16 +2134,19 @@ class StaticAnalysis(MachCommandBase):
+                 command = entry.split(" ")
+                 # Verify to see if we are dealing with an unified build
+                 if "Unified_" in command[-1]:
+                     # Translate the unified `TU` to per file basis TU
+                     command[-1] = file_with_abspath
+ 
+                 # We want syntax-only
+                 command.append("-fsyntax-only")
++                command = [
++                    re.sub(r'\'-D(.*)="(.*)"\'', r'-D\1="\2"', arg) for arg in command
++                ]
+                 commands.append(command)
+ 
+         max_workers = multiprocessing.cpu_count()
+ 
+         with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+             futures = []
+             for command in commands:
+                 futures.append(

+ 22 - 0
mozilla-release/patches/1645097-79a1.patch

@@ -0,0 +1,22 @@
+# HG changeset patch
+# User Ricky Stewart <rstewart@mozilla.com>
+# Date 1591892359 0
+# Node ID 486326719413c28f64995cada09540b564189966
+# Parent  20433dd9f706337f6246cd9340b1564d8d48192a
+Bug 1645097 - Hand over ownership of the mach commands in python/mozbuild/mozbuild/code-analysis to Firefox Build System :: Source Code Analysis r=andi
+
+Differential Revision: https://phabricator.services.mozilla.com/D79309
+
+diff --git a/python/mozbuild/mozbuild/code-analysis/moz.build b/python/mozbuild/mozbuild/code-analysis/moz.build
+new file mode 100644
+--- /dev/null
++++ b/python/mozbuild/mozbuild/code-analysis/moz.build
+@@ -0,0 +1,8 @@
++# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
++# vim: set filetype=python:
++# This Source Code Form is subject to the terms of the Mozilla Public
++# License, v. 2.0. If a copy of the MPL was not distributed with this
++# file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++with Files('**'):
++     BUG_COMPONENT = ('Firefox Build System', 'Source Code Analysis')

+ 560 - 0
mozilla-release/patches/1656740-81a1.patch

@@ -0,0 +1,560 @@
+# HG changeset patch
+# User Andi-Bogdan Postelnicu <bpostelnicu@mozilla.com>
+# Date 1596695117 0
+# Node ID 1d35f9fe239eca93ff6c9907b5bb7865cc657467
+# Parent  6882db59049d04f0ebd1fe4db8feeddb1ab21099
+Bug 1656740 - Integrate `clangd` in `vscode` for C++ language support. r=froydnj
+
+In order to have a cross platform ide for C++ language support we've added `clangd`
+extenssion and artifact part of `vscode` suite.
+To generate the configuration you simply run:
+`./mach ide vscode `.
+
+Differential Revision: https://phabricator.services.mozilla.com/D85416
+
+diff --git a/.vscode/extensions.json b/.vscode/extensions.json
+--- a/.vscode/extensions.json
++++ b/.vscode/extensions.json
+@@ -3,16 +3,16 @@
+     // for the documentation about the extensions.json format
+     "recommendations": [
+         // Trim only touched lines.
+         "NathanRidley.autotrim",
+         // JS Babel ES6/ES7 syntax hilight.
+         "dzannotti.vscode-babel-coloring",
+         // ESLint support.
+         "dbaeumer.vscode-eslint",
+-        // C/C++ language support.
+-        "ms-vscode.cpptools",
++        // C/C++ language support with clangd
++       "llvm-vs-code-extensions.vscode-clangd",
+         // Rust language support.
+         "kalitaalexey.vscode-rust",
+         // CSS support for HTML documents.
+         "ecmel.vscode-html-css"
+     ]
+ }
+diff --git a/docs/contributing/editor.rst.1656740.later b/docs/contributing/editor.rst.1656740.later
+new file mode 100644
+--- /dev/null
++++ b/docs/contributing/editor.rst.1656740.later
+@@ -0,0 +1,41 @@
++--- editor.rst
+++++ editor.rst
++@@ -11,24 +11,31 @@ them.
++     This page is a work in progress. Please enhance this page with instructions
++     for your favourite editor.
++ 
++ Visual Studio Code
++ ------------------
++ 
++ For general information on using VS Code, see their
++ `home page <https://code.visualstudio.com/>`__,
++-`repo <https://github.com/Microsoft/vscode/>`__ and
++-`guide to working with C++ <https://code.visualstudio.com/docs/languages/cpp>`__.
+++`repo <https://github.com/Microsoft/vscode/>`__.
+++
+++For C++ support we offer an out of the box configuration based on
+++`clangd <https://clangd.llvm.org>`__. This covers code completion, compile errors,
+++go-to-definition and more.
++ 
++-For IntelliSense to work properly, a
++-:ref:`compilation database <CompileDB back-end / compileflags>` as described
++-below is required. When it is present when you open the mozilla source code
++-folder, it will be automatically detected and Visual Studio Code will ask you
++-if it should use it, which you should confirm.
+++In order to build the configuration for `VS Code` simply run from
+++the terminal:
+++
+++`./mach ide vscode`
+++
+++If `VS Code` is already open with a previous configuration generated, please make sure to
+++restart `VS Code` otherwise the new configuration will not be used, and the `compile_commands.json`
+++needed by `clangd` server will not be refreshed. This is a known `bug <https://github.com/clangd/vscode-clangd/issues/42>`__
+++in `clangd-vscode` extension
++ 
++ VS Code provides number of extensions for JavaScript, Rust, etc.
++ 
++ Useful preferences
++ ~~~~~~~~~~~~~~~~~~
++ 
++ When setting the preference
++ 
+diff --git a/python/mozbuild/mozbuild/backend/__init__.py b/python/mozbuild/mozbuild/backend/__init__.py
+--- a/python/mozbuild/mozbuild/backend/__init__.py
++++ b/python/mozbuild/mozbuild/backend/__init__.py
+@@ -1,15 +1,16 @@
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ from __future__ import absolute_import, print_function
+ 
+ backends = {
++    'Clangd': 'mozbuild.backend.clangd',
+     'ChromeMap': 'mozbuild.codecoverage.chrome_map',
+     'CompileDB': 'mozbuild.compilation.database',
+     'CppEclipse': 'mozbuild.backend.cpp_eclipse',
+     'FasterMake': 'mozbuild.backend.fastermake',
+     'FasterMake+RecursiveMake': None,
+     'GnConfigGen': 'mozbuild.gn_processor',
+     'GnMozbuildWriter': 'mozbuild.gn_processor',
+     'RecursiveMake': 'mozbuild.backend.recursivemake',
+diff --git a/python/mozbuild/mozbuild/backend/clangd.py b/python/mozbuild/mozbuild/backend/clangd.py
+new file mode 100644
+--- /dev/null
++++ b/python/mozbuild/mozbuild/backend/clangd.py
+@@ -0,0 +1,47 @@
++# This Source Code Form is subject to the terms of the Mozilla Public
++# License, v. 2.0. If a copy of the MPL was not distributed with this file,
++# You can obtain one at http://mozilla.org/MPL/2.0/.
++
++# This module provides a backend for `clangd` in order to have support for
++# code completion, compile errors, go-to-definition and more.
++# It is based on `database.py` with the difference that we don't generate
++# an unified `compile_commands.json` but we generate a per file basis `command` in
++# `objdir/clangd/compile_commands.json`
++
++from __future__ import absolute_import, print_function
++
++import os
++
++from mozbuild.compilation.database import CompileDBBackend
++
++import mozpack.path as mozpath
++
++
++class ClangdBackend(CompileDBBackend):
++    """
++    Configuration that generates the backend for clangd, it is used with `clangd`
++    extension for vscode
++    """
++
++    def _init(self):
++        CompileDBBackend._init(self)
++
++    def _build_cmd(self, cmd, filename, unified):
++        cmd = list(cmd)
++
++        cmd.append(filename)
++
++        return cmd
++
++    def _outputfile_path(self):
++        clangd_cc_path = os.path.join(self.environment.topobjdir, "clangd")
++
++        if not os.path.exists(clangd_cc_path):
++            os.mkdir(clangd_cc_path)
++
++        # Output the database (a JSON file) to objdir/clangd/compile_commands.json
++        return mozpath.join(clangd_cc_path, "compile_commands.json")
++
++    def _process_unified_sources(self, obj):
++        for f in list(sorted(obj.files)):
++            self._build_db_line(obj.objdir, obj.relsrcdir, obj.config, f, obj.canonical_suffix)
+diff --git a/python/mozbuild/mozbuild/backend/mach_commands.py b/python/mozbuild/mozbuild/backend/mach_commands.py
+--- a/python/mozbuild/mozbuild/backend/mach_commands.py
++++ b/python/mozbuild/mozbuild/backend/mach_commands.py
+@@ -1,66 +1,296 @@
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ from __future__ import absolute_import, print_function, unicode_literals
+ 
+ import argparse
++import logging
+ import os
+ import subprocess
+ 
+ from mozbuild.base import MachCommandBase
++from mozbuild.build_commands import Build
++
+ from mozfile import which
+ from mach.decorators import (
+     CommandArgument,
+     CommandProvider,
+     Command,
+ )
+ 
++import mozpack.path as mozpath
++
+ 
+ @CommandProvider
+ class MachCommands(MachCommandBase):
+-    @Command('ide', category='devenv',
+-             description='Generate a project and launch an IDE.')
+-    @CommandArgument('ide', choices=['eclipse', 'visualstudio'])
+-    @CommandArgument('args', nargs=argparse.REMAINDER)
++    @Command("ide", category="devenv", description="Generate a project and launch an IDE.")
++    @CommandArgument("ide", choices=["eclipse", "visualstudio", "vscode"])
++    @CommandArgument("args", nargs=argparse.REMAINDER)
+     def eclipse(self, ide, args):
+-        if ide == 'eclipse':
+-            backend = 'CppEclipse'
+-        elif ide == 'visualstudio':
+-            backend = 'VisualStudio'
+-
+-        if ide == 'eclipse' and not which('eclipse'):
+-            print('Eclipse CDT 8.4 or later must be installed in your PATH.')
+-            print('Download: http://www.eclipse.org/cdt/downloads.php')
++        if ide == "eclipse":
++            backend = "CppEclipse"
++        elif ide == "visualstudio":
++            backend = "VisualStudio"
++        elif ide == "vscode":
++            backend = "Clangd"
++
++        if ide == "eclipse" and not which("eclipse"):
++            self.log(
++                logging.ERROR,
++                "ide",
++                {},
++                "Eclipse CDT 8.4 or later must be installed in your PATH.",
++            )
++            self.log(
++                logging.ERROR, "ide", {}, "Download: http://www.eclipse.org/cdt/downloads.php"
++            )
+             return 1
+ 
+-        # Here we refresh the whole build. 'build export' is sufficient here and is probably more
+-        # correct but it's also nice having a single target to get a fully built and indexed
+-        # project (gives a easy target to use before go out to lunch).
+-        res = self._mach_context.commands.dispatch('build', self._mach_context)
+-        if res != 0:
+-            return 1
++        if ide == "vscode":
++            # Verify if platform has VSCode installed
++            if not self.found_vscode_path():
++                self.log(logging.ERROR, "ide", {}, "VSCode cannot be found, abording!")
++                return 1
++
++            # Create the Build environment to configure the tree
++            builder = Build(self._mach_context)
++
++            rc = builder.configure()
++            if rc != 0:
++                return rc
++
++            # First install what we can through install manifests.
++            rc = builder._run_make(
++                directory=self.topobjdir, target="pre-export", line_handler=None
++            )
++            if rc != 0:
++                return rc
++
++            # Then build the rest of the build dependencies by running the full
++            # export target, because we can't do anything better.
++            for target in ("export", "pre-compile"):
++                rc = builder._run_make(directory=self.topobjdir, target=target, line_handler=None)
++                if rc != 0:
++                    return rc
++        else:
++            # Here we refresh the whole build. 'build export' is sufficient here and is
++            # probably more correct but it's also nice having a single target to get a fully
++            # built and indexed project (gives a easy target to use before go out to lunch).
++            res = self._mach_context.commands.dispatch("build", self._mach_context)
++            if res != 0:
++                return 1
+ 
+         # Generate or refresh the IDE backend.
+         python = self.virtualenv_manager.python_path
+-        config_status = os.path.join(self.topobjdir, 'config.status')
+-        args = [python, config_status, '--backend=%s' % backend]
++        config_status = os.path.join(self.topobjdir, "config.status")
++        args = [python, config_status, "--backend=%s" % backend]
+         res = self._run_command_in_objdir(args=args, pass_thru=True, ensure_exit_code=False)
+         if res != 0:
+             return 1
+ 
+-        if ide == 'eclipse':
++        if ide == "eclipse":
+             eclipse_workspace_dir = self.get_eclipse_workspace_path()
+-            subprocess.check_call(['eclipse', '-data', eclipse_workspace_dir])
+-        elif ide == 'visualstudio':
++            subprocess.check_call(["eclipse", "-data", eclipse_workspace_dir])
++        elif ide == "visualstudio":
+             visual_studio_workspace_dir = self.get_visualstudio_workspace_path()
+-            subprocess.check_call(
+-                ['explorer.exe', visual_studio_workspace_dir]
+-            )
++            subprocess.check_call(["explorer.exe", visual_studio_workspace_dir])
++        elif ide == "vscode":
++            return self.setup_vscode()
+ 
+     def get_eclipse_workspace_path(self):
+         from mozbuild.backend.cpp_eclipse import CppEclipseBackend
++
+         return CppEclipseBackend.get_workspace_path(self.topsrcdir, self.topobjdir)
+ 
+     def get_visualstudio_workspace_path(self):
+-        return os.path.join(self.topobjdir, 'msvc', 'mozilla.sln')
++        return os.path.join(self.topobjdir, "msvc", "mozilla.sln")
++
++    def found_vscode_path(self):
++
++        if "linux" in self.platform[0]:
++            self.vscode_path = "/usr/bin/code"
++        elif "macos" in self.platform[0]:
++            self.vscode_path = "/usr/local/bin/code"
++        elif "win64" in self.platform[0]:
++            from pathlib import Path
++
++            self.vscode_path = mozpath.join(
++                str(Path.home()), "AppData", "Local", "Programs", "Microsoft VS Code", "Code.exe",
++            )
++
++        # Path found
++        if os.path.exists(self.vscode_path):
++            return True
++
++        for _ in range(5):
++            self.vscode_path = input(
++                "Could not find the VSCode binary. Please provide the full path to it:\n"
++            )
++            if os.path.exists(self.vscode_path):
++                return True
++
++        # Path cannot be found
++        return False
++
++    def setup_vscode(self):
++        vscode_settings = mozpath.join(self.topsrcdir, ".vscode", "settings.json")
++
++        clangd_cc_path = mozpath.join(self.topobjdir, "clangd")
++
++        # Verify if the required files are present
++        clang_tools_path = mozpath.join(self._mach_context.state_dir, "clang-tools")
++        clang_tidy_bin = mozpath.join(clang_tools_path, "clang-tidy", "bin")
++
++        clangd_path = mozpath.join(
++            clang_tidy_bin, "clangd" + self.config_environment.substs.get("BIN_SUFFIX", ""),
++        )
++
++        if not os.path.exists(clangd_path):
++            self.log(
++                logging.ERROR, "ide", {}, "Unable to locate clangd in {}.".format(clang_tidy_bin)
++            )
++            rc = self._get_clang_tools(clang_tools_path)
++
++            if rc != 0:
++                return rc
++
++        import multiprocessing
++        import json
++
++        clangd_json = json.loads(
++            """
++        {
++            "clangd.path": "%s",
++            "clangd.arguments": [
++                "--compile-commands-dir",
++                "%s",
++                "-j",
++                "%s",
++                "--limit-results",
++                "0",
++                "--completion-style",
++                "detailed",
++                "--background-index",
++                "--all-scopes-completion",
++                "--log",
++                "error",
++                "--pch-storage",
++                "memory"
++            ]
++        }
++        """
++            % (clangd_path, clangd_cc_path, multiprocessing.cpu_count(),)
++        )
++
++        # Create an empty settings dictionary
++        settings = {}
++
++        # Modify the .vscode/settings.json configuration file
++        if os.path.exists(vscode_settings):
++            # If exists prompt for a configuration change
++            choice = prompt_bool(
++                "Configuration for {settings} must change. "
++                "Do you want to proceed?".format(settings=vscode_settings)
++            )
++            if not choice:
++                return 1
++
++            # Read the original vscode settings
++            with open(vscode_settings) as fh:
++                try:
++                    settings = json.load(fh)
++                    print(
++                        "The following modifications will occur:\nOriginal:\n{orig}\n"
++                        "New:\n{new}".format(
++                            orig=json.dumps(
++                                {
++                                    key: settings[key] if key in settings else ""
++                                    for key in ["clangd.path", "clangd.arguments"]
++                                },
++                                indent=4,
++                            ),
++                            new=json.dumps(clangd_json, indent=4),
++                        )
++                    )
++
++                except ValueError:
++                    # Decoding has failed, work with an empty dict
++                    settings = {}
++
++        # Write our own Configuration
++        settings["clangd.path"] = clangd_json["clangd.path"]
++        settings["clangd.arguments"] = clangd_json["clangd.arguments"]
++
++        with open(vscode_settings, "w") as fh:
++            fh.write(json.dumps(settings, indent=4))
++
++        # Open vscode with new configuration
++        rc = subprocess.call([self.vscode_path, self.topsrcdir])
++
++        if rc != 0:
++            self.log(
++                logging.ERROR,
++                "ide",
++                {},
++                "Unable to open VS Code. Please open VS Code manually and load "
++                "directory: {}".format(self.topsrcdir),
++            )
++            return rc
++
++        return 0
++
++    def _get_clang_tools(self, clang_tools_path):
++
++        import shutil
++
++        if os.path.isdir(clang_tools_path):
++            shutil.rmtree(clang_tools_path)
++
++        # Create base directory where we store clang binary
++        os.mkdir(clang_tools_path)
++
++        from mozbuild.artifact_commands import PackageFrontend
++
++        self._artifact_manager = PackageFrontend(self._mach_context)
++
++        job, _ = self.platform
++
++        if job is None:
++            self.log(
++                logging.ERROR,
++                "ide",
++                {},
++                "The current platform isn't supported. "
++                "Currently only the following platforms are "
++                "supported: win32/win64, linux64 and macosx64.",
++            )
++            return 1
++
++        job += "-clang-tidy"
++
++        # We want to unpack data in the clang-tidy mozbuild folder
++        currentWorkingDir = os.getcwd()
++        os.chdir(clang_tools_path)
++        rc = self._artifact_manager.artifact_toolchain(
++            verbose=False, from_build=[job], no_unpack=False, retry=0
++        )
++        # Change back the cwd
++        os.chdir(currentWorkingDir)
++
++        return rc
++
++
++def prompt_bool(prompt, limit=5):
++    """ Prompts the user with prompt and requires a boolean value. """
++    from distutils.util import strtobool
++
++    for _ in range(limit):
++        try:
++            return strtobool(input(prompt + " [Y/N]\n"))
++        except ValueError:
++            print(
++                "ERROR! Please enter a valid option! Please use any of the following:"
++                " Y, N, True, False, 1, 0"
++            )
++    return False
+diff --git a/python/mozbuild/mozbuild/compilation/database.py b/python/mozbuild/mozbuild/compilation/database.py
+--- a/python/mozbuild/mozbuild/compilation/database.py
++++ b/python/mozbuild/mozbuild/compilation/database.py
+@@ -37,16 +37,25 @@ class CompileDBBackend(CommonBackend):
+ 
+         # The cache for per-directory flags
+         self._flags = {}
+ 
+         self._envs = {}
+         self._local_flags = defaultdict(dict)
+         self._per_source_flags = defaultdict(list)
+ 
++    def _build_cmd(self, cmd, filename, unified):
++        cmd = list(cmd)
++        if unified is None:
++            cmd.append(filename)
++        else:
++            cmd.append(unified)
++
++        return cmd
++
+     def consume_object(self, obj):
+         # Those are difficult directories, that will be handled later.
+         if obj.relsrcdir in (
+                 'build/unix/elfhack',
+                 'build/unix/elfhack/inject',
+                 'build/clang-plugin',
+                 'build/clang-plugin/tests'):
+             return True
+@@ -81,21 +90,17 @@ class CompileDBBackend(CommonBackend):
+ 
+     def consume_finished(self):
+         CommonBackend.consume_finished(self)
+ 
+         db = []
+ 
+         for (directory, filename, unified), cmd in self._db.items():
+             env = self._envs[directory]
+-            cmd = list(cmd)
+-            if unified is None:
+-                cmd.append(filename)
+-            else:
+-                cmd.append(unified)
++            cmd = self._build_cmd(cmd, filename, unified)
+             variables = {
+                 'DIST': mozpath.join(env.topobjdir, 'dist'),
+                 'DEPTH': env.topobjdir,
+                 'MOZILLA_DIR': env.topsrcdir,
+                 'topsrcdir': env.topsrcdir,
+                 'topobjdir': env.topobjdir,
+             }
+             variables.update(self._local_flags[directory])
+@@ -126,21 +131,24 @@ class CompileDBBackend(CommonBackend):
+                 c.extend(per_source_flags)
+             db.append({
+                 'directory': directory,
+                 'command': ' '.join(shell_quote(a) for a in c),
+                 'file': mozpath.join(directory, filename),
+             })
+ 
+         import json
+-        # Output the database (a JSON file) to objdir/compile_commands.json
+-        outputfile = os.path.join(self.environment.topobjdir, 'compile_commands.json')
++        outputfile = self._outputfile_path()
+         with self._write_file(outputfile) as jsonout:
+             json.dump(db, jsonout, indent=0)
+ 
++    def _outputfile_path(self):
++        # Output the database (a JSON file) to objdir/compile_commands.json
++        return os.path.join(self.environment.topobjdir, 'compile_commands.json')
++
+     def _process_unified_sources(self, obj):
+         if not obj.have_unified_mapping:
+             for f in list(sorted(obj.files)):
+                 self._build_db_line(obj.objdir, obj.relsrcdir, obj.config, f,
+                                     obj.canonical_suffix)
+             return
+ 
+         # For unified sources, only include the unified source file.

+ 33 - 0
mozilla-release/patches/1656764-81a1.patch

@@ -0,0 +1,33 @@
+# HG changeset patch
+# User Sylvestre Ledru <sledru@mozilla.com>
+# Date 1596458475 0
+# Node ID c5504cec2c4ada056c113cf25f4c860ce77abec6
+# Parent  d6e238f4171d3e5944fb3c8821c41a9fc3fed7ad
+Bug 1656764 - Relax the version check for clang-format r=andi DONTBUILD
+
+So that it works when CLANG_VENDOR is set
+
+Differential Revision: https://phabricator.services.mozilla.com/D85687
+
+diff --git a/python/mozbuild/mozbuild/code-analysis/mach_commands.py b/python/mozbuild/mozbuild/code-analysis/mach_commands.py
+--- a/python/mozbuild/mozbuild/code-analysis/mach_commands.py
++++ b/python/mozbuild/mozbuild/code-analysis/mach_commands.py
+@@ -980,17 +980,17 @@ class StaticAnalysis(MachCommandBase):
+         # Because the fact that we ship together clang-tidy and clang-format
+         # we are sure that these two will always share the same version.
+         # Thus in order to determine that the version is compatible we only
+         # need to check one of them, going with clang-format
+         cmd = [self._clang_format_path, '--version']
+         try:
+             output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
+             version_string = 'clang-format version ' + version
+-            if output.startswith(version_string):
++            if version_string in output:
+                 return True
+         except subprocess.CalledProcessError as e:
+             self.log(logging.ERROR, 'static-analysis', {},
+                      "ERROR: Error determining the version clang-tidy/format binary, "
+                      "please see the attached exception: \n{}".format(e.output))
+         return False
+ 
+     def _get_clang_tidy_command(self, checks, header_filter, sources, jobs, fix):

+ 652 - 0
mozilla-release/patches/1657299-81a1.patch

@@ -0,0 +1,652 @@
+# HG changeset patch
+# User Andi-Bogdan Postelnicu <bpostelnicu@mozilla.com>
+# Date 1596696676 0
+# Node ID 38dd1ab6680fc481232840e9370dd6311f485cba
+# Parent  5e9d5635a5201c12a6eccaa5051840a709e5f2b5
+Bug 1657299 - First step of refactor `static-analysis` integration, making it more modular. r=marco
+
+Add a modular approach for the integration of `static-analysis` module in order
+to be able to share components of it with other modules, like the integration of
+`clangd` in `vscode` where we need to have access to the configuration of `clang-tidy`
+in order to have `in-ide` `static-analysis` messages.
+In this initial step we make a separate module for the clang-tidy configuration.
+
+Differential Revision: https://phabricator.services.mozilla.com/D85979
+
+diff --git a/build/mach_bootstrap.py b/build/mach_bootstrap.py
+--- a/build/mach_bootstrap.py
++++ b/build/mach_bootstrap.py
+@@ -43,17 +43,17 @@ MACH_MODULES = [
+     'layout/tools/reftest/mach_commands.py',
+     'python/mach/mach/commands/commandinfo.py',
+     'python/mach/mach/commands/settings.py',
+     'python/mach_commands.py',
+     'python/mozboot/mozboot/mach_commands.py',
+     'python/mozbuild/mozbuild/artifact_commands.py',
+     'python/mozbuild/mozbuild/backend/mach_commands.py',
+     'python/mozbuild/mozbuild/build_commands.py',
+-    'python/mozbuild/mozbuild/code-analysis/mach_commands.py',
++    'python/mozbuild/mozbuild/code_analysis/mach_commands.py',
+     'python/mozbuild/mozbuild/compilation/codecomplete.py',
+     'python/mozbuild/mozbuild/frontend/mach_commands.py',
+     'python/mozbuild/mozbuild/vendor/mach_commands.py',
+     'python/mozbuild/mozbuild/mach_commands.py',
+     'python/mozperftest/mozperftest/mach_commands.py',
+     'testing/awsy/mach_commands.py',
+     'testing/firefox-ui/mach_commands.py',
+     'testing/geckodriver/mach_commands.py',
+diff --git a/python/mozbuild/mozbuild/backend/mach_commands.py b/python/mozbuild/mozbuild/backend/mach_commands.py
+--- a/python/mozbuild/mozbuild/backend/mach_commands.py
++++ b/python/mozbuild/mozbuild/backend/mach_commands.py
+@@ -152,16 +152,19 @@ class MachCommands(MachCommandBase):
+             )
+             rc = self._get_clang_tools(clang_tools_path)
+ 
+             if rc != 0:
+                 return rc
+ 
+         import multiprocessing
+         import json
++        from mozbuild.code_analysis.utils import ClangTidyConfig
++
++        clang_tidy_cfg = ClangTidyConfig(self.topsrcdir)
+ 
+         clangd_json = json.loads(
+             """
+         {
+             "clangd.path": "%s",
+             "clangd.arguments": [
+                 "--compile-commands-dir",
+                 "%s",
+@@ -171,21 +174,29 @@ class MachCommands(MachCommandBase):
+                 "0",
+                 "--completion-style",
+                 "detailed",
+                 "--background-index",
+                 "--all-scopes-completion",
+                 "--log",
+                 "error",
+                 "--pch-storage",
+-                "memory"
++                "memory",
++                "--clang-tidy",
++                "--clang-tidy-checks",
++                "%s"
+             ]
+         }
+         """
+-            % (clangd_path, clangd_cc_path, multiprocessing.cpu_count(),)
++            % (
++                clangd_path,
++                clangd_cc_path,
++                multiprocessing.cpu_count(),
++                clang_tidy_cfg.checks,
++            )
+         )
+ 
+         # Create an empty settings dictionary
+         settings = {}
+ 
+         # Modify the .vscode/settings.json configuration file
+         if os.path.exists(vscode_settings):
+             # If exists prompt for a configuration change
+diff --git a/python/mozbuild/mozbuild/code-analysis/mach_commands.py b/python/mozbuild/mozbuild/code_analysis/mach_commands.py
+rename from python/mozbuild/mozbuild/code-analysis/mach_commands.py
+rename to python/mozbuild/mozbuild/code_analysis/mach_commands.py
+--- a/python/mozbuild/mozbuild/code-analysis/mach_commands.py
++++ b/python/mozbuild/mozbuild/code_analysis/mach_commands.py
+@@ -32,16 +32,18 @@ from mach.main import Mach
+ 
+ from mozbuild.base import MachCommandBase
+ 
+ from mozbuild.build_commands import Build
+ from mozbuild.nodeutil import find_node_executable
+ 
+ import mozpack.path as mozpath
+ 
++from mozbuild.util import memoized_property
++
+ from mozversioncontrol import get_repository_object
+ 
+ from mozbuild.controller.clobber import Clobberer
+ 
+ 
+ # Function used to run clang-format on a batch of files. It is a helper function
+ # in order to integrate into the futures ecosystem clang-format.
+ def run_one_clang_format_batch(args):
+@@ -84,28 +86,28 @@ class StaticAnalysisSubCommand(SubComman
+             ),
+         ]
+         for arg in args:
+             after = arg(after)
+         return after
+ 
+ 
+ class StaticAnalysisMonitor(object):
+-    def __init__(self, srcdir, objdir, clang_tidy_config, total):
++    def __init__(self, srcdir, objdir, checks, total):
+         self._total = total
+         self._processed = 0
+         self._current = None
+         self._srcdir = srcdir
+ 
+         import copy
+ 
+-        self._clang_tidy_config = copy.deepcopy(clang_tidy_config["clang_checkers"])
++        self._checks = copy.deepcopy(checks)
+ 
+         # Transform the configuration to support Regex
+-        for item in self._clang_tidy_config:
++        for item in self._checks:
+             if item["name"] == "-*":
+                 continue
+             item["name"] = item["name"].replace("*", ".*")
+ 
+         from mozbuild.compilation.warnings import (
+             WarningsCollector,
+             WarningsDatabase,
+         )
+@@ -153,18 +155,18 @@ class StaticAnalysisMonitor(object):
+                 self._current = build_repo_relative_path(filename, self._srcdir)
+             else:
+                 self._current = None
+             self._processed = self._processed + 1
+             return (warning, False)
+         if warning is not None:
+ 
+             def get_check_config(checker_name):
+-                # get the matcher from self._clang_tidy_config that is the 'name' field
+-                for item in self._clang_tidy_config:
++                # get the matcher from self._checks that is the 'name' field
++                for item in self._checks:
+                     if item["name"] == checker_name:
+                         return item
+ 
+                     # We are using a regex in order to also match 'mozilla-.* like checkers'
+                     matcher = re.match(item["name"], checker_name)
+                     if matcher is not None and matcher.group(0) == checker_name:
+                         return item
+ 
+@@ -188,17 +190,16 @@ class StaticAnalysis(MachCommandBase):
+     # List of file extension to consider (should start with dot)
+     _format_include_extensions = (".cpp", ".c", ".cc", ".h", ".m", ".mm")
+     # File contaning all paths to exclude from formatting
+     _format_ignore_file = ".clang-format-ignore"
+ 
+     # List of file extension to consider (should start with dot)
+     _check_syntax_include_extensions = (".cpp", ".c", ".cc", ".cxx")
+ 
+-    _clang_tidy_config = None
+     _cov_config = None
+ 
+     @Command(
+         "static-analysis",
+         category="testing",
+         description="Run C++ static analysis checks",
+     )
+     def static_analysis(self):
+@@ -360,21 +361,22 @@ class StaticAnalysis(MachCommandBase):
+             )
+             return 0
+ 
+         # Escape the files from source
+         source = [re.escape(f) for f in source]
+ 
+         cwd = self.topobjdir
+         self._compilation_commands_path = self.topobjdir
+-        if self._clang_tidy_config is None:
+-            self._clang_tidy_config = self._get_clang_tidy_config()
+ 
+         monitor = StaticAnalysisMonitor(
+-            self.topsrcdir, self.topobjdir, self._clang_tidy_config, total
++            self.topsrcdir,
++            self.topobjdir,
++            self.get_clang_tidy_config.checks_with_data,
++            total,
+         )
+ 
+         footer = StaticAnalysisFooter(self.log_manager.terminal, monitor)
+ 
+         with StaticAnalysisOutputManager(
+             self.log_manager, monitor, footer
+         ) as output_manager:
+             import math
+@@ -1187,32 +1189,21 @@ class StaticAnalysis(MachCommandBase):
+             checkers.append("--" + checker)
+         for path in input_paths:
+             with open(path) as f:
+                 for line in f:
+                     excludes.append("--skip-analysis-in-path")
+                     excludes.append(line.strip("\n"))
+         return checkers, excludes
+ 
+-    def _get_clang_tidy_config(self):
+-        try:
+-            file_handler = open(
+-                mozpath.join(self.topsrcdir, "tools", "clang-tidy", "config.yaml")
+-            )
+-            config = yaml.safe_load(file_handler)
+-        except Exception:
+-            self.log(
+-                logging.ERROR,
+-                "static-analysis",
+-                {},
+-                "ERROR: Looks like config.yaml is not valid, we are going to use default"
+-                " values for the rest of the analysis for clang-tidy.",
+-            )
+-            return None
+-        return config
++    @memoized_property
++    def get_clang_tidy_config(self):
++        from mozbuild.code_analysis.utils import ClangTidyConfig
++
++        return ClangTidyConfig(self.topsrcdir)
+ 
+     def _get_cov_config(self):
+         try:
+             file_handler = open(
+                 mozpath.join(self.topsrcdir, "tools", "coverity", "config.yaml")
+             )
+             config = yaml.safe_load(file_handler)
+         except Exception:
+@@ -1222,24 +1213,19 @@ class StaticAnalysis(MachCommandBase):
+                 {},
+                 "ERROR: Looks like config.yaml is not valid, we are going to use default"
+                 " values for the rest of the analysis for coverity.",
+             )
+             return None
+         return config
+ 
+     def _is_version_eligible(self):
+-        # make sure that we've cached self._clang_tidy_config
+-        if self._clang_tidy_config is None:
+-            self._clang_tidy_config = self._get_clang_tidy_config()
+-
+-        version = None
+-        if "package_version" in self._clang_tidy_config:
+-            version = self._clang_tidy_config["package_version"]
+-        else:
++        version = self.get_clang_tidy_config.version
++
++        if version is None:
+             self.log(
+                 logging.ERROR,
+                 "static-analysis",
+                 {},
+                 "ERROR: Unable to find 'package_version' in the config.yml",
+             )
+             return False
+ 
+@@ -1263,17 +1249,17 @@ class StaticAnalysis(MachCommandBase):
+                 "ERROR: Error determining the version clang-tidy/format binary, "
+                 "please see the attached exception: \n{}".format(e.output),
+             )
+         return False
+ 
+     def _get_clang_tidy_command(self, checks, header_filter, sources, jobs, fix):
+ 
+         if checks == "-*":
+-            checks = self._get_checks()
++            checks = ",".join(self.get_clang_tidy_config.checks)
+ 
+         common_args = [
+             "-clang-tidy-binary",
+             self._clang_tidy_path,
+             "-clang-apply-replacements-binary",
+             self._clang_apply_replacements,
+             "-checks=%s" % checks,
+             "-extra-arg=-std=c++17",
+@@ -1287,17 +1273,17 @@ class StaticAnalysis(MachCommandBase):
+         common_args += [
+             "-header-filter=%s"
+             % (header_filter if len(header_filter) else "|".join(sources))
+         ]
+ 
+         # From our configuration file, config.yaml, we build the configuration list, for
+         # the checkers that are used. These configuration options are used to better fit
+         # the checkers to our code.
+-        cfg = self._get_checks_config()
++        cfg = self.get_clang_tidy_config.checks_config
+         if cfg:
+             common_args += ["-config=%s" % yaml.dump(cfg)]
+ 
+         if fix:
+             common_args += ["-fix"]
+ 
+         return (
+             [
+@@ -1473,20 +1459,19 @@ class StaticAnalysis(MachCommandBase):
+                 {},
+                 "ERROR: clang-tidy unable to locate package.",
+             )
+             return self.TOOLS_FAILED_DOWNLOAD
+ 
+         self._clang_tidy_base_path = mozpath.join(self.topsrcdir, "tools", "clang-tidy")
+ 
+         # For each checker run it
+-        self._clang_tidy_config = self._get_clang_tidy_config()
+         platform, _ = self.platform
+ 
+-        if platform not in self._clang_tidy_config["platforms"]:
++        if platform not in self.get_clang_tidy_config.platforms:
+             self.log(
+                 logging.ERROR,
+                 "static-analysis",
+                 {},
+                 "ERROR: RUNNING: clang-tidy autotest for platform {} not supported.".format(
+                     platform
+                 ),
+             )
+@@ -1507,24 +1492,22 @@ class StaticAnalysis(MachCommandBase):
+         cmd = [self._clang_tidy_path, "-list-checks", "-checks=*"]
+         clang_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(
+             "utf-8"
+         )
+         available_checks = clang_output.split("\n")[1:]
+         self._clang_tidy_checks = [c.strip() for c in available_checks if c]
+ 
+         # Build the dummy compile_commands.json
+-        self._compilation_commands_path = self._create_temp_compilation_db(
+-            self._clang_tidy_config
+-        )
++        self._compilation_commands_path = self._create_temp_compilation_db()
+         checkers_test_batch = []
+         checkers_results = []
+         with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+             futures = []
+-            for item in self._clang_tidy_config["clang_checkers"]:
++            for item in self.get_clang_tidy_config.checks_with_data:
+                 # Skip if any of the following statements is true:
+                 # 1. Checker attribute 'publish' is False.
+                 not_published = not bool(item.get("publish", True))
+                 # 2. Checker has restricted-platforms and current platform is not of them.
+                 ignored_platform = (
+                     "restricted-platforms" in item
+                     and platform not in item["restricted-platforms"]
+                 )
+@@ -1715,27 +1698,27 @@ class StaticAnalysis(MachCommandBase):
+                 "This is the output generated by clang-tidy for the bulk build:\n{}".format(
+                     clang_output
+                 )
+             )
+             return self.TOOLS_CHECKER_DIFF_FAILED
+ 
+         return self.TOOLS_SUCCESS
+ 
+-    def _create_temp_compilation_db(self, config):
++    def _create_temp_compilation_db(self):
+         directory = tempfile.mkdtemp(prefix="cc")
+         with open(
+             mozpath.join(directory, "compile_commands.json"), "w"
+         ) as file_handler:
+             compile_commands = []
+             director = mozpath.join(self.topsrcdir, "tools", "clang-tidy", "test")
+-            for item in config["clang_checkers"]:
+-                if item["name"] in ["-*", "mozilla-*"]:
++            for item in self.get_clang_tidy_config.checks:
++                if item in ["-*", "mozilla-*"]:
+                     continue
+-                file = item["name"] + ".cpp"
++                file = item + ".cpp"
+                 element = {}
+                 element["directory"] = director
+                 element["command"] = "cpp " + file
+                 element["file"] = mozpath.join(director, file)
+                 compile_commands.append(element)
+ 
+             json.dump(compile_commands, file_handler)
+             file_handler.flush()
+@@ -2000,23 +1983,20 @@ class StaticAnalysis(MachCommandBase):
+     )
+     def print_checks(self, verbose=False):
+         self._set_log_level(verbose)
+         rc = self._get_clang_tools(verbose=verbose)
+ 
+         if rc != 0:
+             return rc
+ 
+-        if self._clang_tidy_config is None:
+-            self._clang_tidy_config = self._get_clang_tidy_config()
+-
+         args = [
+             self._clang_tidy_path,
+             "-list-checks",
+-            "-checks=%s" % self._get_checks(),
++            "-checks=%s" % self.get_clang_tidy_config.checks,
+         ]
+ 
+         rc = self.run_process(args=args, pass_thru=True)
+         if rc != 0:
+             return rc
+ 
+         job, _ = self.platform
+         if job != "linux64":
+@@ -2424,59 +2404,16 @@ class StaticAnalysis(MachCommandBase):
+         headers = sorted(regex_header.finditer(clang_output), key=lambda h: h.start())
+         issues = []
+         for _, header in enumerate(headers):
+             header_group = header.groups()
+             element = [header_group[3], header_group[4], header_group[5]]
+             issues.append(element)
+         return issues
+ 
+-    def _get_checks(self):
+-        checks = "-*"
+-        try:
+-            config = self._clang_tidy_config
+-            for item in config["clang_checkers"]:
+-                if item.get("publish", True):
+-                    checks += "," + item["name"]
+-        except Exception:
+-            print(
+-                "Looks like config.yaml is not valid, so we are unable to "
+-                "determine default checkers, using '-checks=-*,mozilla-*'"
+-            )
+-            checks += ",mozilla-*"
+-        finally:
+-            return checks
+-
+-    def _get_checks_config(self):
+-        config_list = []
+-        checker_config = {}
+-        try:
+-            config = self._clang_tidy_config
+-            for checker in config["clang_checkers"]:
+-                if checker.get("publish", True) and "config" in checker:
+-                    for checker_option in checker["config"]:
+-                        # Verify if the format of the Option is correct,
+-                        # possibilities are:
+-                        # 1. CheckerName.Option
+-                        # 2. Option -> that will become CheckerName.Option
+-                        if not checker_option["key"].startswith(checker["name"]):
+-                            checker_option["key"] = "{}.{}".format(
+-                                checker["name"], checker_option["key"]
+-                            )
+-                    config_list += checker["config"]
+-            checker_config["CheckOptions"] = config_list
+-        except Exception:
+-            print(
+-                "Looks like config.yaml is not valid, so we are unable to "
+-                "determine configuration for checkers, so using default"
+-            )
+-            checker_config = None
+-        finally:
+-            return checker_config
+-
+     def _get_config_environment(self):
+         ran_configure = False
+         config = None
+         builder = Build(self._mach_context)
+ 
+         try:
+             config = self.config_environment
+         except Exception:
+diff --git a/python/mozbuild/mozbuild/code-analysis/moz.build b/python/mozbuild/mozbuild/code_analysis/moz.build
+rename from python/mozbuild/mozbuild/code-analysis/moz.build
+rename to python/mozbuild/mozbuild/code_analysis/moz.build
+diff --git a/python/mozbuild/mozbuild/code_analysis/utils.py b/python/mozbuild/mozbuild/code_analysis/utils.py
+new file mode 100644
+--- /dev/null
++++ b/python/mozbuild/mozbuild/code_analysis/utils.py
+@@ -0,0 +1,139 @@
++# This Source Code Form is subject to the terms of the Mozilla Public
++# License, v. 2.0. If a copy of the MPL was not distributed with this
++# file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++from __future__ import absolute_import, print_function
++
++import logging
++import yaml
++
++from mozbuild.util import memoized_property
++import mozpack.path as mozpath
++
++
++class ClangTidyConfig(object):
++    def __init__(self, mozilla_src):
++        self._clang_tidy_config = self._get_clang_tidy_config(mozilla_src)
++
++    def _get_clang_tidy_config(self, mozilla_src):
++        try:
++            file_handler = open(
++                mozpath.join(mozilla_src, "tools", "clang-tidy", "config.yaml")
++            )
++            config = yaml.safe_load(file_handler)
++        except Exception:
++            self.log(
++                logging.ERROR,
++                "clang-tidy-config",
++                {},
++                "Looks like config.yaml is not valid, we are going to use default"
++                " values for the rest of the analysis for clang-tidy.",
++            )
++            return None
++        return config
++
++    @memoized_property
++    def checks(self):
++        """
++        Returns a list with all activated checks
++        """
++
++        checks = ["-*"]
++        try:
++            config = self._clang_tidy_config
++            for item in config["clang_checkers"]:
++                if item.get("publish", True):
++                    checks.append(item["name"])
++        except Exception:
++            self.log(
++                logging.ERROR,
++                "clang-tidy-config",
++                {},
++                "Looks like config.yaml is not valid, so we are unable to "
++                "determine default checkers, using '-checks=-*,mozilla-*'",
++            )
++            checks.append("mozilla-*")
++        finally:
++            return checks
++
++    @memoized_property
++    def checks_with_data(self):
++        """
++        Returns a list with all activated checks plus metadata for each check
++        """
++
++        checks_with_data = [{"name": "-*"}]
++        try:
++            config = self._clang_tidy_config
++            for item in config["clang_checkers"]:
++                if item.get("publish", True):
++                    checks_with_data.append(item)
++        except Exception:
++            self.log(
++                logging.ERROR,
++                "clang-tidy-config",
++                {},
++                "Looks like config.yaml is not valid, so we are unable to "
++                "determine default checkers, using '-checks=-*,mozilla-*'",
++            )
++            checks_with_data.append({"name": "mozilla-*", "reliability": "high"})
++        finally:
++            return checks_with_data
++
++    @memoized_property
++    def checks_config(self):
++        """
++        Returns the configuation for all checks
++        """
++
++        config_list = []
++        checks_config = {}
++        try:
++            config = self._clang_tidy_config
++            for checker in config["clang_checkers"]:
++                if checker.get("publish", True) and "config" in checker:
++                    for checker_option in checker["config"]:
++                        # Verify if the format of the Option is correct,
++                        # possibilities are:
++                        # 1. CheckerName.Option
++                        # 2. Option -> that will become CheckerName.Option
++                        if not checker_option["key"].startswith(checker["name"]):
++                            checker_option["key"] = "{}.{}".format(
++                                checker["name"], checker_option["key"]
++                            )
++                    config_list += checker["config"]
++            checks_config["CheckOptions"] = config_list
++        except Exception:
++            self.log(
++                logging.ERROR,
++                "clang-tidy-config",
++                {},
++                "Looks like config.yaml is not valid, so we are unable to "
++                "determine configuration for checkers, so using default",
++            )
++            checks_config = None
++        finally:
++            return checks_config
++
++    @memoized_property
++    def version(self):
++        """
++        Returns version of clang-tidy suitable for this configuration file
++        """
++
++        if "package_version" in self._clang_tidy_config:
++            return self._clang_tidy_config["package_version"]
++        self.log(
++            logging.ERROR,
++            "clang-tidy-confis",
++            {},
++            "Unable to find 'package_version' in the config.yml",
++        )
++        return None
++
++    @memoized_property
++    def platforms(self):
++        """
++        Returns a list of platforms suitable to work with `clang-tidy`
++        """
++        return self._clang_tidy_config.get("platforms", [])
+diff --git a/tools/lint/black.yml.1657299.later b/tools/lint/black.yml.1657299.later
+new file mode 100644
+--- /dev/null
++++ b/tools/lint/black.yml.1657299.later
+@@ -0,0 +1,17 @@
++--- black.yml
+++++ black.yml
++@@ -1,13 +1,13 @@
++ ---
++ black:
++     description: Reformat python
++     include:
++-        - python/mozbuild/mozbuild/code-analysis
+++        - python/mozbuild/mozbuild/code_analysis
++         - python/mozperftest/mozperftest
++         - python/mozrelease/mozrelease/scriptworker_canary.py
++         - taskcluster/docker/funsize-update-generator
++         - taskcluster/taskgraph/actions/scriptworker_canary.py
++         - taskcluster/taskgraph/test/conftest.py
++         - taskcluster/taskgraph/transforms/scriptworker_canary.py
++         - taskcluster/test
++         - testing/condprofile/condprof

+ 3845 - 0
mozilla-release/patches/1657301-81a1.patch

@@ -0,0 +1,3845 @@
+# HG changeset patch 
+# User Andi-Bogdan Postelnicu <bpostelnicu@mozilla.com>
+# Date 1596634366 0
+# Node ID ed78a72ce214798f905b8cd41a39e6a45f85c078
+# Parent  ff5700ef9bc696d2b477e12e4fcab8c729d16518
+Bug 1657301 - Reformat `static-analysis` with `black`. r=sylvestre
+
+# ignore-this-changeset
+
+Differential Revision: https://phabricator.services.mozilla.com/D85985
+
+diff --git a/python/mozbuild/mozbuild/code-analysis/mach_commands.py b/python/mozbuild/mozbuild/code-analysis/mach_commands.py
+--- a/python/mozbuild/mozbuild/code-analysis/mach_commands.py
++++ b/python/mozbuild/mozbuild/code-analysis/mach_commands.py
+@@ -47,77 +47,82 @@ from mozbuild.controller.clobber import 
+ def run_one_clang_format_batch(args):
+     try:
+         subprocess.check_output(args)
+     except subprocess.CalledProcessError as e:
+         return e
+ 
+ 
+ def build_repo_relative_path(abs_path, repo_path):
+-    '''Build path relative to repository root'''
++    """Build path relative to repository root"""
+ 
+     if os.path.islink(abs_path):
+         abs_path = mozpath.realpath(abs_path)
+ 
+     return mozpath.relpath(abs_path, repo_path)
+ 
+ 
+ def prompt_bool(prompt, limit=5):
+-    ''' Prompts the user with prompt and requires a boolean value. '''
++    """ Prompts the user with prompt and requires a boolean value. """
+     from distutils.util import strtobool
+ 
+     for _ in range(limit):
+         try:
+             return strtobool(raw_input(prompt + "[Y/N]\n"))
+         except ValueError:
+-            print("ERROR! Please enter a valid option! Please use any of the following:"
+-                  " Y, N, True, False, 1, 0")
++            print(
++                "ERROR! Please enter a valid option! Please use any of the following:"
++                " Y, N, True, False, 1, 0"
++            )
+     return False
+ 
+ 
+ class StaticAnalysisSubCommand(SubCommand):
+     def __call__(self, func):
+         after = SubCommand.__call__(self, func)
+         args = [
+-            CommandArgument('--verbose', '-v', action='store_true',
+-                            help='Print verbose output.'),
++            CommandArgument(
++                "--verbose", "-v", action="store_true", help="Print verbose output."
++            ),
+         ]
+         for arg in args:
+             after = arg(after)
+         return after
+ 
+ 
+ class StaticAnalysisMonitor(object):
+     def __init__(self, srcdir, objdir, clang_tidy_config, total):
+         self._total = total
+         self._processed = 0
+         self._current = None
+         self._srcdir = srcdir
+ 
+         import copy
+ 
+-        self._clang_tidy_config = copy.deepcopy(clang_tidy_config['clang_checkers'])
++        self._clang_tidy_config = copy.deepcopy(clang_tidy_config["clang_checkers"])
+ 
+         # Transform the configuration to support Regex
+         for item in self._clang_tidy_config:
+-            if item['name'] == '-*':
++            if item["name"] == "-*":
+                 continue
+-            item['name'] = item['name'].replace('*', '.*')
++            item["name"] = item["name"].replace("*", ".*")
+ 
+         from mozbuild.compilation.warnings import (
+             WarningsCollector,
+             WarningsDatabase,
+         )
+ 
+         self._warnings_database = WarningsDatabase()
+ 
+         def on_warning(warning):
+ 
+             # Output paths relative to repository root if the paths are under repo tree
+-            warning['filename'] = build_repo_relative_path(warning['filename'], self._srcdir)
++            warning["filename"] = build_repo_relative_path(
++                warning["filename"], self._srcdir
++            )
+ 
+             self._warnings_database.insert(warning)
+ 
+         self._warnings_collector = WarningsCollector(on_warning, objdir=objdir)
+ 
+     @property
+     def num_files(self):
+         return self._total
+@@ -137,325 +142,442 @@ class StaticAnalysisMonitor(object):
+     def on_line(self, line):
+         warning = None
+ 
+         try:
+             warning = self._warnings_collector.process_line(line)
+         except Exception:
+             pass
+ 
+-        if line.find('clang-tidy') != -1:
+-            filename = line.split(' ')[-1]
++        if line.find("clang-tidy") != -1:
++            filename = line.split(" ")[-1]
+             if os.path.isfile(filename):
+                 self._current = build_repo_relative_path(filename, self._srcdir)
+             else:
+                 self._current = None
+             self._processed = self._processed + 1
+             return (warning, False)
+         if warning is not None:
++
+             def get_check_config(checker_name):
+                 # get the matcher from self._clang_tidy_config that is the 'name' field
+                 for item in self._clang_tidy_config:
+-                    if item['name'] == checker_name:
++                    if item["name"] == checker_name:
+                         return item
+ 
+                     # We are using a regex in order to also match 'mozilla-.* like checkers'
+-                    matcher = re.match(item['name'], checker_name)
++                    matcher = re.match(item["name"], checker_name)
+                     if matcher is not None and matcher.group(0) == checker_name:
+                         return item
+ 
+-            check_config = get_check_config(warning['flag'])
++            check_config = get_check_config(warning["flag"])
+             if check_config is not None:
+-                warning['reliability'] = check_config.get('reliability', 'low')
+-                warning['reason'] = check_config.get('reason')
+-                warning['publish'] = check_config.get('publish', True)
++                warning["reliability"] = check_config.get("reliability", "low")
++                warning["reason"] = check_config.get("reason")
++                warning["publish"] = check_config.get("publish", True)
+             elif warning["flag"] == "clang-diagnostic-error":
+                 # For a "warning" that is flagged as "clang-diagnostic-error"
+                 # set it as "publish"
+-                warning['publish'] = True
++                warning["publish"] = True
+ 
+         return (warning, True)
+ 
+ 
+ @CommandProvider
+ class StaticAnalysis(MachCommandBase):
+     """Utilities for running C++ static analysis checks and format."""
+ 
+     # List of file extension to consider (should start with dot)
+-    _format_include_extensions = ('.cpp', '.c', '.cc', '.h', '.m', '.mm')
++    _format_include_extensions = (".cpp", ".c", ".cc", ".h", ".m", ".mm")
+     # File contaning all paths to exclude from formatting
+-    _format_ignore_file = '.clang-format-ignore'
++    _format_ignore_file = ".clang-format-ignore"
+ 
+     # List of file extension to consider (should start with dot)
+-    _check_syntax_include_extensions = ('.cpp', '.c', '.cc', '.cxx')
++    _check_syntax_include_extensions = (".cpp", ".c", ".cc", ".cxx")
+ 
+     _clang_tidy_config = None
+     _cov_config = None
+ 
+-    @Command('static-analysis', category='testing',
+-             description='Run C++ static analysis checks')
++    @Command(
++        "static-analysis",
++        category="testing",
++        description="Run C++ static analysis checks",
++    )
+     def static_analysis(self):
+         # If no arguments are provided, just print a help message.
+         """Detailed documentation:
+         https://firefox-source-docs.mozilla.org/code-quality/static-analysis.html
+         """
+         mach = Mach(os.getcwd())
+ 
+         def populate_context(key=None):
+-            if key == 'topdir':
++            if key == "topdir":
+                 return self.topsrcdir
+ 
+         mach.populate_context_handler = populate_context
+-        mach.run(['static-analysis', '--help'])
+-
+-    @StaticAnalysisSubCommand('static-analysis', 'check',
+-                              'Run the checks using the helper tool')
+-    @CommandArgument('source', nargs='*', default=['.*'],
+-                     help='Source files to be analyzed (regex on path). '
+-                          'Can be omitted, in which case the entire code base '
+-                          'is analyzed.  The source argument is ignored if '
+-                          'there is anything fed through stdin, in which case '
+-                          'the analysis is only performed on the files changed '
+-                          'in the patch streamed through stdin.  This is called '
+-                          'the diff mode.')
+-    @CommandArgument('--checks', '-c', default='-*', metavar='checks',
+-                     help='Static analysis checks to enable.  By default, this enables only '
+-                     'checks that are published here: https://mzl.la/2DRHeTh, but can be any '
+-                     'clang-tidy checks syntax.')
+-    @CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
+-                     help='Number of concurrent jobs to run. Default is the number of CPUs.')
+-    @CommandArgument('--strip', '-p', default='1', metavar='NUM',
+-                     help='Strip NUM leading components from file names in diff mode.')
+-    @CommandArgument('--fix', '-f', default=False, action='store_true',
+-                     help='Try to autofix errors detected by clang-tidy checkers.')
+-    @CommandArgument('--header-filter', '-h-f', default='', metavar='header_filter',
+-                     help='Regular expression matching the names of the headers to '
+-                          'output diagnostics from. Diagnostics from the main file '
+-                          'of each translation unit are always displayed')
+-    @CommandArgument('--output', '-o', default=None,
+-                     help='Write clang-tidy output in a file')
+-    @CommandArgument('--format', default='text', choices=('text', 'json'),
+-                     help='Output format to write in a file')
+-    @CommandArgument('--outgoing', default=False, action='store_true',
+-                     help='Run static analysis checks on outgoing files from mercurial repository')
+-    def check(self, source=None, jobs=2, strip=1, verbose=False, checks='-*',
+-              fix=False, header_filter='', output=None, format='text', outgoing=False):
++        mach.run(["static-analysis", "--help"])
++
++    @StaticAnalysisSubCommand(
++        "static-analysis", "check", "Run the checks using the helper tool"
++    )
++    @CommandArgument(
++        "source",
++        nargs="*",
++        default=[".*"],
++        help="Source files to be analyzed (regex on path). "
++        "Can be omitted, in which case the entire code base "
++        "is analyzed.  The source argument is ignored if "
++        "there is anything fed through stdin, in which case "
++        "the analysis is only performed on the files changed "
++        "in the patch streamed through stdin.  This is called "
++        "the diff mode.",
++    )
++    @CommandArgument(
++        "--checks",
++        "-c",
++        default="-*",
++        metavar="checks",
++        help="Static analysis checks to enable.  By default, this enables only "
++        "checks that are published here: https://mzl.la/2DRHeTh, but can be any "
++        "clang-tidy checks syntax.",
++    )
++    @CommandArgument(
++        "--jobs",
++        "-j",
++        default="0",
++        metavar="jobs",
++        type=int,
++        help="Number of concurrent jobs to run. Default is the number of CPUs.",
++    )
++    @CommandArgument(
++        "--strip",
++        "-p",
++        default="1",
++        metavar="NUM",
++        help="Strip NUM leading components from file names in diff mode.",
++    )
++    @CommandArgument(
++        "--fix",
++        "-f",
++        default=False,
++        action="store_true",
++        help="Try to autofix errors detected by clang-tidy checkers.",
++    )
++    @CommandArgument(
++        "--header-filter",
++        "-h-f",
++        default="",
++        metavar="header_filter",
++        help="Regular expression matching the names of the headers to "
++        "output diagnostics from. Diagnostics from the main file "
++        "of each translation unit are always displayed",
++    )
++    @CommandArgument(
++        "--output", "-o", default=None, help="Write clang-tidy output in a file"
++    )
++    @CommandArgument(
++        "--format",
++        default="text",
++        choices=("text", "json"),
++        help="Output format to write in a file",
++    )
++    @CommandArgument(
++        "--outgoing",
++        default=False,
++        action="store_true",
++        help="Run static analysis checks on outgoing files from mercurial repository",
++    )
++    def check(
++        self,
++        source=None,
++        jobs=2,
++        strip=1,
++        verbose=False,
++        checks="-*",
++        fix=False,
++        header_filter="",
++        output=None,
++        format="text",
++        outgoing=False,
++    ):
+         from mozbuild.controller.building import (
+             StaticAnalysisFooter,
+             StaticAnalysisOutputManager,
+         )
+ 
+         self._set_log_level(verbose)
+         self.activate_virtualenv()
+         self.log_manager.enable_unstructured()
+ 
+         rc = self._get_clang_tools(verbose=verbose)
+         if rc != 0:
+             return rc
+ 
+         if self._is_version_eligible() is False:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     "ERROR: You're using an old version of clang-format binary."
+-                     " Please update to a more recent one by running: './mach bootstrap'")
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: You're using an old version of clang-format binary."
++                " Please update to a more recent one by running: './mach bootstrap'",
++            )
+             return 1
+ 
+         rc = self._build_compile_db(verbose=verbose)
+         rc = rc or self._build_export(jobs=jobs, verbose=verbose)
+         if rc != 0:
+             return rc
+ 
+         # Use outgoing files instead of source files
+         if outgoing:
+             repo = get_repository_object(self.topsrcdir)
+             files = repo.get_outgoing_files()
+             source = [os.path.abspath(f) for f in files]
+ 
+         # Split in several chunks to avoid hitting Python's limit of 100 groups in re
+-        compile_db = json.loads(open(self._compile_db, 'r').read())
++        compile_db = json.loads(open(self._compile_db, "r").read())
+         total = 0
+         import re
++
+         chunk_size = 50
+         for offset in range(0, len(source), chunk_size):
+-            source_chunks = [re.escape(f) for f in source[offset:offset + chunk_size].copy()]
+-            name_re = re.compile('(' + ')|('.join(source_chunks) + ')')
++            source_chunks = [
++                re.escape(f) for f in source[offset : offset + chunk_size].copy()
++            ]
++            name_re = re.compile("(" + ")|(".join(source_chunks) + ")")
+             for f in compile_db:
+-                if name_re.search(f['file']):
++                if name_re.search(f["file"]):
+                     total = total + 1
+ 
+         # Filter source to remove excluded files
+         source = self._generate_path_list(source, verbose=verbose)
+ 
+         if not total or not source:
+-            self.log(logging.INFO, 'static-analysis', {},
+-                     "There are no files eligible for analysis. Please note that 'header' files "
+-                     "cannot be used for analysis since they do not consist compilation units.")
++            self.log(
++                logging.INFO,
++                "static-analysis",
++                {},
++                "There are no files eligible for analysis. Please note that 'header' files "
++                "cannot be used for analysis since they do not consist compilation units.",
++            )
+             return 0
+ 
+         # Escape the files from source
+         source = [re.escape(f) for f in source]
+ 
+         cwd = self.topobjdir
+         self._compilation_commands_path = self.topobjdir
+         if self._clang_tidy_config is None:
+             self._clang_tidy_config = self._get_clang_tidy_config()
+ 
+         monitor = StaticAnalysisMonitor(
+-            self.topsrcdir, self.topobjdir, self._clang_tidy_config, total)
++            self.topsrcdir, self.topobjdir, self._clang_tidy_config, total
++        )
+ 
+         footer = StaticAnalysisFooter(self.log_manager.terminal, monitor)
+ 
+-        with StaticAnalysisOutputManager(self.log_manager, monitor, footer) as output_manager:
++        with StaticAnalysisOutputManager(
++            self.log_manager, monitor, footer
++        ) as output_manager:
+             import math
+-            batch_size = int(math.ceil(float(len(source)) / multiprocessing.cpu_count()))
++
++            batch_size = int(
++                math.ceil(float(len(source)) / multiprocessing.cpu_count())
++            )
+             for i in range(0, len(source), batch_size):
+                 args = self._get_clang_tidy_command(
+                     checks=checks,
+                     header_filter=header_filter,
+-                    sources=source[i:(i + batch_size)],
++                    sources=source[i : (i + batch_size)],
+                     jobs=jobs,
+-                    fix=fix)
++                    fix=fix,
++                )
+                 rc = self.run_process(
+                     args=args,
+                     ensure_exit_code=False,
+                     line_handler=output_manager.on_line,
+-                    cwd=cwd)
+-
+-            self.log(logging.WARNING, 'warning_summary',
+-                     {'count': len(monitor.warnings_db)},
+-                     '{count} warnings present.')
++                    cwd=cwd,
++                )
++
++            self.log(
++                logging.WARNING,
++                "warning_summary",
++                {"count": len(monitor.warnings_db)},
++                "{count} warnings present.",
++            )
+ 
+             # Write output file
+             if output is not None:
+                 output_manager.write(output, format)
+ 
+         if rc != 0:
+             return rc
+         # if we are building firefox for android it might be nice to
+         # also analyze the java code base
+-        if self.substs['MOZ_BUILD_APP'] == 'mobile/android':
++        if self.substs["MOZ_BUILD_APP"] == "mobile/android":
+             rc = self.check_java(source, jobs, strip, verbose, skip_export=True)
+         return rc
+ 
+-    @StaticAnalysisSubCommand('static-analysis', 'check-coverity',
+-                              'Run coverity static-analysis tool on the given files. '
+-                              'Can only be run by automation! '
+-                              'It\'s result is stored as an json file on the artifacts server.')
+-    @CommandArgument('source', nargs='*', default=[],
+-                     help='Source files to be analyzed by Coverity Static Analysis Tool. '
+-                          'This is ran only in automation.')
+-    @CommandArgument('--output', '-o', default=None,
+-                     help='Write coverity output translated to json output in a file')
+-    @CommandArgument('--coverity_output_path', '-co', default=None,
+-                     help='Path where to write coverity results as cov-results.json. '
+-                     'If no path is specified the default path from the coverity working '
+-                     'directory, ~./mozbuild/coverity is used.')
+-    @CommandArgument('--outgoing', default=False, action='store_true',
+-                     help='Run coverity on outgoing files from mercurial or git repository')
+-    @CommandArgument('--full-build', default=False, action='store_true',
+-                     help='Run a full build for coverity analisys.')
+-    def check_coverity(self, source=[], output=None, coverity_output_path=None,
+-                       outgoing=False, full_build=False, verbose=False):
++    @StaticAnalysisSubCommand(
++        "static-analysis",
++        "check-coverity",
++        "Run coverity static-analysis tool on the given files. "
++        "Can only be run by automation! "
++        "It's result is stored as an json file on the artifacts server.",
++    )
++    @CommandArgument(
++        "source",
++        nargs="*",
++        default=[],
++        help="Source files to be analyzed by Coverity Static Analysis Tool. "
++        "This is ran only in automation.",
++    )
++    @CommandArgument(
++        "--output",
++        "-o",
++        default=None,
++        help="Write coverity output translated to json output in a file",
++    )
++    @CommandArgument(
++        "--coverity_output_path",
++        "-co",
++        default=None,
++        help="Path where to write coverity results as cov-results.json. "
++        "If no path is specified the default path from the coverity working "
++        "directory, ~./mozbuild/coverity is used.",
++    )
++    @CommandArgument(
++        "--outgoing",
++        default=False,
++        action="store_true",
++        help="Run coverity on outgoing files from mercurial or git repository",
++    )
++    @CommandArgument(
++        "--full-build",
++        default=False,
++        action="store_true",
++        help="Run a full build for coverity analisys.",
++    )
++    def check_coverity(
++        self,
++        source=[],
++        output=None,
++        coverity_output_path=None,
++        outgoing=False,
++        full_build=False,
++        verbose=False,
++    ):
+         self._set_log_level(verbose)
+         self.activate_virtualenv()
+         self.log_manager.enable_unstructured()
+ 
+-        if 'MOZ_AUTOMATION' not in os.environ:
+-            self.log(logging.INFO, 'static-analysis', {},
+-                     'Coverity based static-analysis cannot be ran outside automation.')
++        if "MOZ_AUTOMATION" not in os.environ:
++            self.log(
++                logging.INFO,
++                "static-analysis",
++                {},
++                "Coverity based static-analysis cannot be ran outside automation.",
++            )
+             return
+ 
+         if full_build and outgoing:
+-            self.log(logging.INFO, 'static-analysis', {},
+-                     'Coverity full build cannot be associated with outgoing.')
++            self.log(
++                logging.INFO,
++                "static-analysis",
++                {},
++                "Coverity full build cannot be associated with outgoing.",
++            )
+             return
+ 
+         # Use outgoing files instead of source files
+         if outgoing:
+             repo = get_repository_object(self.topsrcdir)
+             files = repo.get_outgoing_files()
+             source = [os.path.abspath(f) for f in files]
+ 
+         # Verify that we have source files or we are dealing with a full-build
+         if len(source) == 0 and not full_build:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: There are no files that coverity can use to scan.')
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: There are no files that coverity can use to scan.",
++            )
+             return 0
+ 
+         # Load the configuration file for coverity static-analysis
+         # For the moment we store only the reliability index for each checker
+         # as the rest is managed on the https://github.com/mozilla/release-services side.
+         self._cov_config = self._get_cov_config()
+ 
+         rc = self.setup_coverity()
+         if rc != 0:
+             return rc
+ 
+         # First run cov-run-desktop --setup in order to setup the analysis env
+         # We need this in both cases, per patch analysis or full tree build
+-        cmd = [self.cov_run_desktop, '--setup']
++        cmd = [self.cov_run_desktop, "--setup"]
+         if self.run_cov_command(cmd, self.cov_path):
+             # Avoiding a bug in Coverity where snapshot is not identified
+             # as beeing built with the current analysis binary.
+             if not full_build:
+                 return 1
+ 
+         # Run cov-configure for clang, javascript and python
+         langs = ["clang", "javascript", "python"]
+         for lang in langs:
+-            cmd = [self.cov_configure, '--{}'.format(lang)]
++            cmd = [self.cov_configure, "--{}".format(lang)]
+ 
+             if self.run_cov_command(cmd):
+                 return 1
+ 
+         if full_build:
+             # 1. Build the model file that is going to be used for analysis
+             model_path = mozpath.join("tools", "coverity", "model.cpp")
+             cmd = [self.cov_make_library, "-sf", self.cov_lic_path, model_path]
+ 
+             if self.run_cov_command(cmd):
+                 return 1
+ 
+             # 2. Run cov-build
+ 
+             # Add cov_build command
+-            cmd = [
+-                self.cov_build,
+-                '--dir',
+-                'cov-int'
+-            ]
++            cmd = [self.cov_build, "--dir", "cov-int"]
+             # Add fs capture search paths for languages that are not nuilt
+             cmd += [
+                 "--fs-capture-search={}".format(path)
+                 for path in self.cov_capture_search
+             ]
+ 
+             # Add the exclude criteria for test cases
+             cmd += [
+-                '--fs-capture-search-exclude-regex',
+-                '.*/test',
+-                './mach', '--log-no-times', 'build'
++                "--fs-capture-search-exclude-regex",
++                ".*/test",
++                "./mach",
++                "--log-no-times",
++                "build",
+             ]
+             if self.run_cov_command(cmd):
+                 return 1
+ 
+             # 3. Run cov-analyze and exclude disabled checkers
+             cmd = [
+                 self.cov_analyze,
+-                '--dir',
+-                'cov-int',
+-                '--all',
+-                '--enable-virtual',
+-                '--strip-path={}'.format(self.topsrcdir),
+-                '-sf',
+-                self.cov_lic_path
++                "--dir",
++                "cov-int",
++                "--all",
++                "--enable-virtual",
++                "--strip-path={}".format(self.topsrcdir),
++                "-sf",
++                self.cov_lic_path,
+             ]
+ 
+             cmd += [
+                 "--disable={}".format(key)
+-                for key, checker in self._cov_config['coverity_checkers'].items()
++                for key, checker in self._cov_config["coverity_checkers"].items()
+                 if checker.get("publish", True) is False
+             ]
+ 
+             if self.run_cov_command(cmd):
+                 return 1
+ 
+             # 4. Run cov-commit-defects
+             protocol = "https" if self.cov_server_ssl else "http"
+@@ -466,227 +588,290 @@ class StaticAnalysis(MachCommandBase):
+                 self.cov_auth_path,
+                 "--stream",
+                 self.cov_stream,
+                 "--dir",
+                 "cov-int",
+                 "--url",
+                 server_url,
+                 "-sf",
+-                self.cov_lic_path
++                self.cov_lic_path,
+             ]
+ 
+             if self.run_cov_command(cmd):
+                 return 1
+ 
+             return 0
+ 
+         rc = self._build_compile_db(verbose=verbose)
+         rc = rc or self._build_export(jobs=2, verbose=verbose)
+ 
+         if rc != 0:
+             return rc
+ 
+         commands_list = self.get_files_with_commands(source)
+         if len(commands_list) == 0:
+-            self.log(logging.INFO, 'static-analysis', {},
+-                     'There are no files that need to be analyzed.')
++            self.log(
++                logging.INFO,
++                "static-analysis",
++                {},
++                "There are no files that need to be analyzed.",
++            )
+             return 0
+ 
+         if len(self.cov_non_unified_paths):
+-            self.cov_non_unified_paths = [mozpath.join(
+-                self.topsrcdir, path) for path in self.cov_non_unified_paths]
++            self.cov_non_unified_paths = [
++                mozpath.join(self.topsrcdir, path)
++                for path in self.cov_non_unified_paths
++            ]
+ 
+         # For each element in commands_list run `cov-translate`
+         for element in commands_list:
+ 
+             def transform_cmd(cmd):
+                 # Coverity Analysis has a problem translating definitions passed as:
+                 # '-DSOME_DEF="ValueOfAString"', please see Bug 1588283.
+                 return [re.sub(r'\'-D(.*)="(.*)"\'', r'-D\1="\2"', arg) for arg in cmd]
+ 
+-            build_command = element['command'].split(' ')
++            build_command = element["command"].split(" ")
+             # For modules that are compatible with the non unified build environment
+             # use the the implicit file for analysis in the detriment of the unified
+-            if any(element['file'].startswith(path) for path in self.cov_non_unified_paths):
+-                build_command[-1] = element['file']
+-
+-            cmd = [self.cov_translate, '--dir', self.cov_idir_path] + \
+-                transform_cmd(build_command)
+-
+-            if self.run_cov_command(cmd, element['directory']):
++            if any(
++                element["file"].startswith(path) for path in self.cov_non_unified_paths
++            ):
++                build_command[-1] = element["file"]
++
++            cmd = [self.cov_translate, "--dir", self.cov_idir_path] + transform_cmd(
++                build_command
++            )
++
++            if self.run_cov_command(cmd, element["directory"]):
+                 return 1
+ 
+         if coverity_output_path is None:
+-            cov_result = mozpath.join(self.cov_state_path, 'cov-results.json')
++            cov_result = mozpath.join(self.cov_state_path, "cov-results.json")
+         else:
+-            cov_result = mozpath.join(coverity_output_path, 'cov-results.json')
++            cov_result = mozpath.join(coverity_output_path, "cov-results.json")
+ 
+         # Once the capture is performed we need to do the actual Coverity Desktop analysis
+-        cmd = [self.cov_run_desktop, '--json-output-v6',
+-               cov_result, '--analyze-captured-source']
++        cmd = [
++            self.cov_run_desktop,
++            "--json-output-v6",
++            cov_result,
++            "--analyze-captured-source",
++        ]
+ 
+         if self.run_cov_command(cmd, self.cov_state_path):
+             return 1
+ 
+         if output is not None:
+             self.dump_cov_artifact(cov_result, source, output)
+ 
+     def run_cov_command(self, cmd, path=None):
+         if path is None:
+             # We want to run it in topsrcdir
+             path = self.topsrcdir
+ 
+-        self.log(logging.INFO, 'static-analysis', {}, 'Running '+' '.join(cmd))
+-
+-        rc = self.run_process(args=cmd, cwd=path, pass_thru=True, ensure_exit_code=False)
++        self.log(logging.INFO, "static-analysis", {}, "Running " + " ".join(cmd))
++
++        rc = self.run_process(
++            args=cmd, cwd=path, pass_thru=True, ensure_exit_code=False
++        )
+ 
+         if rc != 0:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: Running ' + ' '.join(cmd) + ' failed!')
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Running " + " ".join(cmd) + " failed!",
++            )
+             return rc
+         return 0
+ 
+     def get_reliability_index_for_cov_checker(self, checker_name):
+         if self._cov_config is None:
+-            self.log(logging.INFO, 'static-analysis', {}, 'Coverity config file not found, '
+-                     'using default-value \'reliablity\' = medium. for checker {}'.format(
+-                        checker_name))
+-            return 'medium'
+-
+-        checkers = self._cov_config['coverity_checkers']
++            self.log(
++                logging.INFO,
++                "static-analysis",
++                {},
++                "Coverity config file not found, "
++                "using default-value 'reliablity' = medium. for checker {}".format(
++                    checker_name
++                ),
++            )
++            return "medium"
++
++        checkers = self._cov_config["coverity_checkers"]
+         if checker_name not in checkers:
+-            self.log(logging.INFO, 'static-analysis', {},
+-                     'Coverity checker {} not found to determine reliability index. '
+-                     'For the moment we shall use the default \'reliablity\' = medium.'.format(
+-                        checker_name))
+-            return 'medium'
+-
+-        if 'reliability' not in checkers[checker_name]:
++            self.log(
++                logging.INFO,
++                "static-analysis",
++                {},
++                "Coverity checker {} not found to determine reliability index. "
++                "For the moment we shall use the default 'reliablity' = medium.".format(
++                    checker_name
++                ),
++            )
++            return "medium"
++
++        if "reliability" not in checkers[checker_name]:
+             # This checker doesn't have a reliability index
+-            self.log(logging.INFO, 'static-analysis', {},
+-                     'Coverity checker {} doesn\'t have a reliability index set, '
+-                     'field \'reliability is missing\', please cosinder adding it. '
+-                     'For the moment we shall use the default \'reliablity\' = medium.'.format(
+-                        checker_name))
+-            return 'medium'
+-
+-        return checkers[checker_name]['reliability']
++            self.log(
++                logging.INFO,
++                "static-analysis",
++                {},
++                "Coverity checker {} doesn't have a reliability index set, "
++                "field 'reliability is missing', please cosinder adding it. "
++                "For the moment we shall use the default 'reliablity' = medium.".format(
++                    checker_name
++                ),
++            )
++            return "medium"
++
++        return checkers[checker_name]["reliability"]
+ 
+     def dump_cov_artifact(self, cov_results, source, output):
+         # Parse Coverity json into structured issues
+ 
+         with open(cov_results) as f:
+             result = json.load(f)
+ 
+             # Parse the issues to a standard json format
+-            issues_dict = {'files': {}}
+-
+-            files_list = issues_dict['files']
++            issues_dict = {"files": {}}
++
++            files_list = issues_dict["files"]
+ 
+             def build_element(issue):
+                 # We look only for main event
+                 event_path = next(
+-                    (event for event in issue['events'] if event['main'] is True), None)
++                    (event for event in issue["events"] if event["main"] is True), None
++                )
+ 
+                 dict_issue = {
+-                    'line': issue['mainEventLineNumber'],
+-                    'flag': issue['checkerName'],
+-                    'message': event_path['eventDescription'],
+-                    'reliability': self.get_reliability_index_for_cov_checker(
+-                        issue['checkerName']
+-                        ),
+-                    'extra': {
+-                        'category': issue['checkerProperties']['category'],
+-                        'stateOnServer': issue['stateOnServer'],
+-                        'stack': []
+-                    }
++                    "line": issue["mainEventLineNumber"],
++                    "flag": issue["checkerName"],
++                    "message": event_path["eventDescription"],
++                    "reliability": self.get_reliability_index_for_cov_checker(
++                        issue["checkerName"]
++                    ),
++                    "extra": {
++                        "category": issue["checkerProperties"]["category"],
++                        "stateOnServer": issue["stateOnServer"],
++                        "stack": [],
++                    },
+                 }
+ 
+                 # Embed all events into extra message
+-                for event in issue['events']:
+-                    dict_issue['extra']['stack'].append(
+-                        {'file_path': build_repo_relative_path(event['strippedFilePathname'],
+-                                                               self.topsrcdir),
+-                         'line_number': event['lineNumber'],
+-                         'path_type': event['eventTag'],
+-                         'description': event['eventDescription']})
++                for event in issue["events"]:
++                    dict_issue["extra"]["stack"].append(
++                        {
++                            "file_path": build_repo_relative_path(
++                                event["strippedFilePathname"], self.topsrcdir
++                            ),
++                            "line_number": event["lineNumber"],
++                            "path_type": event["eventTag"],
++                            "description": event["eventDescription"],
++                        }
++                    )
+ 
+                 return dict_issue
+ 
+-            for issue in result['issues']:
+-                path = build_repo_relative_path(issue['strippedMainEventFilePathname'],
+-                                                self.topsrcdir)
++            for issue in result["issues"]:
++                path = build_repo_relative_path(
++                    issue["strippedMainEventFilePathname"], self.topsrcdir
++                )
+                 # Skip clang diagnostic messages
+-                if issue['checkerName'].startswith('RW.CLANG'):
++                if issue["checkerName"].startswith("RW.CLANG"):
+                     continue
+ 
+                 if path is None:
+                     # Since we skip a result we should log it
+-                    self.log(logging.INFO, 'static-analysis', {},
+-                             'Skipping CID: {0} from file: {1} since it\'s not related '
+-                             'with the current patch.'.format(
+-                                issue['stateOnServer']['cid'],
+-                                issue['strippedMainEventFilePathname'])
+-                             )
++                    self.log(
++                        logging.INFO,
++                        "static-analysis",
++                        {},
++                        "Skipping CID: {0} from file: {1} since it's not related "
++                        "with the current patch.".format(
++                            issue["stateOnServer"]["cid"],
++                            issue["strippedMainEventFilePathname"],
++                        ),
++                    )
+                     continue
+                 if path in files_list:
+-                    files_list[path]['warnings'].append(build_element(issue))
++                    files_list[path]["warnings"].append(build_element(issue))
+                 else:
+-                    files_list[path] = {'warnings': [build_element(issue)]}
+-
+-            with open(output, 'w') as f:
++                    files_list[path] = {"warnings": [build_element(issue)]}
++
++            with open(output, "w") as f:
+                 json.dump(issues_dict, f)
+ 
+     def get_coverity_secrets(self):
+         from taskgraph.util.taskcluster import get_root_url
+ 
+-        secret_name = 'project/relman/coverity'
+-        secrets_url = '{}/secrets/v1/secret/{}'.format(get_root_url(True), secret_name)
+-
+-        self.log(logging.INFO, 'static-analysis', {},
+-                 'Using symbol upload token from the secrets service: "{}"'.format(secrets_url))
++        secret_name = "project/relman/coverity"
++        secrets_url = "{}/secrets/v1/secret/{}".format(get_root_url(True), secret_name)
++
++        self.log(
++            logging.INFO,
++            "static-analysis",
++            {},
++            'Using symbol upload token from the secrets service: "{}"'.format(
++                secrets_url
++            ),
++        )
+ 
+         import requests
++
+         res = requests.get(secrets_url)
+         res.raise_for_status()
+         secret = res.json()
+-        cov_config = secret['secret'] if 'secret' in secret else None
++        cov_config = secret["secret"] if "secret" in secret else None
+ 
+         if cov_config is None:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: Ill formatted secret for Coverity. Aborting analysis.')
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Ill formatted secret for Coverity. Aborting analysis.",
++            )
+             return 1
+ 
+-        self.cov_analysis_url = cov_config.get('package_url')
+-        self.cov_package_name = cov_config.get('package_name')
+-        self.cov_url = cov_config.get('server_url')
+-        self.cov_server_ssl = cov_config.get('server_ssl', True)
++        self.cov_analysis_url = cov_config.get("package_url")
++        self.cov_package_name = cov_config.get("package_name")
++        self.cov_url = cov_config.get("server_url")
++        self.cov_server_ssl = cov_config.get("server_ssl", True)
+         # In case we don't have a port in the secret we use the default one,
+         # for a default coverity deployment.
+-        self.cov_port = cov_config.get('server_port', 8443)
+-        self.cov_auth = cov_config.get('auth_key')
+-        self.cov_package_ver = cov_config.get('package_ver')
+-        self.cov_lic_name = cov_config.get('lic_name')
+-        self.cov_capture_search = cov_config.get('fs_capture_search', None)
+-        self.cov_full_stack = cov_config.get('full_stack', False)
+-        self.cov_stream = cov_config.get('stream', False)
+-        self.cov_non_unified_paths = cov_config.get('non_unified', [])
++        self.cov_port = cov_config.get("server_port", 8443)
++        self.cov_auth = cov_config.get("auth_key")
++        self.cov_package_ver = cov_config.get("package_ver")
++        self.cov_lic_name = cov_config.get("lic_name")
++        self.cov_capture_search = cov_config.get("fs_capture_search", None)
++        self.cov_full_stack = cov_config.get("full_stack", False)
++        self.cov_stream = cov_config.get("stream", False)
++        self.cov_non_unified_paths = cov_config.get("non_unified", [])
+ 
+         return 0
+ 
+     def download_coverity(self):
+-        if self.cov_url is None or self.cov_port is None or \
+-                self.cov_analysis_url is None or \
+-                self.cov_auth is None:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: Missing Coverity secret on try job!')
++        if (
++            self.cov_url is None
++            or self.cov_port is None
++            or self.cov_analysis_url is None
++            or self.cov_auth is None
++        ):
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Missing Coverity secret on try job!",
++            )
+             return 1
+ 
+-        COVERITY_CONFIG = '''
++        COVERITY_CONFIG = """
+         {
+             "type": "Coverity configuration",
+             "format_version": 1,
+             "settings": {
+             "server": {
+                 "host": "%s",
+                 "ssl" : true,
+                 "port": %s,
+@@ -695,42 +880,43 @@ class StaticAnalysis(MachCommandBase):
+             },
+             "stream": "Firefox",
+             "cov_run_desktop": {
+                 "build_cmd": [],
+                 "clean_cmd": []
+             }
+             }
+         }
+-        '''
++        """
+         # Generate the coverity.conf and auth files
+-        self.cov_auth_path = mozpath.join(self.cov_state_path, 'auth')
+-        cov_setup_path = mozpath.join(self.cov_state_path, 'coverity.conf')
++        self.cov_auth_path = mozpath.join(self.cov_state_path, "auth")
++        cov_setup_path = mozpath.join(self.cov_state_path, "coverity.conf")
+         cov_conf = COVERITY_CONFIG % (self.cov_url, self.cov_port, self.cov_auth_path)
+ 
+         def download(artifact_url, target):
+             import requests
++
+             self.log_manager.enable_unstructured()
+             resp = requests.get(artifact_url, verify=False, stream=True)
+             self.log_manager.disable_unstructured()
+             resp.raise_for_status()
+ 
+             # Extract archive into destination
+             with tarfile.open(fileobj=io.BytesIO(resp.content)) as tar:
+                 tar.extractall(target)
+ 
+         download(self.cov_analysis_url, self.cov_state_path)
+ 
+-        with open(self.cov_auth_path, 'w') as f:
++        with open(self.cov_auth_path, "w") as f:
+             f.write(self.cov_auth)
+ 
+         # Modify it's permission to 600
+         os.chmod(self.cov_auth_path, 0o600)
+ 
+-        with open(cov_setup_path, 'a') as f:
++        with open(cov_setup_path, "a") as f:
+             f.write(cov_conf)
+ 
+     def setup_coverity(self, force_download=True):
+         rc, config, _ = self._get_config_environment()
+         rc = rc or self.get_coverity_secrets()
+ 
+         if rc != 0:
+             return rc
+@@ -742,347 +928,476 @@ class StaticAnalysis(MachCommandBase):
+             shutil.rmtree(self.cov_state_path)
+ 
+         os.mkdir(self.cov_state_path)
+ 
+         # Download everything that we need for Coverity from out private instance
+         self.download_coverity()
+ 
+         self.cov_path = mozpath.join(self.cov_state_path, self.cov_package_name)
+-        self.cov_run_desktop = mozpath.join(self.cov_path, 'bin', 'cov-run-desktop')
+-        self.cov_configure = mozpath.join(self.cov_path, 'bin', 'cov-configure')
+-        self.cov_make_library = mozpath.join(self.cov_path, 'bin', 'cov-make-library')
+-        self.cov_build = mozpath.join(self.cov_path, 'bin', 'cov-build')
+-        self.cov_analyze = mozpath.join(self.cov_path, 'bin', 'cov-analyze')
+-        self.cov_commit_defects = mozpath.join(self.cov_path, 'bin', 'cov-commit-defects')
+-        self.cov_translate = mozpath.join(self.cov_path, 'bin', 'cov-translate')
+-        self.cov_configure = mozpath.join(self.cov_path, 'bin', 'cov-configure')
+-        self.cov_work_path = mozpath.join(self.cov_state_path, 'data-coverity')
+-        self.cov_idir_path = mozpath.join(self.cov_work_path, self.cov_package_ver, 'idir')
++        self.cov_run_desktop = mozpath.join(self.cov_path, "bin", "cov-run-desktop")
++        self.cov_configure = mozpath.join(self.cov_path, "bin", "cov-configure")
++        self.cov_make_library = mozpath.join(self.cov_path, "bin", "cov-make-library")
++        self.cov_build = mozpath.join(self.cov_path, "bin", "cov-build")
++        self.cov_analyze = mozpath.join(self.cov_path, "bin", "cov-analyze")
++        self.cov_commit_defects = mozpath.join(
++            self.cov_path, "bin", "cov-commit-defects"
++        )
++        self.cov_translate = mozpath.join(self.cov_path, "bin", "cov-translate")
++        self.cov_configure = mozpath.join(self.cov_path, "bin", "cov-configure")
++        self.cov_work_path = mozpath.join(self.cov_state_path, "data-coverity")
++        self.cov_idir_path = mozpath.join(
++            self.cov_work_path, self.cov_package_ver, "idir"
++        )
+         self.cov_lic_path = mozpath.join(
+-            self.cov_work_path, self.cov_package_ver, 'lic', self.cov_lic_name)
++            self.cov_work_path, self.cov_package_ver, "lic", self.cov_lic_name
++        )
+ 
+         if not os.path.exists(self.cov_path):
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: Missing Coverity in {}'.format(self.cov_path))
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Missing Coverity in {}".format(self.cov_path),
++            )
+             return 1
+ 
+         return 0
+ 
+     def get_files_with_commands(self, source):
+-        '''
++        """
+         Returns an array of dictionaries having file_path with build command
+-        '''
+-
+-        compile_db = json.load(open(self._compile_db, 'r'))
++        """
++
++        compile_db = json.load(open(self._compile_db, "r"))
+ 
+         commands_list = []
+ 
+         for f in source:
+             # It must be a C/C++ file
+             _, ext = os.path.splitext(f)
+ 
+             if ext.lower() not in self._format_include_extensions:
+-                self.log(logging.INFO, 'static-analysis', {}, 'Skipping {}'.format(f))
++                self.log(logging.INFO, "static-analysis", {}, "Skipping {}".format(f))
+                 continue
+             file_with_abspath = os.path.join(self.topsrcdir, f)
+             for f in compile_db:
+                 # Found for a file that we are looking
+-                if file_with_abspath == f['file']:
++                if file_with_abspath == f["file"]:
+                     commands_list.append(f)
+ 
+         return commands_list
+ 
+-    @StaticAnalysisSubCommand('static-analysis', 'check-java',
+-                              'Run infer on the java codebase.')
+-    @CommandArgument('source', nargs='*', default=['mobile'],
+-                     help='Source files to be analyzed. '
+-                          'Can be omitted, in which case the entire code base '
+-                          'is analyzed.  The source argument is ignored if '
+-                          'there is anything fed through stdin, in which case '
+-                          'the analysis is only performed on the files changed '
+-                          'in the patch streamed through stdin.  This is called '
+-                          'the diff mode.')
+-    @CommandArgument('--checks', '-c', default=[], metavar='checks', nargs='*',
+-                     help='Static analysis checks to enable.')
+-    @CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
+-                     help='Number of concurrent jobs to run.'
+-                     ' Default is the number of CPUs.')
+-    @CommandArgument('--task', '-t', type=str,
+-                     default='compileWithGeckoBinariesDebugSources',
+-                     help='Which gradle tasks to use to compile the java codebase.')
+-    @CommandArgument('--outgoing', default=False, action='store_true',
+-                     help='Run infer checks on outgoing files from repository')
+-    @CommandArgument('--output', default=None,
+-                     help='Write infer json output in a file')
+-    def check_java(self, source=['mobile'], jobs=2, strip=1, verbose=False, checks=[],
+-                   task='compileWithGeckoBinariesDebugSources',
+-                   skip_export=False, outgoing=False, output=None):
++    @StaticAnalysisSubCommand(
++        "static-analysis", "check-java", "Run infer on the java codebase."
++    )
++    @CommandArgument(
++        "source",
++        nargs="*",
++        default=["mobile"],
++        help="Source files to be analyzed. "
++        "Can be omitted, in which case the entire code base "
++        "is analyzed.  The source argument is ignored if "
++        "there is anything fed through stdin, in which case "
++        "the analysis is only performed on the files changed "
++        "in the patch streamed through stdin.  This is called "
++        "the diff mode.",
++    )
++    @CommandArgument(
++        "--checks",
++        "-c",
++        default=[],
++        metavar="checks",
++        nargs="*",
++        help="Static analysis checks to enable.",
++    )
++    @CommandArgument(
++        "--jobs",
++        "-j",
++        default="0",
++        metavar="jobs",
++        type=int,
++        help="Number of concurrent jobs to run." " Default is the number of CPUs.",
++    )
++    @CommandArgument(
++        "--task",
++        "-t",
++        type=str,
++        default="compileWithGeckoBinariesDebugSources",
++        help="Which gradle tasks to use to compile the java codebase.",
++    )
++    @CommandArgument(
++        "--outgoing",
++        default=False,
++        action="store_true",
++        help="Run infer checks on outgoing files from repository",
++    )
++    @CommandArgument("--output", default=None, help="Write infer json output in a file")
++    def check_java(
++        self,
++        source=["mobile"],
++        jobs=2,
++        strip=1,
++        verbose=False,
++        checks=[],
++        task="compileWithGeckoBinariesDebugSources",
++        skip_export=False,
++        outgoing=False,
++        output=None,
++    ):
+         self._set_log_level(verbose)
+         self.activate_virtualenv()
+         self.log_manager.enable_unstructured()
+ 
+-        if self.substs['MOZ_BUILD_APP'] != 'mobile/android':
+-            self.log(logging.WARNING, 'static-analysis', {},
+-                     'Cannot check java source code unless you are building for android!')
++        if self.substs["MOZ_BUILD_APP"] != "mobile/android":
++            self.log(
++                logging.WARNING,
++                "static-analysis",
++                {},
++                "Cannot check java source code unless you are building for android!",
++            )
+             return 1
+         rc = self._check_for_java()
+         if rc != 0:
+             return 1
+         if output is not None:
+             output = os.path.abspath(output)
+             if not os.path.isdir(os.path.dirname(output)):
+-                self.log(logging.WARNING, 'static-analysis', {},
+-                         'Missing report destination folder for {}'.format(output))
++                self.log(
++                    logging.WARNING,
++                    "static-analysis",
++                    {},
++                    "Missing report destination folder for {}".format(output),
++                )
+ 
+         # if source contains the whole mobile folder, then we just have to
+         # analyze everything
+-        check_all = any(i.rstrip(os.sep).split(os.sep)[-1] == 'mobile' for i in source)
++        check_all = any(i.rstrip(os.sep).split(os.sep)[-1] == "mobile" for i in source)
+         # gather all java sources from the source variable
+         java_sources = []
+         if outgoing:
+             repo = get_repository_object(self.topsrcdir)
+             java_sources = self._get_java_files(repo.get_outgoing_files())
+             if not java_sources:
+-                self.log(logging.WARNING, 'static-analysis', {},
+-                         'No outgoing Java files to check')
++                self.log(
++                    logging.WARNING,
++                    "static-analysis",
++                    {},
++                    "No outgoing Java files to check",
++                )
+                 return 0
+         elif not check_all:
+             java_sources = self._get_java_files(source)
+             if not java_sources:
+                 return 0
+         if not skip_export:
+             rc = self._build_export(jobs=jobs, verbose=verbose)
+             if rc != 0:
+                 return rc
+         rc = self._get_infer(verbose=verbose)
+         if rc != 0:
+-            self.log(logging.WARNING, 'static-analysis', {},
+-                     'This command is only available for linux64!')
++            self.log(
++                logging.WARNING,
++                "static-analysis",
++                {},
++                "This command is only available for linux64!",
++            )
+             return rc
+         # which checkers to use, and which folders to exclude
+         all_checkers, third_party_path, generated_path = self._get_infer_config()
+-        checkers, excludes = self._get_infer_args(checks or all_checkers, third_party_path,
+-                                                  generated_path)
+-        rc = rc or self._gradle(['clean'])  # clean so that we can recompile
++        checkers, excludes = self._get_infer_args(
++            checks or all_checkers, third_party_path, generated_path
++        )
++        rc = rc or self._gradle(["clean"])  # clean so that we can recompile
+         # infer capture command
+-        capture_cmd = [self._infer_path, 'capture'] + excludes + ['--']
++        capture_cmd = [self._infer_path, "capture"] + excludes + ["--"]
+         rc = rc or self._gradle([task], infer_args=capture_cmd, verbose=verbose)
+         tmp_file, args = self._get_infer_source_args(java_sources)
+         # infer analyze command
+-        analysis_cmd = [self._infer_path, 'analyze', '--keep-going'] +  \
+-            checkers + args
+-        rc = rc or self.run_process(args=analysis_cmd, cwd=self.topsrcdir, pass_thru=True)
++        analysis_cmd = [self._infer_path, "analyze", "--keep-going"] + checkers + args
++        rc = rc or self.run_process(
++            args=analysis_cmd, cwd=self.topsrcdir, pass_thru=True
++        )
+         if tmp_file:
+             tmp_file.close()
+ 
+         # Copy the infer report
+-        report_path = os.path.join(self.topsrcdir, 'infer-out', 'report.json')
++        report_path = os.path.join(self.topsrcdir, "infer-out", "report.json")
+         if output is not None and os.path.exists(report_path):
+             shutil.copy(report_path, output)
+-            self.log(logging.INFO, 'static-analysis', {},
+-                     'Report available in {}'.format(output))
++            self.log(
++                logging.INFO,
++                "static-analysis",
++                {},
++                "Report available in {}".format(output),
++            )
+ 
+         return rc
+ 
+     def _get_java_files(self, sources):
+         java_sources = []
+         for i in sources:
+             f = mozpath.join(self.topsrcdir, i)
+             if os.path.isdir(f):
+                 for root, dirs, files in os.walk(f):
+                     dirs.sort()
+                     for file in sorted(files):
+-                        if file.endswith('.java'):
++                        if file.endswith(".java"):
+                             java_sources.append(mozpath.join(root, file))
+-            elif f.endswith('.java'):
++            elif f.endswith(".java"):
+                 java_sources.append(f)
+         return java_sources
+ 
+     def _get_infer_source_args(self, sources):
+-        '''Return the arguments to only analyze <sources>'''
++        """Return the arguments to only analyze <sources>"""
+         if not sources:
+             return (None, [])
+         # create a temporary file in which we place all sources
+         # this is used by the analysis command to only analyze certain files
+         f = tempfile.NamedTemporaryFile(mode="wt")
+         for source in sources:
+-            f.write(source+'\n')
++            f.write(source + "\n")
+         f.flush()
+-        return (f, ['--changed-files-index', f.name])
++        return (f, ["--changed-files-index", f.name])
+ 
+     def _get_infer_config(self):
+-        '''Load the infer config file.'''
++        """Load the infer config file."""
+         checkers = []
+-        tp_path = ''
+-        with open(mozpath.join(self.topsrcdir, 'tools',
+-                               'infer', 'config.yaml')) as f:
++        tp_path = ""
++        with open(mozpath.join(self.topsrcdir, "tools", "infer", "config.yaml")) as f:
+             try:
+                 config = yaml.safe_load(f)
+-                for item in config['infer_checkers']:
+-                    if item['publish']:
+-                        checkers.append(item['name'])
+-                tp_path = mozpath.join(self.topsrcdir, config['third_party'])
+-                generated_path = mozpath.join(self.topsrcdir, config['generated'])
++                for item in config["infer_checkers"]:
++                    if item["publish"]:
++                        checkers.append(item["name"])
++                tp_path = mozpath.join(self.topsrcdir, config["third_party"])
++                generated_path = mozpath.join(self.topsrcdir, config["generated"])
+             except Exception:
+-                print('Looks like config.yaml is not valid, so we are unable '
+-                      'to determine default checkers, and which folder to '
+-                      'exclude, using defaults provided by infer')
++                print(
++                    "Looks like config.yaml is not valid, so we are unable "
++                    "to determine default checkers, and which folder to "
++                    "exclude, using defaults provided by infer"
++                )
+         return checkers, tp_path, generated_path
+ 
+     def _get_infer_args(self, checks, *input_paths):
+-        '''Return the arguments which include the checkers <checks>, and
+-        excludes all folder in <third_party_path>.'''
+-        checkers = ['-a', 'checkers']
++        """Return the arguments which include the checkers <checks>, and
++        excludes all folder in <third_party_path>."""
++        checkers = ["-a", "checkers"]
+         excludes = []
+         for checker in checks:
+-            checkers.append('--' + checker)
++            checkers.append("--" + checker)
+         for path in input_paths:
+             with open(path) as f:
+                 for line in f:
+-                    excludes.append('--skip-analysis-in-path')
+-                    excludes.append(line.strip('\n'))
++                    excludes.append("--skip-analysis-in-path")
++                    excludes.append(line.strip("\n"))
+         return checkers, excludes
+ 
+     def _get_clang_tidy_config(self):
+         try:
+-            file_handler = open(mozpath.join(self.topsrcdir, "tools", "clang-tidy", "config.yaml"))
++            file_handler = open(
++                mozpath.join(self.topsrcdir, "tools", "clang-tidy", "config.yaml")
++            )
+             config = yaml.safe_load(file_handler)
+         except Exception:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: Looks like config.yaml is not valid, we are going to use default'
+-                     ' values for the rest of the analysis for clang-tidy.')
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Looks like config.yaml is not valid, we are going to use default"
++                " values for the rest of the analysis for clang-tidy.",
++            )
+             return None
+         return config
+ 
+     def _get_cov_config(self):
+         try:
+-            file_handler = open(mozpath.join(self.topsrcdir, "tools", "coverity", "config.yaml"))
++            file_handler = open(
++                mozpath.join(self.topsrcdir, "tools", "coverity", "config.yaml")
++            )
+             config = yaml.safe_load(file_handler)
+         except Exception:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: Looks like config.yaml is not valid, we are going to use default'
+-                     ' values for the rest of the analysis for coverity.')
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Looks like config.yaml is not valid, we are going to use default"
++                " values for the rest of the analysis for coverity.",
++            )
+             return None
+         return config
+ 
+     def _is_version_eligible(self):
+         # make sure that we've cached self._clang_tidy_config
+         if self._clang_tidy_config is None:
+             self._clang_tidy_config = self._get_clang_tidy_config()
+ 
+         version = None
+-        if 'package_version' in self._clang_tidy_config:
+-            version = self._clang_tidy_config['package_version']
++        if "package_version" in self._clang_tidy_config:
++            version = self._clang_tidy_config["package_version"]
+         else:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     "ERROR: Unable to find 'package_version' in the config.yml")
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Unable to find 'package_version' in the config.yml",
++            )
+             return False
+ 
+         # Because the fact that we ship together clang-tidy and clang-format
+         # we are sure that these two will always share the same version.
+         # Thus in order to determine that the version is compatible we only
+         # need to check one of them, going with clang-format
+-        cmd = [self._clang_format_path, '--version']
++        cmd = [self._clang_format_path, "--version"]
+         try:
+-            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
+-            version_string = 'clang-format version ' + version
++            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(
++                "utf-8"
++            )
++            version_string = "clang-format version " + version
+             if version_string in output:
+                 return True
+         except subprocess.CalledProcessError as e:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     "ERROR: Error determining the version clang-tidy/format binary, "
+-                     "please see the attached exception: \n{}".format(e.output))
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Error determining the version clang-tidy/format binary, "
++                "please see the attached exception: \n{}".format(e.output),
++            )
+         return False
+ 
+     def _get_clang_tidy_command(self, checks, header_filter, sources, jobs, fix):
+ 
+-        if checks == '-*':
++        if checks == "-*":
+             checks = self._get_checks()
+ 
+-        common_args = ['-clang-tidy-binary', self._clang_tidy_path,
+-                       '-clang-apply-replacements-binary', self._clang_apply_replacements,
+-                       '-checks=%s' % checks,
+-                       '-extra-arg=-std=c++17', '-extra-arg=-DMOZ_CLANG_PLUGIN']
++        common_args = [
++            "-clang-tidy-binary",
++            self._clang_tidy_path,
++            "-clang-apply-replacements-binary",
++            self._clang_apply_replacements,
++            "-checks=%s" % checks,
++            "-extra-arg=-std=c++17",
++            "-extra-arg=-DMOZ_CLANG_PLUGIN",
++        ]
+ 
+         # Flag header-filter is passed in order to limit the diagnostic messages only
+         # to the specified header files. When no value is specified the default value
+         # is considered to be the source in order to limit the diagnostic message to
+         # the source files or folders.
+-        common_args += ['-header-filter=%s' % (header_filter
+-                                               if len(header_filter) else '|'.join(sources))]
++        common_args += [
++            "-header-filter=%s"
++            % (header_filter if len(header_filter) else "|".join(sources))
++        ]
+ 
+         # From our configuration file, config.yaml, we build the configuration list, for
+         # the checkers that are used. These configuration options are used to better fit
+         # the checkers to our code.
+         cfg = self._get_checks_config()
+         if cfg:
+-            common_args += ['-config=%s' % yaml.dump(cfg)]
++            common_args += ["-config=%s" % yaml.dump(cfg)]
+ 
+         if fix:
+-            common_args += ['-fix']
+-
+-        return [
+-            self.virtualenv_manager.python_path, self._run_clang_tidy_path, '-j',
+-            str(jobs), '-p', self._compilation_commands_path
+-        ] + common_args + sources
++            common_args += ["-fix"]
++
++        return (
++            [
++                self.virtualenv_manager.python_path,
++                self._run_clang_tidy_path,
++                "-j",
++                str(jobs),
++                "-p",
++                self._compilation_commands_path,
++            ]
++            + common_args
++            + sources
++        )
+ 
+     def _check_for_java(self):
+-        '''Check if javac can be found.'''
++        """Check if javac can be found."""
+         import distutils.spawn
+-        java = self.substs.get('JAVA')
+-        java = java or os.getenv('JAVA_HOME')
+-        java = java or distutils.spawn.find_executable('javac')
+-        error = 'javac was not found! Please install javac and either add it to your PATH, '
+-        error += 'set JAVA_HOME, or add the following to your mozconfig:\n'
+-        error += '  --with-java-bin-path=/path/to/java/bin/'
++
++        java = self.substs.get("JAVA")
++        java = java or os.getenv("JAVA_HOME")
++        java = java or distutils.spawn.find_executable("javac")
++        error = (
++            "javac was not found! Please install javac and either add it to your PATH, "
++        )
++        error += "set JAVA_HOME, or add the following to your mozconfig:\n"
++        error += "  --with-java-bin-path=/path/to/java/bin/"
+         if not java:
+-            self.log(logging.ERROR, 'ERROR: static-analysis', {}, error)
++            self.log(logging.ERROR, "ERROR: static-analysis", {}, error)
+             return 1
+         return 0
+ 
+-    def _gradle(self, args, infer_args=None, verbose=False, autotest=False,
+-                suppress_output=True):
++    def _gradle(
++        self, args, infer_args=None, verbose=False, autotest=False, suppress_output=True
++    ):
+         infer_args = infer_args or []
+         if autotest:
+-            cwd = mozpath.join(self.topsrcdir, 'tools', 'infer', 'test')
+-            gradle = mozpath.join(cwd, 'gradlew')
++            cwd = mozpath.join(self.topsrcdir, "tools", "infer", "test")
++            gradle = mozpath.join(cwd, "gradlew")
+         else:
+-            gradle = self.substs['GRADLE']
++            gradle = self.substs["GRADLE"]
+             cwd = self.topsrcdir
+         extra_env = {
+-            'GRADLE_OPTS': '-Dfile.encoding=utf-8',  # see mobile/android/mach_commands.py
+-            'JAVA_TOOL_OPTIONS': '-Dfile.encoding=utf-8',
++            "GRADLE_OPTS": "-Dfile.encoding=utf-8",  # see mobile/android/mach_commands.py
++            "JAVA_TOOL_OPTIONS": "-Dfile.encoding=utf-8",
+         }
+         if suppress_output:
+-            devnull = open(os.devnull, 'w')
++            devnull = open(os.devnull, "w")
+             return subprocess.call(
+                 infer_args + [gradle] + args,
+                 env=dict(os.environ, **extra_env),
+-                cwd=cwd, stdout=devnull, stderr=subprocess.STDOUT, close_fds=True)
++                cwd=cwd,
++                stdout=devnull,
++                stderr=subprocess.STDOUT,
++                close_fds=True,
++            )
+ 
+         return self.run_process(
+             infer_args + [gradle] + args,
+             append_env=extra_env,
+             pass_thru=True,  # Allow user to run gradle interactively.
+             ensure_exit_code=False,  # Don't throw on non-zero exit code.
+-            cwd=cwd)
+-
+-    @StaticAnalysisSubCommand('static-analysis', 'autotest',
+-                              'Run the auto-test suite in order to determine that'
+-                              ' the analysis did not regress.')
+-    @CommandArgument('--dump-results', '-d', default=False, action='store_true',
+-                     help='Generate the baseline for the regression test. Based on'
+-                     ' this baseline we will test future results.')
+-    @CommandArgument('--intree-tool', '-i', default=False, action='store_true',
+-                     help='Use a pre-aquired in-tree clang-tidy package from the automation env.'
+-                     ' This option is only valid on automation environments.')
+-    @CommandArgument('checker_names', nargs='*', default=[],
+-                     help='Checkers that are going to be auto-tested.')
+-    def autotest(self, verbose=False, dump_results=False, intree_tool=False, checker_names=[]):
++            cwd=cwd,
++        )
++
++    @StaticAnalysisSubCommand(
++        "static-analysis",
++        "autotest",
++        "Run the auto-test suite in order to determine that"
++        " the analysis did not regress.",
++    )
++    @CommandArgument(
++        "--dump-results",
++        "-d",
++        default=False,
++        action="store_true",
++        help="Generate the baseline for the regression test. Based on"
++        " this baseline we will test future results.",
++    )
++    @CommandArgument(
++        "--intree-tool",
++        "-i",
++        default=False,
++        action="store_true",
++        help="Use a pre-aquired in-tree clang-tidy package from the automation env."
++        " This option is only valid on automation environments.",
++    )
++    @CommandArgument(
++        "checker_names",
++        nargs="*",
++        default=[],
++        help="Checkers that are going to be auto-tested.",
++    )
++    def autotest(
++        self, verbose=False, dump_results=False, intree_tool=False, checker_names=[]
++    ):
+         # If 'dump_results' is True than we just want to generate the issues files for each
+         # checker in particulat and thus 'force_download' becomes 'False' since we want to
+         # do this on a local trusted clang-tidy package.
+         self._set_log_level(verbose)
+         self.activate_virtualenv()
+         self._dump_results = dump_results
+ 
+         force_download = not self._dump_results
+@@ -1097,605 +1412,866 @@ class StaticAnalysis(MachCommandBase):
+         self.TOOLS_CHECKER_DIFF_FAILED = 6
+         self.TOOLS_CHECKER_NOT_FOUND = 7
+         self.TOOLS_CHECKER_FAILED_FILE = 8
+         self.TOOLS_CHECKER_LIST_EMPTY = 9
+         self.TOOLS_GRADLE_FAILED = 10
+ 
+         # Configure the tree or download clang-tidy package, depending on the option that we choose
+         if intree_tool:
+-            if 'MOZ_AUTOMATION' not in os.environ:
+-                self.log(logging.INFO, 'static-analysis', {},
+-                         'The `autotest` with `--intree-tool` can only be ran in automation.')
++            if "MOZ_AUTOMATION" not in os.environ:
++                self.log(
++                    logging.INFO,
++                    "static-analysis",
++                    {},
++                    "The `autotest` with `--intree-tool` can only be ran in automation.",
++                )
+                 return 1
+-            if 'MOZ_FETCHES_DIR' not in os.environ:
+-                self.log(logging.INFO, 'static-analysis', {},
+-                         '`MOZ_FETCHES_DIR` is missing from the environment variables.')
++            if "MOZ_FETCHES_DIR" not in os.environ:
++                self.log(
++                    logging.INFO,
++                    "static-analysis",
++                    {},
++                    "`MOZ_FETCHES_DIR` is missing from the environment variables.",
++                )
+                 return 1
+ 
+             _, config, _ = self._get_config_environment()
+-            clang_tools_path = os.environ['MOZ_FETCHES_DIR']
++            clang_tools_path = os.environ["MOZ_FETCHES_DIR"]
+             self._clang_tidy_path = mozpath.join(
+-                clang_tools_path, "clang-tidy", "bin",
+-                "clang-tidy" + config.substs.get('BIN_SUFFIX', ''))
++                clang_tools_path,
++                "clang-tidy",
++                "bin",
++                "clang-tidy" + config.substs.get("BIN_SUFFIX", ""),
++            )
+             self._clang_format_path = mozpath.join(
+-                clang_tools_path, "clang-tidy", "bin",
+-                "clang-format" + config.substs.get('BIN_SUFFIX', ''))
++                clang_tools_path,
++                "clang-tidy",
++                "bin",
++                "clang-format" + config.substs.get("BIN_SUFFIX", ""),
++            )
+             self._clang_apply_replacements = mozpath.join(
+-                clang_tools_path, "clang-tidy", "bin",
+-                "clang-apply-replacements" + config.substs.get('BIN_SUFFIX', ''))
+-            self._run_clang_tidy_path = mozpath.join(clang_tools_path, "clang-tidy", "share",
+-                                                     "clang", "run-clang-tidy.py")
+-            self._clang_format_diff = mozpath.join(clang_tools_path, "clang-tidy", "share",
+-                                                   "clang", "clang-format-diff.py")
++                clang_tools_path,
++                "clang-tidy",
++                "bin",
++                "clang-apply-replacements" + config.substs.get("BIN_SUFFIX", ""),
++            )
++            self._run_clang_tidy_path = mozpath.join(
++                clang_tools_path, "clang-tidy", "share", "clang", "run-clang-tidy.py"
++            )
++            self._clang_format_diff = mozpath.join(
++                clang_tools_path, "clang-tidy", "share", "clang", "clang-format-diff.py"
++            )
+ 
+             # Ensure that clang-tidy is present
+             rc = not os.path.exists(self._clang_tidy_path)
+         else:
+             rc = self._get_clang_tools(force=force_download, verbose=verbose)
+ 
+         if rc != 0:
+-            self.log(logging.ERROR, 'ERROR: static-analysis', {},
+-                     'ERROR: clang-tidy unable to locate package.')
++            self.log(
++                logging.ERROR,
++                "ERROR: static-analysis",
++                {},
++                "ERROR: clang-tidy unable to locate package.",
++            )
+             return self.TOOLS_FAILED_DOWNLOAD
+ 
+         self._clang_tidy_base_path = mozpath.join(self.topsrcdir, "tools", "clang-tidy")
+ 
+         # For each checker run it
+         self._clang_tidy_config = self._get_clang_tidy_config()
+         platform, _ = self.platform
+ 
+-        if platform not in self._clang_tidy_config['platforms']:
+-            self.log(
+-                logging.ERROR, 'static-analysis', {},
++        if platform not in self._clang_tidy_config["platforms"]:
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
+                 "ERROR: RUNNING: clang-tidy autotest for platform {} not supported.".format(
+-                    platform)
+-                )
++                    platform
++                ),
++            )
+             return self.TOOLS_UNSUPORTED_PLATFORM
+ 
+         max_workers = multiprocessing.cpu_count()
+ 
+-        self.log(logging.INFO, 'static-analysis', {},
+-                 "RUNNING: clang-tidy autotest for platform {0} with {1} workers.".format(
+-                     platform, max_workers))
++        self.log(
++            logging.INFO,
++            "static-analysis",
++            {},
++            "RUNNING: clang-tidy autotest for platform {0} with {1} workers.".format(
++                platform, max_workers
++            ),
++        )
+ 
+         # List all available checkers
+-        cmd = [self._clang_tidy_path, '-list-checks', '-checks=*']
+-        clang_output = subprocess.check_output(
+-            cmd, stderr=subprocess.STDOUT).decode('utf-8')
+-        available_checks = clang_output.split('\n')[1:]
++        cmd = [self._clang_tidy_path, "-list-checks", "-checks=*"]
++        clang_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(
++            "utf-8"
++        )
++        available_checks = clang_output.split("\n")[1:]
+         self._clang_tidy_checks = [c.strip() for c in available_checks if c]
+ 
+         # Build the dummy compile_commands.json
+-        self._compilation_commands_path = self._create_temp_compilation_db(self._clang_tidy_config)
++        self._compilation_commands_path = self._create_temp_compilation_db(
++            self._clang_tidy_config
++        )
+         checkers_test_batch = []
+         checkers_results = []
+         with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+             futures = []
+-            for item in self._clang_tidy_config['clang_checkers']:
++            for item in self._clang_tidy_config["clang_checkers"]:
+                 # Skip if any of the following statements is true:
+                 # 1. Checker attribute 'publish' is False.
+-                not_published = not bool(item.get('publish', True))
++                not_published = not bool(item.get("publish", True))
+                 # 2. Checker has restricted-platforms and current platform is not of them.
+-                ignored_platform = ('restricted-platforms' in item and
+-                                    platform not in item['restricted-platforms'])
++                ignored_platform = (
++                    "restricted-platforms" in item
++                    and platform not in item["restricted-platforms"]
++                )
+                 # 3. Checker name is mozilla-* or -*.
+-                ignored_checker = item['name'] in ['mozilla-*', '-*']
++                ignored_checker = item["name"] in ["mozilla-*", "-*"]
+                 # 4. List checker_names is passed and the current checker is not part of the
+                 #    list or 'publish' is False
+                 checker_not_in_list = checker_names and (
+-                    item['name'] not in checker_names or not_published)
+-                if not_published or \
+-                   ignored_platform or \
+-                   ignored_checker or \
+-                   checker_not_in_list:
++                    item["name"] not in checker_names or not_published
++                )
++                if (
++                    not_published
++                    or ignored_platform
++                    or ignored_checker
++                    or checker_not_in_list
++                ):
+                     continue
+-                checkers_test_batch.append(item['name'])
+-                futures.append(executor.submit(self._verify_checker, item, checkers_results))
++                checkers_test_batch.append(item["name"])
++                futures.append(
++                    executor.submit(self._verify_checker, item, checkers_results)
++                )
+ 
+             error_code = self.TOOLS_SUCCESS
+             for future in concurrent.futures.as_completed(futures):
+                 # Wait for every task to finish
+                 ret_val = future.result()
+                 if ret_val != self.TOOLS_SUCCESS:
+                     # We are interested only in one error and we don't break
+                     # the execution of for loop since we want to make sure that all
+                     # tasks finished.
+                     error_code = ret_val
+ 
+             if error_code != self.TOOLS_SUCCESS:
+ 
+-                self.log(logging.INFO, 'static-analysis', {},
+-                         "FAIL: the following clang-tidy check(s) failed:")
++                self.log(
++                    logging.INFO,
++                    "static-analysis",
++                    {},
++                    "FAIL: the following clang-tidy check(s) failed:",
++                )
+                 for failure in checkers_results:
+-                    checker_error = failure['checker-error']
+-                    checker_name = failure['checker-name']
+-                    info1 = failure['info1']
+-                    info2 = failure['info2']
+-                    info3 = failure['info3']
+-
+-                    message_to_log = ''
++                    checker_error = failure["checker-error"]
++                    checker_name = failure["checker-name"]
++                    info1 = failure["info1"]
++                    info2 = failure["info2"]
++                    info3 = failure["info3"]
++
++                    message_to_log = ""
+                     if checker_error == self.TOOLS_CHECKER_NOT_FOUND:
+-                        message_to_log = \
+-                            "\tChecker {} not present in this clang-tidy version.".format(
+-                                checker_name)
++                        message_to_log = (
++                            "\tChecker "
++                            "{} not present in this clang-tidy version.".format(
++                                checker_name
++                            )
++                        )
+                     elif checker_error == self.TOOLS_CHECKER_NO_TEST_FILE:
+-                        message_to_log = \
+-                            "\tChecker {0} does not have a test file - {0}.cpp".format(
+-                                checker_name)
++                        message_to_log = (
++                            "\tChecker "
++                            "{0} does not have a test file - {0}.cpp".format(
++                                checker_name
++                            )
++                        )
+                     elif checker_error == self.TOOLS_CHECKER_RETURNED_NO_ISSUES:
+                         message_to_log = (
+                             "\tChecker {0} did not find any issues in its test file, "
+                             "clang-tidy output for the run is:\n{1}"
+-                            ).format(checker_name, info1)
++                        ).format(checker_name, info1)
+                     elif checker_error == self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND:
+-                        message_to_log = \
+-                            "\tChecker {0} does not have a result file - {0}.json".format(
+-                                checker_name)
++                        message_to_log = (
++                            "\tChecker {0} does not have a result file - {0}.json"
++                        ).format(checker_name)
+                     elif checker_error == self.TOOLS_CHECKER_DIFF_FAILED:
+                         message_to_log = (
+                             "\tChecker {0}\nExpected: {1}\n"
+                             "Got: {2}\n"
+                             "clang-tidy output for the run is:\n"
+                             "{3}"
+-                            ).format(checker_name, info1, info2, info3)
+-
+-                    print('\n'+message_to_log)
++                        ).format(checker_name, info1, info2, info3)
++
++                    print("\n" + message_to_log)
+ 
+                 # Also delete the tmp folder
+                 shutil.rmtree(self._compilation_commands_path)
+                 return error_code
+ 
+             # Run the analysis on all checkers at the same time only if we don't dump results.
+             if not self._dump_results:
+                 ret_val = self._run_analysis_batch(checkers_test_batch)
+                 if ret_val != self.TOOLS_SUCCESS:
+                     shutil.rmtree(self._compilation_commands_path)
+                     return ret_val
+ 
+-        self.log(logging.INFO, 'static-analysis', {}, "SUCCESS: clang-tidy all tests passed.")
++        self.log(
++            logging.INFO, "static-analysis", {}, "SUCCESS: clang-tidy all tests passed."
++        )
+         # Also delete the tmp folder
+         shutil.rmtree(self._compilation_commands_path)
+         return self._autotest_infer(intree_tool, force_download, verbose)
+ 
+-    def _run_analysis(self, checks, header_filter, sources, jobs=1, fix=False, print_out=False):
++    def _run_analysis(
++        self, checks, header_filter, sources, jobs=1, fix=False, print_out=False
++    ):
+         cmd = self._get_clang_tidy_command(
+-            checks=checks, header_filter=header_filter,
++            checks=checks,
++            header_filter=header_filter,
+             sources=sources,
+-            jobs=jobs, fix=fix)
++            jobs=jobs,
++            fix=fix,
++        )
+ 
+         try:
+-            clang_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
++            clang_output = subprocess.check_output(
++                cmd, stderr=subprocess.STDOUT
++            ).decode("utf-8")
+         except subprocess.CalledProcessError as e:
+             print(e.output)
+             return None
+         return self._parse_issues(clang_output), clang_output
+ 
+     def _run_analysis_batch(self, items):
+-        self.log(logging.INFO, 'static-analysis', {},
+-                 "RUNNING: clang-tidy checker batch analysis.")
++        self.log(
++            logging.INFO,
++            "static-analysis",
++            {},
++            "RUNNING: clang-tidy checker batch analysis.",
++        )
+         if not len(items):
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     "ERROR: clang-tidy checker list is empty!")
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: clang-tidy checker list is empty!",
++            )
+             return self.TOOLS_CHECKER_LIST_EMPTY
+ 
+         issues, clang_output = self._run_analysis(
+-            checks='-*,' + ",".join(items),
+-            header_filter='',
+-            sources=[mozpath.join(self._clang_tidy_base_path, "test", checker) + '.cpp'
+-                     for checker in items],
+-            print_out=True)
++            checks="-*," + ",".join(items),
++            header_filter="",
++            sources=[
++                mozpath.join(self._clang_tidy_base_path, "test", checker) + ".cpp"
++                for checker in items
++            ],
++            print_out=True,
++        )
+ 
+         if issues is None:
+             return self.TOOLS_CHECKER_FAILED_FILE
+ 
+         failed_checks = []
+         failed_checks_baseline = []
+         for checker in items:
+-            test_file_path_json = mozpath.join(
+-                self._clang_tidy_base_path, "test", checker) + '.json'
++            test_file_path_json = (
++                mozpath.join(self._clang_tidy_base_path, "test", checker) + ".json"
++            )
+             # Read the pre-determined issues
+             baseline_issues = self._get_autotest_stored_issues(test_file_path_json)
+ 
+             # We also stored the 'reliability' index so strip that from the baseline_issues
+-            baseline_issues[:] = [item for item in baseline_issues if 'reliability' not in item]
++            baseline_issues[:] = [
++                item for item in baseline_issues if "reliability" not in item
++            ]
+ 
+             found = all([element_base in issues for element_base in baseline_issues])
+ 
+             if not found:
+                 failed_checks.append(checker)
+                 failed_checks_baseline.append(baseline_issues)
+ 
+         if len(failed_checks) > 0:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: The following check(s) failed for bulk analysis: ' +
+-                     ' '.join(failed_checks))
+-
+-            for failed_check, baseline_issue in zip(failed_checks, failed_checks_baseline):
+-                print('\tChecker {0} expect following results: \n\t\t{1}'.format(
+-                    failed_check, baseline_issue))
+-
+-            print('This is the output generated by clang-tidy for the bulk build:\n{}'.format(
+-                clang_output))
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: The following check(s) failed for bulk analysis: "
++                + " ".join(failed_checks),
++            )
++
++            for failed_check, baseline_issue in zip(
++                failed_checks, failed_checks_baseline
++            ):
++                print(
++                    "\tChecker {0} expect following results: \n\t\t{1}".format(
++                        failed_check, baseline_issue
++                    )
++                )
++
++            print(
++                "This is the output generated by clang-tidy for the bulk build:\n{}".format(
++                    clang_output
++                )
++            )
+             return self.TOOLS_CHECKER_DIFF_FAILED
+ 
+         return self.TOOLS_SUCCESS
+ 
+     def _create_temp_compilation_db(self, config):
+-        directory = tempfile.mkdtemp(prefix='cc')
+-        with open(mozpath.join(directory, "compile_commands.json"), "w") as file_handler:
++        directory = tempfile.mkdtemp(prefix="cc")
++        with open(
++            mozpath.join(directory, "compile_commands.json"), "w"
++        ) as file_handler:
+             compile_commands = []
+-            director = mozpath.join(self.topsrcdir, 'tools', 'clang-tidy', 'test')
+-            for item in config['clang_checkers']:
+-                if item['name'] in ['-*', 'mozilla-*']:
++            director = mozpath.join(self.topsrcdir, "tools", "clang-tidy", "test")
++            for item in config["clang_checkers"]:
++                if item["name"] in ["-*", "mozilla-*"]:
+                     continue
+-                file = item['name'] + '.cpp'
++                file = item["name"] + ".cpp"
+                 element = {}
+                 element["directory"] = director
+-                element["command"] = 'cpp ' + file
++                element["command"] = "cpp " + file
+                 element["file"] = mozpath.join(director, file)
+                 compile_commands.append(element)
+ 
+             json.dump(compile_commands, file_handler)
+             file_handler.flush()
+ 
+             return directory
+ 
+     def _autotest_infer(self, intree_tool, force_download, verbose):
+         # infer is not available on other platforms, but autotest should work even without
+         # it being installed
+-        if self.platform[0] == 'linux64':
++        if self.platform[0] == "linux64":
+             rc = self._check_for_java()
+             if rc != 0:
+                 return 1
+-            rc = self._get_infer(force=force_download, verbose=verbose, intree_tool=intree_tool)
++            rc = self._get_infer(
++                force=force_download, verbose=verbose, intree_tool=intree_tool
++            )
+             if rc != 0:
+-                self.log(logging.ERROR, 'ERROR: static-analysis', {},
+-                         'ERROR: infer unable to locate package.')
++                self.log(
++                    logging.ERROR,
++                    "ERROR: static-analysis",
++                    {},
++                    "ERROR: infer unable to locate package.",
++                )
+                 return self.TOOLS_FAILED_DOWNLOAD
+-            self.__infer_tool = mozpath.join(self.topsrcdir, 'tools', 'infer')
+-            self.__infer_test_folder = mozpath.join(self.__infer_tool, 'test')
++            self.__infer_tool = mozpath.join(self.topsrcdir, "tools", "infer")
++            self.__infer_test_folder = mozpath.join(self.__infer_tool, "test")
+ 
+             max_workers = multiprocessing.cpu_count()
+-            self.log(logging.INFO, 'static-analysis', {},
+-                     "RUNNING: infer autotest for platform {0} with {1} workers.".format(
+-                         self.platform[0], max_workers))
++            self.log(
++                logging.INFO,
++                "static-analysis",
++                {},
++                "RUNNING: infer autotest for platform {0} with {1} workers.".format(
++                    self.platform[0], max_workers
++                ),
++            )
+             # clean previous autotest if it exists
+-            rc = self._gradle(['autotest:clean'], autotest=True)
++            rc = self._gradle(["autotest:clean"], autotest=True)
+             if rc != 0:
+                 return rc
+             import yaml
+-            with open(mozpath.join(self.__infer_tool, 'config.yaml')) as f:
++
++            with open(mozpath.join(self.__infer_tool, "config.yaml")) as f:
+                 config = yaml.safe_load(f)
+-            with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
++            with concurrent.futures.ThreadPoolExecutor(
++                max_workers=max_workers
++            ) as executor:
+                 futures = []
+-                for item in config['infer_checkers']:
+-                    if item['publish']:
+-                        futures.append(executor.submit(self._verify_infer_checker, item))
++                for item in config["infer_checkers"]:
++                    if item["publish"]:
++                        futures.append(
++                            executor.submit(self._verify_infer_checker, item)
++                        )
+                 # this is always included in check-java, but not in config.yaml
+-                futures.append(executor.submit(self._verify_infer_checker,
+-                                               {'name': 'checkers'}))
++                futures.append(
++                    executor.submit(self._verify_infer_checker, {"name": "checkers"})
++                )
+                 for future in concurrent.futures.as_completed(futures):
+                     ret_val = future.result()
+                     if ret_val != self.TOOLS_SUCCESS:
+                         return ret_val
+-            self.log(logging.INFO, 'static-analysis', {}, "SUCCESS: infer all tests passed.")
++            self.log(
++                logging.INFO, "static-analysis", {}, "SUCCESS: infer all tests passed."
++            )
+         else:
+-            self.log(logging.WARNING, 'static-analysis', {},
+-                     "Skipping infer autotest, because it is only available on linux64!")
++            self.log(
++                logging.WARNING,
++                "static-analysis",
++                {},
++                "Skipping infer autotest, because it is only available on linux64!",
++            )
+         return self.TOOLS_SUCCESS
+ 
+     def _verify_infer_checker(self, item):
+-        '''Given a checker, this method verifies the following:
++        """Given a checker, this method verifies the following:
+           1. if there is a `checker`.json and `checker`.java file in
+              `tools/infer/test/autotest/src`
+           2. if running infer on `checker`.java yields the same result as `checker`.json
+         An `item` is simply a dictionary, which needs to have a `name` field set, which is the
+         name of the checker.
+-        '''
++        """
++
+         def to_camelcase(str):
+-            return ''.join([s.capitalize() for s in str.split('-')])
+-        check = item['name']
+-        test_file_path = mozpath.join(self.__infer_tool, 'test', 'autotest', 'src',
+-                                      'main', 'java', to_camelcase(check))
+-        test_file_path_java = test_file_path + '.java'
+-        test_file_path_json = test_file_path + '.json'
+-        self.log(logging.INFO, 'static-analysis', {}, "RUNNING: infer check {}.".format(check))
++            return "".join([s.capitalize() for s in str.split("-")])
++
++        check = item["name"]
++        test_file_path = mozpath.join(
++            self.__infer_tool,
++            "test",
++            "autotest",
++            "src",
++            "main",
++            "java",
++            to_camelcase(check),
++        )
++        test_file_path_java = test_file_path + ".java"
++        test_file_path_json = test_file_path + ".json"
++        self.log(
++            logging.INFO,
++            "static-analysis",
++            {},
++            "RUNNING: infer check {}.".format(check),
++        )
+         # Verify if the test file exists for this checker
+         if not os.path.exists(test_file_path_java):
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     "ERROR: infer check {} doesn't have a test file.".format(check))
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: infer check {} doesn't have a test file.".format(check),
++            )
+             return self.TOOLS_CHECKER_NO_TEST_FILE
+         # run infer on a particular test file
+-        out_folder = mozpath.join(self.__infer_test_folder, 'test-infer-{}'.format(check))
+-        if check == 'checkers':
+-            check_arg = ['-a', 'checkers']
++        out_folder = mozpath.join(
++            self.__infer_test_folder, "test-infer-{}".format(check)
++        )
++        if check == "checkers":
++            check_arg = ["-a", "checkers"]
+         else:
+-            check_arg = ['--{}-only'.format(check)]
+-        infer_args = [self._infer_path, 'run'] + check_arg + ['-o', out_folder, '--']
+-        gradle_args = ['autotest:compileInferTest{}'.format(to_camelcase(check))]
++            check_arg = ["--{}-only".format(check)]
++        infer_args = [self._infer_path, "run"] + check_arg + ["-o", out_folder, "--"]
++        gradle_args = ["autotest:compileInferTest{}".format(to_camelcase(check))]
+         rc = self._gradle(gradle_args, infer_args=infer_args, autotest=True)
+         if rc != 0:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     "ERROR: infer failed to execute gradle {}.".format(gradle_args))
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: infer failed to execute gradle {}.".format(gradle_args),
++            )
+             return self.TOOLS_GRADLE_FAILED
+-        issues = json.load(open(mozpath.join(out_folder, 'report.json')))
++        issues = json.load(open(mozpath.join(out_folder, "report.json")))
+         # remove folder that infer creates because the issues are loaded into memory
+         shutil.rmtree(out_folder)
+         # Verify to see if we got any issues, if not raise exception
+         if not issues:
+             self.log(
+-                logging.ERROR, 'static-analysis', {},
+-                "ERROR: infer check {0} did not find any issues in its associated test suite."
+-                .format(check)
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: infer check "
++                "{0} did not find any issues in its associated test suite.".format(
++                    check
++                ),
+             )
+             return self.TOOLS_CHECKER_RETURNED_NO_ISSUES
+         if self._dump_results:
+             self._build_autotest_result(test_file_path_json, json.dumps(issues))
+         else:
+             if not os.path.exists(test_file_path_json):
+                 # Result file for test not found maybe regenerate it?
+                 self.log(
+-                    logging.ERROR, 'static-analysis', {},
+-                    "ERROR: infer result file not found for check {0}".format(check)
++                    logging.ERROR,
++                    "static-analysis",
++                    {},
++                    "ERROR: infer result file not found for check {0}".format(check),
+                 )
+                 return self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND
+             # Read the pre-determined issues
+             baseline_issues = self._get_autotest_stored_issues(test_file_path_json)
+ 
+             def ordered(obj):
+                 if isinstance(obj, dict):
+                     return sorted((k, ordered(v)) for k, v in obj.items())
+                 if isinstance(obj, list):
+                     return sorted(ordered(x) for x in obj)
+                 return obj
++
+             # Compare the two lists
+             if ordered(issues) != ordered(baseline_issues):
+                 error_str = "ERROR: in check {} Expected: ".format(check)
+-                error_str += '\n' + json.dumps(baseline_issues, indent=2)
+-                error_str += '\n Got:\n' + json.dumps(issues, indent=2)
+-                self.log(logging.ERROR, 'static-analysis', {},
+-                         'ERROR: infer autotest for check {} failed, check stdout for more details'
+-                         .format(check))
++                error_str += "\n" + json.dumps(baseline_issues, indent=2)
++                error_str += "\n Got:\n" + json.dumps(issues, indent=2)
++                self.log(
++                    logging.ERROR,
++                    "static-analysis",
++                    {},
++                    "ERROR: infer autotest for check "
++                    "{} failed, check stdout for more details".format(check),
++                )
+                 print(error_str)
+                 return self.TOOLS_CHECKER_DIFF_FAILED
+         return self.TOOLS_SUCCESS
+ 
+-    @StaticAnalysisSubCommand('static-analysis', 'install',
+-                              'Install the static analysis helper tool')
+-    @CommandArgument('source', nargs='?', type=str,
+-                     help='Where to fetch a local archive containing the static-analysis and '
+-                     'format helper tool.'
+-                          'It will be installed in ~/.mozbuild/clang-tools and ~/.mozbuild/infer.'
+-                          'Can be omitted, in which case the latest clang-tools and infer '
+-                          'helper for the platform would be automatically detected and installed.')
+-    @CommandArgument('--skip-cache', action='store_true',
+-                     help='Skip all local caches to force re-fetching the helper tool.',
+-                     default=False)
+-    @CommandArgument('--force', action='store_true',
+-                     help='Force re-install even though the tool exists in mozbuild.',
+-                     default=False)
+-    @CommandArgument('--minimal-install', action='store_true',
+-                     help='Download only clang based tool.',
+-                     default=False)
+-    def install(self, source=None, skip_cache=False, force=False, minimal_install=False,
+-                verbose=False):
++    @StaticAnalysisSubCommand(
++        "static-analysis", "install", "Install the static analysis helper tool"
++    )
++    @CommandArgument(
++        "source",
++        nargs="?",
++        type=str,
++        help="Where to fetch a local archive containing the static-analysis and "
++        "format helper tool."
++        "It will be installed in ~/.mozbuild/clang-tools and ~/.mozbuild/infer."
++        "Can be omitted, in which case the latest clang-tools and infer "
++        "helper for the platform would be automatically detected and installed.",
++    )
++    @CommandArgument(
++        "--skip-cache",
++        action="store_true",
++        help="Skip all local caches to force re-fetching the helper tool.",
++        default=False,
++    )
++    @CommandArgument(
++        "--force",
++        action="store_true",
++        help="Force re-install even though the tool exists in mozbuild.",
++        default=False,
++    )
++    @CommandArgument(
++        "--minimal-install",
++        action="store_true",
++        help="Download only clang based tool.",
++        default=False,
++    )
++    def install(
++        self,
++        source=None,
++        skip_cache=False,
++        force=False,
++        minimal_install=False,
++        verbose=False,
++    ):
+         self._set_log_level(verbose)
+-        rc = self._get_clang_tools(force=force, skip_cache=skip_cache,
+-                                   source=source, verbose=verbose)
++        rc = self._get_clang_tools(
++            force=force, skip_cache=skip_cache, source=source, verbose=verbose
++        )
+         if rc == 0 and not minimal_install:
+             # XXX ignore the return code because if it fails or not, infer is
+             # not mandatory, but clang-tidy is
+             self._get_infer(force=force, skip_cache=skip_cache, verbose=verbose)
+         return rc
+ 
+-    @StaticAnalysisSubCommand('static-analysis', 'clear-cache',
+-                              'Delete local helpers and reset static analysis helper tool cache')
++    @StaticAnalysisSubCommand(
++        "static-analysis",
++        "clear-cache",
++        "Delete local helpers and reset static analysis helper tool cache",
++    )
+     def clear_cache(self, verbose=False):
+         self._set_log_level(verbose)
+         rc = self._get_clang_tools(
+-            force=True, download_if_needed=True, skip_cache=True, verbose=verbose)
++            force=True, download_if_needed=True, skip_cache=True, verbose=verbose
++        )
+ 
+         if rc != 0:
+             return rc
+ 
+         job, _ = self.platform
+-        if job == 'linux64':
++        if job == "linux64":
+             rc = self._get_infer(
+-                force=True, download_if_needed=True, skip_cache=True, verbose=verbose)
++                force=True, download_if_needed=True, skip_cache=True, verbose=verbose
++            )
+             if rc != 0:
+                 return rc
+ 
+         return self._artifact_manager.artifact_clear_cache()
+ 
+-    @StaticAnalysisSubCommand('static-analysis', 'print-checks',
+-                              'Print a list of the static analysis checks performed by default')
++    @StaticAnalysisSubCommand(
++        "static-analysis",
++        "print-checks",
++        "Print a list of the static analysis checks performed by default",
++    )
+     def print_checks(self, verbose=False):
+         self._set_log_level(verbose)
+         rc = self._get_clang_tools(verbose=verbose)
+ 
+         if rc != 0:
+             return rc
+ 
+         if self._clang_tidy_config is None:
+             self._clang_tidy_config = self._get_clang_tidy_config()
+ 
+-        args = [self._clang_tidy_path, '-list-checks', '-checks=%s' % self._get_checks()]
++        args = [
++            self._clang_tidy_path,
++            "-list-checks",
++            "-checks=%s" % self._get_checks(),
++        ]
+ 
+         rc = self.run_process(args=args, pass_thru=True)
+         if rc != 0:
+             return rc
+ 
+         job, _ = self.platform
+-        if job != 'linux64':
++        if job != "linux64":
+             return 0
+ 
+         rc = self._get_infer(verbose=verbose)
+         if rc != 0:
+             return rc
+ 
+         checkers, _ = self._get_infer_config()
+-        print('Infer checks:')
++        print("Infer checks:")
+         for checker in checkers:
+-            print(' '*4 + checker)
++            print(" " * 4 + checker)
+         return 0
+ 
+-    @Command('prettier-format',  category='misc', description='Run prettier on current changes')
+-    @CommandArgument('--path', '-p', nargs=1, required=True,
+-                     help='Specify the path to reformat to stdout.')
+-    @CommandArgument('--assume-filename', '-a', nargs=1, required=True,
+-                     help='This option is usually used in the context of hg-formatsource.'
+-                          'When reading from stdin, Prettier assumes this '
+-                          'filename to decide which style and parser to use.')
++    @Command(
++        "prettier-format",
++        category="misc",
++        description="Run prettier on current changes",
++    )
++    @CommandArgument(
++        "--path",
++        "-p",
++        nargs=1,
++        required=True,
++        help="Specify the path to reformat to stdout.",
++    )
++    @CommandArgument(
++        "--assume-filename",
++        "-a",
++        nargs=1,
++        required=True,
++        help="This option is usually used in the context of hg-formatsource."
++        "When reading from stdin, Prettier assumes this "
++        "filename to decide which style and parser to use.",
++    )
+     def prettier_format(self, path, assume_filename):
+         # With assume_filename we want to have stdout clean since the result of the
+         # format will be redirected to stdout.
+ 
+         binary, _ = find_node_executable()
+-        prettier = os.path.join(self.topsrcdir, "node_modules", "prettier", "bin-prettier.js")
++        prettier = os.path.join(
++            self.topsrcdir, "node_modules", "prettier", "bin-prettier.js"
++        )
+         path = os.path.join(self.topsrcdir, path[0])
+ 
+         # Bug 1564824. Prettier fails on patches with moved files where the
+         # original directory also does not exist.
+         assume_dir = os.path.dirname(os.path.join(self.topsrcdir, assume_filename[0]))
+         assume_filename = assume_filename[0] if os.path.isdir(assume_dir) else path
+ 
+         # We use --stdin-filepath in order to better determine the path for
+         # the prettier formatter when it is ran outside of the repo, for example
+         # by the extension hg-formatsource.
+-        args = [binary, prettier, '--stdin-filepath', assume_filename]
++        args = [binary, prettier, "--stdin-filepath", assume_filename]
+ 
+         process = subprocess.Popen(args, stdin=subprocess.PIPE)
+-        with open(path, 'r') as fin:
++        with open(path, "r") as fin:
+             process.stdin.write(fin.read())
+             process.stdin.close()
+             process.wait()
+             return process.returncode
+ 
+-    @StaticAnalysisSubCommand('static-analysis', 'check-syntax',
+-                              'Run the check-syntax for C/C++ files based on '
+-                              '`compile_commands.json`')
+-    @CommandArgument('source', nargs='*',
+-                     help='Source files to be compiled checked (regex on path).')
++    @StaticAnalysisSubCommand(
++        "static-analysis",
++        "check-syntax",
++        "Run the check-syntax for C/C++ files based on " "`compile_commands.json`",
++    )
++    @CommandArgument(
++        "source", nargs="*", help="Source files to be compiled checked (regex on path)."
++    )
+     def check_syntax(self, source, verbose=False):
+         self._set_log_level(verbose)
+         self.log_manager.enable_unstructured()
+ 
+         # Verify that we have a valid `source`
+         if len(source) == 0:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: Specify files that need to be syntax checked.')
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Specify files that need to be syntax checked.",
++            )
+             return
+ 
+         rc = self._build_compile_db(verbose=verbose)
+         if rc != 0:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: Unable to build the `compile_commands.json`.')
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Unable to build the `compile_commands.json`.",
++            )
+             return rc
+         rc = self._build_export(jobs=2, verbose=verbose)
+ 
+         if rc != 0:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: Unable to build export.')
++            self.log(
++                logging.ERROR, "static-analysis", {}, "ERROR: Unable to build export."
++            )
+             return rc
+ 
+         # Build the list with all files from source
+         path_list = self._generate_path_list(source)
+ 
+-        compile_db = json.load(open(self._compile_db, 'r'))
++        compile_db = json.load(open(self._compile_db, "r"))
+ 
+         if compile_db is None:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     'ERROR: Loading {}'.format(self._compile_db))
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: Loading {}".format(self._compile_db),
++            )
+             return 1
+ 
+         commands = []
+ 
+-        compile_dict = {entry['file']: entry['command']
+-                        for entry in compile_db}
++        compile_dict = {entry["file"]: entry["command"] for entry in compile_db}
+         # Begin the compile check for each file
+         for file in path_list:
+             # It must be a C/C++ file
+             ext = mozpath.splitext(file)[-1]
+ 
+             if ext.lower() not in self._check_syntax_include_extensions:
+-                self.log(logging.INFO, 'static-analysis',
+-                         {}, 'Skipping {}'.format(file))
++                self.log(
++                    logging.INFO, "static-analysis", {}, "Skipping {}".format(file)
++                )
+                 continue
+             file_with_abspath = mozpath.join(self.topsrcdir, file)
+             # Found for a file that we are looking
+ 
+             entry = compile_dict.get(file_with_abspath, None)
+             if entry:
+-                command = entry.split(' ')
++                command = entry.split(" ")
+                 # Verify to see if we are dealing with an unified build
+-                if 'Unified_' in command[-1]:
++                if "Unified_" in command[-1]:
+                     # Translate the unified `TU` to per file basis TU
+                     command[-1] = file_with_abspath
+ 
+                 # We want syntax-only
+-                command.append('-fsyntax-only')
++                command.append("-fsyntax-only")
+                 commands.append(command)
+ 
+         max_workers = multiprocessing.cpu_count()
+ 
+         with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+             futures = []
+             for command in commands:
+-                futures.append(executor.submit(self.run_process, args=command,
+-                                               cwd=self.topsrcdir, pass_thru=True,
+-                                               ensure_exit_code=False))
+-
+-    @Command('clang-format',  category='misc', description='Run clang-format on current changes')
+-    @CommandArgument('--show', '-s', action='store_const', const='stdout', dest='output_path',
+-                     help='Show diff output on stdout instead of applying changes')
+-    @CommandArgument('--assume-filename', '-a', nargs=1, default=None,
+-                     help='This option is usually used in the context of hg-formatsource.'
+-                          'When reading from stdin, clang-format assumes this '
+-                          'filename to look for a style config file (with '
+-                          '-style=file) and to determine the language. When '
+-                          'specifying this option only one file should be used '
+-                          'as an input and the output will be forwarded to stdin. '
+-                          'This option also impairs the download of the clang-tools '
+-                          'and assumes the package is already located in it\'s default '
+-                          'location')
+-    @CommandArgument('--path', '-p', nargs='+', default=None,
+-                     help='Specify the path(s) to reformat')
+-    @CommandArgument('--commit', '-c', default=None,
+-                     help='Specify a commit to reformat from. '
+-                          'For git you can also pass a range of commits (foo..bar) '
+-                          'to format all of them at the same time.')
+-    @CommandArgument('--output', '-o', default=None, dest='output_path',
+-                     help='Specify a file handle to write clang-format raw output instead of '
+-                          'applying changes. This can be stdout or a file path.')
+-    @CommandArgument('--format', '-f', choices=('diff', 'json'), default='diff',
+-                     dest='output_format',
+-                     help='Specify the output format used: diff is the raw patch provided by '
+-                     'clang-format, json is a list of atomic changes to process.')
+-    @CommandArgument('--outgoing', default=False, action='store_true',
+-                     help='Run clang-format on outgoing files from mercurial repository.')
+-    def clang_format(self, assume_filename, path, commit, output_path=None, output_format='diff',
+-                     verbose=False, outgoing=False):
++                futures.append(
++                    executor.submit(
++                        self.run_process,
++                        args=command,
++                        cwd=self.topsrcdir,
++                        pass_thru=True,
++                        ensure_exit_code=False,
++                    )
++                )
++
++    @Command(
++        "clang-format",
++        category="misc",
++        description="Run clang-format on current changes",
++    )
++    @CommandArgument(
++        "--show",
++        "-s",
++        action="store_const",
++        const="stdout",
++        dest="output_path",
++        help="Show diff output on stdout instead of applying changes",
++    )
++    @CommandArgument(
++        "--assume-filename",
++        "-a",
++        nargs=1,
++        default=None,
++        help="This option is usually used in the context of hg-formatsource."
++        "When reading from stdin, clang-format assumes this "
++        "filename to look for a style config file (with "
++        "-style=file) and to determine the language. When "
++        "specifying this option only one file should be used "
++        "as an input and the output will be forwarded to stdin. "
++        "This option also impairs the download of the clang-tools "
++        "and assumes the package is already located in it's default "
++        "location",
++    )
++    @CommandArgument(
++        "--path", "-p", nargs="+", default=None, help="Specify the path(s) to reformat"
++    )
++    @CommandArgument(
++        "--commit",
++        "-c",
++        default=None,
++        help="Specify a commit to reformat from. "
++        "For git you can also pass a range of commits (foo..bar) "
++        "to format all of them at the same time.",
++    )
++    @CommandArgument(
++        "--output",
++        "-o",
++        default=None,
++        dest="output_path",
++        help="Specify a file handle to write clang-format raw output instead of "
++        "applying changes. This can be stdout or a file path.",
++    )
++    @CommandArgument(
++        "--format",
++        "-f",
++        choices=("diff", "json"),
++        default="diff",
++        dest="output_format",
++        help="Specify the output format used: diff is the raw patch provided by "
++        "clang-format, json is a list of atomic changes to process.",
++    )
++    @CommandArgument(
++        "--outgoing",
++        default=False,
++        action="store_true",
++        help="Run clang-format on outgoing files from mercurial repository.",
++    )
++    def clang_format(
++        self,
++        assume_filename,
++        path,
++        commit,
++        output_path=None,
++        output_format="diff",
++        verbose=False,
++        outgoing=False,
++    ):
+         # Run clang-format or clang-format-diff on the local changes
+         # or files/directories
+         if path is None and outgoing:
+             repo = get_repository_object(self.topsrcdir)
+             path = repo.get_outgoing_files()
+ 
+         if path:
+             # Create the full path list
+-            def path_maker(f_name): return os.path.join(self.topsrcdir, f_name)
++            def path_maker(f_name):
++                return os.path.join(self.topsrcdir, f_name)
++
+             path = map(path_maker, path)
+ 
+         os.chdir(self.topsrcdir)
+ 
+         # Load output file handle, either stdout or a file handle in write mode
+         output = None
+         if output_path is not None:
+-            output = sys.stdout if output_path == 'stdout' else open(output_path, 'w')
++            output = sys.stdout if output_path == "stdout" else open(output_path, "w")
+ 
+         # With assume_filename we want to have stdout clean since the result of the
+         # format will be redirected to stdout. Only in case of errror we
+         # write something to stdout.
+         # We don't actually want to get the clang-tools here since we want in some
+         # scenarios to do this in parallel so we relay on the fact that the tools
+         # have already been downloaded via './mach bootstrap' or directly via
+         # './mach static-analysis install'
+@@ -1709,302 +2285,368 @@ class StaticAnalysis(MachCommandBase):
+                 print("clang-format: Unable to set locate clang-format tools.")
+                 return 1
+         else:
+             rc = self._get_clang_tools(verbose=verbose)
+             if rc != 0:
+                 return rc
+ 
+         if self._is_version_eligible() is False:
+-            self.log(logging.ERROR, 'static-analysis', {},
+-                     "ERROR: You're using an old version of clang-format binary."
+-                     " Please update to a more recent one by running: './mach bootstrap'")
++            self.log(
++                logging.ERROR,
++                "static-analysis",
++                {},
++                "ERROR: You're using an old version of clang-format binary."
++                " Please update to a more recent one by running: './mach bootstrap'",
++            )
+             return 1
+ 
+         if path is None:
+-            return self._run_clang_format_diff(self._clang_format_diff,
+-                                               self._clang_format_path, commit, output)
++            return self._run_clang_format_diff(
++                self._clang_format_diff, self._clang_format_path, commit, output
++            )
+ 
+         if assume_filename:
+-            return self._run_clang_format_in_console(self._clang_format_path,
+-                                                     path, assume_filename)
+-
+-        return self._run_clang_format_path(self._clang_format_path, path, output, output_format)
++            return self._run_clang_format_in_console(
++                self._clang_format_path, path, assume_filename
++            )
++
++        return self._run_clang_format_path(
++            self._clang_format_path, path, output, output_format
++        )
+ 
+     def _verify_checker(self, item, checkers_results):
+-        check = item['name']
++        check = item["name"]
+         test_file_path = mozpath.join(self._clang_tidy_base_path, "test", check)
+-        test_file_path_cpp = test_file_path + '.cpp'
+-        test_file_path_json = test_file_path + '.json'
+-
+-        self.log(logging.INFO, 'static-analysis', {},
+-                 "RUNNING: clang-tidy checker {}.".format(check))
++        test_file_path_cpp = test_file_path + ".cpp"
++        test_file_path_json = test_file_path + ".json"
++
++        self.log(
++            logging.INFO,
++            "static-analysis",
++            {},
++            "RUNNING: clang-tidy checker {}.".format(check),
++        )
+ 
+         # Structured information in case a checker fails
+         checker_error = {
+-            'checker-name': check,
+-            'checker-error': '',
+-            'info1': '',
+-            'info2': '',
+-            'info3': ''
++            "checker-name": check,
++            "checker-error": "",
++            "info1": "",
++            "info2": "",
++            "info3": "",
+         }
+ 
+         # Verify if this checker actually exists
+         if check not in self._clang_tidy_checks:
+-            checker_error['checker-error'] = self.TOOLS_CHECKER_NOT_FOUND
++            checker_error["checker-error"] = self.TOOLS_CHECKER_NOT_FOUND
+             checkers_results.append(checker_error)
+             return self.TOOLS_CHECKER_NOT_FOUND
+ 
+         # Verify if the test file exists for this checker
+         if not os.path.exists(test_file_path_cpp):
+-            checker_error['checker-error'] = self.TOOLS_CHECKER_NO_TEST_FILE
++            checker_error["checker-error"] = self.TOOLS_CHECKER_NO_TEST_FILE
+             checkers_results.append(checker_error)
+             return self.TOOLS_CHECKER_NO_TEST_FILE
+ 
+         issues, clang_output = self._run_analysis(
+-            checks='-*,' + check, header_filter='', sources=[test_file_path_cpp])
++            checks="-*," + check, header_filter="", sources=[test_file_path_cpp]
++        )
+         if issues is None:
+             return self.TOOLS_CHECKER_FAILED_FILE
+ 
+         # Verify to see if we got any issues, if not raise exception
+         if not issues:
+-            checker_error['checker-error'] = self.TOOLS_CHECKER_RETURNED_NO_ISSUES
+-            checker_error['info1'] = clang_output
++            checker_error["checker-error"] = self.TOOLS_CHECKER_RETURNED_NO_ISSUES
++            checker_error["info1"] = clang_output
+             checkers_results.append(checker_error)
+             return self.TOOLS_CHECKER_RETURNED_NO_ISSUES
+ 
+         # Also store the 'reliability' index for this checker
+-        issues.append({'reliability': item['reliability']})
++        issues.append({"reliability": item["reliability"]})
+ 
+         if self._dump_results:
+             self._build_autotest_result(test_file_path_json, json.dumps(issues))
+         else:
+             if not os.path.exists(test_file_path_json):
+                 # Result file for test not found maybe regenerate it?
+-                checker_error['checker-error'] = self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND
++                checker_error[
++                    "checker-error"
++                ] = self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND
+                 checkers_results.append(checker_error)
+                 return self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND
+ 
+             # Read the pre-determined issues
+             baseline_issues = self._get_autotest_stored_issues(test_file_path_json)
+ 
+             # Compare the two lists
+             if issues != baseline_issues:
+-                checker_error['checker-error'] = self.TOOLS_CHECKER_DIFF_FAILED
+-                checker_error['info1'] = baseline_issues
+-                checker_error['info2'] = issues
+-                checker_error['info3'] = clang_output
++                checker_error["checker-error"] = self.TOOLS_CHECKER_DIFF_FAILED
++                checker_error["info1"] = baseline_issues
++                checker_error["info2"] = issues
++                checker_error["info3"] = clang_output
+                 checkers_results.append(checker_error)
+                 return self.TOOLS_CHECKER_DIFF_FAILED
+ 
+         return self.TOOLS_SUCCESS
+ 
+     def _build_autotest_result(self, file, issues):
+-        with open(file, 'w') as f:
++        with open(file, "w") as f:
+             f.write(issues)
+ 
+     def _get_autotest_stored_issues(self, file):
+         with open(file) as f:
+             return json.load(f)
+ 
+     def _parse_issues(self, clang_output):
+-        '''
++        """
+         Parse clang-tidy output into structured issues
+-        '''
++        """
+ 
+         # Limit clang output parsing to 'Enabled checks:'
+-        end = re.search(r'^Enabled checks:\n', clang_output, re.MULTILINE)
++        end = re.search(r"^Enabled checks:\n", clang_output, re.MULTILINE)
+         if end is not None:
+-            clang_output = clang_output[:end.start()-1]
++            clang_output = clang_output[: end.start() - 1]
+ 
+         platform, _ = self.platform
+         # Starting with clang 8, for the diagnostic messages we have multiple `LF CR`
+         # in order to be compatiable with msvc compiler format, and for this
+         # we are not interested to match the end of line.
+-        regex_string = r'(.+):(\d+):(\d+): (warning|error): ([^\[\]\n]+)(?: \[([\.\w-]+)\])'
++        regex_string = (
++            r"(.+):(\d+):(\d+): (warning|error): ([^\[\]\n]+)(?: \[([\.\w-]+)\])"
++        )
+ 
+         # For non 'win' based platforms we also need the 'end of the line' regex
+-        if platform not in ('win64', 'win32'):
+-            regex_string += '?$'
++        if platform not in ("win64", "win32"):
++            regex_string += "?$"
+ 
+         regex_header = re.compile(regex_string, re.MULTILINE)
+ 
+         # Sort headers by positions
+-        headers = sorted(
+-            regex_header.finditer(clang_output),
+-            key=lambda h: h.start()
+-        )
++        headers = sorted(regex_header.finditer(clang_output), key=lambda h: h.start())
+         issues = []
+         for _, header in enumerate(headers):
+             header_group = header.groups()
+             element = [header_group[3], header_group[4], header_group[5]]
+             issues.append(element)
+         return issues
+ 
+     def _get_checks(self):
+-        checks = '-*'
++        checks = "-*"
+         try:
+             config = self._clang_tidy_config
+-            for item in config['clang_checkers']:
+-                if item.get('publish', True):
+-                    checks += ',' + item['name']
++            for item in config["clang_checkers"]:
++                if item.get("publish", True):
++                    checks += "," + item["name"]
+         except Exception:
+-            print('Looks like config.yaml is not valid, so we are unable to '
+-                  'determine default checkers, using \'-checks=-*,mozilla-*\'')
+-            checks += ',mozilla-*'
++            print(
++                "Looks like config.yaml is not valid, so we are unable to "
++                "determine default checkers, using '-checks=-*,mozilla-*'"
++            )
++            checks += ",mozilla-*"
+         finally:
+             return checks
+ 
+     def _get_checks_config(self):
+         config_list = []
+         checker_config = {}
+         try:
+             config = self._clang_tidy_config
+-            for checker in config['clang_checkers']:
+-                if checker.get('publish', True) and 'config' in checker:
+-                    for checker_option in checker['config']:
++            for checker in config["clang_checkers"]:
++                if checker.get("publish", True) and "config" in checker:
++                    for checker_option in checker["config"]:
+                         # Verify if the format of the Option is correct,
+                         # possibilities are:
+                         # 1. CheckerName.Option
+                         # 2. Option -> that will become CheckerName.Option
+-                        if not checker_option['key'].startswith(checker['name']):
+-                            checker_option['key'] = "{}.{}".format(
+-                                checker['name'], checker_option['key'])
+-                    config_list += checker['config']
+-            checker_config['CheckOptions'] = config_list
++                        if not checker_option["key"].startswith(checker["name"]):
++                            checker_option["key"] = "{}.{}".format(
++                                checker["name"], checker_option["key"]
++                            )
++                    config_list += checker["config"]
++            checker_config["CheckOptions"] = config_list
+         except Exception:
+-            print('Looks like config.yaml is not valid, so we are unable to '
+-                  'determine configuration for checkers, so using default')
++            print(
++                "Looks like config.yaml is not valid, so we are unable to "
++                "determine configuration for checkers, so using default"
++            )
+             checker_config = None
+         finally:
+             return checker_config
+ 
+     def _get_config_environment(self):
+         ran_configure = False
+         config = None
+         builder = Build(self._mach_context)
+ 
+         try:
+             config = self.config_environment
+         except Exception:
+-            self.log(logging.WARNING, 'static-analysis', {},
+-                     "Looks like configure has not run yet, running it now...")
++            self.log(
++                logging.WARNING,
++                "static-analysis",
++                {},
++                "Looks like configure has not run yet, running it now...",
++            )
+ 
+             clobber = Clobberer(self.topsrcdir, self.topobjdir)
+ 
+             if clobber.clobber_needed():
+                 choice = prompt_bool(
+                     "Configuration has changed and Clobber is needed. "
+                     "Do you want to proceed?"
+                 )
+                 if not choice:
+-                    self.log(logging.ERROR, 'static-analysis', {},
+-                             "ERROR: Without Clobber we cannot continue execution!")
++                    self.log(
++                        logging.ERROR,
++                        "static-analysis",
++                        {},
++                        "ERROR: Without Clobber we cannot continue execution!",
++                    )
+                     return (1, None, None)
+                 os.environ["AUTOCLOBBER"] = "1"
+ 
+             rc = builder.configure()
+             if rc != 0:
+                 return (rc, config, ran_configure)
+             ran_configure = True
+             try:
+                 config = self.config_environment
+             except Exception:
+                 pass
+ 
+         return (0, config, ran_configure)
+ 
+     def _build_compile_db(self, verbose=False):
+-        self._compile_db = mozpath.join(self.topobjdir, 'compile_commands.json')
++        self._compile_db = mozpath.join(self.topobjdir, "compile_commands.json")
+         if os.path.exists(self._compile_db):
+             return 0
+ 
+         rc, config, ran_configure = self._get_config_environment()
+         if rc != 0:
+             return rc
+ 
+         if ran_configure:
+             # Configure may have created the compilation database if the
+             # mozconfig enables building the CompileDB backend by default,
+             # So we recurse to see if the file exists once again.
+             return self._build_compile_db(verbose=verbose)
+ 
+         if config:
+-            print('Looks like a clang compilation database has not been '
+-                  'created yet, creating it now...')
++            print(
++                "Looks like a clang compilation database has not been "
++                "created yet, creating it now..."
++            )
+             builder = Build(self._mach_context)
+-            rc = builder.build_backend(['CompileDB'], verbose=verbose)
++            rc = builder.build_backend(["CompileDB"], verbose=verbose)
+             if rc != 0:
+                 return rc
+             assert os.path.exists(self._compile_db)
+             return 0
+ 
+     def _build_export(self, jobs, verbose=False):
+         def on_line(line):
+-            self.log(logging.INFO, 'build_output', {'line': line}, '{line}')
++            self.log(logging.INFO, "build_output", {"line": line}, "{line}")
+ 
+         builder = Build(self._mach_context)
+         # First install what we can through install manifests.
+-        rc = builder._run_make(directory=self.topobjdir, target='pre-export',
+-                               line_handler=None, silent=not verbose)
++        rc = builder._run_make(
++            directory=self.topobjdir,
++            target="pre-export",
++            line_handler=None,
++            silent=not verbose,
++        )
+         if rc != 0:
+             return rc
+ 
+         # Then build the rest of the build dependencies by running the full
+         # export target, because we can't do anything better.
+-        return builder._run_make(directory=self.topobjdir, target='export',
+-                                 line_handler=None, silent=not verbose,
+-                                 num_jobs=jobs)
++        return builder._run_make(
++            directory=self.topobjdir,
++            target='export',
++            line_handler=None,
++            silent=not verbose,
++            num_jobs=jobs,
++        )
+ 
+     def _set_clang_tools_paths(self):
+         rc, config, _ = self._get_config_environment()
+ 
+         if rc != 0:
+             return rc
+ 
+-        self._clang_tools_path = mozpath.join(self._mach_context.state_dir, "clang-tools")
+-        self._clang_tidy_path = mozpath.join(self._clang_tools_path, "clang-tidy", "bin",
+-                                             "clang-tidy" + config.substs.get('BIN_SUFFIX', ''))
++        self._clang_tools_path = mozpath.join(
++            self._mach_context.state_dir, "clang-tools"
++        )
++        self._clang_tidy_path = mozpath.join(
++            self._clang_tools_path,
++            "clang-tidy",
++            "bin",
++            "clang-tidy" + config.substs.get("BIN_SUFFIX", ""),
++        )
+         self._clang_format_path = mozpath.join(
+-            self._clang_tools_path, "clang-tidy", "bin",
+-            "clang-format" + config.substs.get('BIN_SUFFIX', ''))
++            self._clang_tools_path,
++            "clang-tidy",
++            "bin",
++            "clang-format" + config.substs.get("BIN_SUFFIX", ""),
++        )
+         self._clang_apply_replacements = mozpath.join(
+-            self._clang_tools_path, "clang-tidy", "bin",
+-            "clang-apply-replacements" + config.substs.get('BIN_SUFFIX', ''))
+-        self._run_clang_tidy_path = mozpath.join(self._clang_tools_path, "clang-tidy",
+-                                                 "share", "clang", "run-clang-tidy.py")
+-        self._clang_format_diff = mozpath.join(self._clang_tools_path, "clang-tidy",
+-                                               "share", "clang", "clang-format-diff.py")
++            self._clang_tools_path,
++            "clang-tidy",
++            "bin",
++            "clang-apply-replacements" + config.substs.get("BIN_SUFFIX", ""),
++        )
++        self._run_clang_tidy_path = mozpath.join(
++            self._clang_tools_path, "clang-tidy", "share", "clang", "run-clang-tidy.py"
++        )
++        self._clang_format_diff = mozpath.join(
++            self._clang_tools_path,
++            "clang-tidy",
++            "share",
++            "clang",
++            "clang-format-diff.py",
++        )
+         return 0
+ 
+     def _do_clang_tools_exist(self):
+-        return os.path.exists(self._clang_tidy_path) and \
+-               os.path.exists(self._clang_format_path) and \
+-               os.path.exists(self._clang_apply_replacements) and \
+-               os.path.exists(self._run_clang_tidy_path)
+-
+-    def _get_clang_tools(self, force=False, skip_cache=False,
+-                         source=None, download_if_needed=True,
+-                         verbose=False):
++        return (
++            os.path.exists(self._clang_tidy_path)
++            and os.path.exists(self._clang_format_path)
++            and os.path.exists(self._clang_apply_replacements)
++            and os.path.exists(self._run_clang_tidy_path)
++        )
++
++    def _get_clang_tools(
++        self,
++        force=False,
++        skip_cache=False,
++        source=None,
++        download_if_needed=True,
++        verbose=False,
++    ):
+ 
+         rc = self._set_clang_tools_paths()
+ 
+         if rc != 0:
+             return rc
+ 
+         if self._do_clang_tools_exist() and not force:
+             return 0
+ 
+         if os.path.isdir(self._clang_tools_path) and download_if_needed:
+             # The directory exists, perhaps it's corrupted?  Delete it
+             # and start from scratch.
+             shutil.rmtree(self._clang_tools_path)
+-            return self._get_clang_tools(force=force, skip_cache=skip_cache,
+-                                         source=source, verbose=verbose,
+-                                         download_if_needed=download_if_needed)
++            return self._get_clang_tools(
++                force=force,
++                skip_cache=skip_cache,
++                source=source,
++                verbose=verbose,
++                download_if_needed=download_if_needed,
++            )
+ 
+         # Create base directory where we store clang binary
+         os.mkdir(self._clang_tools_path)
+ 
+         if source:
+             return self._get_clang_tools_from_source(source)
+ 
+         from mozbuild.artifact_commands import PackageFrontend
+@@ -2012,137 +2654,162 @@ class StaticAnalysis(MachCommandBase):
+         self._artifact_manager = PackageFrontend(self._mach_context)
+ 
+         if not download_if_needed:
+             return 0
+ 
+         job, _ = self.platform
+ 
+         if job is None:
+-            raise Exception('The current platform isn\'t supported. '
+-                            'Currently only the following platforms are '
+-                            'supported: win32/win64, linux64 and macosx64.')
+-
+-        job += '-clang-tidy'
++            raise Exception(
++                "The current platform isn't supported. "
++                "Currently only the following platforms are "
++                "supported: win32/win64, linux64 and macosx64."
++            )
++
++        job += "-clang-tidy"
+ 
+         # We want to unpack data in the clang-tidy mozbuild folder
+         currentWorkingDir = os.getcwd()
+         os.chdir(self._clang_tools_path)
+-        rc = self._artifact_manager.artifact_toolchain(verbose=verbose,
+-                                                       skip_cache=skip_cache,
+-                                                       from_build=[job],
+-                                                       no_unpack=False,
+-                                                       retry=0)
++        rc = self._artifact_manager.artifact_toolchain(
++            verbose=verbose,
++            skip_cache=skip_cache,
++            from_build=[job],
++            no_unpack=False,
++            retry=0,
++        )
+         # Change back the cwd
+         os.chdir(currentWorkingDir)
+ 
+         return rc
+ 
+     def _get_clang_tools_from_source(self, filename):
+         from mozbuild.action.tooltool import unpack_file
+-        clang_tidy_path = mozpath.join(self._mach_context.state_dir,
+-                                       "clang-tools")
++
++        clang_tidy_path = mozpath.join(self._mach_context.state_dir, "clang-tools")
+ 
+         currentWorkingDir = os.getcwd()
+         os.chdir(clang_tidy_path)
+ 
+         unpack_file(filename)
+ 
+         # Change back the cwd
+         os.chdir(currentWorkingDir)
+ 
+-        clang_path = mozpath.join(clang_tidy_path, 'clang')
++        clang_path = mozpath.join(clang_tidy_path, "clang")
+ 
+         if not os.path.isdir(clang_path):
+-            raise Exception('Extracted the archive but didn\'t find '
+-                            'the expected output')
++            raise Exception(
++                "Extracted the archive but didn't find " "the expected output"
++            )
+ 
+         assert os.path.exists(self._clang_tidy_path)
+         assert os.path.exists(self._clang_format_path)
+         assert os.path.exists(self._clang_apply_replacements)
+         assert os.path.exists(self._run_clang_tidy_path)
+         return 0
+ 
+     def _get_clang_format_diff_command(self, commit):
+-        if self.repository.name == 'hg':
++        if self.repository.name == "hg":
+             args = ["hg", "diff", "-U0"]
+             if commit:
+                 args += ["-c", commit]
+             else:
+                 args += ["-r", ".^"]
+             for dot_extension in self._format_include_extensions:
+-                args += ['--include', 'glob:**{0}'.format(dot_extension)]
+-            args += ['--exclude', 'listfile:{0}'.format(self._format_ignore_file)]
++                args += ["--include", "glob:**{0}".format(dot_extension)]
++            args += ["--exclude", "listfile:{0}".format(self._format_ignore_file)]
+         else:
+             commit_range = "HEAD"  # All uncommitted changes.
+             if commit:
+-                commit_range = commit if ".." in commit else "{}~..{}".format(commit, commit)
++                commit_range = (
++                    commit if ".." in commit else "{}~..{}".format(commit, commit)
++                )
+             args = ["git", "diff", "--no-color", "-U0", commit_range, "--"]
+             for dot_extension in self._format_include_extensions:
+-                args += ['*{0}'.format(dot_extension)]
++                args += ["*{0}".format(dot_extension)]
+             # git-diff doesn't support an 'exclude-from-files' param, but
+             # allow to add individual exclude pattern since v1.9, see
+             # https://git-scm.com/docs/gitglossary#gitglossary-aiddefpathspecapathspec
+-            with open(self._format_ignore_file, 'rb') as exclude_pattern_file:
++            with open(self._format_ignore_file, "rb") as exclude_pattern_file:
+                 for pattern in exclude_pattern_file.readlines():
+                     pattern = six.ensure_str(pattern.rstrip())
+-                    pattern = pattern.replace('.*', '**')
+-                    if not pattern or pattern.startswith('#'):
++                    pattern = pattern.replace(".*", "**")
++                    if not pattern or pattern.startswith("#"):
+                         continue  # empty or comment
+-                    magics = ['exclude']
+-                    if pattern.startswith('^'):
+-                        magics += ['top']
++                    magics = ["exclude"]
++                    if pattern.startswith("^"):
++                        magics += ["top"]
+                         pattern = pattern[1:]
+-                    args += [':({0}){1}'.format(','.join(magics), pattern)]
++                    args += [":({0}){1}".format(",".join(magics), pattern)]
+         return args
+ 
+-    def _get_infer(self, force=False, skip_cache=False, download_if_needed=True,
+-                   verbose=False, intree_tool=False):
++    def _get_infer(
++        self,
++        force=False,
++        skip_cache=False,
++        download_if_needed=True,
++        verbose=False,
++        intree_tool=False,
++    ):
+         rc, config, _ = self._get_config_environment()
+         if rc != 0:
+             return rc
+-        infer_path = os.environ['MOZ_FETCHES_DIR'] if intree_tool else \
+-            mozpath.join(self._mach_context.state_dir, 'infer')
+-        self._infer_path = mozpath.join(infer_path, 'infer', 'bin', 'infer' +
+-                                        config.substs.get('BIN_SUFFIX', ''))
++        infer_path = (
++            os.environ["MOZ_FETCHES_DIR"]
++            if intree_tool
++            else mozpath.join(self._mach_context.state_dir, "infer")
++        )
++        self._infer_path = mozpath.join(
++            infer_path, "infer", "bin", "infer" + config.substs.get("BIN_SUFFIX", "")
++        )
+         if intree_tool:
+             return not os.path.exists(self._infer_path)
+         if os.path.exists(self._infer_path) and not force:
+             return 0
+ 
+         if os.path.isdir(infer_path) and download_if_needed:
+             # The directory exists, perhaps it's corrupted?  Delete it
+             # and start from scratch.
+             shutil.rmtree(infer_path)
+-            return self._get_infer(force=force, skip_cache=skip_cache,
+-                                   verbose=verbose,
+-                                   download_if_needed=download_if_needed)
++            return self._get_infer(
++                force=force,
++                skip_cache=skip_cache,
++                verbose=verbose,
++                download_if_needed=download_if_needed,
++            )
+         os.mkdir(infer_path)
+         from mozbuild.artifact_commands import PackageFrontend
++
+         self._artifact_manager = PackageFrontend(self._mach_context)
+         if not download_if_needed:
+             return 0
+         job, _ = self.platform
+-        if job != 'linux64':
++        if job != "linux64":
+             return -1
+         else:
+-            job += '-infer'
++            job += "-infer"
+         # We want to unpack data in the infer mozbuild folder
+         currentWorkingDir = os.getcwd()
+         os.chdir(infer_path)
+-        rc = self._artifact_manager.artifact_toolchain(verbose=verbose,
+-                                                       skip_cache=skip_cache,
+-                                                       from_build=[job],
+-                                                       no_unpack=False,
+-                                                       retry=0)
++        rc = self._artifact_manager.artifact_toolchain(
++            verbose=verbose,
++            skip_cache=skip_cache,
++            from_build=[job],
++            no_unpack=False,
++            retry=0,
++        )
+         # Change back the cwd
+         os.chdir(currentWorkingDir)
+         return rc
+ 
+-    def _run_clang_format_diff(self, clang_format_diff, clang_format, commit, output_file):
++    def _run_clang_format_diff(
++        self, clang_format_diff, clang_format, commit, output_file
++    ):
+         # Run clang-format on the diff
+         # Note that this will potentially miss a lot things
+         from subprocess import Popen, PIPE, check_output, CalledProcessError
+ 
+         diff_process = Popen(self._get_clang_format_diff_command(commit), stdout=PIPE)
+         args = [sys.executable, clang_format_diff, "-p1", "-binary=%s" % clang_format]
+ 
+         if not output_file:
+@@ -2156,53 +2823,54 @@ class StaticAnalysis(MachCommandBase):
+             return 0
+         except CalledProcessError as e:
+             # Something wrong happend
+             print("clang-format: An error occured while running clang-format-diff.")
+             return e.returncode
+ 
+     def _is_ignored_path(self, ignored_dir_re, f):
+         # Remove upto topsrcdir in pathname and match
+-        if f.startswith(self.topsrcdir + '/'):
+-            match_f = f[len(self.topsrcdir + '/'):]
++        if f.startswith(self.topsrcdir + "/"):
++            match_f = f[len(self.topsrcdir + "/") :]
+         else:
+             match_f = f
+         return re.match(ignored_dir_re, match_f)
+ 
+     def _generate_path_list(self, paths, verbose=True):
+         path_to_third_party = os.path.join(self.topsrcdir, self._format_ignore_file)
+         ignored_dir = []
+-        with open(path_to_third_party, 'r') as fh:
++        with open(path_to_third_party, "r") as fh:
+             for line in fh:
+                 # Remove comments and empty lines
+-                if line.startswith('#') or len(line.strip()) == 0:
++                if line.startswith("#") or len(line.strip()) == 0:
+                     continue
+                 # The regexp is to make sure we are managing relative paths
+                 ignored_dir.append(r"^[\./]*" + line.rstrip())
+ 
+         # Generates the list of regexp
+-        ignored_dir_re = '(%s)' % '|'.join(ignored_dir)
++        ignored_dir_re = "(%s)" % "|".join(ignored_dir)
+         extensions = self._format_include_extensions
+ 
+         path_list = []
+         for f in paths:
+             if self._is_ignored_path(ignored_dir_re, f):
+                 # Early exit if we have provided an ignored directory
+                 if verbose:
+                     print("static-analysis: Ignored third party code '{0}'".format(f))
+                 continue
+ 
+             if os.path.isdir(f):
+                 # Processing a directory, generate the file list
+                 for folder, subs, files in os.walk(f):
+                     subs.sort()
+                     for filename in sorted(files):
+                         f_in_dir = os.path.join(folder, filename)
+-                        if (f_in_dir.endswith(extensions)
+-                            and not self._is_ignored_path(ignored_dir_re, f_in_dir)):
++                        if f_in_dir.endswith(extensions) and not self._is_ignored_path(
++                            ignored_dir_re, f_in_dir
++                        ):
+                             # Supported extension and accepted path
+                             path_list.append(f_in_dir)
+             else:
+                 # Make sure that the file exists and it has a supported extension
+                 if os.path.isfile(f) and f.endswith(extensions):
+                     path_list.append(f)
+ 
+         return path_list
+@@ -2214,36 +2882,38 @@ class StaticAnalysis(MachCommandBase):
+             return 0
+ 
+         # We use -assume-filename in order to better determine the path for
+         # the .clang-format when it is ran outside of the repo, for example
+         # by the extension hg-formatsource
+         args = [clang_format, "-assume-filename={}".format(assume_filename[0])]
+ 
+         process = subprocess.Popen(args, stdin=subprocess.PIPE)
+-        with open(paths[0], 'r') as fin:
++        with open(paths[0], "r") as fin:
+             process.stdin.write(fin.read())
+             process.stdin.close()
+             process.wait()
+             return process.returncode
+ 
+     def _get_clang_format_cfg(self, current_dir):
+-        clang_format_cfg_path = mozpath.join(current_dir, '.clang-format')
++        clang_format_cfg_path = mozpath.join(current_dir, ".clang-format")
+ 
+         if os.path.exists(clang_format_cfg_path):
+             # Return found path for .clang-format
+             return clang_format_cfg_path
+ 
+         if current_dir != self.topsrcdir:
+             # Go to parent directory
+             return self._get_clang_format_cfg(os.path.split(current_dir)[0])
+         # We have reached self.topsrcdir so return None
+         return None
+ 
+-    def _copy_clang_format_for_show_diff(self, current_dir, cached_clang_format_cfg, tmpdir):
++    def _copy_clang_format_for_show_diff(
++        self, current_dir, cached_clang_format_cfg, tmpdir
++    ):
+         # Lookup for .clang-format first in cache
+         clang_format_cfg = cached_clang_format_cfg.get(current_dir, None)
+ 
+         if clang_format_cfg is None:
+             # Go through top directories
+             clang_format_cfg = self._get_clang_format_cfg(current_dir)
+ 
+             # This is unlikely to happen since we must have .clang-format from
+@@ -2259,101 +2929,108 @@ class StaticAnalysis(MachCommandBase):
+         shutil.copy(clang_format_cfg, tmpdir)
+         return 0
+ 
+     def _run_clang_format_path(self, clang_format, paths, output_file, output_format):
+ 
+         # Run clang-format on files or directories directly
+         from subprocess import check_output, CalledProcessError
+ 
+-        if output_format == 'json':
++        if output_format == "json":
+             # Get replacements in xml, then process to json
+-            args = [clang_format, '-output-replacements-xml']
++            args = [clang_format, "-output-replacements-xml"]
+         else:
+-            args = [clang_format, '-i']
++            args = [clang_format, "-i"]
+ 
+         if output_file:
+             # We just want to show the diff, we create the directory to copy it
+-            tmpdir = os.path.join(self.topobjdir, 'tmp')
++            tmpdir = os.path.join(self.topobjdir, "tmp")
+             if not os.path.exists(tmpdir):
+                 os.makedirs(tmpdir)
+ 
+         path_list = self._generate_path_list(paths)
+ 
+         if path_list == []:
+             return
+ 
+         print("Processing %d file(s)..." % len(path_list))
+ 
+         if output_file:
+             patches = {}
+             cached_clang_format_cfg = {}
+             for i in range(0, len(path_list)):
+-                l = path_list[i: (i + 1)]
++                l = path_list[i : (i + 1)]
+ 
+                 # Copy the files into a temp directory
+                 # and run clang-format on the temp directory
+                 # and show the diff
+                 original_path = l[0]
+                 local_path = ntpath.basename(original_path)
+                 current_dir = ntpath.dirname(original_path)
+                 target_file = os.path.join(tmpdir, local_path)
+                 faketmpdir = os.path.dirname(target_file)
+                 if not os.path.isdir(faketmpdir):
+                     os.makedirs(faketmpdir)
+                 shutil.copy(l[0], faketmpdir)
+                 l[0] = target_file
+ 
+-                ret = self._copy_clang_format_for_show_diff(current_dir,
+-                                                            cached_clang_format_cfg,
+-                                                            faketmpdir)
++                ret = self._copy_clang_format_for_show_diff(
++                    current_dir, cached_clang_format_cfg, faketmpdir
++                )
+                 if ret != 0:
+                     return ret
+ 
+                 # Run clang-format on the list
+                 try:
+                     output = check_output(args + l)
+-                    if output and output_format == 'json':
++                    if output and output_format == "json":
+                         # Output a relative path in json patch list
+                         relative_path = os.path.relpath(original_path, self.topsrcdir)
+-                        patches[relative_path] = self._parse_xml_output(original_path, output)
++                        patches[relative_path] = self._parse_xml_output(
++                            original_path, output
++                        )
+                 except CalledProcessError as e:
+                     # Something wrong happend
+                     print("clang-format: An error occured while running clang-format.")
+                     return e.returncode
+ 
+                 # show the diff
+-                if output_format == 'diff':
++                if output_format == "diff":
+                     diff_command = ["diff", "-u", original_path, target_file]
+                     try:
+                         output = check_output(diff_command)
+                     except CalledProcessError as e:
+                         # diff -u returns 0 when no change
+                         # here, we expect changes. if we are here, this means that
+                         # there is a diff to show
+                         if e.output:
+                             # Replace the temp path by the path relative to the repository to
+                             # display a valid patch
+-                            relative_path = os.path.relpath(original_path, self.topsrcdir)
++                            relative_path = os.path.relpath(
++                                original_path, self.topsrcdir
++                            )
+                             # We must modify the paths in order to be compatible with the
+                             # `diff` format.
+                             original_path_diff = os.path.join("a", relative_path)
+                             target_path_diff = os.path.join("b", relative_path)
+-                            e.output = e.output.decode('utf-8')
+-                            patch = e.output.replace("+++ {}".format(target_file),
+-                                                     "+++ {}".format(target_path_diff)).replace(
+-                                                         "-- {}".format(original_path),
+-                                                         "-- {}".format(original_path_diff))
++                            e.output = e.output.decode("utf-8")
++                            patch = e.output.replace(
++                                "+++ {}".format(target_file),
++                                "+++ {}".format(target_path_diff),
++                            ).replace(
++                                "-- {}".format(original_path),
++                                "-- {}".format(original_path_diff),
++                            )
+                             patches[original_path] = patch
+ 
+-            if output_format == 'json':
++            if output_format == "json":
+                 output = json.dumps(patches, indent=4)
+             else:
+                 # Display all the patches at once
+-                output = '\n'.join(patches.values())
++                output = "\n".join(patches.values())
+ 
+             # Output to specified file or stdout
+             print(output, file=output_file)
+ 
+             shutil.rmtree(tmpdir)
+             return 0
+ 
+         # Run clang-format in parallel trying to saturate all of the available cores.
+@@ -2368,17 +3045,17 @@ class StaticAnalysis(MachCommandBase):
+         batch_size = int(math.floor(float(len(path_list)) / max_workers))
+         outstanding_items = len(path_list) - batch_size * max_workers
+ 
+         batches = []
+ 
+         i = 0
+         while i < len(path_list):
+             num_items = batch_size + (1 if outstanding_items > 0 else 0)
+-            batches.append(args + path_list[i: (i + num_items)])
++            batches.append(args + path_list[i : (i + num_items)])
+ 
+             outstanding_items -= 1
+             i += num_items
+ 
+         error_code = None
+ 
+         with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+             futures = []
+@@ -2391,35 +3068,35 @@ class StaticAnalysis(MachCommandBase):
+                 if ret_val is not None:
+                     error_code = ret_val
+ 
+             if error_code is not None:
+                 return error_code
+         return 0
+ 
+     def _parse_xml_output(self, path, clang_output):
+-        '''
++        """
+         Parse the clang-format XML output to convert it in a JSON compatible
+         list of patches, and calculates line level informations from the
+         character level provided changes.
+-        '''
+-        content = six.ensure_str(open(path, 'r').read())
++        """
++        content = six.ensure_str(open(path, "r").read())
+ 
+         def _nb_of_lines(start, end):
+             return len(content[start:end].splitlines())
+ 
+         def _build(replacement):
+-            offset = int(replacement.attrib['offset'])
+-            length = int(replacement.attrib['length'])
+-            last_line = content.rfind('\n', 0, offset)
++            offset = int(replacement.attrib["offset"])
++            length = int(replacement.attrib["length"])
++            last_line = content.rfind("\n", 0, offset)
+             return {
+-                'replacement': replacement.text,
+-                'char_offset': offset,
+-                'char_length': length,
+-                'line': _nb_of_lines(0, offset),
+-                'line_offset': last_line != -1 and (offset - last_line) or 0,
+-                'lines_modified': _nb_of_lines(offset, offset + length),
++                "replacement": replacement.text,
++                "char_offset": offset,
++                "char_length": length,
++                "line": _nb_of_lines(0, offset),
++                "line_offset": last_line != -1 and (offset - last_line) or 0,
++                "lines_modified": _nb_of_lines(offset, offset + length),
+             }
+ 
+         return [
+             _build(replacement)
+-            for replacement in ET.fromstring(clang_output).findall('replacement')
++            for replacement in ET.fromstring(clang_output).findall("replacement")
+         ]
+diff --git a/tools/lint/black.yml.1657301.later b/tools/lint/black.yml.1657301.later
+new file mode 100644
+--- /dev/null
++++ b/tools/lint/black.yml.1657301.later
+@@ -0,0 +1,16 @@
++--- black.yml
+++++ black.yml
++@@ -1,12 +1,13 @@
++ ---
++ black:
++     description: Reformat python
++     include:
+++        - python/mozbuild/mozbuild/code-analysis
++         - python/mozperftest/mozperftest
++         - python/mozrelease/mozrelease/scriptworker_canary.py
++         - taskcluster/docker/funsize-update-generator
++         - taskcluster/taskgraph/actions/scriptworker_canary.py
++         - taskcluster/taskgraph/test/conftest.py
++         - taskcluster/taskgraph/transforms/scriptworker_canary.py
++         - taskcluster/test
++         - testing/condprofile/condprof

+ 35 - 0
mozilla-release/patches/1659113-81a1.patch

@@ -0,0 +1,35 @@
+# HG changeset patch
+# User Ricky Stewart <rstewart@mozilla.com>
+# Date 1597414723 0
+# Node ID 1ad66f8136ca3cf40d15fc4b82ebf2e0e410c572
+# Parent  3b635d9c66c17fc8c5cd7690c1f2ef91c3596298
+Bug 1659113 - Fix bad call to constructor of Build object after bug 985141 r=froydnj
+
+Differential Revision: https://phabricator.services.mozilla.com/D87076
+
+diff --git a/testing/mach_commands.py.1659113.later b/testing/mach_commands.py.1659113.later
+new file mode 100644
+--- /dev/null
++++ b/testing/mach_commands.py.1659113.later
+@@ -0,0 +1,21 @@
++--- mach_commands.py
+++++ mach_commands.py
++@@ -775,17 +775,17 @@ class TestInfoCommand(MachCommandBase):
++                     branches, days, verbose):
++         import testinfo
++         from mozbuild.build_commands import Build
++ 
++         try:
++             self.config_environment
++         except BuildEnvironmentNotFoundException:
++             print("Looks like configure has not run yet, running it now...")
++-            builder = Build(self._mach_context)
+++            builder = Build(self._mach_context, None)
++             builder.configure()
++ 
++         ti = testinfo.TestInfoReport(verbose)
++         ti.report(components, flavor, subsuite, paths,
++                   show_manifests, show_tests, show_summary, show_annotations,
++                   show_activedata,
++                   filter_values, filter_keys, show_components, output_file,
++                   branches, days)

+ 31 - 0
mozilla-release/patches/1659154-81a1.patch

@@ -0,0 +1,31 @@
+# HG changeset patch
+# User Ricky Stewart <rstewart@mozilla.com>
+# Date 1597426779 0
+# Node ID 7da9836d8ab86b4af1d4ab9695eae8656c0e1cb6
+# Parent  da9110696adfceef3130567494f178ad5f4c2643
+Bug 1659154 - Bad call to __init__ of Build object in mach ide after bug 985141 r=froydnj,dmajor
+
+Differential Revision: https://phabricator.services.mozilla.com/D87103
+
+diff --git a/python/mozbuild/mozbuild/backend/mach_commands.py b/python/mozbuild/mozbuild/backend/mach_commands.py
+--- a/python/mozbuild/mozbuild/backend/mach_commands.py
++++ b/python/mozbuild/mozbuild/backend/mach_commands.py
+@@ -49,17 +49,17 @@ class MachCommands(MachCommandBase):
+ 
+         if ide == "vscode":
+             # Verify if platform has VSCode installed
+             if not self.found_vscode_path():
+                 self.log(logging.ERROR, "ide", {}, "VSCode cannot be found, abording!")
+                 return 1
+ 
+             # Create the Build environment to configure the tree
+-            builder = Build(self._mach_context)
++            builder = Build(self._mach_context, None)
+ 
+             rc = builder.configure()
+             if rc != 0:
+                 return rc
+ 
+             # First install what we can through install manifests.
+             rc = builder._run_make(
+                 directory=self.topobjdir, target="pre-export", line_handler=None

+ 36 - 0
mozilla-release/patches/1659411-1-81a1.patch

@@ -0,0 +1,36 @@
+# HG changeset patch
+# User Mike Hommey <mh+mozilla@glandium.org>
+# Date 1597754467 0
+# Node ID 0b1fae245dfd55872fb8801581bcea941a03589c
+# Parent  afb8a0e9cebb29a5788a0607d82dcde2ae566afd
+Bug 1659411 - Default virtualenv_name to None in MachCommandBase. r=firefox-build-system-reviewers,rstewart
+
+Bug #985141 added this argument without changing all the callers.
+Instead of fixing each caller individually, just allow a value not to be
+passed in. This is what the underlying MozbuildObject class does
+anyways.
+
+Differential Revision: https://phabricator.services.mozilla.com/D87386
+
+diff --git a/python/mozbuild/mozbuild/base.py b/python/mozbuild/mozbuild/base.py
+--- a/python/mozbuild/mozbuild/base.py
++++ b/python/mozbuild/mozbuild/base.py
+@@ -857,17 +857,17 @@ class MozbuildObject(ProcessExecutionMix
+ 
+ class MachCommandBase(MozbuildObject):
+     """Base class for mach command providers that wish to be MozbuildObjects.
+ 
+     This provides a level of indirection so MozbuildObject can be refactored
+     without having to change everything that inherits from it.
+     """
+ 
+-    def __init__(self, context, virtualenv_name):
++    def __init__(self, context, virtualenv_name=None):
+         # Attempt to discover topobjdir through environment detection, as it is
+         # more reliable than mozconfig when cwd is inside an objdir.
+         topsrcdir = context.topdir
+         topobjdir = None
+         detect_virtualenv_mozinfo = True
+         if hasattr(context, 'detect_virtualenv_mozinfo'):
+             detect_virtualenv_mozinfo = getattr(context,
+                                                 'detect_virtualenv_mozinfo')

+ 34 - 0
mozilla-release/patches/1659411-2-81a1.patch

@@ -0,0 +1,34 @@
+# HG changeset patch
+# User Tarek Ziadé <tarek@mozilla.com>
+# Date 1598456822 0
+# Node ID bce9ec148eef138c89fe2a0c0a3e7ed58e87e3cd
+# Parent  8622f9472e52d6046e28def8c3f6da86040f051b
+Bug 1659411 - add missing dep r=sparky
+
+Differential Revision: https://phabricator.services.mozilla.com/D88295
+
+diff --git a/python/mozperftest/mozperftest/runner.py.1659411-2.later b/python/mozperftest/mozperftest/runner.py.1659411-2.later
+new file mode 100644
+--- /dev/null
++++ b/python/mozperftest/mozperftest/runner.py.1659411-2.later
+@@ -0,0 +1,20 @@
++--- runner.py
+++++ runner.py
++@@ -51,16 +51,17 @@ SEARCH_PATHS = [
++     "testing/mozbase/mozprofile",
++     "testing/mozbase/mozproxy",
++     "third_party/python/attrs/src",
++     "third_party/python/blessings",
++     "third_party/python/distro",
++     "third_party/python/dlmanager",
++     "third_party/python/esprima",
++     "third_party/python/importlib_metadata",
+++    "third_party/python/jsmin",
++     "third_party/python/jsonschema",
++     "third_party/python/pyrsistent",
++     "third_party/python/PyYAML/lib3",
++     "third_party/python/redo",
++     "third_party/python/requests",
++     "third_party/python/six",
++     "third_party/python/zipp",
++ ]

+ 187 - 0
mozilla-release/patches/1659542-82a1.patch

@@ -0,0 +1,187 @@
+# HG changeset patch
+# User Ricky Stewart <rstewart@mozilla.com>
+# Date 1599076557 0
+# Node ID a6be919846bd27373128ecc24e6c99bc124f214c
+# Parent  2dad38173190f3aaaf64f81fe4f58d64be9b03e2
+Bug 1659542 - Remove support for `pipenv` in `mozilla-central` r=mhentges,dmajor
+
+Differential Revision: https://phabricator.services.mozilla.com/D89192
+
+diff --git a/python/mozbuild/mozbuild/base.py b/python/mozbuild/mozbuild/base.py
+--- a/python/mozbuild/mozbuild/base.py
++++ b/python/mozbuild/mozbuild/base.py
+@@ -824,34 +824,16 @@ class MozbuildObject(ProcessExecutionMix
+ 
+     def activate_virtualenv(self):
+         self.virtualenv_manager.ensure()
+         self.virtualenv_manager.activate()
+ 
+     def _set_log_level(self, verbose):
+         self.log_manager.terminal_handler.setLevel(logging.INFO if not verbose else logging.DEBUG)
+ 
+-    def ensure_pipenv(self):
+-        self.activate_virtualenv()
+-        pipenv = os.path.join(self.virtualenv_manager.bin_path, 'pipenv')
+-        if not os.path.exists(pipenv):
+-            for package in ['certifi', 'pipenv', 'six', 'virtualenv', 'virtualenv-clone']:
+-                path = os.path.normpath(os.path.join(
+-                    self.topsrcdir, 'third_party/python', package))
+-                self.virtualenv_manager.install_pip_package(path, vendored=True)
+-        return pipenv
+-
+-    def activate_pipenv(self, workon_home, pipfile=None, populate=False,
+-                        python=None):
+-        if pipfile is not None and not os.path.exists(pipfile):
+-            raise Exception('Pipfile not found: %s.' % pipfile)
+-        self.ensure_pipenv()
+-        self.virtualenv_manager.activate_pipenv(workon_home, pipfile, populate,
+-                                                python)
+-
+     def _ensure_zstd(self):
+         try:
+             import zstandard  # noqa: F401
+         except (ImportError, AttributeError):
+             self.activate_virtualenv()
+             self.virtualenv_manager.install_pip_package('zstandard>=0.9.0,<=0.13.0')
+ 
+ 
+diff --git a/python/mozbuild/mozbuild/virtualenv.py b/python/mozbuild/mozbuild/virtualenv.py
+--- a/python/mozbuild/mozbuild/virtualenv.py
++++ b/python/mozbuild/mozbuild/virtualenv.py
+@@ -3,17 +3,16 @@
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ # This file contains code for populating the virtualenv environment for
+ # Mozilla's build system. It is typically called as part of configure.
+ 
+ from __future__ import absolute_import, print_function, unicode_literals
+ 
+ import os
+-import platform
+ import shutil
+ import subprocess
+ import sys
+ 
+ IS_NATIVE_WIN = (sys.platform == 'win32' and os.sep == '\\')
+ IS_CYGWIN = (sys.platform == 'cygwin')
+ 
+ PY2 = sys.version_info[0] == 2
+@@ -562,118 +561,16 @@ class VirtualenvManager(object):
+         # against the executing interpreter. By creating a new process, we
+         # force the virtualenv's interpreter to be used and all is well.
+         # It /might/ be possible to cheat and set sys.executable to
+         # self.python_path. However, this seems more risk than it's worth.
+         pip = os.path.join(self.bin_path, 'pip')
+         subprocess.check_call([pip] + args, stderr=subprocess.STDOUT, cwd=self.topsrcdir,
+                               universal_newlines=PY3)
+ 
+-    def activate_pipenv(self, workon_home, pipfile=None, populate=False,
+-                        python=None):
+-        """Activate a virtual environment managed by pipenv
+-
+-        If ``pipfile`` is not ``None`` then the Pipfile located at the path
+-        provided will be used to create the virtual environment. If
+-        ``populate`` is ``True`` then the virtual environment will be
+-        populated from the manifest file. The optional ``python`` argument
+-        indicates the version of Python for pipenv to use.
+-        """
+-        from distutils.version import LooseVersion
+-
+-        pipenv = os.path.join(self.bin_path, 'pipenv')
+-        env = ensure_subprocess_env(os.environ.copy())
+-        env.update(ensure_subprocess_env({
+-            'PIPENV_IGNORE_VIRTUALENVS': '1',
+-            'PIP_NO_INDEX': '1',
+-            'WORKON_HOME': str(os.path.normpath(workon_home)),
+-        }))
+-        # On mac, running pipenv with LC_CTYPE set to "UTF-8" (which happens
+-        # when wrapping with run-task on automation) fails.
+-        # Unsetting it doesn't really matter for what pipenv does.
+-        env.pop('LC_CTYPE', None)
+-
+-        # Avoid click RuntimeError under python 3 on linux: http://click.pocoo.org/python3/
+-        if PY3 and sys.platform == 'linux':
+-            env.update(ensure_subprocess_env({
+-                'LC_ALL': 'C.UTF-8',
+-                'LANG': 'C.UTF-8'
+-            }))
+-
+-        if python is not None:
+-            env.update(ensure_subprocess_env({
+-                'PIPENV_DEFAULT_PYTHON_VERSION': str(python),
+-                'PIPENV_PYTHON': str(python)
+-            }))
+-
+-        def ensure_venv():
+-            """Create virtual environment if needed and return path"""
+-            if python is not None:
+-                if os.path.exists(python):
+-                    pipenv_python = python
+-                else:
+-                    # If the desired python version matches the running python version,
+-                    # then have pipenv install that same identical version.
+-                    # Without this logic, pipenv would be more aggressive at finding the
+-                    # newest possible relevant python version available.
+-                    # However, due to "purge a venv if it has a different version" logic
+-                    # we have elsewhere in-tree, it is more useful to have consistent
+-                    # venv versions than new-as-possible venv versions.
+-                    target_version = LooseVersion(python)
+-                    current_version = LooseVersion(platform.python_version())
+-                    pipenv_python = sys.executable
+-
+-                    for target, current in zip(target_version.version, current_version.version):
+-                        if target != current:
+-                            pipenv_python = python
+-
+-            venv = get_venv()
+-            if venv is not None:
+-                return venv
+-            if python is not None:
+-                subprocess.check_call(
+-                    [pipenv, '--python', pipenv_python],
+-                    stderr=subprocess.STDOUT,
+-                    env=env)
+-            return get_venv()
+-
+-        def get_venv():
+-            """Return path to virtual environment or None"""
+-            try:
+-                sub_env = env.copy()
+-                sub_env.pop('PYCHARM_HOSTED', None)
+-                return subprocess.check_output(
+-                        [pipenv, '--venv'],
+-                        stderr=subprocess.STDOUT,
+-                        env=sub_env, universal_newlines=True).rstrip()
+-
+-            except subprocess.CalledProcessError:
+-                # virtual environment does not exist
+-                return None
+-
+-        if pipfile is not None:
+-            # Install from Pipfile
+-            env_ = env.copy()
+-            del env_['PIP_NO_INDEX']
+-            env_.update(ensure_subprocess_env({
+-                'PIPENV_PIPFILE': str(pipfile)
+-            }))
+-            subprocess.check_call([pipenv, 'install'], stderr=subprocess.STDOUT, env=env_)
+-
+-        self.virtualenv_root = ensure_venv()
+-
+-        if populate:
+-            # Populate from the manifest
+-            subprocess.check_call([
+-                pipenv, 'run', 'python', os.path.join(here, 'virtualenv.py'), 'populate',
+-                self.topsrcdir, self.virtualenv_root, self.manifest_path],
+-                stderr=subprocess.STDOUT, env=env)
+-
+-        self.activate()
+-
+ 
+ def verify_python_version(log_handle):
+     """Ensure the current version of Python is sufficient."""
+     from distutils.version import LooseVersion
+ 
+     major, minor, micro = sys.version_info[:3]
+     minimum_python_versions = {
+         2: LooseVersion('2.7.3'),

File diff suppressed because it is too large
+ 85920 - 0
mozilla-release/patches/1662851-82a1.patch


+ 1179 - 0
mozilla-release/patches/1666232-83a1.patch

@@ -0,0 +1,1179 @@
+# HG changeset patch
+# User Stas Malolepszy <stas@mozilla.com>
+# Date 1600691202 0
+# Node ID 08e86ba18177437431a33a712956aa4c3dc91120
+# Parent  c8c36b01e25bf5491b4fa20319b3473bfdd886a3
+Bug 1666232 - Vendor fluent.syntax 0.18.1, fluent.migrate 0.10, compare-locales 8.1.0. r=flod
+
+Differential Revision: https://phabricator.services.mozilla.com/D90851
+
+diff --git a/third_party/python/compare-locales/PKG-INFO b/third_party/python/compare-locales/PKG-INFO
+--- a/third_party/python/compare-locales/PKG-INFO
++++ b/third_party/python/compare-locales/PKG-INFO
+@@ -1,11 +1,11 @@
+ Metadata-Version: 2.1
+ Name: compare-locales
+-Version: 8.0.0
++Version: 8.1.0
+ Summary: Lint Mozilla localizations
+ Home-page: UNKNOWN
+ Author: Axel Hecht
+ Author-email: axel@mozilla.com
+ License: MPL 2.0
+ Description: [![Build Status](https://travis-ci.org/Pike/compare-locales.svg?branch=master)](https://travis-ci.org/Pike/compare-locales)
+         # compare-locales
+         Lint Mozilla localizations
+diff --git a/third_party/python/compare-locales/compare_locales/__init__.py b/third_party/python/compare-locales/compare_locales/__init__.py
+--- a/third_party/python/compare-locales/compare_locales/__init__.py
++++ b/third_party/python/compare-locales/compare_locales/__init__.py
+@@ -1,1 +1,1 @@
+-version = "8.0.0"
++version = "8.1.0"
+diff --git a/third_party/python/compare-locales/compare_locales/checks/fluent.py b/third_party/python/compare-locales/compare_locales/checks/fluent.py
+--- a/third_party/python/compare-locales/compare_locales/checks/fluent.py
++++ b/third_party/python/compare-locales/compare_locales/checks/fluent.py
+@@ -4,16 +4,17 @@
+ 
+ from __future__ import absolute_import
+ from __future__ import unicode_literals
+ import re
+ from collections import defaultdict
+ 
+ from fluent.syntax import ast as ftl
+ from fluent.syntax.serializer import serialize_variant_key
++from fluent.syntax.visitor import Visitor
+ 
+ from .base import Checker, CSSCheckMixin
+ from compare_locales import plurals
+ 
+ 
+ MSGS = {
+     'missing-msg-ref': 'Missing message reference: {ref}',
+     'missing-term-ref': 'Missing term reference: {ref}',
+@@ -40,17 +41,17 @@ def pattern_variants(pattern):
+     """
+     elements = pattern.elements
+     if len(elements) == 1:
+         if isinstance(elements[0], ftl.TextElement):
+             return [elements[0].value]
+     return []
+ 
+ 
+-class ReferenceMessageVisitor(ftl.Visitor, CSSCheckMixin):
++class ReferenceMessageVisitor(Visitor, CSSCheckMixin):
+     def __init__(self):
+         # References to Messages, their Attributes, and Terms
+         # Store reference name and type
+         self.entry_refs = defaultdict(dict)
+         # The currently active references
+         self.refs = {}
+         # Start with the Entry value (associated with None)
+         self.entry_refs[None] = self.refs
+@@ -279,17 +280,17 @@ class L10nMessageVisitor(GenericL10nChec
+             self.messages.append(
+                 (
+                     'warning', node.span.start,
+                     MSGS['obsolete-' + ref_type].format(ref=ref),
+                 )
+             )
+ 
+ 
+-class TermVisitor(GenericL10nChecks, ftl.Visitor):
++class TermVisitor(GenericL10nChecks, Visitor):
+     def __init__(self, locale):
+         super(TermVisitor, self).__init__()
+         self.locale = locale
+         self.messages = []
+ 
+     def generic_visit(self, node):
+         if isinstance(
+             node,
+diff --git a/third_party/python/compare-locales/compare_locales/parser/fluent.py b/third_party/python/compare-locales/compare_locales/parser/fluent.py
+--- a/third_party/python/compare-locales/compare_locales/parser/fluent.py
++++ b/third_party/python/compare-locales/compare_locales/parser/fluent.py
+@@ -4,25 +4,26 @@
+ 
+ from __future__ import absolute_import
+ from __future__ import unicode_literals
+ import re
+ 
+ from fluent.syntax import FluentParser as FTLParser
+ from fluent.syntax import ast as ftl
+ from fluent.syntax.serializer import serialize_comment
++from fluent.syntax.visitor import Visitor
+ from .base import (
+     CAN_SKIP,
+     Entry, Entity, Comment, Junk, Whitespace,
+     LiteralEntity,
+     Parser
+ )
+ 
+ 
+-class WordCounter(ftl.Visitor):
++class WordCounter(Visitor):
+     def __init__(self):
+         self.word_count = 0
+ 
+     def generic_visit(self, node):
+         if isinstance(
+             node,
+             (ftl.Span, ftl.Annotation, ftl.BaseComment)
+         ):
+diff --git a/third_party/python/compare-locales/compare_locales/plurals.py b/third_party/python/compare-locales/compare_locales/plurals.py
+--- a/third_party/python/compare-locales/compare_locales/plurals.py
++++ b/third_party/python/compare-locales/compare_locales/plurals.py
+@@ -172,16 +172,17 @@ CATEGORIES_BY_LOCALE = {
+     'sl': 10,
+     'son': 1,
+     'sq': 1,
+     'sr': 19,
+     'ss': 1,
+     'st': 1,
+     'sv': 1,
+     'sw': 1,
++    'szl': 9,
+     'ta': 1,
+     'ta': 1,
+     'te': 1,
+     'th': 0,
+     'tl': 1,
+     'tn': 1,
+     'tr': 1,
+     'trs': 1,
+diff --git a/third_party/python/compare-locales/setup.py b/third_party/python/compare-locales/setup.py
+--- a/third_party/python/compare-locales/setup.py
++++ b/third_party/python/compare-locales/setup.py
+@@ -47,16 +47,16 @@ setup(name="compare-locales",
+             'moz-l10n-lint = compare_locales.lint.cli:main',
+         ],
+       },
+       packages=find_packages(),
+       package_data={
+           'compare_locales.tests': ['data/*.properties', 'data/*.dtd']
+       },
+       install_requires=[
+-          'fluent.syntax >=0.17.0, <0.18',
++          'fluent.syntax >=0.18.0, <0.19',
+           'pytoml',
+           'six',
+       ],
+       tests_require=[
+           'mock<4.0',
+       ],
+       test_suite='compare_locales.tests')
+diff --git a/third_party/python/fluent.migrate/PKG-INFO b/third_party/python/fluent.migrate/PKG-INFO
+--- a/third_party/python/fluent.migrate/PKG-INFO
++++ b/third_party/python/fluent.migrate/PKG-INFO
+@@ -1,11 +1,11 @@
+ Metadata-Version: 2.1
+ Name: fluent.migrate
+-Version: 0.9
++Version: 0.10
+ Summary: Toolchain to migrate legacy translation to Fluent.
+ Home-page: https://hg.mozilla.org/l10n/fluent-migration/
+ Author: Mozilla
+ Author-email: l10n-drivers@mozilla.org
+ License: APL 2
+ Description: Fluent Migration Tools
+         ======================
+         
+diff --git a/third_party/python/fluent.migrate/fluent/migrate/_context.py b/third_party/python/fluent.migrate/fluent/migrate/_context.py
+--- a/third_party/python/fluent.migrate/fluent/migrate/_context.py
++++ b/third_party/python/fluent.migrate/fluent/migrate/_context.py
+@@ -9,16 +9,17 @@ import logging
+ from six.moves import zip_longest
+ 
+ import fluent.syntax.ast as FTL
+ from fluent.syntax.parser import FluentParser
+ from fluent.syntax.serializer import FluentSerializer
+ from compare_locales.parser import getParser
+ from compare_locales.plurals import get_plural
+ 
++from .evaluator import Evaluator
+ from .merge import merge_resource
+ from .errors import (
+     UnreadableReferenceError,
+ )
+ 
+ 
+ class InternalContext(object):
+     """Internal context for merging translation resources.
+@@ -47,16 +48,20 @@ class InternalContext(object):
+         self.reference_resources = {}
+         self.localization_resources = {}
+         self.target_resources = {}
+ 
+         # An iterable of `FTL.Message` objects some of whose nodes can be the
+         # transform operations.
+         self.transforms = {}
+ 
++        # The evaluator instance is an AST transformer capable of walking an
++        # AST hierarchy and evaluating nodes which are migration Transforms.
++        self.evaluator = Evaluator(self)
++
+     def read_ftl_resource(self, path):
+         """Read an FTL resource and parse it into an AST."""
+         f = codecs.open(path, 'r', 'utf8')
+         try:
+             contents = f.read()
+         except UnicodeDecodeError as err:
+             logger = logging.getLogger('migrate')
+             logger.warning('Unable to read file {}: {}'.format(path, err))
+@@ -316,10 +321,13 @@ class InternalContext(object):
+ 
+         return {
+             path: self.fluent_serializer.serialize(snapshot)
+             for path, snapshot in self.merge_changeset(
+                 changeset, known_translations
+             )
+         }
+ 
++    def evaluate(self, node):
++        return self.evaluator.visit(node)
++
+ 
+ logging.basicConfig()
+diff --git a/third_party/python/fluent.migrate/fluent/migrate/evaluator.py b/third_party/python/fluent.migrate/fluent/migrate/evaluator.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/fluent.migrate/fluent/migrate/evaluator.py
+@@ -0,0 +1,28 @@
++from fluent.syntax import ast as FTL
++from fluent.syntax.visitor import Transformer
++
++from .transforms import Transform
++
++
++class Evaluator(Transformer):
++    """An AST transformer for evaluating migration Transforms.
++
++    An AST transformer (i.e. a visitor capable of modifying the AST) which
++    walks an AST hierarchy and evaluates nodes which are migration Transforms.
++    """
++
++    def __init__(self, ctx):
++        self.ctx = ctx
++
++    def visit(self, node):
++        if not isinstance(node, FTL.BaseNode):
++            return node
++
++        if isinstance(node, Transform):
++            # Some transforms don't expect other transforms as children.
++            # Evaluate the children first.
++            transform = self.generic_visit(node)
++            # Then, evaluate this transform.
++            return transform(self.ctx)
++
++        return self.generic_visit(node)
+diff --git a/third_party/python/fluent.migrate/fluent/migrate/helpers.py b/third_party/python/fluent.migrate/fluent/migrate/helpers.py
+--- a/third_party/python/fluent.migrate/fluent/migrate/helpers.py
++++ b/third_party/python/fluent.migrate/fluent/migrate/helpers.py
+@@ -7,16 +7,17 @@ nodes.
+ They take a string argument and immediately return a corresponding AST node.
+ (As opposed to Transforms which are AST nodes on their own and only return the
+ migrated AST nodes when they are evaluated by a MigrationContext.) """
+ 
+ from __future__ import unicode_literals
+ from __future__ import absolute_import
+ 
+ from fluent.syntax import FluentParser, ast as FTL
++from fluent.syntax.visitor import Transformer
+ from .transforms import Transform, CONCAT, COPY, COPY_PATTERN
+ from .errors import NotSupportedError, InvalidTransformError
+ 
+ 
+ def VARIABLE_REFERENCE(name):
+     """Create an ExternalArgument expression."""
+ 
+     return FTL.VariableReference(
+@@ -45,17 +46,17 @@ def MESSAGE_REFERENCE(name):
+ def TERM_REFERENCE(name):
+     """Create a TermReference expression."""
+ 
+     return FTL.TermReference(
+         id=FTL.Identifier(name)
+     )
+ 
+ 
+-class IntoTranforms(FTL.Transformer):
++class IntoTranforms(Transformer):
+     IMPLICIT_TRANSFORMS = ("CONCAT",)
+     FORBIDDEN_TRANSFORMS = ("PLURALS", "REPLACE", "REPLACE_IN_TEXT")
+ 
+     def __init__(self, substitutions):
+         self.substitutions = substitutions
+ 
+     def visit_Junk(self, node):
+         anno = node.annotations[0]
+diff --git a/third_party/python/fluent.migrate/fluent/migrate/merge.py b/third_party/python/fluent.migrate/fluent/migrate/merge.py
+--- a/third_party/python/fluent.migrate/fluent/migrate/merge.py
++++ b/third_party/python/fluent.migrate/fluent/migrate/merge.py
+@@ -1,16 +1,15 @@
+ # coding=utf8
+ from __future__ import unicode_literals
+ from __future__ import absolute_import
+ 
+ import fluent.syntax.ast as FTL
+ 
+ from .errors import SkipTransform
+-from .transforms import evaluate
+ from .util import get_message, get_transform
+ 
+ 
+ def merge_resource(ctx, reference, current, transforms, in_changeset):
+     """Transform legacy translations into FTL.
+ 
+     Use the `reference` FTL AST as a template.  For each en-US string in the
+     reference, first check for an existing translation in the current FTL
+@@ -47,14 +46,14 @@ def merge_resource(ctx, reference, curre
+         transform = get_transform(transforms, ident)
+ 
+         # Make sure this message is supposed to be migrated as part of the
+         # current changeset.
+         if transform is not None and in_changeset(ident):
+             if transform.comment is None:
+                 transform.comment = entry.comment
+             try:
+-                return evaluate(ctx, transform)
++                return ctx.evaluate(transform)
+             except SkipTransform:
+                 return None
+ 
+     body = merge_body(reference.body)
+     return FTL.Resource(body)
+diff --git a/third_party/python/fluent.migrate/fluent/migrate/transforms.py b/third_party/python/fluent.migrate/fluent/migrate/transforms.py
+--- a/third_party/python/fluent.migrate/fluent/migrate/transforms.py
++++ b/third_party/python/fluent.migrate/fluent/migrate/transforms.py
+@@ -61,30 +61,21 @@ TextElement by PLURALS and then run thro
+         )
+     )
+ """
+ 
+ from __future__ import unicode_literals
+ from __future__ import absolute_import
+ import re
+ 
+-import fluent.syntax.ast as FTL
++from fluent.syntax import ast as FTL
++from fluent.syntax.visitor import Transformer
+ from .errors import NotSupportedError
+ 
+ 
+-def evaluate(ctx, node):
+-    def eval_node(subnode):
+-        if isinstance(subnode, Transform):
+-            return subnode(ctx)
+-        else:
+-            return subnode
+-
+-    return node.traverse(eval_node)
+-
+-
+ def chain_elements(elements):
+     '''Flatten a list of FTL nodes into an iterator over PatternElements.'''
+     for element in elements:
+         if isinstance(element, FTL.Pattern):
+             # PY3 yield from element.elements
+             for child in element.elements:
+                 yield child
+         elif isinstance(element, FTL.PatternElement):
+@@ -233,17 +224,17 @@ class COPY_PATTERN(FluentSource):
+ 
+     The given key can be a Message ID, Message ID.attribute_name, or
+     Term ID. Accessing Term attributes is not supported, as they're internal
+     to the localization.
+     """
+     pass
+ 
+ 
+-class TransformPattern(FluentSource, FTL.Transformer):
++class TransformPattern(FluentSource, Transformer):
+     """Base class for modifying a Fluent pattern as part of a migration.
+ 
+     Implement visit_* methods of the Transformer pattern to do the
+     actual modifications.
+     """
+     def __call__(self, ctx):
+         pattern = super(TransformPattern, self).__call__(ctx)
+         return self.visit(pattern)
+@@ -390,17 +381,17 @@ class REPLACE_IN_TEXT(Transform):
+         keys_indexed = {}
+         for key, indices in key_indices.items():
+             for index in indices:
+                 keys_indexed[index] = key
+ 
+         # Order the replacements by the position of the original placeable in
+         # the translation.
+         replacements = (
+-            (key, evaluate(ctx, self.replacements[key]))
++            (key, ctx.evaluate(self.replacements[key]))
+             for index, key
+             in sorted(keys_indexed.items(), key=lambda x: x[0])
+         )
+ 
+         # A list of PatternElements built from the legacy translation and the
+         # FTL replacements. It may contain empty or adjacent TextElements.
+         elements = []
+         tail = value
+@@ -457,17 +448,17 @@ class PLURALS(LegacySource):
+     def __init__(self, path, key, selector, foreach=Transform.pattern_of,
+                  **kwargs):
+         super(PLURALS, self).__init__(path, key, **kwargs)
+         self.selector = selector
+         self.foreach = foreach
+ 
+     def __call__(self, ctx):
+         element = super(PLURALS, self).__call__(ctx)
+-        selector = evaluate(ctx, self.selector)
++        selector = ctx.evaluate(self.selector)
+         keys = ctx.plural_categories
+         forms = [
+             FTL.TextElement(part)
+             for part in element.value.split(';')
+         ]
+ 
+         # The default CLDR form should be the last we have in DEFAULT_ORDER,
+         # usually `other`, but in some cases `many`. If we don't have a variant
+@@ -490,31 +481,31 @@ class PLURALS(LegacySource):
+         # plural forms.
+         if len(pairs) == 0:
+             return Transform.pattern_of()
+ 
+         # A special case for languages with one plural category or one legacy
+         # variant. We don't need to insert a SelectExpression for them.
+         if len(pairs) == 1:
+             _, only_form = pairs[0]
+-            only_variant = evaluate(ctx, self.foreach(only_form))
++            only_variant = ctx.evaluate(self.foreach(only_form))
+             return Transform.pattern_of(only_variant)
+ 
+         # Make sure the default key is defined. If it's missing, use the last
+         # form (in CLDR order) found in the legacy translation.
+         pairs.sort(key=lambda pair: self.DEFAULT_ORDER.index(pair[0]))
+         last_key, last_form = pairs[-1]
+         if last_key != default_key:
+             pairs.append((default_key, last_form))
+ 
+         def createVariant(key, form):
+             # Run the legacy plural form through `foreach` which returns an
+             # `FTL.Node` describing the transformation required for each
+             # variant. Then evaluate it to a migrated FTL node.
+-            value = evaluate(ctx, self.foreach(form))
++            value = ctx.evaluate(self.foreach(form))
+             return FTL.Variant(
+                 key=FTL.Identifier(key),
+                 value=value,
+                 default=key == default_key
+             )
+ 
+         select = FTL.SelectExpression(
+             selector=selector,
+diff --git a/third_party/python/fluent.migrate/fluent/migrate/validator.py b/third_party/python/fluent.migrate/fluent/migrate/validator.py
+--- a/third_party/python/fluent.migrate/fluent/migrate/validator.py
++++ b/third_party/python/fluent.migrate/fluent/migrate/validator.py
+@@ -5,16 +5,17 @@ import argparse
+ import ast
+ import six
+ from six.moves import zip_longest
+ 
+ from fluent.migrate import transforms
+ from fluent.migrate.errors import MigrationError
+ from fluent.migrate.helpers import transforms_from
+ from fluent.syntax import ast as FTL
++from fluent.syntax.visitor import Visitor
+ from compare_locales import mozpath
+ 
+ 
+ class MigrateNotFoundException(Exception):
+     pass
+ 
+ 
+ class BadContextAPIException(Exception):
+@@ -306,17 +307,17 @@ class MigrateAnalyzer(ast.NodeVisitor):
+         for arg, NODE_TYPE in zip_longest(node.args, argspec):
+             if NODE_TYPE is None:
+                 return True if allow_more else False
+             if not (isinstance(arg, NODE_TYPE)):
+                 return False
+         return True
+ 
+ 
+-class TransformsInspector(FTL.Visitor):
++class TransformsInspector(Visitor):
+     def __init__(self):
+         super(TransformsInspector, self).__init__()
+         self.issues = []
+ 
+     def generic_visit(self, node):
+         if isinstance(node, transforms.Source):
+             src = node.path
+             # Source needs paths to be normalized
+diff --git a/third_party/python/fluent.migrate/setup.py b/third_party/python/fluent.migrate/setup.py
+--- a/third_party/python/fluent.migrate/setup.py
++++ b/third_party/python/fluent.migrate/setup.py
+@@ -1,32 +1,32 @@
+ #!/usr/bin/env python
+ 
+ from setuptools import setup
+ 
+ setup(
+     name='fluent.migrate',
+-    version='0.9',
++    version='0.10',
+     description='Toolchain to migrate legacy translation to Fluent.',
+     author='Mozilla',
+     author_email='l10n-drivers@mozilla.org',
+     license='APL 2',
+     url='https://hg.mozilla.org/l10n/fluent-migration/',
+     keywords=['fluent', 'localization', 'l10n'],
+     classifiers=[
+         'Development Status :: 3 - Alpha',
+         'Intended Audience :: Developers',
+         'License :: OSI Approved :: Apache Software License',
+         'Programming Language :: Python :: 2.7',
+         'Programming Language :: Python :: 3.7',
+     ],
+     packages=['fluent', 'fluent.migrate'],
+     install_requires=[
+-        'compare-locales >=7.6, <8.1',
+-        'fluent.syntax >=0.17.0, <0.18',
++        'compare-locales >=8.1, <9.0',
++        'fluent.syntax >=0.18.0, <0.19',
+         'six',
+     ],
+     extras_require={
+         'hg': ['python-hglib',],
+     },
+     tests_require=[
+         'mock',
+     ],
+diff --git a/third_party/python/fluent.syntax/PKG-INFO b/third_party/python/fluent.syntax/PKG-INFO
+--- a/third_party/python/fluent.syntax/PKG-INFO
++++ b/third_party/python/fluent.syntax/PKG-INFO
+@@ -1,16 +1,39 @@
+-Metadata-Version: 1.1
++Metadata-Version: 2.1
+ Name: fluent.syntax
+-Version: 0.17.0
++Version: 0.18.1
+ Summary: Localization library for expressive translations.
+ Home-page: https://github.com/projectfluent/python-fluent
+ Author: Mozilla
+ Author-email: l10n-drivers@mozilla.org
+ License: APL 2
+-Description: See https://github.com/projectfluent/python-fluent/ for more info.
++Description: ``fluent.syntax`` |fluent.syntax|
++        ---------------------------------
++        
++        Read, write, and transform `Fluent`_ files.
++        
++        This package includes the parser, serializer, and traversal
++        utilities like Visitor and Transformer. You’re looking for this package
++        if you work on tooling for Fluent in Python.
++        
++        .. code-block:: python
++        
++           >>> from fluent.syntax import parse, ast, serialize
++           >>> resource = parse("a-key = String to localize")
++           >>> resource.body[0].value.elements[0].value = "Localized string"
++           >>> serialize(resource)
++           'a-key = Localized string\n'
++        
++        
++        Find the full documentation on https://projectfluent.org/python-fluent/fluent.syntax/.
++        
++        .. _fluent: https://projectfluent.org/
++        .. |fluent.syntax| image:: https://github.com/projectfluent/python-fluent/workflows/fluent.syntax/badge.svg
++        
+ Keywords: fluent,localization,l10n
+ Platform: UNKNOWN
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python :: 2.7
+ Classifier: Programming Language :: Python :: 3.5
++Description-Content-Type: text/x-rst
+diff --git a/third_party/python/fluent.syntax/README.rst b/third_party/python/fluent.syntax/README.rst
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/fluent.syntax/README.rst
+@@ -0,0 +1,22 @@
++``fluent.syntax`` |fluent.syntax|
++---------------------------------
++
++Read, write, and transform `Fluent`_ files.
++
++This package includes the parser, serializer, and traversal
++utilities like Visitor and Transformer. You’re looking for this package
++if you work on tooling for Fluent in Python.
++
++.. code-block:: python
++
++   >>> from fluent.syntax import parse, ast, serialize
++   >>> resource = parse("a-key = String to localize")
++   >>> resource.body[0].value.elements[0].value = "Localized string"
++   >>> serialize(resource)
++   'a-key = Localized string\n'
++
++
++Find the full documentation on https://projectfluent.org/python-fluent/fluent.syntax/.
++
++.. _fluent: https://projectfluent.org/
++.. |fluent.syntax| image:: https://github.com/projectfluent/python-fluent/workflows/fluent.syntax/badge.svg
+diff --git a/third_party/python/fluent.syntax/fluent/syntax/__init__.py b/third_party/python/fluent.syntax/fluent/syntax/__init__.py
+--- a/third_party/python/fluent.syntax/fluent/syntax/__init__.py
++++ b/third_party/python/fluent.syntax/fluent/syntax/__init__.py
+@@ -1,12 +1,16 @@
+ from .parser import FluentParser
+ from .serializer import FluentSerializer
+ 
+ 
+ def parse(source, **kwargs):
++    """Create an ast.Resource from a Fluent Syntax source.
++    """
+     parser = FluentParser(**kwargs)
+     return parser.parse(source)
+ 
+ 
+ def serialize(resource, **kwargs):
++    """Serialize an ast.Resource to a unicode string.
++    """
+     serializer = FluentSerializer(**kwargs)
+     return serializer.serialize(resource)
+diff --git a/third_party/python/fluent.syntax/fluent/syntax/ast.py b/third_party/python/fluent.syntax/fluent/syntax/ast.py
+--- a/third_party/python/fluent.syntax/fluent/syntax/ast.py
++++ b/third_party/python/fluent.syntax/fluent/syntax/ast.py
+@@ -1,77 +1,16 @@
+ # coding=utf-8
+ from __future__ import unicode_literals
+ import re
+ import sys
+ import json
+ import six
+ 
+ 
+-class Visitor(object):
+-    '''Read-only visitor pattern.
+-
+-    Subclass this to gather information from an AST.
+-    To generally define which nodes not to descend in to, overload
+-    `generic_visit`.
+-    To handle specific node types, add methods like `visit_Pattern`.
+-    If you want to still descend into the children of the node, call
+-    `generic_visit` of the superclass.
+-    '''
+-    def visit(self, node):
+-        if isinstance(node, list):
+-            for child in node:
+-                self.visit(child)
+-            return
+-        if not isinstance(node, BaseNode):
+-            return
+-        nodename = type(node).__name__
+-        visit = getattr(self, 'visit_{}'.format(nodename), self.generic_visit)
+-        visit(node)
+-
+-    def generic_visit(self, node):
+-        for propname, propvalue in vars(node).items():
+-            self.visit(propvalue)
+-
+-
+-class Transformer(Visitor):
+-    '''In-place AST Transformer pattern.
+-
+-    Subclass this to create an in-place modified variant
+-    of the given AST.
+-    If you need to keep the original AST around, pass
+-    a `node.clone()` to the transformer.
+-    '''
+-    def visit(self, node):
+-        if not isinstance(node, BaseNode):
+-            return node
+-
+-        nodename = type(node).__name__
+-        visit = getattr(self, 'visit_{}'.format(nodename), self.generic_visit)
+-        return visit(node)
+-
+-    def generic_visit(self, node):
+-        for propname, propvalue in vars(node).items():
+-            if isinstance(propvalue, list):
+-                new_vals = []
+-                for child in propvalue:
+-                    new_val = self.visit(child)
+-                    if new_val is not None:
+-                        new_vals.append(new_val)
+-                # in-place manipulation
+-                propvalue[:] = new_vals
+-            elif isinstance(propvalue, BaseNode):
+-                new_val = self.visit(propvalue)
+-                if new_val is None:
+-                    delattr(node, propname)
+-                else:
+-                    setattr(node, propname, new_val)
+-        return node
+-
+-
+ def to_json(value, fn=None):
+     if isinstance(value, BaseNode):
+         return value.to_json(fn)
+     if isinstance(value, list):
+         return list(to_json(item, fn) for item in value)
+     if isinstance(value, tuple):
+         return list(to_json(item, fn) for item in value)
+     else:
+@@ -107,43 +46,16 @@ def scalars_equal(node1, node2, ignored_
+ 
+ class BaseNode(object):
+     """Base class for all Fluent AST nodes.
+ 
+     All productions described in the ASDL subclass BaseNode, including Span and
+     Annotation.  Implements __str__, to_json and traverse.
+     """
+ 
+-    def traverse(self, fun):
+-        """DEPRECATED. Please use Visitor or Transformer.
+-
+-        Postorder-traverse this node and apply `fun` to all child nodes.
+-
+-        Traverse this node depth-first applying `fun` to subnodes and leaves.
+-        Children are processed before parents (postorder traversal).
+-
+-        Return a new instance of the node.
+-        """
+-
+-        def visit(value):
+-            """Call `fun` on `value` and its descendants."""
+-            if isinstance(value, BaseNode):
+-                return value.traverse(fun)
+-            if isinstance(value, list):
+-                return fun(list(map(visit, value)))
+-            else:
+-                return fun(value)
+-
+-        # Use all attributes found on the node as kwargs to the constructor.
+-        kwargs = vars(self).items()
+-        node = self.__class__(
+-            **{name: visit(value) for name, value in kwargs})
+-
+-        return fun(node)
+-
+     def clone(self):
+         """Create a deep clone of the current node."""
+         def visit(value):
+             """Clone node and its descendants."""
+             if isinstance(value, BaseNode):
+                 return value.clone()
+             if isinstance(value, list):
+                 return [visit(child) for child in value]
+diff --git a/third_party/python/fluent.syntax/fluent/syntax/parser.py b/third_party/python/fluent.syntax/fluent/syntax/parser.py
+--- a/third_party/python/fluent.syntax/fluent/syntax/parser.py
++++ b/third_party/python/fluent.syntax/fluent/syntax/parser.py
+@@ -21,20 +21,27 @@ def with_span(fn):
+         end = ps.index
+         node.add_span(start, end)
+         return node
+ 
+     return decorated
+ 
+ 
+ class FluentParser(object):
++    """This class is used to parse Fluent source content.
++
++    ``with_spans`` enables source information in the form of
++    :class:`.ast.Span` objects for each :class:`.ast.SyntaxNode`.
++    """
+     def __init__(self, with_spans=True):
+         self.with_spans = with_spans
+ 
+     def parse(self, source):
++        """Create a :class:`.ast.Resource` from a Fluent source.
++        """
+         ps = FluentParserStream(source)
+         ps.skip_blank_block()
+ 
+         entries = []
+         last_comment = None
+ 
+         while ps.current_char:
+             entry = self.get_entry_or_junk(ps)
+@@ -68,23 +75,23 @@ class FluentParser(object):
+         res = ast.Resource(entries)
+ 
+         if self.with_spans:
+             res.add_span(0, ps.index)
+ 
+         return res
+ 
+     def parse_entry(self, source):
+-        """Parse the first Message or Term in source.
+-
+-        Skip all encountered comments and start parsing at the first Mesage
+-        or Term start. Return Junk if the parsing is not successful.
++        """Parse the first :class:`.ast.Entry` in source.
++
++        Skip all encountered comments and start parsing at the first :class:`.ast.Message`
++        or :class:`.ast.Term` start. Return :class:`.ast.Junk` if the parsing is not successful.
+ 
+         Preceding comments are ignored unless they contain syntax errors
+-        themselves, in which case Junk for the invalid comment is returned.
++        themselves, in which case :class:`.ast.Junk` for the invalid comment is returned.
+         """
+         ps = FluentParserStream(source)
+         ps.skip_blank_block()
+ 
+         while ps.current_char == '#':
+             skipped = self.get_entry_or_junk(ps)
+             if isinstance(skipped, ast.Junk):
+                 # Don't skip Junk comments.
+diff --git a/third_party/python/fluent.syntax/fluent/syntax/serializer.py b/third_party/python/fluent.syntax/fluent/syntax/serializer.py
+--- a/third_party/python/fluent.syntax/fluent/syntax/serializer.py
++++ b/third_party/python/fluent.syntax/fluent/syntax/serializer.py
+@@ -1,50 +1,70 @@
+ from __future__ import unicode_literals
+ from . import ast
+ 
+ 
+-def indent(content):
++def indent_except_first_line(content):
+     return "    ".join(
+         content.splitlines(True)
+     )
+ 
+ 
+ def includes_new_line(elem):
+     return isinstance(elem, ast.TextElement) and "\n" in elem.value
+ 
+ 
+ def is_select_expr(elem):
+     return (
+         isinstance(elem, ast.Placeable) and
+         isinstance(elem.expression, ast.SelectExpression))
+ 
+ 
++def should_start_on_new_line(pattern):
++    is_multiline = any(is_select_expr(elem) for elem in pattern.elements) \
++        or any(includes_new_line(elem) for elem in pattern.elements)
++
++    if is_multiline:
++        first_element = pattern.elements[0]
++        if isinstance(first_element, ast.TextElement):
++            first_char = first_element.value[0]
++            if first_char in ("[", ".", "*"):
++                return False
++        return True
++    return False
++
++
+ class FluentSerializer(object):
++    """FluentSerializer converts :class:`.ast.SyntaxNode` objects to unicode strings.
++
++    `with_junk` controls if parse errors are written back or not.
++    """
+     HAS_ENTRIES = 1
+ 
+     def __init__(self, with_junk=False):
+         self.with_junk = with_junk
+ 
+     def serialize(self, resource):
++        "Serialize a :class:`.ast.Resource` to a string."
+         if not isinstance(resource, ast.Resource):
+             raise Exception('Unknown resource type: {}'.format(type(resource)))
+ 
+         state = 0
+ 
+         parts = []
+         for entry in resource.body:
+             if not isinstance(entry, ast.Junk) or self.with_junk:
+                 parts.append(self.serialize_entry(entry, state))
+                 if not state & self.HAS_ENTRIES:
+                     state |= self.HAS_ENTRIES
+ 
+         return "".join(parts)
+ 
+     def serialize_entry(self, entry, state=0):
++        "Serialize an :class:`.ast.Entry` to a string."
+         if isinstance(entry, ast.Message):
+             return serialize_message(entry)
+         if isinstance(entry, ast.Term):
+             return serialize_term(entry)
+         if isinstance(entry, ast.Comment):
+             if state & self.HAS_ENTRIES:
+                 return "\n{}\n".format(serialize_comment(entry, "#"))
+             return "{}\n".format(serialize_comment(entry, "#"))
+@@ -108,29 +128,26 @@ def serialize_term(term):
+ 
+     parts.append("\n")
+     return ''.join(parts)
+ 
+ 
+ def serialize_attribute(attribute):
+     return "\n    .{} ={}".format(
+         attribute.id.name,
+-        indent(serialize_pattern(attribute.value))
++        indent_except_first_line(serialize_pattern(attribute.value))
+     )
+ 
+ 
+ def serialize_pattern(pattern):
+-    content = "".join([
+-        serialize_element(elem)
+-        for elem in pattern.elements])
+-    start_on_new_line = any(
+-        includes_new_line(elem) or is_select_expr(elem)
+-        for elem in pattern.elements)
+-    if start_on_new_line:
+-        return '\n    {}'.format(indent(content))
++    content = "".join(serialize_element(elem) for elem in pattern.elements)
++    content = indent_except_first_line(content)
++
++    if should_start_on_new_line(pattern):
++        return '\n    {}'.format(content)
+ 
+     return ' {}'.format(content)
+ 
+ 
+ def serialize_element(element):
+     if isinstance(element, ast.TextElement):
+         return element.value
+     if isinstance(element, ast.Placeable):
+@@ -182,17 +199,17 @@ def serialize_expression(expression):
+         return serialize_placeable(expression)
+     raise Exception('Unknown expression type: {}'.format(type(expression)))
+ 
+ 
+ def serialize_variant(variant):
+     return "\n{}[{}]{}".format(
+         "   *" if variant.default else "    ",
+         serialize_variant_key(variant.key),
+-        indent(serialize_pattern(variant.value))
++        indent_except_first_line(serialize_pattern(variant.value))
+     )
+ 
+ 
+ def serialize_call_arguments(expr):
+     positional = ", ".join(
+         serialize_expression(arg) for arg in expr.positional)
+     named = ", ".join(
+         serialize_named_argument(arg) for arg in expr.named)
+diff --git a/third_party/python/fluent.syntax/fluent/syntax/visitor.py b/third_party/python/fluent.syntax/fluent/syntax/visitor.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/fluent.syntax/fluent/syntax/visitor.py
+@@ -0,0 +1,65 @@
++# coding=utf-8
++from __future__ import unicode_literals, absolute_import
++
++from .ast import BaseNode
++
++
++class Visitor(object):
++    '''Read-only visitor pattern.
++
++    Subclass this to gather information from an AST.
++    To generally define which nodes not to descend in to, overload
++    `generic_visit`.
++    To handle specific node types, add methods like `visit_Pattern`.
++    If you want to still descend into the children of the node, call
++    `generic_visit` of the superclass.
++    '''
++    def visit(self, node):
++        if isinstance(node, list):
++            for child in node:
++                self.visit(child)
++            return
++        if not isinstance(node, BaseNode):
++            return
++        nodename = type(node).__name__
++        visit = getattr(self, 'visit_{}'.format(nodename), self.generic_visit)
++        visit(node)
++
++    def generic_visit(self, node):
++        for propname, propvalue in vars(node).items():
++            self.visit(propvalue)
++
++
++class Transformer(Visitor):
++    '''In-place AST Transformer pattern.
++
++    Subclass this to create an in-place modified variant
++    of the given AST.
++    If you need to keep the original AST around, pass
++    a `node.clone()` to the transformer.
++    '''
++    def visit(self, node):
++        if not isinstance(node, BaseNode):
++            return node
++
++        nodename = type(node).__name__
++        visit = getattr(self, 'visit_{}'.format(nodename), self.generic_visit)
++        return visit(node)
++
++    def generic_visit(self, node):
++        for propname, propvalue in vars(node).items():
++            if isinstance(propvalue, list):
++                new_vals = []
++                for child in propvalue:
++                    new_val = self.visit(child)
++                    if new_val is not None:
++                        new_vals.append(new_val)
++                # in-place manipulation
++                propvalue[:] = new_vals
++            elif isinstance(propvalue, BaseNode):
++                new_val = self.visit(propvalue)
++                if new_val is None:
++                    delattr(node, propname)
++                else:
++                    setattr(node, propname, new_val)
++        return node
+diff --git a/third_party/python/fluent.syntax/setup.cfg b/third_party/python/fluent.syntax/setup.cfg
+--- a/third_party/python/fluent.syntax/setup.cfg
++++ b/third_party/python/fluent.syntax/setup.cfg
+@@ -1,8 +1,11 @@
++[metadata]
++version = 0.18.1
++
+ [bdist_wheel]
+ universal = 1
+ 
+ [flake8]
+ exclude = .tox
+ max-line-length = 120
+ 
+ [isort]
+diff --git a/third_party/python/fluent.syntax/setup.py b/third_party/python/fluent.syntax/setup.py
+--- a/third_party/python/fluent.syntax/setup.py
++++ b/third_party/python/fluent.syntax/setup.py
+@@ -1,24 +1,29 @@
+ #!/usr/bin/env python
+ from setuptools import setup
++import os
++
++this_directory = os.path.abspath(os.path.dirname(__file__))
++with open(os.path.join(this_directory, 'README.rst'), 'rb') as f:
++    long_description = f.read().decode('utf-8')
+ 
+ setup(name='fluent.syntax',
+-      version='0.17.0',
+       description='Localization library for expressive translations.',
+-      long_description='See https://github.com/projectfluent/python-fluent/ for more info.',
++      long_description=long_description,
++      long_description_content_type='text/x-rst',
+       author='Mozilla',
+       author_email='l10n-drivers@mozilla.org',
+       license='APL 2',
+       url='https://github.com/projectfluent/python-fluent',
+       keywords=['fluent', 'localization', 'l10n'],
+       classifiers=[
+           'Development Status :: 3 - Alpha',
+           'Intended Audience :: Developers',
+           'License :: OSI Approved :: Apache Software License',
+           'Programming Language :: Python :: 2.7',
+           'Programming Language :: Python :: 3.5',
+       ],
+       packages=['fluent', 'fluent.syntax'],
+-      # These should also be duplicated in tox.ini and ../.travis.yml
++      # These should also be duplicated in tox.ini and /.github/workflow/fluent.syntax.yml
+       tests_require=['six'],
+       test_suite='tests.syntax'
+-)
++      )
+diff --git a/third_party/python/requirements.in b/third_party/python/requirements.in
+--- a/third_party/python/requirements.in
++++ b/third_party/python/requirements.in
+@@ -13,22 +13,23 @@
+ #
+ # Note `pip download` may return `tar.gz` files if there is no `.whl` (wheel)
+ # available. When downloading wheels, make sure that they are cross-platform.
+ # If not you may need to specify `--no-binary :<package1>,<package2>:` to get
+ # the source distribution instead for those particular packages.
+ 
+ attrs==19.1.0
+ blessings==1.7
+-compare-locales==8.0.0
++compare-locales==8.1.0
+ cookies==2.2.1
+ coverage==5.1
+ distro==1.4.0
+ ecdsa==0.15
+-fluent.migrate==0.9
++fluent.migrate==0.10
++fluent.syntax==0.18.1
+ jsmin==2.1.0
+ json-e==2.7.0
+ packaging==21.0
+ pathlib2==2.3.2
+ pathspec==0.8
+ pip-tools==4.5.1
+ pipenv==2020.06.02
+ ply==3.10
+diff --git a/third_party/python/requirements.txt b/third_party/python/requirements.txt
+--- a/third_party/python/requirements.txt
++++ b/third_party/python/requirements.txt
+@@ -14,19 +14,19 @@ blessings==1.7 \
+ certifi==2018.4.16 \
+     --hash=sha256:13e698f54293db9f89122b0581843a782ad0934a4fe0172d2a980ba77fc61bb7 \
+     --hash=sha256:9fa520c1bacfb634fa7af20a76bcbd3d5fb390481724c597da32c719a7dca4b0 \
+     # via pipenv
+ click==7.0 \
+     --hash=sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13 \
+     --hash=sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7 \
+     # via pip-tools
+-compare-locales==8.0.0 \
+-    --hash=sha256:077b007bd2c025284f73994970e7fada7fbdcbb4199ff010e378b305dee6d469 \
+-    --hash=sha256:ee02bdad012cdc9f6c6df24d7518ba2c5084f6bac0d176b4826156accc8d48d6 \
++compare-locales==8.1.0 \
++    --hash=sha256:286270797ce64f7a2f25e734bb437870661409884a4f0971c0bb94fdad6c1f35 \
++    --hash=sha256:3d374ff959d5de2cfd5b94caf6b0fa61445f1d8ede5af384002cb3542aacad3a \
+     # via -r requirements-mach-vendor-python.in, fluent.migrate
+ cookies==2.2.1 \
+     --hash=sha256:15bee753002dff684987b8df8c235288eb8d45f8191ae056254812dfd42c81d3 \
+     --hash=sha256:d6b698788cae4cfa4e62ef8643a9ca332b79bd96cb314294b864ae8d7eb3ee8e \
+     # via -r requirements-mach-vendor-python.in
+ coverage==5.1 \
+     --hash=sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a \
+     --hash=sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355 \
+@@ -63,24 +63,24 @@ coverage==5.1 \
+ distro==1.4.0 \
+     --hash=sha256:362dde65d846d23baee4b5c058c8586f219b5a54be1cf5fc6ff55c4578392f57 \
+     --hash=sha256:eedf82a470ebe7d010f1872c17237c79ab04097948800029994fa458e52fb4b4 \
+     # via -r requirements-mach-vendor-python.in
+ ecdsa==0.15 \
+     --hash=sha256:867ec9cf6df0b03addc8ef66b56359643cb5d0c1dc329df76ba7ecfe256c8061 \
+     --hash=sha256:8f12ac317f8a1318efa75757ef0a651abe12e51fc1af8838fb91079445227277 \
+     # via -r requirements-mach-vendor-python.in
+-fluent.migrate==0.9 \
+-    --hash=sha256:735c86816ef7b7b03b32ff9985685f2d99cb0ed135351e4760a85236538f0beb \
+-    --hash=sha256:d42a001bd7292cef400e63f3d77c0c813a6a6162e7bd2dfa14eb01172d21e788 \
+-    # via -r requirements-mach-vendor-python.in
+-fluent.syntax==0.17.0 \
+-    --hash=sha256:ac3db2f77d62b032fdf1f17ef5c390b7801a9e9fb58d41eca3825c0d47b88d79 \
+-    --hash=sha256:e26be470aeebe4badd84f7bb0b648414e0f2ef95d26e5336d634af99e402ea61 \
+-    # via compare-locales, fluent.migrate
++fluent.migrate==0.10 \
++    --hash=sha256:532322b53c895142cf7c1702f95b54b9d3d128fb92eab38f6e8c8a80c447d8c2 \
++    --hash=sha256:ee1b4d827cff6d1df7f9b6a4b3eb78a75f1dd425e2e71b2013fd0dd411167b3e \
++    # via -r requirements-mach-vendor-python.in
++fluent.syntax==0.18.1 \
++    --hash=sha256:0e63679fa4f1b3042565220a5127b4bab842424f07d6a13c12299e3b3835486a \
++    --hash=sha256:3a55f5e605d1b029a65cc8b6492c86ec4608e15447e73db1495de11fd46c104f \
++    # via -r requirements-mach-vendor-python.in, compare-locales, fluent.migrate
+ jsmin==2.1.0 \
+     --hash=sha256:5d07bf0251a4128e5e8e8eef603849b6b5741c337bff087731a248f9cc774f56 \
+     # via -r requirements-mach-vendor-python.in
+ json-e==2.7.0 \
+     --hash=sha256:d8c1ec3f5bbc7728c3a504ebe58829f283c64eca230871e4eefe974b4cdaae4a \
+     # via -r requirements-mach-vendor-python.in
+ more-itertools==4.3.0 \
+     --hash=sha256:c187a73da93e7a8acc0001572aebc7e3c69daf7bf6881a2cea10650bd4420092 \
+@@ -201,10 +201,9 @@ voluptuous==0.11.5 \
+     # via -r requirements-mach-vendor-python.in
+ yamllint==1.23 \
+     --hash=sha256:0fa69bf8a86182b7fe14918bdd3a30354c869966bbc7cbfff176af71bda9c806 \
+     --hash=sha256:59f3ff77f44e7f46be6aecdb985830f73a1c51e290b7082a7d38c2ae1940f4a9 \
+     # via -r requirements-mach-vendor-python.in
+ 
+ # WARNING: The following packages were not pinned, but pip requires them to be
+ # pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
+-# pip
+ # setuptools

+ 278 - 0
mozilla-release/patches/1666347-85a1.patch

@@ -0,0 +1,278 @@
+# HG changeset patch
+# User Ricky Stewart <rstewart@mozilla.com>
+# Date 1606494067 0
+# Node ID 18d9b3798b38db1789632c5e78ba1720ff1e4d5f
+# Parent  2dad38173190f3aaaf64f81fe4f58d64be9b03e2
+Bug 1666347 - Delete assorted dead code after removal of vendored `psutil` r=firefox-build-system-reviewers,rstewart
+
+Most of the deletions here come from bug 1481612, the `--with-windows-wheel` option to `mach vendor python`, which according to that commit message "is very single-purpose: it's intended to let us vendor an unpacked
+wheel for psutil on Windows". Since vendoring `psutil` is something we're no longer doing, we can safely just delete that added code.
+
+Differential Revision: https://phabricator.services.mozilla.com/D90919
+
+diff --git a/.gitignore b/.gitignore
+--- a/.gitignore
++++ b/.gitignore
+@@ -65,22 +65,16 @@ parser/html/java/javaparser/
+ /captures/
+ 
+ # Gradle cache.
+ /.gradle/
+ 
+ # Local Gradle configuration properties.
+ /local.properties
+ 
+-# Python virtualenv artifacts.
+-third_party/python/psutil/**/*.so
+-third_party/python/psutil/**/*.pyd
+-third_party/python/psutil/build/
+-third_party/python/psutil/tmp/
+-
+ # Ignore chrome.manifest files from the devtools loader
+ devtools/client/chrome.manifest
+ devtools/shared/chrome.manifest
+ 
+ # Ignore node_modules directories in devtools
+ devtools/**/node_modules
+ 
+ # Ignore imported DOMi and chatZilla hg repos
+diff --git a/.hgignore b/.hgignore
+--- a/.hgignore
++++ b/.hgignore
+@@ -67,22 +67,16 @@
+ ^captures/
+ 
+ # Gradle cache.
+ ^.gradle/
+ 
+ # Local Gradle configuration properties.
+ ^local.properties$
+ 
+-# Python stuff installed at build time.
+-^third_party/python/psutil/.*\.so
+-^third_party/python/psutil/.*\.pyd
+-^third_party/python/psutil/build/
+-^third_party/python/psutil/tmp/
+-
+ # Git repositories
+ .git/
+ 
+ # Ignore chrome.manifest files from the devtools loader
+ ^devtools/client/chrome.manifest$
+ ^devtools/shared/chrome.manifest$
+ 
+ # Ignore node_modules directories in devtools
+diff --git a/build/mach_bootstrap.py b/build/mach_bootstrap.py
+--- a/build/mach_bootstrap.py
++++ b/build/mach_bootstrap.py
+@@ -3,16 +3,17 @@
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ from __future__ import print_function, unicode_literals
+ 
+ import errno
+ import json
+ import os
+ import platform
++import shutil
+ import subprocess
+ import sys
+ import uuid
+ if sys.version_info[0] < 3:
+     import __builtin__ as builtins
+ else:
+     import builtins
+ 
+@@ -160,16 +161,23 @@ def bootstrap(topsrcdir, mozilla_dir=Non
+     # generate a user-friendly error message rather than a cryptic stack trace
+     # on module import.
+     major, minor = sys.version_info[:2]
+     if (major == 2 and minor < 7) or (major == 3 and minor < 5):
+         print('Python 2.7 or Python 3.5+ is required to run mach.')
+         print('You are running Python', platform.python_version())
+         sys.exit(1)
+ 
++    # This directory was deleted in bug 1666345, but there may be some ignored
++    # files here. We can safely just delete it for the user so they don't have
++    # to clean the repo themselves.
++    deleted_dir = os.path.join(topsrcdir, "third_party", "python", "psutil")
++    if os.path.exists(deleted_dir):
++        shutil.rmtree(deleted_dir)
++
+     # Global build system and mach state is stored in a central directory. By
+     # default, this is ~/.mozbuild. However, it can be defined via an
+     # environment variable. We detect first run (by lack of this directory
+     # existing) and notify the user that it will be created. The logic for
+     # creation is much simpler for the "advanced" environment variable use
+     # case. For default behavior, we educate users and give them an opportunity
+     # to react. We always exit after creating the directory because users don't
+     # like surprises.
+diff --git a/build/mach_bootstrap.py.1666347.later b/build/mach_bootstrap.py.1666347.later
+new file mode 100644
+--- /dev/null
++++ b/build/mach_bootstrap.py.1666347.later
+@@ -0,0 +1,22 @@
++--- mach_bootstrap.py
+++++ mach_bootstrap.py
++@@ -498,17 +506,18 @@ def _finalize_telemetry_glean(telemetry,
++     system_metrics = mach_metrics.mach.system
++     system_metrics.cpu_brand.set(get_cpu_brand())
++     distro, version = get_distro_and_version()
++     system_metrics.distro.set(distro)
++     system_metrics.distro_version.set(version)
++ 
++     has_psutil, logical_cores, physical_cores, memory_total = get_psutil_stats()
++     if has_psutil:
++-        # psutil may not be available if a successful build hasn't occurred yet.
+++        # psutil may not be available (we allow `mach create-mach-environment`
+++        # to fail to install it).
++         system_metrics.logical_cores.add(logical_cores)
++         system_metrics.physical_cores.add(physical_cores)
++         if memory_total is not None:
++             system_metrics.memory.accumulate(
++                 int(math.ceil(float(memory_total) / (1024 * 1024 * 1024)))
++             )
++     telemetry.submit(is_bootstrap)
++ 
+diff --git a/python/mozbuild/mozbuild/vendor/mach_commands.py b/python/mozbuild/mozbuild/vendor/mach_commands.py
+--- a/python/mozbuild/mozbuild/vendor/mach_commands.py
++++ b/python/mozbuild/mozbuild/vendor/mach_commands.py
+@@ -148,22 +148,16 @@ Please commit or stash these changes bef
+ 
+     @SubCommand(
+         "vendor",
+         "python",
+         description="Vendor Python packages from pypi.org into third_party/python. "
+                     "Some extra files like docs and tests will automatically be excluded.",
+     )
+     @CommandArgument(
+-        "--with-windows-wheel",
+-        action="store_true",
+-        help="Vendor a wheel for Windows along with the source package",
+-        default=False,
+-    )
+-    @CommandArgument(
+         "--keep-extra-files",
+         action="store_true",
+         default=False,
+         help="Keep all files, including tests and documentation.",
+     )
+     @CommandArgument(
+         "packages",
+         default=None,
+diff --git a/python/mozbuild/mozbuild/vendor/vendor_python.py b/python/mozbuild/mozbuild/vendor/vendor_python.py
+--- a/python/mozbuild/mozbuild/vendor/vendor_python.py
++++ b/python/mozbuild/mozbuild/vendor/vendor_python.py
+@@ -11,27 +11,23 @@ import subprocess
+ import mozfile
+ import mozpack.path as mozpath
+ from mozbuild.base import MozbuildObject
+ from mozfile import TemporaryDirectory
+ from mozpack.files import FileFinder
+ 
+ 
+ class VendorPython(MozbuildObject):
+-    def vendor(self, packages=None, with_windows_wheel=False, keep_extra_files=False):
++    def vendor(self, packages=None, keep_extra_files=False):
+         self.populate_logger()
+         self.log_manager.enable_unstructured()
+ 
+         vendor_dir = mozpath.join(self.topsrcdir, os.path.join("third_party", "python"))
+ 
+         packages = packages or []
+-        if with_windows_wheel and len(packages) != 1:
+-            raise Exception(
+-                "--with-windows-wheel is only supported for a single package!"
+-            )
+ 
+         self.activate_virtualenv()
+         pip_compile = os.path.join(self.virtualenv_manager.bin_path, "pip-compile")
+         if not os.path.exists(pip_compile):
+             path = os.path.normpath(
+                 os.path.join(self.topsrcdir, "third_party", "python", "pip-tools")
+             )
+             self.virtualenv_manager.install_pip_package(path, vendored=True)
+@@ -70,40 +66,16 @@ class VendorPython(MozbuildObject):
+                         "--no-deps",
+                         "--dest",
+                         tmp,
+                         "--no-binary",
+                         ":all:",
+                         "--disable-pip-version-check",
+                     ]
+                 )
+-                if with_windows_wheel:
+-                    # This is hardcoded to CPython 2.7 for win64, which is good
+-                    # enough for what we need currently. If we need psutil for Python 3
+-                    # in the future that could be added here as well.
+-                    self.virtualenv_manager._run_pip(
+-                        [
+-                            "download",
+-                            "--dest",
+-                            tmp,
+-                            "--no-deps",
+-                            "--only-binary",
+-                            ":all:",
+-                            "--platform",
+-                            "win_amd64",
+-                            "--implementation",
+-                            "cp",
+-                            "--python-version",
+-                            "27",
+-                            "--abi",
+-                            "none",
+-                            "--disable-pip-version-check",
+-                            packages[0],
+-                        ]
+-                    )
+                 self._extract(tmp, vendor_dir, keep_extra_files)
+ 
+             shutil.copyfile(tmpspec_absolute, spec)
+             self.repository.add_remove_files(vendor_dir)
+ 
+     def _update_packages(self, spec, packages):
+         for package in packages:
+             if not all(package.partition("==")):
+@@ -142,35 +114,24 @@ class VendorPython(MozbuildObject):
+                 '*/doc',
+                 '*/docs',
+                 '*/test',
+                 '*/tests',
+             )
+         finder = FileFinder(src)
+         for path, _ in finder.find("*"):
+             base, ext = os.path.splitext(path)
+-            if ext == ".whl":
+-                # Wheels would extract into a directory with the name of the package, but
+-                # we want the platform signifiers, minus the version number.
+-                # Wheel filenames look like:
+-                # {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}
+-                bits = base.split("-")
++            # packages extract into package-version directory name and we strip the version
++            tld = mozfile.extract(os.path.join(finder.base, path), dest, ignore=ignore)[
++                0
++            ]
++            target = os.path.join(dest, tld.rpartition("-")[0])
++            mozfile.remove(target)  # remove existing version of vendored package
++            mozfile.move(tld, target)
+ 
+-                # Remove the version number.
+-                bits.pop(1)
+-                target = os.path.join(dest, "-".join(bits))
+-                mozfile.remove(target)  # remove existing version of vendored package
+-                os.mkdir(target)
+-                mozfile.extract(os.path.join(finder.base, path), target, ignore=ignore)
+-            else:
+-                # packages extract into package-version directory name and we strip the version
+-                tld = mozfile.extract(os.path.join(finder.base, path), dest, ignore=ignore)[0]
+-                target = os.path.join(dest, tld.rpartition("-")[0])
+-                mozfile.remove(target)  # remove existing version of vendored package
+-                mozfile.move(tld, target)
+             # If any files inside the vendored package were symlinks, turn them into normal files
+             # because hg.mozilla.org forbids symlinks in the repository.
+             link_finder = FileFinder(target)
+             for _, f in link_finder.find("**"):
+                 if os.path.islink(f.path):
+                     link_target = os.path.realpath(f.path)
+                     os.unlink(f.path)
+                     shutil.copyfile(link_target, f.path)

+ 37 - 0
mozilla-release/patches/1680051-85a1.patch

@@ -0,0 +1,37 @@
+# HG changeset patch
+# User Mitchell Hentges <mhentges@mozilla.com>
+# Date 1606844410 0
+# Node ID ff9b0937b00b8a53ab243dd0b3dd0f5eb1e7de87
+# Parent  bf432fafe371cd58c4ce5da5935e266d6d8aa64f
+Bug 1680051: Ignore error when removing old psutil directory r=firefox-build-system-reviewers,dmajor
+
+Don't fail to run all mach commands when old psutil directory
+cannot be removed.
+
+Glandium mentioned that there shouldn't be any negative effects from the
+old directory lingering.
+
+Differential Revision: https://phabricator.services.mozilla.com/D98352
+
+diff --git a/build/mach_bootstrap.py b/build/mach_bootstrap.py
+--- a/build/mach_bootstrap.py
++++ b/build/mach_bootstrap.py
+@@ -166,17 +166,17 @@ def bootstrap(topsrcdir, mozilla_dir=Non
+         print('You are running Python', platform.python_version())
+         sys.exit(1)
+ 
+     # This directory was deleted in bug 1666345, but there may be some ignored
+     # files here. We can safely just delete it for the user so they don't have
+     # to clean the repo themselves.
+     deleted_dir = os.path.join(topsrcdir, "third_party", "python", "psutil")
+     if os.path.exists(deleted_dir):
+-        shutil.rmtree(deleted_dir)
++        shutil.rmtree(deleted_dir, ignore_errors=True)
+ 
+     # Global build system and mach state is stored in a central directory. By
+     # default, this is ~/.mozbuild. However, it can be defined via an
+     # environment variable. We detect first run (by lack of this directory
+     # existing) and notify the user that it will be created. The logic for
+     # creation is much simpler for the "advanced" environment variable use
+     # case. For default behavior, we educate users and give them an opportunity
+     # to react. We always exit after creating the directory because users don't

+ 58 - 0
mozilla-release/patches/1712819-1-90a1.patch

@@ -0,0 +1,58 @@
+# HG changeset patch
+# User Mitchell Hentges <mhentges@mozilla.com>
+# Date 1622156646 0
+# Node ID 9c25bbd071297bb44a0d36f02b40fda07411959e
+# Parent  07abde1fb1c5827cc617f22694373d602ad92dfc
+Bug 1712819: Fix VirtualenvManager not being expandable in debugger r=ahal
+
+At least in PyCharm, expanding a `VirtualenvManager` instance means
+resolving all the properties and fields of the instance.
+However, if that property is doing non-trivial work, the debugger
+wouldn't run that subprocess while we're stopped at a breakpoint.
+So, the instance would sit there with the "Collecting data..." text.
+
+Differential Revision: https://phabricator.services.mozilla.com/D115935
+
+diff --git a/python/mach_commands.py b/python/mach_commands.py
+--- a/python/mach_commands.py
++++ b/python/mach_commands.py
+@@ -165,17 +165,17 @@ class MachCommands(MachCommandBase):
+         if subsuite == 'default':
+             filters.append(mpf.subsuite(None))
+         elif subsuite:
+             filters.append(mpf.subsuite(subsuite))
+ 
+         tests = mp.active_tests(
+             filters=filters,
+             disabled=False,
+-            python=self.virtualenv_manager.version_info[0],
++            python=self.virtualenv_manager.version_info()[0],
+             **mozinfo.info)
+ 
+         if not tests:
+             submsg = "for subsuite '{}' ".format(subsuite) if subsuite else ""
+             message = "TEST-UNEXPECTED-FAIL | No tests collected " + \
+                       "{}(Not in PYTHON_UNITTEST_MANIFESTS?)".format(submsg)
+             self.log(logging.WARN, 'python-test', {}, message)
+             return 1
+diff --git a/python/mozbuild/mozbuild/virtualenv.py b/python/mozbuild/mozbuild/virtualenv.py
+--- a/python/mozbuild/mozbuild/virtualenv.py
++++ b/python/mozbuild/mozbuild/virtualenv.py
+@@ -77,17 +77,16 @@ class VirtualenvManager(object):
+     @property
+     def python_path(self):
+         binary = 'python'
+         if sys.platform in ('win32', 'cygwin'):
+             binary += '.exe'
+ 
+         return os.path.join(self.bin_path, binary)
+ 
+-    @property
+     def version_info(self):
+         return eval(subprocess.check_output([
+             self.python_path, '-c', 'import sys; print(sys.version_info[:])']))
+ 
+     @property
+     def activate_path(self):
+         return os.path.join(self.bin_path, 'activate_this.py')
+ 

+ 102 - 0
mozilla-release/patches/1712819-2-90a1.patch

@@ -0,0 +1,102 @@
+# HG changeset patch
+# User Mitchell Hentges <mhentges@mozilla.com>
+# Date 1622156646 0
+# Node ID 3a5b335bdc8c52a384f3f3a03a87ea03d12d2935
+# Parent  5dc310ecb395e9a5eb92ffcdcc3559ed8b0d1970
+Bug 1712819: Avoid pip's "outdated" warning in virtualenvs r=ahal
+
+Now, when running mach commands that invoke `pip`, it will no longer
+inform the user that it needs an update.
+
+We reach into `distutils` to determine the "site-packages" directory
+pattern, then apply it to our virtualenv.
+
+Differential Revision: https://phabricator.services.mozilla.com/D115940
+
+diff --git a/python/mozbuild/mozbuild/virtualenv.py b/python/mozbuild/mozbuild/virtualenv.py
+--- a/python/mozbuild/mozbuild/virtualenv.py
++++ b/python/mozbuild/mozbuild/virtualenv.py
+@@ -219,16 +219,17 @@ class VirtualenvManager(object):
+         result = self._log_process_output(args)
+ 
+         if result:
+             raise Exception(
+                 'Failed to create virtualenv: %s (virtualenv.py retcode: %s)' % (
+                     self.virtualenv_root, result))
+ 
+         self.write_exe_info(python)
++        self._disable_pip_outdated_warning()
+ 
+         return self.virtualenv_root
+ 
+     def packages(self):
+         mode = 'rU' if PY2 else 'r'
+         with open(self.manifest_path, mode) as fh:
+             packages = [line.rstrip().split(':')
+                         for line in fh]
+@@ -548,16 +549,65 @@ class VirtualenvManager(object):
+         if vendored:
+             args.extend([
+                 '--no-deps',
+                 '--no-index',
+             ])
+ 
+         return self._run_pip(args)
+ 
++    def _disable_pip_outdated_warning(self):
++        """Disables the pip outdated warning by changing pip's 'installer'
++
++        "pip" has behaviour to ensure that it doesn't print it's "outdated"
++        warning if it's part of a Linux distro package. This is because
++        Linux distros generally have a slightly out-of-date pip package
++        that they know to be stable, and users aren't always able to
++        (or want to) update it.
++
++        This behaviour works by checking if the "pip" installer
++        (encoded in the dist-info/INSTALLER file) is "pip" itself,
++        or a different value (e.g.: a distro).
++
++        We can take advantage of this behaviour by telling pip
++        that it was installed by "mach", so it won't print the
++        warning.
++
++        https://github.com/pypa/pip/blob/5ee933aab81273da3691c97f2a6e7016ecbe0ef9/src/pip/_internal/self_outdated_check.py#L100-L101 # noqa F401
++        """
++
++        # Defer "distutils" import until this function is called so that
++        # "mach bootstrap" doesn't fail due to Linux distro python-distutils
++        # package not being installed.
++        # By the time this function is called, "distutils" must be installed
++        # because it's needed by the "virtualenv" package.
++        from distutils import dist
++
++        distribution = dist.Distribution({"script_args": "--no-user-cfg"})
++        installer = distribution.get_command_obj("install")
++        installer.prefix = os.path.normpath(self.virtualenv_root)
++        installer.finalize_options()
++
++        # Path to virtualenv's "site-packages" directory
++        site_packages = installer.install_purelib
++
++        pip_dist_info = next(
++            (
++                file
++                for file in os.listdir(site_packages)
++                if file.startswith("pip-") and file.endswith(".dist-info")
++            ),
++            None,
++        )
++        if not pip_dist_info:
++            raise Exception("Failed to find pip dist-info in new virtualenv")
++
++        with open(os.path.join(site_packages, pip_dist_info, "INSTALLER"), "w") as file:
++            file.write("mach")
++
+     def _run_pip(self, args):
+         # It's tempting to call pip natively via pip.main(). However,
+         # the current Python interpreter may not be the virtualenv python.
+         # This will confuse pip and cause the package to attempt to install
+         # against the executing interpreter. By creating a new process, we
+         # force the virtualenv's interpreter to be used and all is well.
+         # It /might/ be possible to cheat and set sys.executable to
+         # self.python_path. However, this seems more risk than it's worth.

+ 34 - 0
mozilla-release/patches/1712819-3-91a1.patch

@@ -0,0 +1,34 @@
+# HG changeset patch
+# User Mitchell Hentges <mhentges@mozilla.com>
+# Date 1623077169 0
+# Node ID 4335f275d7b4836af2bd3f6a99a25c79147c9d69
+# Parent  99922244ae9237898b53a0ee44979194b54b1a3b
+Bug 1712819: Remove redundant pip warning suppression r=ahal
+
+Now that we configure pip within mach virtualenvs to turn off
+its "outdated" warning, we no longer need to apply the suppression
+as a CLI argument.
+
+Differential Revision: https://phabricator.services.mozilla.com/D116891
+
+diff --git a/python/mozbuild/mozbuild/vendor/vendor_python.py b/python/mozbuild/mozbuild/vendor/vendor_python.py
+--- a/python/mozbuild/mozbuild/vendor/vendor_python.py
++++ b/python/mozbuild/mozbuild/vendor/vendor_python.py
+@@ -63,17 +63,16 @@ class VendorPython(MozbuildObject):
+                         "download",
+                         "-r",
+                         requirements,
+                         "--no-deps",
+                         "--dest",
+                         tmp,
+                         "--no-binary",
+                         ":all:",
+-                        "--disable-pip-version-check",
+                     ]
+                 )
+                 self._extract(tmp, vendor_dir, keep_extra_files)
+ 
+             shutil.copyfile(tmpspec_absolute, spec)
+             self.repository.add_remove_files(vendor_dir)
+ 
+     def _update_packages(self, spec, packages):

+ 227 - 0
mozilla-release/patches/1713610-91a1.patch

@@ -0,0 +1,227 @@
+# HG changeset patch
+# User Mitchell Hentges <mhentges@mozilla.com>
+# Date 1623352746 0
+# Node ID 10c4df33bc803ff408ba1f40e03ee1e7448b5eb0
+# Parent  30977d7178b2aa1ef48f7b7db960e8090792821a
+Bug 1713610: Require PyPI-vendored packages be added to requirements.in r=ahal
+
+`./mach vendor python <package>` was already adding its new package
+to `requirements.txt`, so we were getting full resolver support, which
+is good. However, it caused our `requirements.in` file to start
+getting out-of-date, and therefore made it harder to identify the
+top-level dependencies.
+
+Arguably, we could have `./mach vendor python <package>` automatically
+update `requirements.in`, too, but then we need to solve the edge cases,
+such as "What if the package is already in `requirements.in`? What if
+that existing item has a different version?"
+
+The hardest part of updating `requirements.in` was finding it, so I've
+also modified the `./mach vendor python` help text to make it more
+identifiable.
+
+Differential Revision: https://phabricator.services.mozilla.com/D116386
+
+diff --git a/python/docs/index.rst b/python/docs/index.rst
+--- a/python/docs/index.rst
++++ b/python/docs/index.rst
+@@ -24,27 +24,20 @@ Where possible, the following policy app
+ * Vendored libraries **SHOULD NOT** be modified except as required to
+   successfully vendor them.
+ * Vendored libraries **SHOULD** be released copies of libraries available on
+   PyPI.
+ 
+ Adding a Python package
+ ~~~~~~~~~~~~~~~~~~~~~~~
+ 
+-To vendor a Python package, run ``mach vendor python [PACKAGE]``, where
+-``[PACKAGE]`` is one or more package names along with a version number in the
+-format ``pytest==3.5.1``. The package will be installed, transient dependencies
+-will be determined, and a ``requirements.txt`` file will be generated with the
+-full list of dependencies. The requirements file is then used with ``pip`` to
+-download and extract the source distributions of all packages into the
+-``third_party/python`` directory.
+-
+-Alternatively, you can also modify the direct dependencies in
+-``third_party/python/requirements.in`` and then run ``mach vendor python`` for your
+-changes to take effect.
++To vendor a Python package, add it to ``third_party/python/requirements.in``
++and then run ``mach vendor python``. This will update the tree of pinned
++dependencies in ``third_party/python/requirements.txt`` and download them all
++into the ``third_party/python`` directory.
+ 
+ What if the package isn't on PyPI?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 
+ If the package isn't available on any Python package index, then you can
+ manually copy the source distribution into the ``third_party/python`` directory.
+ 
+ Using a Python package index
+diff --git a/python/mozbuild/mozbuild/vendor/mach_commands.py b/python/mozbuild/mozbuild/vendor/mach_commands.py
+--- a/python/mozbuild/mozbuild/vendor/mach_commands.py
++++ b/python/mozbuild/mozbuild/vendor/mach_commands.py
+@@ -2,22 +2,17 @@
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, # You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ from __future__ import absolute_import, print_function, unicode_literals
+ 
+ import sys
+ import logging
+ 
+-from mach.decorators import (
+-    CommandArgument,
+-    CommandProvider,
+-    Command,
+-    SubCommand,
+-)
++from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand
+ 
+ from mozbuild.base import MachCommandBase
+ from mozbuild.vendor.moz_yaml import load_moz_yaml, MozYamlVerifyError
+ 
+ 
+ @CommandProvider
+ class Vendor(MachCommandBase):
+     """Vendor third-party dependencies into the source repository."""
+@@ -145,33 +140,26 @@ Please commit or stash these changes bef
+         vendor_command.vendor(**kwargs)
+ 
+     # =====================================================================
+ 
+     @SubCommand(
+         "vendor",
+         "python",
+         description="Vendor Python packages from pypi.org into third_party/python. "
+-                    "Some extra files like docs and tests will automatically be excluded.",
++                    "Some extra files like docs and tests will automatically be excluded."
++                    "Installs the packages listed in third_party/python/requirements.in and "
++                    "their dependencies.",
+     )
+     @CommandArgument(
+         "--keep-extra-files",
+         action="store_true",
+         default=False,
+         help="Keep all files, including tests and documentation.",
+     )
+-    @CommandArgument(
+-        "packages",
+-        default=None,
+-        nargs="*",
+-        help="Packages to vendor. If omitted, packages and their dependencies "
+-        "defined in Pipfile.lock will be vendored. If Pipfile has been modified, "
+-        "then Pipfile.lock will be regenerated. Note that transient dependencies "
+-        "may be updated when running this command.",
+-    )
+     def vendor_python(self, **kwargs):
+         from mozbuild.vendor.vendor_python import VendorPython
+ 
+         if sys.version_info[:2] != (3, 6):
+             print(
+                 "You must use Python 3.6 to vendor Python packages. If you don't "
+                 "have Python 3.6, you can request that your package be added by "
+                 "creating a bug: \n"
+diff --git a/python/mozbuild/mozbuild/vendor/vendor_python.py b/python/mozbuild/mozbuild/vendor/vendor_python.py
+--- a/python/mozbuild/mozbuild/vendor/vendor_python.py
++++ b/python/mozbuild/mozbuild/vendor/vendor_python.py
+@@ -11,39 +11,37 @@ import subprocess
+ import mozfile
+ import mozpack.path as mozpath
+ from mozbuild.base import MozbuildObject
+ from mozfile import TemporaryDirectory
+ from mozpack.files import FileFinder
+ 
+ 
+ class VendorPython(MozbuildObject):
+-    def vendor(self, packages=None, keep_extra_files=False):
++    def vendor(self, keep_extra_files=False):
+         self.populate_logger()
+         self.log_manager.enable_unstructured()
+ 
+         vendor_dir = mozpath.join(self.topsrcdir, os.path.join("third_party", "python"))
+ 
+-        packages = packages or []
+-
+         self.activate_virtualenv()
+         pip_compile = os.path.join(self.virtualenv_manager.bin_path, "pip-compile")
+         if not os.path.exists(pip_compile):
+             path = os.path.normpath(
+                 os.path.join(self.topsrcdir, "third_party", "python", "pip-tools")
+             )
+             self.virtualenv_manager.install_pip_package(path, vendored=True)
+         spec = os.path.join(vendor_dir, "requirements.in")
+         requirements = os.path.join(vendor_dir, "requirements.txt")
+ 
+         with TemporaryDirectory() as spec_dir:
+             tmpspec = "requirements-mach-vendor-python.in"
+             tmpspec_absolute = os.path.join(spec_dir, tmpspec)
+             shutil.copyfile(spec, tmpspec_absolute)
+-            self._update_packages(tmpspec_absolute, packages)
++            self._update_packages(tmpspec_absolute)
+ 
+             # resolve the dependencies and update requirements.txt
+             subprocess.check_output(
+                 [
+                     pip_compile,
+                     tmpspec,
+                     "--no-header",
+                     "--no-index",
+@@ -70,56 +68,41 @@ class VendorPython(MozbuildObject):
+                         ":all:",
+                     ]
+                 )
+                 self._extract(tmp, vendor_dir, keep_extra_files)
+ 
+             shutil.copyfile(tmpspec_absolute, spec)
+             self.repository.add_remove_files(vendor_dir)
+ 
+-    def _update_packages(self, spec, packages):
+-        for package in packages:
+-            if not all(package.partition("==")):
+-                raise Exception(
+-                    "Package {} must be in the format name==version".format(package)
+-                )
+-
++    def _update_packages(self, spec):
+         requirements = {}
+         with open(spec, "r") as f:
+             comments = []
+             for line in f.readlines():
+                 line = line.strip()
+                 if not line or line.startswith("#"):
+                     comments.append(line)
+                     continue
+                 name, version = line.split("==")
+                 requirements[name] = version, comments
+                 comments = []
+ 
+-        for package in packages:
+-            name, version = package.split("==")
+-            requirements[name] = version, []
+-
+         with open(spec, "w") as f:
+             for name, (version, comments) in sorted(requirements.items()):
+                 if comments:
+                     f.write("{}\n".format("\n".join(comments)))
+                 f.write("{}=={}\n".format(name, version))
+ 
+     def _extract(self, src, dest, keep_extra_files=False):
+         """extract source distribution into vendor directory"""
+ 
+         ignore = ()
+         if not keep_extra_files:
+-            ignore = (
+-                '*/doc',
+-                '*/docs',
+-                '*/test',
+-                '*/tests',
+-            )
++            ignore = ("*/doc", "*/docs", "*/test", "*/tests")
+         finder = FileFinder(src)
+         for path, _ in finder.find("*"):
+             base, ext = os.path.splitext(path)
+             # packages extract into package-version directory name and we strip the version
+             tld = mozfile.extract(os.path.join(finder.base, path), dest, ignore=ignore)[
+                 0
+             ]
+             target = os.path.join(dest, tld.rpartition("-")[0])

+ 39 - 0
mozilla-release/patches/1713613-1-91a1.patch

@@ -0,0 +1,39 @@
+# HG changeset patch
+# User Mitchell Hentges <mhentges@mozilla.com>
+# Date 1622841574 0
+# Node ID c8d96382da38d2afbcc846105ec7d559bfe74c65
+# Parent  3628f0b5e35231c52487ddd08386e9d86a3ad9d5
+Bug 1713613: Pin vendored dependencies as Python 3.6 r=ahal
+
+Python 3.6 requires additional dependencies, and these
+are already vendored in-tree. However, whenever
+`./mach vendor python` occurs with a newer Python
+version, they're removed from requirements.txt.
+
+Let's add them back so that the upcoming "wheel vendor"
+patch has less unexpected changes.
+
+Differential Revision: https://phabricator.services.mozilla.com/D116511
+
+diff --git a/third_party/python/requirements.txt b/third_party/python/requirements.txt
+--- a/third_party/python/requirements.txt
++++ b/third_party/python/requirements.txt
+@@ -173,17 +173,17 @@ requests==2.9.1 \
+     # via -r requirements-mach-vendor-python.in, responses
+ responses==0.10.6 \
+     --hash=sha256:502d9c0c8008439cfcdef7e251f507fcfdd503b56e8c0c87c3c3e3393953f790 \
+     --hash=sha256:97193c0183d63fba8cd3a041c75464e4b09ea0aff6328800d1546598567dde0b \
+     # via -r requirements-mach-vendor-python.in, responses
+ six==1.13.0 \
+     --hash=sha256:1f1b7d42e254082a9db6279deae68afb421ceba6158efa6131de7b3003ee93fd \
+     --hash=sha256:30f610279e8b2578cab6db20741130331735c781b56053c59c4076da27f06b66 \
+-    # via -r requirements-mach-vendor-python.in, blessings, compare-locales, ecdsa, fluent.migrate, more-itertools, pathlib2, pip-tools, pytest, responses
++    # via -r requirements-mach-vendor-python.in, blessings, compare-locales, ecdsa, fluent.migrate, more-itertools, pathlib2, pip-tools, pyrsistent, pytest, responses
+ urllib3==1.25.9 \
+     --hash=sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527 \
+     --hash=sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115 \
+     # via sentry-sdk
+ virtualenv==16.7.8 \
+     --hash=sha256:116655188441670978117d0ebb6451eb6a7526f9ae0796cc0dee6bd7356909b0 \
+     --hash=sha256:b57776b44f91511866594e477dd10e76a6eb44439cdd7f06dcd30ba4c5bd854f \
+     # via -r requirements-mach-vendor-python.in

+ 41 - 0
mozilla-release/patches/1713613-2-91a1.patch

@@ -0,0 +1,41 @@
+# HG changeset patch
+# User Mitchell Hentges <mhentges@mozilla.com>
+# Date 1622841575 0
+# Node ID c457b63994886c8080bd5d4f1cb79b9258d08163
+# Parent  05191eb45ac75d6c397050375cdfa160243335c6
+Bug 1713613: Require Python 3.6 to update vendored packages r=ahal
+
+Generating Python lockfiles is system-dependent, and this currently
+mostly affects us just on the Python 3.6 <=> 3.7 boundary - 3.6 requires
+a few additional packages like `importlib_metadata` and `iso8601`.
+
+If a different Python version is used, the test in CI is guaranteed to
+fail. By validating in advance, we help future vendoring developers
+by helping them avoid the wait before CI failure and the time it
+takes to troubleshoot.
+
+Differential Revision: https://phabricator.services.mozilla.com/D116773
+
+diff --git a/python/mozbuild/mozbuild/vendor/mach_commands.py b/python/mozbuild/mozbuild/vendor/mach_commands.py
+--- a/python/mozbuild/mozbuild/vendor/mach_commands.py
++++ b/python/mozbuild/mozbuild/vendor/mach_commands.py
+@@ -165,10 +165,19 @@ Please commit or stash these changes bef
+         help="Packages to vendor. If omitted, packages and their dependencies "
+         "defined in Pipfile.lock will be vendored. If Pipfile has been modified, "
+         "then Pipfile.lock will be regenerated. Note that transient dependencies "
+         "may be updated when running this command.",
+     )
+     def vendor_python(self, **kwargs):
+         from mozbuild.vendor.vendor_python import VendorPython
+ 
++        if sys.version_info[:2] != (3, 6):
++            print(
++                "You must use Python 3.6 to vendor Python packages. If you don't "
++                "have Python 3.6, you can request that your package be added by "
++                "creating a bug: \n"
++                "https://bugzilla.mozilla.org/enter_bug.cgi?product=Firefox%20Build%20System&component=Mach%20Core"  # noqa F401
++            )
++            return 1
++
+         vendor_command = self._spawn(VendorPython)
+         vendor_command.vendor(**kwargs)

+ 104 - 0
mozilla-release/patches/1713613-3-91a1.patch

@@ -0,0 +1,104 @@
+# HG changeset patch
+# User Mitchell Hentges <mhentges@mozilla.com>
+# Date 1622841575 0
+# Node ID 0a52e5d715d087b9f2c0f2c3ab572b05a0c9aaaf
+# Parent  4796a22fba7ac344b5a7cf6315a9691e5877e9e7
+Bug 1713613: Assert unmodified PyPI-vendored packages r=ahal
+
+Adds test to ensure that we don't modify files that are vendored
+with PyPI.
+
+If a modification needs to happen to a vendored library, we should
+instead:
+* Modify the library upstream and vendor the new version, or
+* Remove the library from requirements.in and requirements.txt so that
+  it's now a "manually-vendored" library. Note that there may be
+  issues if this occurs to a library depended on by 3rd-party
+  libraries.
+
+Differential Revision: https://phabricator.services.mozilla.com/D116428
+
+diff --git a/python/mozbuild/mozbuild/test/python.ini b/python/mozbuild/mozbuild/test/python.ini
+--- a/python/mozbuild/mozbuild/test/python.ini
++++ b/python/mozbuild/mozbuild/test/python.ini
+@@ -48,8 +48,14 @@ skip-if = python == 2 && os == 'mac'
+ [test_line_endings.py]
+ [test_makeutil.py]
+ [test_mozconfig.py]
+ [test_mozinfo.py]
+ [test_preprocessor.py]
+ [test_pythonutil.py]
+ [test_util.py]
+ [test_util_fileavoidwrite.py]
++[test_vendor.py]
++# Only run the test on Linux to ensure that the test is stable: depending
++# on packages, it can be system-dependent.
++skip-if =
++  os == "win"
++  os == "mac"
+diff --git a/python/mozbuild/mozbuild/test/test_vendor.py b/python/mozbuild/mozbuild/test/test_vendor.py
+new file mode 100644
+--- /dev/null
++++ b/python/mozbuild/mozbuild/test/test_vendor.py
+@@ -0,0 +1,61 @@
++# This Source Code Form is subject to the terms of the Mozilla Public
++# License, v. 2.0. If a copy of the MPL was not distributed with this
++# file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++from __future__ import absolute_import, print_function, unicode_literals
++
++import os
++import shutil
++import subprocess
++import tempfile
++
++from buildconfig import topsrcdir
++from mock import Mock
++from mozbuild.vendor.vendor_python import VendorPython
++import mozunit
++
++
++def test_up_to_date_vendor():
++    with tempfile.TemporaryDirectory() as work_dir:
++        subprocess.check_call(["hg", "init", work_dir])
++        os.makedirs(os.path.join(work_dir, "build"))
++        os.makedirs(os.path.join(work_dir, "third_party"))
++
++        # Create empty virtualenv_packages file
++        with open(
++            os.path.join(work_dir, "build", "build_virtualenv_packages.txt"), "a"
++        ) as file:
++            # Since VendorPython thinks "work_dir" is the topsrcdir,
++            # it will use its associated virtualenv and package configuration.
++            # Since it uses "pip-tools" within, and "pip-tools" needs
++            # the "Click" library, we need to make it available.
++            file.write("mozilla.pth:third_party/python/Click")
++
++        # Copy existing "third_party/python/" vendored files
++        existing_vendored = os.path.join(topsrcdir, "third_party", "python")
++        work_vendored = os.path.join(work_dir, "third_party", "python")
++        shutil.copytree(existing_vendored, work_vendored)
++
++        # Run the vendoring process
++        vendor = VendorPython(
++            work_dir, None, Mock(), topobjdir=os.path.join(work_dir, "obj")
++        )
++        vendor.vendor()
++
++        # Verify that re-vendoring did not cause file changes.
++        # Note that we don't want hg-ignored generated files
++        # to bust the diff, so we exclude them (pycache, egg-info).
++        subprocess.check_call(
++            [
++                "diff",
++                "-r",
++                existing_vendored,
++                work_vendored,
++                "--exclude=__pycache__",
++                "--exclude=*.egg-info",
++            ]
++        )
++
++
++if __name__ == "__main__":
++    mozunit.main()

+ 35 - 0
mozilla-release/patches/1714244-91a1.patch

@@ -0,0 +1,35 @@
+# HG changeset patch
+# User Mike Hommey <mh+mozilla@glandium.org>
+# Date 1622753105 0
+# Node ID 28aed716084dd9f76f21ad185babc4f2614c1612
+# Parent  09d42255f29f0aea75e92acf3925fba955b2a502
+Bug 1714244 - Use HEAD instead of master by default for `mach vendor yaml` r=releng-reviewers,bhearsum
+
+HEAD is a symbolic ref that points to the default branch of the
+repository, it will point to master, main, or anything else that the
+repository is using as its default branch.
+
+Differential Revision: https://phabricator.services.mozilla.com/D116678
+
+diff --git a/python/mozbuild/mozbuild/vendor/mach_commands.py b/python/mozbuild/mozbuild/vendor/mach_commands.py
+--- a/python/mozbuild/mozbuild/vendor/mach_commands.py
++++ b/python/mozbuild/mozbuild/vendor/mach_commands.py
+@@ -78,17 +78,17 @@ class Vendor(MachCommandBase):
+                 sys.exit(0)
+         except MozYamlVerifyError as e:
+             print(e)
+             sys.exit(1)
+ 
+         if not ignore_modified and not check_for_update:
+             self.check_modified_files()
+         if not revision:
+-            revision = "master"
++            revision = "HEAD"
+ 
+         from mozbuild.vendor.vendor_manifest import VendorManifest
+ 
+         vendor_command = self._spawn(VendorManifest)
+         vendor_command.vendor(library, manifest, revision, check_for_update)
+ 
+         sys.exit(0)
+ 

+ 740 - 0
mozilla-release/patches/985141-1-81a1.patch

@@ -0,0 +1,740 @@
+# HG changeset patch
+# User Andrew Halberstadt <ahalberstadt@mozilla.com>
+# Date 1595952370 0
+# Node ID 162f07f42162f5f36b9ed05aea26b1fe8ae72417
+# Parent  f06b54afcf9dafe8fa579599706ec0a01d87fa06
+Bug 985141 - [mozbuild] Remove leading underscore from MozbuildObject._activate_virtualenv, r=firefox-build-system-reviewers,perftest-reviewers,andi,AlexandruIonescu,rstewart
+
+This function is used all across the tree and should be considered a public API.
+
+Differential Revision: https://phabricator.services.mozilla.com/D85045
+
+diff --git a/build/upload_generated_sources.py b/build/upload_generated_sources.py
+--- a/build/upload_generated_sources.py
++++ b/build/upload_generated_sources.py
+@@ -143,17 +143,17 @@ def main(argv):
+     parser = argparse.ArgumentParser(
+         description='Upload generated source files in ARTIFACT to BUCKET in S3.')
+     parser.add_argument('artifact',
+                         help='generated-sources artifact from build task')
+     args = parser.parse_args(argv)
+     region, bucket = get_s3_region_and_bucket()
+ 
+     config = MozbuildObject.from_environment()
+-    config._activate_virtualenv()
++    config.activate_virtualenv()
+     config.virtualenv_manager.install_pip_package('boto3==1.4.4')
+ 
+     with timed() as elapsed:
+         do_work(region=region, bucket=bucket, artifact=args.artifact)
+         log.info('Finished in {:.03f}s'.format(elapsed()))
+     return 0
+ 
+ 
+diff --git a/python/mach_commands.py b/python/mach_commands.py
+--- a/python/mach_commands.py
++++ b/python/mach_commands.py
+@@ -60,17 +60,17 @@ class MachCommands(MachCommandBase):
+         append_env = {
+             'PYTHONDONTWRITEBYTECODE': str('1'),
+         }
+ 
+         if no_virtualenv:
+             python_path = sys.executable
+             append_env['PYTHONPATH'] = os.pathsep.join(sys.path)
+         else:
+-            self._activate_virtualenv()
++            self.activate_virtualenv()
+             python_path = self.virtualenv_manager.python_path
+ 
+         if exec_file:
+             exec(open(exec_file).read())
+             return 0
+ 
+         if ipython:
+             bindir = os.path.dirname(python_path)
+diff --git a/python/mozbuild/mozbuild/base.py b/python/mozbuild/mozbuild/base.py
+--- a/python/mozbuild/mozbuild/base.py
++++ b/python/mozbuild/mozbuild/base.py
+@@ -817,25 +817,25 @@ class MozbuildObject(ProcessExecutionMix
+         This is used as a convenience method to create other
+         MozbuildObject-derived class instances. It can only be used on
+         classes that have the same constructor arguments as us.
+         """
+ 
+         return cls(self.topsrcdir, self.settings, self.log_manager,
+                    topobjdir=self.topobjdir)
+ 
+-    def _activate_virtualenv(self):
++    def activate_virtualenv(self):
+         self.virtualenv_manager.ensure()
+         self.virtualenv_manager.activate()
+ 
+     def _set_log_level(self, verbose):
+         self.log_manager.terminal_handler.setLevel(logging.INFO if not verbose else logging.DEBUG)
+ 
+     def ensure_pipenv(self):
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         pipenv = os.path.join(self.virtualenv_manager.bin_path, 'pipenv')
+         if not os.path.exists(pipenv):
+             for package in ['certifi', 'pipenv', 'six', 'virtualenv', 'virtualenv-clone']:
+                 path = os.path.normpath(os.path.join(
+                     self.topsrcdir, 'third_party/python', package))
+                 self.virtualenv_manager.install_pip_package(path, vendored=True)
+         return pipenv
+ 
+@@ -846,17 +846,17 @@ class MozbuildObject(ProcessExecutionMix
+         self.ensure_pipenv()
+         self.virtualenv_manager.activate_pipenv(workon_home, pipfile, populate,
+                                                 python)
+ 
+     def _ensure_zstd(self):
+         try:
+             import zstandard  # noqa: F401
+         except (ImportError, AttributeError):
+-            self._activate_virtualenv()
++            self.activate_virtualenv()
+             self.virtualenv_manager.install_pip_package('zstandard>=0.9.0,<=0.13.0')
+ 
+ 
+ class MachCommandBase(MozbuildObject):
+     """Base class for mach command providers that wish to be MozbuildObjects.
+ 
+     This provides a level of indirection so MozbuildObject can be refactored
+     without having to change everything that inherits from it.
+diff --git a/python/mozbuild/mozbuild/code-analysis/mach_commands.py b/python/mozbuild/mozbuild/code-analysis/mach_commands.py
+--- a/python/mozbuild/mozbuild/code-analysis/mach_commands.py
++++ b/python/mozbuild/mozbuild/code-analysis/mach_commands.py
+@@ -239,17 +239,17 @@ class StaticAnalysis(MachCommandBase):
+     def check(self, source=None, jobs=2, strip=1, verbose=False, checks='-*',
+               fix=False, header_filter='', output=None, format='text', outgoing=False):
+         from mozbuild.controller.building import (
+             StaticAnalysisFooter,
+             StaticAnalysisOutputManager,
+         )
+ 
+         self._set_log_level(verbose)
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         self.log_manager.enable_unstructured()
+ 
+         rc = self._get_clang_tools(verbose=verbose)
+         if rc != 0:
+             return rc
+ 
+         if self._is_version_eligible() is False:
+             self.log(logging.ERROR, 'static-analysis', {},
+@@ -349,17 +349,17 @@ class StaticAnalysis(MachCommandBase):
+                      'directory, ~./mozbuild/coverity is used.')
+     @CommandArgument('--outgoing', default=False, action='store_true',
+                      help='Run coverity on outgoing files from mercurial or git repository')
+     @CommandArgument('--full-build', default=False, action='store_true',
+                      help='Run a full build for coverity analisys.')
+     def check_coverity(self, source=[], output=None, coverity_output_path=None,
+                        outgoing=False, full_build=False, verbose=False):
+         self._set_log_level(verbose)
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         self.log_manager.enable_unstructured()
+ 
+         if 'MOZ_AUTOMATION' not in os.environ:
+             self.log(logging.INFO, 'static-analysis', {},
+                      'Coverity based static-analysis cannot be ran outside automation.')
+             return
+ 
+         if full_build and outgoing:
+@@ -812,17 +812,17 @@ class StaticAnalysis(MachCommandBase):
+     @CommandArgument('--outgoing', default=False, action='store_true',
+                      help='Run infer checks on outgoing files from repository')
+     @CommandArgument('--output', default=None,
+                      help='Write infer json output in a file')
+     def check_java(self, source=['mobile'], jobs=2, strip=1, verbose=False, checks=[],
+                    task='compileWithGeckoBinariesDebugSources',
+                    skip_export=False, outgoing=False, output=None):
+         self._set_log_level(verbose)
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         self.log_manager.enable_unstructured()
+ 
+         if self.substs['MOZ_BUILD_APP'] != 'mobile/android':
+             self.log(logging.WARNING, 'static-analysis', {},
+                      'Cannot check java source code unless you are building for android!')
+             return 1
+         rc = self._check_for_java()
+         if rc != 0:
+@@ -1077,17 +1077,17 @@ class StaticAnalysis(MachCommandBase):
+                      ' This option is only valid on automation environments.')
+     @CommandArgument('checker_names', nargs='*', default=[],
+                      help='Checkers that are going to be auto-tested.')
+     def autotest(self, verbose=False, dump_results=False, intree_tool=False, checker_names=[]):
+         # If 'dump_results' is True than we just want to generate the issues files for each
+         # checker in particulat and thus 'force_download' becomes 'False' since we want to
+         # do this on a local trusted clang-tidy package.
+         self._set_log_level(verbose)
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         self._dump_results = dump_results
+ 
+         force_download = not self._dump_results
+ 
+         # Function return codes
+         self.TOOLS_SUCCESS = 0
+         self.TOOLS_FAILED_DOWNLOAD = 1
+         self.TOOLS_UNSUPORTED_PLATFORM = 2
+diff --git a/python/mozbuild/mozbuild/frontend/mach_commands.py b/python/mozbuild/mozbuild/frontend/mach_commands.py
+--- a/python/mozbuild/mozbuild/frontend/mach_commands.py
++++ b/python/mozbuild/mozbuild/frontend/mach_commands.py
+@@ -32,17 +32,17 @@ class MozbuildFileCommands(MachCommandBa
+              description='View reference documentation on mozbuild files.')
+     @CommandArgument('symbol', default=None, nargs='*',
+                      help='Symbol to view help on. If not specified, all will be shown.')
+     @CommandArgument('--name-only', '-n', default=False, action='store_true',
+                      help='Print symbol names only.')
+     def reference(self, symbol, name_only=False):
+         # mozbuild.sphinx imports some Sphinx modules, so we need to be sure
+         # the optional Sphinx package is installed.
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         self.virtualenv_manager.install_pip_package('Sphinx==1.1.3')
+ 
+         from mozbuild.sphinx import (
+             format_module,
+             function_reference,
+             special_reference,
+             variable_reference,
+         )
+diff --git a/python/mozbuild/mozbuild/mach_commands.py b/python/mozbuild/mozbuild/mach_commands.py
+--- a/python/mozbuild/mozbuild/mach_commands.py
++++ b/python/mozbuild/mozbuild/mach_commands.py
+@@ -87,17 +87,17 @@ class Watch(MachCommandBase):
+                   'https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Simple_Firefox_build')  # noqa
+             return 1
+ 
+         if not self.substs.get('WATCHMAN', None):
+             print('mach watch requires watchman to be installed. See '
+                   'https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Incremental_builds_with_filesystem_watching')  # noqa
+             return 1
+ 
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         try:
+             self.virtualenv_manager.install_pip_package('pywatchman==1.4.1')
+         except Exception:
+             print('Could not install pywatchman from pip. See '
+                   'https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Incremental_builds_with_filesystem_watching')  # noqa
+             return 1
+ 
+         from mozbuild.faster_daemon import Daemon
+@@ -166,17 +166,17 @@ class CargoProvider(MachCommandBase):
+ @CommandProvider
+ class Doctor(MachCommandBase):
+     """Provide commands for diagnosing common build environment problems"""
+     @Command('doctor', category='devenv',
+              description='')
+     @CommandArgument('--fix', default=None, action='store_true',
+                      help='Attempt to fix found problems.')
+     def doctor(self, fix=None):
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         from mozbuild.doctor import Doctor
+         doctor = Doctor(self.topsrcdir, self.topobjdir, fix)
+         return doctor.check_all()
+ 
+ 
+ @CommandProvider
+ class Clobber(MachCommandBase):
+     NO_AUTO_LOG = True
+diff --git a/python/mozbuild/mozbuild/vendor/vendor_python.py b/python/mozbuild/mozbuild/vendor/vendor_python.py
+--- a/python/mozbuild/mozbuild/vendor/vendor_python.py
++++ b/python/mozbuild/mozbuild/vendor/vendor_python.py
+@@ -23,17 +23,17 @@ class VendorPython(MozbuildObject):
+         vendor_dir = mozpath.join(self.topsrcdir, os.path.join("third_party", "python"))
+ 
+         packages = packages or []
+         if with_windows_wheel and len(packages) != 1:
+             raise Exception(
+                 "--with-windows-wheel is only supported for a single package!"
+             )
+ 
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         pip_compile = os.path.join(self.virtualenv_manager.bin_path, "pip-compile")
+         if not os.path.exists(pip_compile):
+             path = os.path.normpath(
+                 os.path.join(self.topsrcdir, "third_party", "python", "pip-tools")
+             )
+             self.virtualenv_manager.install_pip_package(path, vendored=True)
+         spec = os.path.join(vendor_dir, "requirements.in")
+         requirements = os.path.join(vendor_dir, "requirements.txt")
+diff --git a/python/mozperftest/mozperftest/browser/noderunner.py b/python/mozperftest/mozperftest/browser/noderunner.py
+--- a/python/mozperftest/mozperftest/browser/noderunner.py
++++ b/python/mozperftest/mozperftest/browser/noderunner.py
+@@ -19,17 +19,17 @@ class NodeRunner(MachEnvironment):
+ 
+         from mozbuild.nodeutil import find_node_executable
+ 
+         self.node_path = os.path.abspath(find_node_executable()[0])
+ 
+     def setup(self):
+         """Install the Node.js package.
+         """
+-        self.mach_cmd._activate_virtualenv()
++        self.mach_cmd.activate_virtualenv()
+         self.verify_node_install()
+ 
+     def node(self, args):
+         """Invoke node (interactively) with the given arguments."""
+         return self.run_process(
+             [self.node_path] + args,
+             append_env=self.append_env(),
+             pass_thru=True,  # Allow user to run Node interactively.
+diff --git a/python/mozperftest/mozperftest/mach_commands.py.985141-1.later b/python/mozperftest/mozperftest/mach_commands.py.985141-1.later
+new file mode 100644
+--- /dev/null
++++ b/python/mozperftest/mozperftest/mach_commands.py.985141-1.later
+@@ -0,0 +1,40 @@
++--- mach_commands.py
+++++ mach_commands.py
++@@ -61,17 +61,17 @@ class Perftest(MachCommandBase):
++                 "try_mode": "try_task_config",
++             }
++ 
++             task_config = {"parameters": parameters, "version": 2}
++             push_to_try("perftest", "perftest", try_task_config=task_config)
++             return
++ 
++         # run locally
++-        MachCommandBase._activate_virtualenv(self)
+++        MachCommandBase.activate_virtualenv(self)
++ 
++         from mozperftest.runner import run_tests
++ 
++         run_tests(mach_cmd=self, **kwargs)
++ 
++ 
++ @CommandProvider
++ class PerftestTests(MachCommandBase):
++@@ -117,17 +117,17 @@ class PerftestTests(MachCommandBase):
++         action="store_true",
++         default=False,
++         help="Skip flake8 and black",
++     )
++     @CommandArgument(
++         "-v", "--verbose", action="store_true", default=False, help="Verbose mode",
++     )
++     def run_tests(self, **kwargs):
++-        MachCommandBase._activate_virtualenv(self)
+++        MachCommandBase.activate_virtualenv(self)
++ 
++         from pathlib import Path
++         from mozperftest.runner import _setup_path
++         from mozperftest.utils import install_package, temporary_env
++ 
++         skip_linters = kwargs.get("skip_linters", False)
++         verbose = kwargs.get("verbose", False)
++ 
+diff --git a/python/mozperftest/mozperftest/test/noderunner.py.985141-1.later b/python/mozperftest/mozperftest/test/noderunner.py.985141-1.later
+new file mode 100644
+--- /dev/null
++++ b/python/mozperftest/mozperftest/test/noderunner.py.985141-1.later
+@@ -0,0 +1,21 @@
++--- noderunner.py
+++++ noderunner.py
++@@ -21,17 +21,17 @@ class NodeRunner(Layer):
++ 
++         from mozbuild.nodeutil import find_node_executable
++ 
++         self.node_path = os.path.abspath(find_node_executable()[0])
++ 
++     def setup(self):
++         """Install the Node.js package.
++         """
++-        self.mach_cmd._activate_virtualenv()
+++        self.mach_cmd.activate_virtualenv()
++         self.verify_node_install()
++ 
++     def node(self, args):
++         """Invoke node (interactively) with the given arguments."""
++         return self.run_process(
++             [self.node_path] + args,
++             append_env=self.append_env(),
++             pass_thru=True,  # Allow user to run Node interactively.
+diff --git a/python/mozperftest/mozperftest/test/xpcshell.py.985141-1.later b/python/mozperftest/mozperftest/test/xpcshell.py.985141-1.later
+new file mode 100644
+--- /dev/null
++++ b/python/mozperftest/mozperftest/test/xpcshell.py.985141-1.later
+@@ -0,0 +1,21 @@
++--- xpcshell.py
+++++ xpcshell.py
++@@ -48,17 +48,17 @@ class XPCShell(Layer):
++         self.python_path = mach_cmd.virtualenv_manager.python_path
++         self.topobjdir = mach_cmd.topobjdir
++         self.distdir = mach_cmd.distdir
++         self.bindir = mach_cmd.bindir
++         self.statedir = mach_cmd.statedir
++         self.metrics = []
++ 
++     def setup(self):
++-        self.mach_cmd._activate_virtualenv()
+++        self.mach_cmd.activate_virtualenv()
++ 
++     def run(self, metadata):
++         tests = self.get_arg("tests", [])
++         if len(tests) != 1:
++             # for now we support one single test
++             raise NotImplementedError(str(tests))
++ 
++         test = Path(tests[0])
+diff --git a/testing/condprofile/mach_commands.py.985141-1.later b/testing/condprofile/mach_commands.py.985141-1.later
+new file mode 100644
+--- /dev/null
++++ b/testing/condprofile/mach_commands.py.985141-1.later
+@@ -0,0 +1,21 @@
++--- mach_commands.py
+++++ mach_commands.py
++@@ -10,17 +10,17 @@ from mach.decorators import CommandArgum
++ from mozbuild.base import MachCommandBase, BinaryNotFoundException
++ 
++ requirements = os.path.join(os.path.dirname(__file__), "requirements", "base.txt")
++ 
++ 
++ @CommandProvider
++ class CondprofileCommandProvider(MachCommandBase):
++     def _init(self):
++-        self._activate_virtualenv()
+++        self.activate_virtualenv()
++         self.virtualenv_manager.install_pip_requirements(
++             requirements, require_hashes=False
++         )
++ 
++     @Command("fetch-condprofile", category="testing")
++     @CommandArgument("--target-dir", default=None, help="Target directory")
++     @CommandArgument("--platform", default=None, help="Platform")
++     @CommandArgument("--scenario", default="full", help="Scenario")  # grab choices
+diff --git a/testing/mach_commands.py b/testing/mach_commands.py
+--- a/testing/mach_commands.py
++++ b/testing/mach_commands.py
+@@ -359,17 +359,17 @@ class CramTest(MachCommandBase):
+              description="Mercurial style .t tests for command line applications.")
+     @CommandArgument('test_paths', nargs='*', metavar='N',
+                      help="Test paths to run. Each path can be a test file or directory. "
+                           "If omitted, the entire suite will be run.")
+     @CommandArgument('cram_args', nargs=argparse.REMAINDER,
+                      help="Extra arguments to pass down to the cram binary. See "
+                           "'./mach python -m cram -- -h' for a list of available options.")
+     def cramtest(self, cram_args=None, test_paths=None, test_objects=None):
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         import mozinfo
+         from manifestparser import TestManifest
+ 
+         if test_objects is None:
+             from moztest.resolve import TestResolver
+             resolver = self._spawn(TestResolver)
+             if test_paths:
+                 # If we were given test paths, try to find tests matching them.
+@@ -495,17 +495,17 @@ class ChunkFinder(MachCommandBase):
+             'chunkByDir': kwargs['chunk_by_dir'],
+             'chunkByRuntime': kwargs['chunk_by_runtime'],
+             'e10s': kwargs['e10s'],
+             'subsuite': subsuite,
+         }
+ 
+         temp_dir = None
+         if kwargs['platform'] or kwargs['debug']:
+-            self._activate_virtualenv()
++            self.activate_virtualenv()
+             self.virtualenv_manager.install_pip_package('mozdownload==1.17')
+             temp_dir, temp_path = download_mozinfo(
+                 kwargs['platform'], kwargs['debug'])
+             args['extra_mozinfo_json'] = temp_path
+ 
+         found = False
+         for this_chunk in range(1, total_chunks + 1):
+             args['thisChunk'] = this_chunk
+@@ -903,17 +903,17 @@ class TestInfoCommand(MachCommandBase):
+ class TestFluentMigration(MachCommandBase):
+     @Command('fluent-migration-test', category='testing',
+              description="Test Fluent migration recipes.")
+     @CommandArgument('test_paths', nargs='*', metavar='N',
+                      help="Recipe paths to test.")
+     def run_migration_tests(self, test_paths=None, **kwargs):
+         if not test_paths:
+             test_paths = []
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         from test_fluent_migrations import fmt
+         rv = 0
+         with_context = []
+         for to_test in test_paths:
+             try:
+                 context = fmt.inspect_migration(to_test)
+                 for issue in context['issues']:
+                     self.log(logging.ERROR, 'fluent-migration-test', {
+diff --git a/testing/mach_commands.py.985141-1.later b/testing/mach_commands.py.985141-1.later
+new file mode 100644
+--- /dev/null
++++ b/testing/mach_commands.py.985141-1.later
+@@ -0,0 +1,21 @@
++--- mach_commands.py
+++++ mach_commands.py
++@@ -596,33 +596,33 @@ def get_jsshell_parser():
++ 
++ 
++ @CommandProvider
++ class JsShellTests(MachCommandBase):
++     @Command('jsshell-bench', category='testing',
++              parser=get_jsshell_parser,
++              description="Run benchmarks in the SpiderMonkey JS shell.")
++     def run_jsshelltests(self, **kwargs):
++-        self._activate_virtualenv()
+++        self.activate_virtualenv()
++         from jsshell import benchmark
++         return benchmark.run(**kwargs)
++ 
++ 
++ @CommandProvider
++ class CramTest(MachCommandBase):
++     @Command('cramtest', category='testing',
++              description="Mercurial style .t tests for command line applications.")
+diff --git a/testing/mochitest/mach_commands.py.985141-1.later b/testing/mochitest/mach_commands.py.985141-1.later
+new file mode 100644
+--- /dev/null
++++ b/testing/mochitest/mach_commands.py.985141-1.later
+@@ -0,0 +1,21 @@
++--- mach_commands.py
+++++ mach_commands.py
++@@ -285,17 +285,17 @@ class MachCommands(MachCommandBase):
++     def run_mochitest_general(self, flavor=None, test_objects=None, resolve_tests=True, **kwargs):
++         from mochitest_options import ALL_FLAVORS
++         from mozlog.commandline import setup_logging
++         from mozlog.handlers import StreamHandler
++         from moztest.resolve import get_suite_definition
++ 
++         # TODO: This is only strictly necessary while mochitest is using Python
++         # 2 and can be removed once the command is migrated to Python 3.
++-        self._activate_virtualenv()
+++        self.activate_virtualenv()
++ 
++         buildapp = None
++         for app in SUPPORTED_APPS:
++             if conditions.is_buildapp_in(self, apps=[app]):
++                 buildapp = app
++                 break
++ 
++         flavors = None
+diff --git a/testing/web-platform/mach_commands.py b/testing/web-platform/mach_commands.py
+--- a/testing/web-platform/mach_commands.py
++++ b/testing/web-platform/mach_commands.py
+@@ -398,17 +398,17 @@ def create_parser_testpaths():
+         "--json", action="store_true", default=False,
+         help="Output as JSON")
+     return parser
+ 
+ 
+ @CommandProvider
+ class MachCommands(MachCommandBase):
+     def setup(self):
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+ 
+     @Command("web-platform-tests",
+              category="testing",
+              conditions=[conditions.is_firefox],
+              parser=create_parser_wpt)
+     def run_web_platform_tests(self, **params):
+         self.setup()
+ 
+@@ -484,9 +484,9 @@ class MachCommands(MachCommandBase):
+ 
+     @Command("wpt-test-paths",
+              category="testing",
+              description="Get a mapping from test ids to files",
+              parser=create_parser_testpaths)
+     def wpt_test_paths(self, **params):
+         runner = self._spawn(WebPlatformTestsTestPathsRunner)
+         runner.run(**params)
+-        return 0
+\ No newline at end of file
++        return 0
+diff --git a/toolkit/crashreporter/tools/upload_symbols.py b/toolkit/crashreporter/tools/upload_symbols.py
+--- a/toolkit/crashreporter/tools/upload_symbols.py
++++ b/toolkit/crashreporter/tools/upload_symbols.py
+@@ -53,17 +53,17 @@ def get_taskcluster_secret(secret_name):
+     secret = res.json()
+     auth_token = secret['secret']['token']
+ 
+     return auth_token
+ 
+ 
+ def main():
+     config = MozbuildObject.from_environment()
+-    config._activate_virtualenv()
++    config.activate_virtualenv()
+ 
+     import redo
+     import requests
+ 
+     logging.basicConfig()
+     parser = argparse.ArgumentParser(
+         description='Upload symbols in ZIP using token from Taskcluster secrets service.')
+     parser.add_argument('zip',
+diff --git a/tools/browsertime/mach_commands.py.985141-1.later b/tools/browsertime/mach_commands.py.985141-1.later
+new file mode 100644
+--- /dev/null
++++ b/tools/browsertime/mach_commands.py.985141-1.later
+@@ -0,0 +1,70 @@
++--- mach_commands.py
+++++ mach_commands.py
++@@ -408,32 +408,32 @@ class MachBrowsertime(MachCommandBase):
++         req.check_if_exists(use_user_site=False)
++         if req.satisfied_by is None:
++             return True
++         venv_site_lib = os.path.abspath(os.path.join(self.virtualenv_manager.bin_path, "..",
++                                         "lib"))
++         site_packages = os.path.abspath(req.satisfied_by.location)
++         return not site_packages.startswith(venv_site_lib)
++ 
++-    def _activate_virtualenv(self, *args, **kwargs):
+++    def activate_virtualenv(self, *args, **kwargs):
++         r'''Activates virtualenv.
++ 
++         This function will also install Pillow and pyssim if needed.
++         It will raise an error in case the install failed.
++         '''
++-        MachCommandBase._activate_virtualenv(self, *args, **kwargs)
+++        MachCommandBase.activate_virtualenv(self, *args, **kwargs)
++ 
++         # installing Python deps on the fly
++         for dep in ("Pillow==%s" % PILLOW_VERSION, "pyssim==%s" % PYSSIM_VERSION):
++             if self._need_install(dep):
++                 self.virtualenv_manager._run_pip(["install", dep])
++ 
++     def check(self):
++         r'''Run `visualmetrics.py --check`.'''
++-        self._activate_virtualenv()
+++        self.activate_virtualenv()
++ 
++         args = ['--check']
++         status = self.run_process(
++             [self.virtualenv_manager.python_path, visualmetrics_path()] + args,
++             # For --check, don't allow user's path to interfere with
++             # path testing except on Linux, where ImageMagick needs to
++             # be installed manually.
++             append_env=self.append_env(append_path=host_platform().startswith('linux')),
++@@ -571,29 +571,29 @@ class MachBrowsertime(MachCommandBase):
++                 return 1
++ 
++         if check:
++             return self.check()
++ 
++         if browsertime_help:
++             args.append('--help')
++ 
++-        self._activate_virtualenv()
+++        self.activate_virtualenv()
++         default_args = self.extra_default_args(args)
++         if default_args == 1:
++             return 1
++         return self.node([browsertime_path()] + default_args + args)
++ 
++     @Command('visualmetrics', category='testing',
++              description='Run visualmetrics.py')
++     @CommandArgument('video')
++     @CommandArgument('args', nargs=argparse.REMAINDER)
++     def visualmetrics(self, video, args):
++         self._set_log_level(True)
++-        self._activate_virtualenv()
+++        self.activate_virtualenv()
++ 
++         # Turn '/path/to/video/1.mp4' into '/path/to/video' and '1'.
++         d, base = os.path.split(video)
++         index, _ = os.path.splitext(base)
++ 
++         # TODO: write a '--logfile' as well.
++         args = ['--dir',  # Images are written to `/path/to/video/images` (following browsertime).
++                 mozpath.join(d, 'images', index),
+diff --git a/tools/lint/mach_commands.py b/tools/lint/mach_commands.py
+--- a/tools/lint/mach_commands.py
++++ b/tools/lint/mach_commands.py
+@@ -60,17 +60,17 @@ def get_global_excludes(topsrcdir):
+ class MachCommands(MachCommandBase):
+ 
+     @Command(
+         'lint', category='devenv',
+         description='Run linters.',
+         parser=setup_argument_parser)
+     def lint(self, *runargs, **lintargs):
+         """Run linters."""
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         from mozlint import cli, parser
+ 
+         try:
+             buildargs = {}
+             buildargs['substs'] = copy.deepcopy(dict(self.substs))
+             buildargs['defines'] = copy.deepcopy(dict(self.defines))
+             buildargs['topobjdir'] = self.topobjdir
+             lintargs.update(buildargs)
+diff --git a/tools/mach_commands.py b/tools/mach_commands.py
+--- a/tools/mach_commands.py
++++ b/tools/mach_commands.py
+@@ -171,17 +171,17 @@ def mozregression_import():
+     return mozregression.mach_interface
+ 
+ 
+ def mozregression_create_parser():
+     # Create the mozregression command line parser.
+     # if mozregression is not installed, or not up to date, it will
+     # first be installed.
+     cmd = MozbuildObject.from_environment()
+-    cmd._activate_virtualenv()
++    cmd.activate_virtualenv()
+     mozregression = mozregression_import()
+     if not mozregression:
+         # mozregression is not here at all, install it
+         cmd.virtualenv_manager.install_pip_package('mozregression')
+         print("mozregression was installed. please re-run your"
+               " command. If you keep getting this message please "
+               " manually run: 'pip install -U mozregression'.")
+     else:
+@@ -209,11 +209,11 @@ def mozregression_create_parser():
+ @CommandProvider
+ class MozregressionCommand(MachCommandBase):
+     @Command('mozregression',
+              category='misc',
+              description=("Regression range finder for nightly"
+                           " and inbound builds."),
+              parser=mozregression_create_parser)
+     def run(self, **options):
+-        self._activate_virtualenv()
++        self.activate_virtualenv()
+         mozregression = mozregression_import()
+         mozregression.run(options)
+diff --git a/tools/tryselect/mach_commands.py.985141-1.later b/tools/tryselect/mach_commands.py.985141-1.later
+new file mode 100644
+--- /dev/null
++++ b/tools/tryselect/mach_commands.py.985141-1.later
+@@ -0,0 +1,21 @@
++--- mach_commands.py
+++++ mach_commands.py
++@@ -322,17 +322,17 @@ class TrySelect(MachCommandBase):
++     def try_chooser(self, **kwargs):
++         """Push tasks selected from a web interface to try.
++ 
++         This selector will build the taskgraph and spin up a dynamically
++         created 'trychooser-like' web-page on the localhost. After a selection
++         has been made, pressing the 'Push' button will automatically push the
++         selection to try.
++         """
++-        self._activate_virtualenv()
+++        self.activate_virtualenv()
++         path = os.path.join('tools', 'tryselect', 'selectors', 'chooser', 'requirements.txt')
++         self.virtualenv_manager.install_pip_requirements(path, quiet=True)
++ 
++         return self.run(**kwargs)
++ 
++     @SubCommand('try',
++                 'auto',
++                 description='Automatically determine which tasks to run. This runs the same '

+ 112 - 0
mozilla-release/patches/985141-2-81a1.patch

@@ -0,0 +1,112 @@
+# HG changeset patch
+# User Andrew Halberstadt <ahalberstadt@mozilla.com>
+# Date 1595969698 0
+# Node ID 1747dd37d2c94252bea0905f4805961f1f3da75d
+# Parent  4802128c58467a5959ac1ade0f18e07550833beb
+Bug 985141 - [mozperftest] Fix missed instances of _activate_virtualenv in mozperftest framework, r=perftest-reviewers,sparky
+
+Differential Revision: https://phabricator.services.mozilla.com/D85203
+
+diff --git a/python/mozperftest/mozperftest/tests/test_mach_commands.py.985141-2.later b/python/mozperftest/mozperftest/tests/test_mach_commands.py.985141-2.later
+new file mode 100644
+--- /dev/null
++++ b/python/mozperftest/mozperftest/tests/test_mach_commands.py.985141-2.later
+@@ -0,0 +1,98 @@
++--- test_mach_commands.py
+++++ test_mach_commands.py
++@@ -57,88 +57,88 @@ def _get_command(klass=Perftest):
++ 
++     try:
++         yield klass(context())
++     finally:
++         shutil.rmtree(context.state_dir)
++ 
++ 
++ @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
++-@mock.patch("mozperftest.mach_commands.MachCommandBase._activate_virtualenv")
+++@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
++ def test_command(mocked_func):
++     with _get_command() as test, silence(test):
++         test.run_perftest(tests=[EXAMPLE_TESTS_DIR], flavor="desktop-browser")
++         # XXX add assertions
++ 
++ 
++ @mock.patch("mozperftest.MachEnvironment")
++-@mock.patch("mozperftest.mach_commands.MachCommandBase._activate_virtualenv")
+++@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
++ def test_command_iterations(venv, env):
++     kwargs = {
++         "tests": [EXAMPLE_TESTS_DIR],
++         "hooks": ITERATION_HOOKS,
++         "flavor": "desktop-browser",
++     }
++     with _get_command() as test, silence(test):
++         test.run_perftest(**kwargs)
++     # the hook changes the iteration value to 5.
++     # each iteration generates 5 calls, so we want to see 25
++     assert len(env.mock_calls) == 25
++ 
++ 
++ @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
++-@mock.patch("mozperftest.mach_commands.MachCommandBase._activate_virtualenv")
+++@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
++ @mock.patch("tryselect.push.push_to_try")
++ def test_push_command(push_to_try, venv):
++     with _get_command() as test, silence(test):
++         test.run_perftest(
++             tests=[EXAMPLE_TESTS_DIR],
++             flavor="desktop-browser",
++             push_to_try=True,
++             try_platform="g5",
++         )
++         push_to_try.assert_called()
++         # XXX add assertions
++ 
++ 
++ @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
++-@mock.patch("mozperftest.mach_commands.MachCommandBase._activate_virtualenv")
+++@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
++ def test_doc_flavor(mocked_func):
++     with _get_command() as test, silence(test):
++         test.run_perftest(tests=[EXAMPLE_TESTS_DIR], flavor="doc")
++ 
++ 
++ @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
++-@mock.patch("mozperftest.mach_commands.MachCommandBase._activate_virtualenv")
+++@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
++ @mock.patch("mozperftest.mach_commands.PerftestTests._run_python_script")
++ def test_test_runner(*mocked):
++     # simulating on try to run the paths parser
++     old = mach_commands.ON_TRY
++     mach_commands.ON_TRY = True
++     with _get_command(PerftestTests) as test, silence(test), temporary_env(
++         MOZ_AUTOMATION="1"
++     ):
++         test.run_tests(tests=[EXAMPLE_TESTS_DIR])
++ 
++     mach_commands.ON_TRY = old
++ 
++ 
++ @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
++-@mock.patch("mozperftest.mach_commands.MachCommandBase._activate_virtualenv")
+++@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
++ def test_run_python_script(*mocked):
++     with _get_command(PerftestTests) as test, silence(test) as captured:
++         test._run_python_script("lib2to3", *["--help"])
++ 
++     stdout, stderr = captured
++     stdout.seek(0)
++     assert stdout.read() == "=> lib2to3 [OK]\n"
++ 
++ 
++ @mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
++-@mock.patch("mozperftest.mach_commands.MachCommandBase._activate_virtualenv")
+++@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
++ def test_run_python_script_failed(*mocked):
++     with _get_command(PerftestTests) as test, silence(test) as captured:
++         test._run_python_script("nothing")
++ 
++     stdout, stderr = captured
++     stdout.seek(0)
++     assert stdout.read().endswith("[FAILED]\n")
++ 

+ 322 - 0
mozilla-release/patches/985141-3-81a1.patch

@@ -0,0 +1,322 @@
+# HG changeset patch
+# User Ricky Stewart <rstewart@mozilla.com>
+# Date 1597357184 0
+# Node ID d2149ac954ff4b18ff278afab848d188e61e23f9
+# Parent  602637e1bfdd46c6e260f52dbabe713e95f86ac5
+Bug 985141 - Allow mach commands to specify what virtualenv they should use. r=mhentges,dmajor
+
+Now you can pass the `virtualenv_name` kwarg to the `Command` decorator which will configure the `_virtualenv_manager` accordingly.
+
+Differential Revision: https://phabricator.services.mozilla.com/D86256
+
+diff --git a/python/mach/mach/decorators.py b/python/mach/mach/decorators.py
+--- a/python/mach/mach/decorators.py
++++ b/python/mach/mach/decorators.py
+@@ -13,31 +13,29 @@ except ImportError:
+ 
+ 
+ from .base import MachError
+ from .registrar import Registrar
+ from mozbuild.base import MachCommandBase
+ 
+ 
+ class _MachCommand(object):
+-    """Container for mach command metadata.
+-
+-    Mach commands contain lots of attributes. This class exists to capture them
+-    in a sane way so tuples, etc aren't used instead.
+-    """
++    """Container for mach command metadata."""
++
+     __slots__ = (
+         # Content from decorator arguments to define the command.
+         'name',
+         'subcommand',
+         'category',
+         'description',
+         'conditions',
+         '_parser',
+         'arguments',
+         'argument_group_names',
++        'virtualenv_name',
+ 
+         # By default, subcommands will be sorted. If this is set to
+         # 'declaration', they will be left in declaration order.
+         'order',
+ 
+         # Describes how dispatch is performed.
+ 
+         # The Python class providing the command. This is the class type not
+@@ -56,25 +54,26 @@ class _MachCommand(object):
+ 
+         # For subcommands, the global order that the subcommand's declaration
+         # was seen.
+         'decl_order',
+     )
+ 
+     def __init__(self, name=None, subcommand=None, category=None,
+                  description=None, conditions=None, parser=None,
+-                 order=None):
++                 order=None, virtualenv_name=None):
+         self.name = name
+         self.subcommand = subcommand
+         self.category = category
+         self.description = description
+         self.conditions = conditions or []
+         self._parser = parser
+         self.arguments = []
+         self.argument_group_names = []
++        self.virtualenv_name = virtualenv_name
+         self.order = order
+ 
+         self.cls = None
+         self.method = None
+         self.subcommand_handlers = {}
+         self.decl_order = None
+ 
+     @property
+diff --git a/python/mach/mach/dispatcher.py b/python/mach/mach/dispatcher.py
+--- a/python/mach/mach/dispatcher.py
++++ b/python/mach/mach/dispatcher.py
+@@ -256,17 +256,17 @@ class CommandAction(argparse.Action):
+ 
+             for command in sorted(r.commands_by_category[category]):
+                 handler = r.command_handlers[command]
+ 
+                 # Instantiate a handler class to see if it should be filtered
+                 # out for the current context or not. Condition functions can be
+                 # applied to the command's decorator.
+                 if handler.conditions:
+-                    instance = handler.cls(self._context)
++                    instance = handler.cls(self._context, handler.virtualenv_name)
+ 
+                     is_filtered = False
+                     for c in handler.conditions:
+                         if not c(instance):
+                             is_filtered = True
+                             break
+                     if is_filtered:
+                         description = handler.description
+diff --git a/python/mach/mach/registrar.py b/python/mach/mach/registrar.py
+--- a/python/mach/mach/registrar.py
++++ b/python/mach/mach/registrar.py
+@@ -67,17 +67,17 @@ class MachRegistrar(object):
+         if context is None:
+             raise ValueError('Expected a non-None context.')
+ 
+         prerun = getattr(context, 'pre_dispatch_handler', None)
+         if prerun:
+             prerun(context, handler, args=kwargs)
+ 
+         context.handler = handler
+-        return cls(context)
++        return cls(context, handler.virtualenv_name)
+ 
+     @classmethod
+     def _fail_conditions(_, handler, instance):
+         fail_conditions = []
+         if handler.conditions:
+             for c in handler.conditions:
+                 if not c(instance):
+                     fail_conditions.append(c)
+diff --git a/python/mozbuild/mozbuild/base.py b/python/mozbuild/mozbuild/base.py
+--- a/python/mozbuild/mozbuild/base.py
++++ b/python/mozbuild/mozbuild/base.py
+@@ -88,33 +88,35 @@ class MozbuildObject(ProcessExecutionMix
+ 
+     Modules in this package typically require common functionality such as
+     accessing the current config, getting the location of the source directory,
+     running processes, etc. This classes provides that functionality. Other
+     modules can inherit from this class to obtain this functionality easily.
+     """
+ 
+     def __init__(self, topsrcdir, settings, log_manager, topobjdir=None,
+-                 mozconfig=MozconfigLoader.AUTODETECT):
++                 mozconfig=MozconfigLoader.AUTODETECT, virtualenv_name=None):
+         """Create a new Mozbuild object instance.
+ 
+         Instances are bound to a source directory, a ConfigSettings instance,
+         and a LogManager instance. The topobjdir may be passed in as well. If
+         it isn't, it will be calculated from the active mozconfig.
+         """
+         self.topsrcdir = mozpath.normsep(topsrcdir)
+         self.settings = settings
+ 
+         self.populate_logger()
+         self.log_manager = log_manager
+ 
+         self._make = None
+         self._topobjdir = mozpath.normsep(topobjdir) if topobjdir else topobjdir
+         self._mozconfig = mozconfig
+         self._config_environment = None
++        self._virtualenv_name = virtualenv_name or (
++            'init_py3' if six.PY3 else 'init')
+         self._virtualenv_manager = None
+ 
+     @classmethod
+     def from_environment(cls, cwd=None, detect_virtualenv_mozinfo=True, **kwargs):
+         """Create a MozbuildObject by detecting the proper one from the env.
+ 
+         This examines environment state like the current working directory and
+         creates a MozbuildObject from the found source directory, mozconfig, etc.
+@@ -257,22 +259,20 @@ class MozbuildObject(ProcessExecutionMix
+ 
+         return self._topobjdir
+ 
+     @property
+     def virtualenv_manager(self):
+         from .virtualenv import VirtualenvManager
+ 
+         if self._virtualenv_manager is None:
+-            name = "init"
+-            if six.PY3:
+-                name += "_py3"
+             self._virtualenv_manager = VirtualenvManager(
+                 self.topsrcdir,
+-                os.path.join(self.topobjdir, '_virtualenvs', name),
++                os.path.join(self.topobjdir, '_virtualenvs',
++                             self._virtualenv_name),
+                 sys.stdout,
+                 os.path.join(self.topsrcdir, 'build', 'virtualenv_packages.txt')
+                 )
+ 
+         return self._virtualenv_manager
+ 
+     @staticmethod
+     @memoize
+@@ -857,17 +857,17 @@ class MozbuildObject(ProcessExecutionMix
+ 
+ class MachCommandBase(MozbuildObject):
+     """Base class for mach command providers that wish to be MozbuildObjects.
+ 
+     This provides a level of indirection so MozbuildObject can be refactored
+     without having to change everything that inherits from it.
+     """
+ 
+-    def __init__(self, context):
++    def __init__(self, context, virtualenv_name):
+         # Attempt to discover topobjdir through environment detection, as it is
+         # more reliable than mozconfig when cwd is inside an objdir.
+         topsrcdir = context.topdir
+         topobjdir = None
+         detect_virtualenv_mozinfo = True
+         if hasattr(context, 'detect_virtualenv_mozinfo'):
+             detect_virtualenv_mozinfo = getattr(context,
+                                                 'detect_virtualenv_mozinfo')
+@@ -898,18 +898,20 @@ class MachCommandBase(MozbuildObject):
+                   'default mozconfig in searched paths.' % (e.objdir1,
+                                                             e.objdir2))
+             sys.exit(1)
+ 
+         except MozconfigLoadException as e:
+             print(e)
+             sys.exit(1)
+ 
+-        MozbuildObject.__init__(self, topsrcdir, context.settings,
+-                                context.log_manager, topobjdir=topobjdir)
++        MozbuildObject.__init__(
++            self, topsrcdir, context.settings,
++            context.log_manager, topobjdir=topobjdir,
++            virtualenv_name=virtualenv_name)
+ 
+         self._mach_context = context
+ 
+         # Incur mozconfig processing so we have unified error handling for
+         # errors. Otherwise, the exceptions could bubble back to mach's error
+         # handler.
+         try:
+             self.mozconfig
+diff --git a/python/mozbuild/mozbuild/code_analysis/mach_commands.py b/python/mozbuild/mozbuild/code_analysis/mach_commands.py
+--- a/python/mozbuild/mozbuild/code_analysis/mach_commands.py
++++ b/python/mozbuild/mozbuild/code_analysis/mach_commands.py
+@@ -2410,17 +2410,17 @@ class StaticAnalysis(MachCommandBase):
+             header_group = header.groups()
+             element = [header_group[3], header_group[4], header_group[5]]
+             issues.append(element)
+         return issues
+ 
+     def _get_config_environment(self):
+         ran_configure = False
+         config = None
+-        builder = Build(self._mach_context)
++        builder = Build(self._mach_context, None)
+ 
+         try:
+             config = self.config_environment
+         except Exception:
+             self.log(
+                 logging.WARNING,
+                 "static-analysis",
+                 {},
+@@ -2470,28 +2470,28 @@ class StaticAnalysis(MachCommandBase):
+             # So we recurse to see if the file exists once again.
+             return self._build_compile_db(verbose=verbose)
+ 
+         if config:
+             print(
+                 "Looks like a clang compilation database has not been "
+                 "created yet, creating it now..."
+             )
+-            builder = Build(self._mach_context)
++            builder = Build(self._mach_context, None)
+             rc = builder.build_backend(["CompileDB"], verbose=verbose)
+             if rc != 0:
+                 return rc
+             assert os.path.exists(self._compile_db)
+             return 0
+ 
+     def _build_export(self, jobs, verbose=False):
+         def on_line(line):
+             self.log(logging.INFO, "build_output", {"line": line}, "{line}")
+ 
+-        builder = Build(self._mach_context)
++        builder = Build(self._mach_context, None)
+         # First install what we can through install manifests.
+         rc = builder._run_make(
+             directory=self.topobjdir,
+             target="pre-export",
+             line_handler=None,
+             silent=not verbose,
+         )
+         if rc != 0:
+diff --git a/python/mozbuild/mozbuild/test/test_base.py b/python/mozbuild/mozbuild/test/test_base.py
+--- a/python/mozbuild/mozbuild/test/test_base.py
++++ b/python/mozbuild/mozbuild/test/test_base.py
+@@ -207,17 +207,17 @@ class TestMozbuildObject(unittest.TestCa
+ 
+             context = MockMachContext()
+             context.cwd = topobjdir
+             context.topdir = topsrcdir
+             context.settings = None
+             context.log_manager = None
+             context.detect_virtualenv_mozinfo = False
+ 
+-            o = MachCommandBase(context)
++            o = MachCommandBase(context, None)
+ 
+             self.assertEqual(o.topobjdir, mozpath.normsep(topobjdir))
+             self.assertEqual(o.topsrcdir, mozpath.normsep(topsrcdir))
+ 
+         finally:
+             os.chdir(self._old_cwd)
+             shutil.rmtree(d)
+ 
+@@ -277,17 +277,17 @@ class TestMozbuildObject(unittest.TestCa
+             context.settings = None
+             context.log_manager = None
+             context.detect_virtualenv_mozinfo = False
+ 
+             stdout = sys.stdout
+             sys.stdout = StringIO()
+             try:
+                 with self.assertRaises(SystemExit):
+-                    MachCommandBase(context)
++                    MachCommandBase(context, None)
+ 
+                 self.assertTrue(sys.stdout.getvalue().startswith(
+                     'Ambiguous object directory detected.'))
+             finally:
+                 sys.stdout = stdout
+ 
+         finally:
+             os.chdir(self._old_cwd)

+ 26 - 0
mozilla-release/patches/series

@@ -6979,9 +6979,35 @@ TOP-NOBUG-seamonkey-credits.patch
 1646427-2-79a1.patch
 1646427-3-79a1.patch
 1646427-4-79a1.patch
+1645097-79a1.patch
 1645948-80a1.patch
 1650057-80a1.patch
 1606475-80a1.patch
 1654795-80a1.patch
 1655701-81a1.patch
 1653560-81a1.patch
+985141-1-81a1.patch
+985141-2-81a1.patch
+1656764-81a1.patch
+1657301-81a1.patch
+1656740-81a1.patch
+1657299-81a1.patch
+1627484-81a1.patch
+985141-3-81a1.patch
+1659113-81a1.patch
+1659154-81a1.patch
+1659411-1-81a1.patch
+1659411-2-81a1.patch
+1659542-82a1.patch
+1666232-83a1.patch
+1662851-82a1.patch
+1666347-85a1.patch
+1680051-85a1.patch
+1712819-1-90a1.patch
+1712819-2-90a1.patch
+1712819-3-91a1.patch
+1714244-91a1.patch
+1713613-1-91a1.patch
+1713613-2-91a1.patch
+1713613-3-91a1.patch
+1713610-91a1.patch

Some files were not shown because too many files changed in this diff