
backports and fixes

Frank-Rainer Grahl, 1 year ago
commit 5d82f99567
32 changed files with 25,980 additions and 38 deletions
  1. comm-release/patches/1816266-5-112a1.patch (+86 -0)
  2. comm-release/patches/1816266-6-112a1.patch (+433 -0)
  3. comm-release/patches/9999999-fiximport-25318.patch (+79 -0)
  4. comm-release/patches/series (+4 -1)
  5. mozilla-release/patches/1340901-87a1.patch (+7266 -0)
  6. mozilla-release/patches/1434513-61a1.patch (+53 -0)
  7. mozilla-release/patches/1458129-64a1.patch (+31 -0)
  8. mozilla-release/patches/1468539-65a1.patch (+119 -0)
  9. mozilla-release/patches/1468542-65a1.patch (+116 -0)
  10. mozilla-release/patches/1468544-65a1.patch (+207 -0)
  11. mozilla-release/patches/1468552-65a1.patch (+173 -0)
  12. mozilla-release/patches/1468556-65a1.patch (+996 -0)
  13. mozilla-release/patches/1484846-63a1.patch (+291 -0)
  14. mozilla-release/patches/1488217-64a1.patch (+23 -0)
  15. mozilla-release/patches/1489454-libmar-64a1.patch (+2020 -0)
  16. mozilla-release/patches/1497937-65a1.patch (+32 -0)
  17. mozilla-release/patches/1508782-66a1.patch (+579 -0)
  18. mozilla-release/patches/1511181-bspatch-65a1.patch (+256 -0)
  19. mozilla-release/patches/1514532-1-66a1.patch (+4068 -0)
  20. mozilla-release/patches/1514532-2-66a1.patch (+31 -0)
  21. mozilla-release/patches/1540142-68a1.patch (+47 -0)
  22. mozilla-release/patches/1567642-1-71a1.patch (+51 -5)
  23. mozilla-release/patches/1583854-75a1.patch (+12 -12)
  24. mozilla-release/patches/1596660-PARTIAL-removeonly-73a1.patch (+1950 -0)
  25. mozilla-release/patches/1604360-7-73a1.patch (+6 -6)
  26. mozilla-release/patches/1616989-75a1.patch (+7 -14)
  27. mozilla-release/patches/1648336-120a1.patch (+207 -0)
  28. mozilla-release/patches/1648336-79a1.patch (+536 -0)
  29. mozilla-release/patches/1691957-87a1.patch (+24 -0)
  30. mozilla-release/patches/1743947-104a1.patch (+6061 -0)
  31. mozilla-release/patches/TOP-1846703-binutilsfix-11504.patch (+193 -0)
  32. mozilla-release/patches/series (+23 -0)

+ 86 - 0
comm-release/patches/1816266-5-112a1.patch

@@ -0,0 +1,86 @@
+# HG changeset patch
+# User Rob Lemley <rob@thunderbird.net>
+# Date 1676490730 0
+# Node ID 44eb37b7f6b91b782eccd8a5c45df5231f1bb52a
+# Parent  8c48aaa313d228a0b5bd3ac629ea3d277ecbbb27
+Bug 1816266 - Remove parent directory traversals (../) from manifest files. r=freaktechnik
+
+There's no need for things like "../../../browser" to get to parent paths,
+starting the source path with a "/" will start at $topsrcdir.
+
+Differential Revision: https://phabricator.services.mozilla.com/D169561
+
+diff --git a/mail/base/jar.mn b/mail/base/jar.mn
+--- a/mail/base/jar.mn
++++ b/mail/base/jar.mn
+@@ -107,30 +107,30 @@ messenger.jar:
+ *   content/messenger/safeMode.xul                  (content/safeMode.xul)
+     content/messenger/safeMode.js                   (content/safeMode.js)
+     content/messenger/sanitize.xul                  (content/sanitize.xul)
+     content/messenger/sanitize.js                   (content/sanitize.js)
+     content/messenger/sanitizeDialog.css            (content/sanitizeDialog.css)
+     content/messenger/sanitizeDialog.js             (content/sanitizeDialog.js)
+ *   content/messenger/toolbarIconColor.js           (content/toolbarIconColor.js)
+ # the following files are mail-specific overrides
+-*   content/messenger/license.html                  (/@mozreltopsrcdir@/toolkit/content/license.html)
++*   content/messenger/license.html                  (/toolkit/content/license.html)
+ % override chrome://global/content/license.html chrome://messenger/content/license.html
+ # L10n resource overrides
+ % override chrome://mozapps/locale/downloads/settingsChange.dtd chrome://messenger/locale/downloads/settingsChange.dtd
+ % override chrome://global/locale/netError.dtd chrome://messenger/locale/netError.dtd
+ 
+ *   content/messenger/buildconfig.html              (content/buildconfig.html)
+     content/messenger/buildconfig.css               (content/buildconfig.css)
+ % override chrome://global/content/buildconfig.html chrome://messenger/content/buildconfig.html
+ % override chrome://global/content/buildconfig.css chrome://messenger/content/buildconfig.css
+ 
+ comm.jar:
+ % content communicator %content/communicator/
+    content/communicator/charsetOverlay.xul          (content/charsetOverlay.xul)
+    content/communicator/contentAreaClick.js         (content/contentAreaClick.js)
+-   content/communicator/labelsencodings.properties  (/@mozreltopsrcdir@/dom/encoding/labelsencodings.properties)
++   content/communicator/labelsencodings.properties  (/dom/encoding/labelsencodings.properties)
+ *  content/communicator/utilityOverlay.xul          (content/utilityOverlay.xul)
+    content/communicator/utilityOverlay.js           (content/utilityOverlay.js)
+ 
+ toolkit.jar:
+ % overlay chrome://global/content/customizeToolbar.xul chrome://messenger/content/customizeToolbarOverlay.xul
+ % overlay chrome://mozapps/content/downloads/downloads.xul chrome://messenger/content/downloadsOverlay.xul
+diff --git a/mail/extensions/smime/jar.mn b/mail/extensions/smime/jar.mn
+--- a/mail/extensions/smime/jar.mn
++++ b/mail/extensions/smime/jar.mn
+@@ -2,21 +2,21 @@
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ messenger.jar:
+ % content messenger-smime %content/messenger-smime/
+ % overlay chrome://messenger/content/am-identity-edit.xul chrome://messenger/content/am-smimeIdentityEditOverlay.xul
+    content/messenger-smime/msgCompSMIMEOverlay.js                   (content/msgCompSMIMEOverlay.js)
+    content/messenger-smime/msgHdrViewSMIMEOverlay.js                (content/msgHdrViewSMIMEOverlay.js)
+-   content/messenger/am-smime.xul                                   (../../../mailnews/extensions/smime/content/am-smime.xul)
+-   content/messenger/am-smime.js                                    (../../../mailnews/extensions/smime/content/am-smime.js)
+-   content/messenger/am-smimeIdentityEditOverlay.xul                (../../../mailnews/extensions/smime/content/am-smimeIdentityEditOverlay.xul)
+-   content/messenger/am-smimeOverlay.xul                            (../../../mailnews/extensions/smime/content/am-smimeOverlay.xul)
+-   content/messenger/certpicker.js                                  (../../../mailnews/extensions/smime/content/certpicker.js)
+-   content/messenger/certpicker.xul                                 (../../../mailnews/extensions/smime/content/certpicker.xul)
+-   content/messenger-smime/msgReadSMIMEOverlay.js                   (../../../mailnews/extensions/smime/content/msgReadSMIMEOverlay.js)
+-   content/messenger-smime/msgCompSecurityInfo.js                   (../../../mailnews/extensions/smime/content/msgCompSecurityInfo.js)
+-   content/messenger-smime/msgCompSecurityInfo.xul                  (../../../mailnews/extensions/smime/content/msgCompSecurityInfo.xul)
+-   content/messenger-smime/msgReadSecurityInfo.xul                  (../../../mailnews/extensions/smime/content/msgReadSecurityInfo.xul)
+-   content/messenger-smime/msgReadSecurityInfo.js                   (../../../mailnews/extensions/smime/content/msgReadSecurityInfo.js)
+-   content/messenger-smime/certFetchingStatus.xul                   (../../../mailnews/extensions/smime/content/certFetchingStatus.xul)
+-   content/messenger-smime/certFetchingStatus.js                    (../../../mailnews/extensions/smime/content/certFetchingStatus.js)
++   content/messenger/am-smime.xul                                   (/comm/mailnews/extensions/smime/content/am-smime.xul)
++   content/messenger/am-smime.js                                    (/comm/mailnews/extensions/smime/content/am-smime.js)
++   content/messenger/am-smimeIdentityEditOverlay.xul                (/comm/mailnews/extensions/smime/content/am-smimeIdentityEditOverlay.xul)
++   content/messenger/am-smimeOverlay.xul                            (/comm/mailnews/extensions/smime/content/am-smimeOverlay.xul)
++   content/messenger/certpicker.js                                  (/comm/mailnews/extensions/smime/content/certpicker.js)
++   content/messenger/certpicker.xul                                 (/comm/mailnews/extensions/smime/content/certpicker.xul)
++   content/messenger-smime/msgReadSMIMEOverlay.js                   (/comm/mailnews/extensions/smime/content/msgReadSMIMEOverlay.js)
++   content/messenger-smime/msgCompSecurityInfo.js                   (/comm/mailnews/extensions/smime/content/msgCompSecurityInfo.js)
++   content/messenger-smime/msgCompSecurityInfo.xul                  (/comm/mailnews/extensions/smime/content/msgCompSecurityInfo.xul)
++   content/messenger-smime/msgReadSecurityInfo.xul                  (/comm/mailnews/extensions/smime/content/msgReadSecurityInfo.xul)
++   content/messenger-smime/msgReadSecurityInfo.js                   (/comm/mailnews/extensions/smime/content/msgReadSecurityInfo.js)
++   content/messenger-smime/certFetchingStatus.xul                   (/comm/mailnews/extensions/smime/content/certFetchingStatus.xul)
++   content/messenger-smime/certFetchingStatus.js                    (/comm/mailnews/extensions/smime/content/certFetchingStatus.js)
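
The commit message above points out that a jar.mn source path beginning with "/" is rooted at $topsrcdir, which is what makes the "../../../" traversals unnecessary. As a rough sketch of that resolution rule (a hypothetical resolve_jar_source helper and a made-up checkout path, not the real jar maker code), both the old and the new form end up at the same file:

    import os

    def resolve_jar_source(topsrcdir, manifest_dir, source):
        # "/comm/..." or "/toolkit/..." is rooted at the top source directory.
        if source.startswith("/"):
            return os.path.normpath(os.path.join(topsrcdir, source.lstrip("/")))
        # Anything else is relative to the directory containing jar.mn.
        return os.path.normpath(os.path.join(manifest_dir, source))

    top = "/builds/mozilla-central"                  # made-up checkout path
    smime = top + "/comm/mail/extensions/smime"      # directory of this jar.mn
    old = resolve_jar_source(top, smime, "../../../mailnews/extensions/smime/content/am-smime.xul")
    new = resolve_jar_source(top, smime, "/comm/mailnews/extensions/smime/content/am-smime.xul")
    assert old == new                                # same target either way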

+ 433 - 0
comm-release/patches/1816266-6-112a1.patch

@@ -0,0 +1,433 @@
+# HG changeset patch
+# User Rob Lemley <rob@thunderbird.net>
+# Date 1676490730 0
+# Node ID 8fe81e869effdd880a52d4d6fc1e98aebda50938
+# Parent  86c40bd0272aea77a712f25ac43dddee5e4be0f3
+Bug 1816266 - Remove "%s" replacements from moz.build files. r=dandarnell
+
+Most cases of using "%s" in path-related values are to find $topsrcdir. In
+moz.build files, paths starting with "/" are rooted at $topsrcdir, so a string
+replacement like "/%s/browser" % CONFIG["mozreltopsrcdir"] evaluates to
+"/./browser", which is just unnecessary.
+
+Differential Revision: https://phabricator.services.mozilla.com/D169562
+
+diff --git a/mail/app.mozbuild b/mail/app.mozbuild
+--- a/mail/app.mozbuild
++++ b/mail/app.mozbuild
+@@ -3,28 +3,28 @@
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ GENERATED_FILES['source-repo.h'].script = 'comm/build/source_repos.py:source_repo_header'
+ 
+ # Note that paths in this file are relative to the top directory, which may be
+ # m-c or c-c.
+ 
+-include('../mailnews/mailnews.mozbuild')
++include('/comm/mailnews/mailnews.mozbuild')
+ 
+-DIRS += ['/%s/mail/components' % CONFIG['commreltopsrcdir']]
++DIRS += ['/comm/mail/components']
+ 
+-include('/%s/toolkit/toolkit.mozbuild' % CONFIG['mozreltopsrcdir'])
++include('/toolkit/toolkit.mozbuild')
+ 
+ DIRS += ['/%s' % CONFIG['MOZ_BRANDING_DIRECTORY']]
+ 
+ 
+ if CONFIG['MOZ_CALENDAR']:
+     DIRS += [
+-        '/%s/calendar/lightning' % CONFIG['commreltopsrcdir'],
+-        '/%s/calendar/providers/gdata' % CONFIG['commreltopsrcdir'],
+-        '/%s/calendar/timezones' % CONFIG['commreltopsrcdir'],
++        '/comm/calendar/lightning',
++        '/comm/calendar/providers/gdata',
++        '/comm/calendar/timezones',
+     ]
+ 
+ DIRS += [
+-    '/%s/chat' % CONFIG['commreltopsrcdir'],
+-    '/%s/mail' % CONFIG['commreltopsrcdir'],
++    '/comm/chat',
++    '/comm/mail',
+ ]
+diff --git a/mail/app/moz.build b/mail/app/moz.build
+--- a/mail/app/moz.build
++++ b/mail/app/moz.build
+@@ -11,37 +11,37 @@ if CONFIG['MOZ_NO_PIE_COMPAT']:
+     DIRS += ['no-pie']
+ else:
+     GeckoProgram(CONFIG['MOZ_APP_NAME'])
+ 
+ USE_LIBS += ['mozglue']
+ SOURCES += ['nsMailApp.cpp']
+ LOCAL_INCLUDES += [
+     '!/build',
+-    '/%s/ipc/contentproc/' % CONFIG['mozreltopsrcdir'],
+-    '/%s/toolkit/xre' % CONFIG['mozreltopsrcdir'],
+-    '/%s/xpcom/base' % CONFIG['mozreltopsrcdir'],
+-    '/%s/xpcom/build' % CONFIG['mozreltopsrcdir'],
++    '/ipc/contentproc/',
++    '/toolkit/xre',
++    '/xpcom/base',
++    '/xpcom/build',
+ ]
+ 
+ if CONFIG['LIBFUZZER']:
+     USE_LIBS += [ 'fuzzer' ]
+     LOCAL_INCLUDES += [
+-        '/%s/tools/fuzzing/libfuzzer' % CONFIG['mozreltopsrcdir'],
++        '/tools/fuzzing/libfuzzer',
+     ]
+ 
+ if CONFIG['OS_ARCH'] == 'WINNT':
+     RCINCLUDE = 'splash.rc'
+     DEFINES['MOZ_THUNDERBIRD'] = True
+ 
+ if CONFIG['MOZ_SANDBOX'] and CONFIG['OS_ARCH'] == 'WINNT':
+     # For sandbox includes and the include dependencies those have
+     LOCAL_INCLUDES += [
+-        '/%s/security/sandbox/chromium' % CONFIG['mozreltopsrcdir'],
+-        '/%s/security/sandbox/chromium-shim' % CONFIG['mozreltopsrcdir'],
++        '/security/sandbox/chromium',
++        '/security/sandbox/chromium-shim',
+     ]
+ 
+     USE_LIBS += [
+         'sandbox_s',
+     ]
+ 
+     DELAYLOAD_DLLS += [
+         'winmm.dll',
+diff --git a/mail/base/moz.build b/mail/base/moz.build
+--- a/mail/base/moz.build
++++ b/mail/base/moz.build
+@@ -7,17 +7,16 @@ DIRS += ['modules']
+ 
+ TEST_DIRS += ['test']
+ 
+ JAR_MANIFESTS += ['jar.mn']
+ 
+ DEFINES['PRE_RELEASE_SUFFIX'] = ''
+ DEFINES['MOZ_APP_VERSION'] = CONFIG['MOZ_APP_VERSION']
+ DEFINES['MOZ_APP_VERSION_DISPLAY'] = CONFIG['MOZ_APP_VERSION_DISPLAY']
+-DEFINES['mozreltopsrcdir'] = CONFIG['mozreltopsrcdir']
+ DEFINES['APP_LICENSE_BLOCK'] = '%s/content/overrides/app-license.html' % SRCDIR
+ DEFINES['APP_LICENSE_PRODUCT_NAME'] = '%s/content/overrides/app-license-name.html' % SRCDIR
+ DEFINES['APP_LICENSE_LIST_BLOCK'] = '%s/content/overrides/app-license-list.html' % SRCDIR
+ DEFINES['APP_LICENSE_BODY_BLOCK'] = '%s/content/overrides/app-license-body.html' % SRCDIR
+ 
+ if CONFIG['MOZILLA_OFFICIAL']:
+     DEFINES['OFFICIAL_BUILD'] = 1
+ 
+@@ -25,17 +24,17 @@ if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('win
+     DEFINES['HAVE_SHELL_SERVICE'] = 1
+ 
+ if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('windows', 'cocoa'):
+     DEFINES['CAN_DRAW_IN_TITLEBAR'] = 1
+ 
+ if CONFIG['MOZ_UPDATER']:
+     DEFINES['MOZ_UPDATER'] = 1
+ 
+-DEFINES['TOOLKIT_DIR'] = "%s/toolkit" % (CONFIG['moztopsrcdir'],)
++DEFINES['TOOLKIT_DIR'] = "/toolkit"
+ 
+ # For customized buildconfig
+ DEFINES['TOPOBJDIR'] = TOPOBJDIR
+ 
+ DEFINES['MOZ_APP_DISPLAYNAME'] = CONFIG['MOZ_APP_DISPLAYNAME']
+ DEFINES['THUNDERBIRD_VERSION_DISPLAY'] = CONFIG['THUNDERBIRD_VERSION_DISPLAY']
+ DEFINES['THUNDERBIRD_DEVELOPER_WWW'] = "https://developer.thunderbird.net/"
+ 
+diff --git a/mail/components/addrbook/moz.build b/mail/components/addrbook/moz.build
+--- a/mail/components/addrbook/moz.build
++++ b/mail/components/addrbook/moz.build
+@@ -1,8 +1,8 @@
+ # vim: set filetype=python:
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ JAR_MANIFESTS += ['jar.mn']
+ 
+-DEFINES['TOOLKIT_DIR'] = "%s/toolkit" % (CONFIG['moztopsrcdir'],)
++DEFINES['TOOLKIT_DIR'] = "/toolkit"
+diff --git a/mail/components/compose/moz.build b/mail/components/compose/moz.build
+--- a/mail/components/compose/moz.build
++++ b/mail/components/compose/moz.build
+@@ -8,9 +8,9 @@ JAR_MANIFESTS += ['jar.mn']
+ EXTRA_JS_MODULES += [
+     'editorUtilities.jsm',
+ ]
+ 
+ JS_PREFERENCE_PP_FILES += [
+     'composer.js'
+ ]
+ 
+-DEFINES['TOOLKIT_DIR'] = "%s/toolkit" % (CONFIG['moztopsrcdir'],)
++DEFINES['TOOLKIT_DIR'] = "/toolkit"
+diff --git a/mail/components/shell/moz.build b/mail/components/shell/moz.build
+--- a/mail/components/shell/moz.build
++++ b/mail/components/shell/moz.build
+@@ -11,17 +11,17 @@ SOURCES += [
+ 
+ DEFINES['MOZ_APP_NAME'] = '"%s"' % CONFIG['MOZ_APP_NAME']
+ 
+ if CONFIG['OS_ARCH'] == 'WINNT':
+     SOURCES += [
+         'nsMailWinIntegration.cpp',
+     ]
+     LOCAL_INCLUDES += [
+-        '/%s/other-licenses/nsis/Contrib/CityHash/cityhash' % (CONFIG['mozreltopsrcdir'],),
++        '/other-licenses/nsis/Contrib/CityHash/cityhash',
+     ]
+ 
+ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk':
+     SOURCES += ['nsMailGNOMEIntegration.cpp']
+ 
+ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'cocoa':
+     SOURCES += ['nsMailMacIntegration.cpp']
+ 
+diff --git a/mail/moz.build b/mail/moz.build
+--- a/mail/moz.build
++++ b/mail/moz.build
+@@ -14,12 +14,12 @@ DIRS += [
+     'themes',
+     'app',
+ ]
+ 
+ if CONFIG['MAKENSISU']:
+     DIRS += ['installer/windows']
+ 
+ if CONFIG['MOZ_BUNDLED_FONTS']:
+-    DIRS += ['/%s/browser/fonts' % CONFIG['mozreltopsrcdir']]
++    DIRS += ['/browser/fonts']
+ 
+ TEST_DIRS += ['test/mozmill']
+ 
+diff --git a/mail/test/mozmill/moz.build b/mail/test/mozmill/moz.build
+--- a/mail/test/mozmill/moz.build
++++ b/mail/test/mozmill/moz.build
+@@ -5,24 +5,23 @@
+ 
+ TEST_HARNESS_FILES.mozmill += [
+     'mozmilltests.list',
+     'runtest.py',
+     'runtestlist.py',
+ ]
+ 
+ TEST_HARNESS_FILES.mozmill.resources += [
+-    '/{}/mail/base/test/unit/resources/viewWrapperTestUtils.js'.format(CONFIG['commreltopsrcdir']),
++    '/comm/mail/base/test/unit/resources/viewWrapperTestUtils.js',
+ ]
+ 
+ 
+-def mailnews_files(files, comm=CONFIG['commreltopsrcdir']):
++def mailnews_files(files):
+     return [
+-        '/{comm}/mailnews/{file}'.format(
+-            comm=comm,
++        '/comm/mailnews/{file}'.format(
+             file=file,
+         ) for file in files
+     ]
+ 
+ TEST_HARNESS_FILES.mozmill.fakeserver += mailnews_files([
+     'test/fakeserver/auth.js',
+     'test/fakeserver/imapd.js',
+     'test/fakeserver/maild.js',
+diff --git a/mailnews/base/test/moz.build b/mailnews/base/test/moz.build
+--- a/mailnews/base/test/moz.build
++++ b/mailnews/base/test/moz.build
+@@ -7,11 +7,11 @@ XPCSHELL_TESTS_MANIFESTS += ['unit/xpcsh
+ 
+ FINAL_LIBRARY = 'xul-gtest'
+ 
+ UNIFIED_SOURCES += [
+     'TestMailCookie.cpp',
+ ]
+ 
+ LOCAL_INCLUDES += [
+-    '/%s/netwerk/test' % CONFIG['mozreltopsrcdir'],
+-    '/%s/xpcom/tests' % CONFIG['mozreltopsrcdir'],
++    '/netwerk/test',
++    '/xpcom/tests',
+ ]
+diff --git a/mailnews/base/util/moz.build b/mailnews/base/util/moz.build
+--- a/mailnews/base/util/moz.build
++++ b/mailnews/base/util/moz.build
+@@ -63,16 +63,16 @@ EXTRA_JS_MODULES += [
+     'OAuth2.jsm',
+     'OAuth2Providers.jsm',
+     'StringBundle.js',
+     'templateUtils.js',
+     'traceHelper.js',
+ ]
+ 
+ LOCAL_INCLUDES += [
+-  '/%s/netwerk/base' % CONFIG['mozreltopsrcdir']
++  '/netwerk/base',
+ ]
+ 
+ FINAL_LIBRARY = 'mail'
+ 
+ Library('msgbsutl_s')
+ 
+ DEFINES['_IMPL_NS_MSG_BASE'] = True
+diff --git a/mailnews/compose/src/moz.build b/mailnews/compose/src/moz.build
+--- a/mailnews/compose/src/moz.build
++++ b/mailnews/compose/src/moz.build
+@@ -47,16 +47,16 @@ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'coco
+     ]
+ 
+ EXTRA_COMPONENTS += [
+     'nsSMTPProtocolHandler.js',
+     'nsSMTPProtocolHandler.manifest',
+ ]
+ 
+ LOCAL_INCLUDES += [
+-  '/%s/dom/base' % CONFIG['mozreltopsrcdir'],
++  '/dom/base',
+ ]
+ 
+ FINAL_LIBRARY = 'mail'
+ 
+ # clang-cl rightly complains about switch on nsresult.
+ if CONFIG['CC_TYPE'] == 'clang-cl':
+     CXXFLAGS += ['-Wno-switch']
+diff --git a/mailnews/extensions/smime/src/moz.build b/mailnews/extensions/smime/src/moz.build
+--- a/mailnews/extensions/smime/src/moz.build
++++ b/mailnews/extensions/smime/src/moz.build
+@@ -13,10 +13,10 @@ SOURCES += [
+ EXTRA_COMPONENTS += [
+     'smime-service.js',
+     'smime-service.manifest',
+ ]
+ 
+ FINAL_LIBRARY = 'mail'
+ 
+ LOCAL_INCLUDES += [
+-    '/%s/security/manager/pki' % CONFIG['mozreltopsrcdir'],
++    '/security/manager/pki',
+ ]
+diff --git a/mailnews/imap/test/moz.build b/mailnews/imap/test/moz.build
+--- a/mailnews/imap/test/moz.build
++++ b/mailnews/imap/test/moz.build
+@@ -3,17 +3,17 @@
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ XPCSHELL_TESTS_MANIFESTS += ['unit/xpcshell.ini']
+ 
+ LOCAL_INCLUDES += [
+    '../../base/util',
+    '../src',
+-   '/%s/xpcom/tests' % CONFIG['mozreltopsrcdir'],
++   '/xpcom/tests',
+ ]
+ 
+ USE_LIBS += [
+     'msgbsutl_s',
+     'msgimap_s',
+     'nspr',
+     'xpcomglue_s',
+     'xul',
+diff --git a/mailnews/intl/moz.build b/mailnews/intl/moz.build
+--- a/mailnews/intl/moz.build
++++ b/mailnews/intl/moz.build
+@@ -15,24 +15,24 @@ UNIFIED_SOURCES += [
+     'nsUnicodeToMUTF7.cpp',
+     'nsUnicodeToUTF7.cpp',
+     'nsUTF7ToUnicode.cpp',
+ ]
+ 
+ XPIDL_MODULE = 'commuconv'
+ 
+ LOCAL_INCLUDES += [
+-    '/%s/intl/locale' % CONFIG['mozreltopsrcdir'],
++    '/intl/locale',
+ ]
+ 
+ GENERATED_FILES += [
+     'charsetalias.properties.h',
+ ]
+ charsetalias = GENERATED_FILES['charsetalias.properties.h']
+-charsetalias.script = '/%s/intl/locale/props2arrays.py' % CONFIG['mozreltopsrcdir']
++charsetalias.script = '/intl/locale/props2arrays.py'
+ charsetalias.inputs = ['charsetalias.properties']
+ 
+ FINAL_LIBRARY = 'mail'
+ 
+ # Tests need more attention before they can be enabled.
+ TEST_DIRS += ['test']
+ 
+ JAR_MANIFESTS += ['jar.mn']
+diff --git a/mailnews/mailnews.mozbuild b/mailnews/mailnews.mozbuild
+--- a/mailnews/mailnews.mozbuild
++++ b/mailnews/mailnews.mozbuild
+@@ -1,12 +1,11 @@
+ # vim: set filetype=python:
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ DIRS += [
+-    '/%s/db' % CONFIG['commreltopsrcdir'],
+-    '/%s/ldap' % CONFIG['commreltopsrcdir'],
+-    '/%s/ldap/xpcom' % CONFIG['commreltopsrcdir'],
+-    '/%s/mailnews' % CONFIG['commreltopsrcdir'],
++    '/comm/db',
++    '/comm/ldap',
++    '/comm/ldap/xpcom',
++    '/comm/mailnews',
+ ]
+-
+diff --git a/mailnews/mime/src/moz.build b/mailnews/mime/src/moz.build
+--- a/mailnews/mime/src/moz.build
++++ b/mailnews/mime/src/moz.build
+@@ -66,18 +66,18 @@ SOURCES += [
+     'nsCMS.cpp',
+     'nsCMSSecureMessage.cpp',
+     'nsMimeObjectClassAccess.cpp',
+     'nsSimpleMimeConverterStub.cpp',
+     'nsStreamConverter.cpp',
+ ]
+ 
+ LOCAL_INCLUDES += [
+-     '/%s/security/certverifier' % CONFIG['mozreltopsrcdir'],
+-     '/%s/security/manager/ssl' % CONFIG['mozreltopsrcdir'],
++     '/security/certverifier',
++     '/security/manager/ssl',
+ ]
+ 
+ EXTRA_COMPONENTS += [
+     'mimeJSComponents.js',
+     'msgMime.manifest',
+ ]
+ 
+ EXTRA_JS_MODULES += [
+diff --git a/mailnews/moz.build b/mailnews/moz.build
+--- a/mailnews/moz.build
++++ b/mailnews/moz.build
+@@ -78,18 +78,18 @@ TESTING_JS_MODULES.mailnews += [
+ ]
+ 
+ if 'comm' in CONFIG['MOZ_BUILD_APP']:
+   test_harness_base = TEST_HARNESS_FILES.xpcshell.comm
+ else:
+   test_harness_base = TEST_HARNESS_FILES.xpcshell
+ 
+ test_harness_base.mailnews.data += [
+-    '/%s/mailnews/test/data/**' % CONFIG['commreltopsrcdir'],
++    '/comm/mailnews/test/data/**',
+ ]
+ 
+ test_harness_base.mailnews.resources += [
+-    '/%s/mailnews/test/resources/**' % CONFIG['commreltopsrcdir'],
++    '/comm/mailnews/test/resources/**',
+ ]
+ 
+ JS_PREFERENCE_PP_FILES += [
+     'mailnews.js',
+ ]

+ 79 - 0
comm-release/patches/9999999-fiximport-25318.patch

@@ -0,0 +1,79 @@
+# HG changeset patch
+# User Frank-Rainer Grahl <frgrahl@gmx.net>
+# Date 1696762617 -7200
+# Parent  4484c94f2f525bd4780d2ec803ca8522dff9a853
+Bug 9999999 - Remove cruft from SeaMonkeys Thunderbird importer. r=IanN a=IanN
+
+diff --git a/suite/components/migration/src/nsThunderbirdProfileMigrator.cpp b/suite/components/migration/src/nsThunderbirdProfileMigrator.cpp
+--- a/suite/components/migration/src/nsThunderbirdProfileMigrator.cpp
++++ b/suite/components/migration/src/nsThunderbirdProfileMigrator.cpp
+@@ -17,23 +17,21 @@
+ #include "nsIProperties.h"
+ #include "nsServiceManagerUtils.h"
+ #include "nsThunderbirdProfileMigrator.h"
+ #include "prprf.h"
+ 
+ ///////////////////////////////////////////////////////////////////////////////
+ // nsThunderbirdProfileMigrator
+ 
+-#define FILE_NAME_SITEPERM_OLD    "cookperm.txt"
+ #define FILE_NAME_SITEPERM_NEW    "hostperm.1"
+-#define FILE_NAME_CERT8DB         "cert8.db"
+-#define FILE_NAME_KEY3DB          "key3.db"
+-#define FILE_NAME_SECMODDB        "secmod.db"
++#define FILE_NAME_CERT9DB         "cert9.db"
++#define FILE_NAME_KEY4DB          "key4.db"
+ #define FILE_NAME_HISTORY         "history.dat"
+-#define FILE_NAME_SIGNONS         "signons.sqlite"
++#define FILE_NAME_SIGNONS         "logins.json"
+ #define FILE_NAME_MIMETYPES       "mimeTypes.rdf"
+ #define FILE_NAME_USER_PREFS      "user.js"
+ #define FILE_NAME_PERSONALDICTIONARY "persdict.dat"
+ #define FILE_NAME_MAILVIEWS       "mailViews.dat"
+ 
+ NS_IMPL_ISUPPORTS(nsThunderbirdProfileMigrator, nsISuiteProfileMigrator,
+                   nsITimerCallback)
+ 
+@@ -90,18 +88,16 @@ nsThunderbirdProfileMigrator::Migrate(ui
+ 
+   if (aReplace &&
+       (aItems & nsISuiteProfileMigrator::SETTINGS ||
+        aItems & nsISuiteProfileMigrator::PASSWORDS ||
+        !aItems)) {
+     // Permissions (Images)
+     if (NS_SUCCEEDED(rv))
+       rv = CopyFile(FILE_NAME_SITEPERM_NEW, FILE_NAME_SITEPERM_NEW);
+-    if (NS_SUCCEEDED(rv))
+-      rv = CopyFile(FILE_NAME_SITEPERM_OLD, FILE_NAME_SITEPERM_OLD);
+   }
+ 
+   // the last thing to do is to actually copy over any mail folders
+   // we have marked for copying we want to do this last and it will be
+   // asynchronous so the UI doesn't freeze up while we perform
+   // this potentially very long operation.
+   CopyMailFolders();
+ 
+@@ -535,21 +531,19 @@ nsThunderbirdProfileMigrator::CopyPrefer
+ 
+   if (NS_SUCCEEDED(rv))
+     rv = TransformPreferences(FILE_NAME_PREFS, FILE_NAME_PREFS);
+   if (NS_SUCCEEDED(rv))
+     rv = CopyFile(FILE_NAME_USER_PREFS, FILE_NAME_USER_PREFS);
+ 
+   // Security Stuff
+   if (NS_SUCCEEDED(rv))
+-    rv = CopyFile(FILE_NAME_CERT8DB, FILE_NAME_CERT8DB);
++    rv = CopyFile(FILE_NAME_CERT9DB, FILE_NAME_CERT9DB);
+   if (NS_SUCCEEDED(rv))
+-    rv = CopyFile(FILE_NAME_KEY3DB, FILE_NAME_KEY3DB);
+-  if (NS_SUCCEEDED(rv))
+-    rv = CopyFile(FILE_NAME_SECMODDB, FILE_NAME_SECMODDB);
++    rv = CopyFile(FILE_NAME_KEY4DB, FILE_NAME_KEY4DB);
+ 
+   // User MIME Type overrides
+   if (NS_SUCCEEDED(rv))
+     rv = CopyFile(FILE_NAME_MIMETYPES, FILE_NAME_MIMETYPES);
+   if (NS_SUCCEEDED(rv))
+     rv = CopyFile(FILE_NAME_PERSONALDICTIONARY, FILE_NAME_PERSONALDICTIONARY);
+   if (NS_SUCCEEDED(rv))
+     rv = CopyFile(FILE_NAME_MAILVIEWS, FILE_NAME_MAILVIEWS);

+ 4 - 1
comm-release/patches/series

@@ -2105,5 +2105,8 @@ WIP-9999999-lintglobals.patch
 1597095-1-72a1.patch
 1597095-2-72a1.patch
 1853588-minidump-25318.patch
-1835524-bugsplat-cr-25318.patch
+1816266-5-112a1.patch
+1816266-6-112a1.patch
 9999999-urlbarborder-25318.patch
+1835524-bugsplat-cr-25318.patch
+9999999-fiximport-25318.patch

+ 7266 - 0
mozilla-release/patches/1340901-87a1.patch

@@ -0,0 +1,7266 @@
+# HG changeset patch
+# User ssengupta <ssengupta@mozilla.com>
+# Date 1611682034 0
+#      Tue Jan 26 17:27:14 2021 +0000
+# Node ID 2252ab087d8039acf6a5231155270dbef28205db
+# Parent  905c4bd465611986cdd62cdf44e3a5bcc6246200
+Bug 1340901 - Update Snappy to version 1.1.8. r=dom-workers-and-storage-reviewers,asuth
+
+Add a static assertion in IndexedDB to detect future updates.
+
+Differential Revision: https://phabricator.services.mozilla.com/D56708
+
+diff --git a/dom/indexedDB/ActorsParent.cpp b/dom/indexedDB/ActorsParent.cpp
+--- a/dom/indexedDB/ActorsParent.cpp
++++ b/dom/indexedDB/ActorsParent.cpp
+@@ -132,16 +132,18 @@ namespace mozilla {
+ 
+ MOZ_TYPE_SPECIFIC_SCOPED_POINTER_TEMPLATE(ScopedPRFileDesc,
+                                           PRFileDesc,
+                                           PR_Close);
+ 
+ namespace dom {
+ namespace indexedDB {
+ 
++static_assert(SNAPPY_VERSION == 0x010108);
++
+ using namespace mozilla::dom::quota;
+ using namespace mozilla::ipc;
+ 
+ namespace {
+ 
+ class ConnectionPool;
+ class Cursor;
+ class Database;
+diff --git a/other-licenses/snappy/README b/other-licenses/snappy/README
+--- a/other-licenses/snappy/README
++++ b/other-licenses/snappy/README
+@@ -1,26 +1,24 @@
+ See src/README for the README that ships with snappy.
+ 
+ Mozilla does not modify the actual snappy source with the exception of the
+ 'snappy-stubs-public.h' header. We have replaced its build system with our own.
+ 
+ Snappy comes from:
+-  http://code.google.com/p/snappy/
++  https://github.com/google/snappy
+ 
+-We are currently using revision: 114
++We are currently using revision: 1.1.8
+ 
+ To upgrade to a newer version:
+   1. Check out the new code using subversion.
+   2. Update 'snappy-stubs-public.h' in this directory with any changes that were
+      made to 'snappy-stubs-public.h.in' in the new source.
+-  3. Copy the major/minor/patch versions from 'configure.ac' into
++  3. Copy the major/minor/patch versions from 'CMakeLists.txt' into
+      'snappy-stubs-public.h'.
+   4. Copy all source files from the new version into the src subdirectory. The
+-     following files are not needed:
+-       - 'autom4te.cache' subdirectory
+-       - 'm4' subdirectory
++     following are not needed:
++       - 'CMakeLists.txt' file
++       - 'cmake' subdirectory
++       - 'docs' subdirectory
+        - 'testdata' subdirectory
+-       - 'autogen.sh'
+-       - 'configure.ac'
+-       - 'Makefile.am'
+-       - 'snappy.pc.in'
+   5. Update the revision stamp in this file.
++
+diff --git a/other-licenses/snappy/snappy-stubs-public.h b/other-licenses/snappy/snappy-stubs-public.h
+--- a/other-licenses/snappy/snappy-stubs-public.h
++++ b/other-licenses/snappy/snappy-stubs-public.h
+@@ -34,24 +34,24 @@
+ // from snappy-stubs-public.h.in at configure time.
+ 
+ #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+ #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+ 
+ #include <stdint.h>
+ 
+ #if defined IS_BIG_ENDIAN || defined __BIG_ENDIAN__
+-#define WORDS_BIGENDIAN
++#  define WORDS_BIGENDIAN
+ #endif
+ 
+ #define SNAPPY_MAJOR 1
+ #define SNAPPY_MINOR 1
+-#define SNAPPY_PATCHLEVEL 3
++#define SNAPPY_PATCHLEVEL 8
+ #define SNAPPY_VERSION \
+-    ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
++  ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
+ 
+ #include <string>
+ 
+ namespace snappy {
+ 
+ typedef int8_t int8;
+ typedef uint8_t uint8;
+ typedef int16_t int16;
+@@ -59,31 +59,31 @@ typedef uint16_t uint16;
+ typedef int32_t int32;
+ typedef uint32_t uint32;
+ typedef int64_t int64;
+ typedef uint64_t uint64;
+ 
+ typedef std::string string;
+ 
+ #ifndef DISALLOW_COPY_AND_ASSIGN
+-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+-  TypeName(const TypeName&);               \
+-  void operator=(const TypeName&)
++#  define DISALLOW_COPY_AND_ASSIGN(TypeName) \
++    TypeName(const TypeName&);               \
++    void operator=(const TypeName&)
+ #endif
+ 
+ struct iovec {
+-	void* iov_base;
+-	size_t iov_len;
++  void* iov_base;
++  size_t iov_len;
+ };
+ 
+ #if defined(_WIN32) || defined(_WIN64)
+-#if defined(_WIN64)
++#  if defined(_WIN64)
+ typedef __int64 LONG_PTR;
+-#else
++#  else
+ typedef long LONG_PTR;
+-#endif
++#  endif
+ typedef LONG_PTR SSIZE_T;
+ typedef SSIZE_T ssize_t;
+ #endif
+ 
+ }  // namespace snappy
+ 
+ #endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
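
The static_assert added to ActorsParent.cpp earlier in this patch checks SNAPPY_VERSION against 0x010108, and the macro just above shows how that constant is packed from major/minor/patch. A quick check of the arithmetic (plain Python, only to verify the value):

    SNAPPY_MAJOR, SNAPPY_MINOR, SNAPPY_PATCHLEVEL = 1, 1, 8
    SNAPPY_VERSION = (SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL
    assert SNAPPY_VERSION == 0x010108   # major 1, minor 1, patch 8
    print(hex(SNAPPY_VERSION))          # 0x10108
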
+diff --git a/other-licenses/snappy/src/CONTRIBUTING.md b/other-licenses/snappy/src/CONTRIBUTING.md
+new file mode 100644
+--- /dev/null
++++ b/other-licenses/snappy/src/CONTRIBUTING.md
+@@ -0,0 +1,26 @@
++# How to Contribute
++
++We'd love to accept your patches and contributions to this project. There are
++just a few small guidelines you need to follow.
++
++## Contributor License Agreement
++
++Contributions to this project must be accompanied by a Contributor License
++Agreement. You (or your employer) retain the copyright to your contribution,
++this simply gives us permission to use and redistribute your contributions as
++part of the project. Head over to <https://cla.developers.google.com/> to see
++your current agreements on file or to sign a new one.
++
++You generally only need to submit a CLA once, so if you've already submitted one
++(even if it was for a different project), you probably don't need to do it
++again.
++
++## Code reviews
++
++All submissions, including submissions by project members, require review. We
++use GitHub pull requests for this purpose. Consult
++[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
++information on using pull requests.
++
++Please make sure that all the automated checks (CLA, AppVeyor, Travis) pass for
++your pull requests. Pull requests whose checks fail may be ignored.
+diff --git a/other-licenses/snappy/src/ChangeLog b/other-licenses/snappy/src/ChangeLog
+deleted file mode 100644
+--- a/other-licenses/snappy/src/ChangeLog
++++ /dev/null
+@@ -1,2468 +0,0 @@
+-commit eb66d8176b3d1f560ee012e1b488cb1540c45f88
+-Author: Steinar H. Gunderson <sesse@google.com>
+-Date:   Mon Jun 22 16:10:47 2015 +0200
+-
+-    Initialized members of SnappyArrayWriter and SnappyDecompressionValidator.
+-    These members were almost surely initialized before use by other member
+-    functions, but Coverity was warning about this. Eliminating these warnings
+-    minimizes clutter in that report and the likelihood of overlooking a real bug.
+-    
+-    A=cmumford
+-    R=jeff
+-
+-commit b2312c4c25883ab03b5110f1b006dce95f419a4f
+-Author: Steinar H. Gunderson <sesse@google.com>
+-Date:   Mon Jun 22 16:03:28 2015 +0200
+-
+-    Add support for Uncompress(source, sink). Various changes to allow
+-    Uncompress(source, sink) to get the same performance as the different
+-    variants of Uncompress to Cord/DataBuffer/String/FlatBuffer.
+-    
+-    Changes to efficiently support Uncompress(source, sink)
+-    --------
+-    
+-    a) For strings - we add support to StringByteSink to do GetAppendBuffer so we
+-       can write to it without copying.
+-    b) For flat array buffers, we do GetAppendBuffer and see if we can get a full buffer.
+-    
+-    With the above changes we get performance with ByteSource/ByteSink
+-    that is	very close to directly using flat arrays and strings.
+-    
+-    We add various benchmark cases to demonstrate that.
+-    
+-    Orthogonal change
+-    ------------------
+-    
+-    Add support for TryFastAppend() for SnappyScatteredWriter.
+-    
+-    Benchmark results are below
+-    
+-    CPU: Intel Core2 dL1:32KB dL2:4096KB
+-    Benchmark              Time(ns)    CPU(ns) Iterations
+-    -----------------------------------------------------
+-    BM_UFlat/0               109065     108996       6410 896.0MB/s  html
+-    BM_UFlat/1              1012175    1012343        691 661.4MB/s  urls
+-    BM_UFlat/2                26775      26771      26149 4.4GB/s  jpg
+-    BM_UFlat/3                48947      48940      14363 1.8GB/s  pdf
+-    BM_UFlat/4               441029     440835       1589 886.1MB/s  html4
+-    BM_UFlat/5                39861      39880      17823 588.3MB/s  cp
+-    BM_UFlat/6                18315      18300      38126 581.1MB/s  c
+-    BM_UFlat/7                 5254       5254     100000 675.4MB/s  lsp
+-    BM_UFlat/8              1568060    1567376        447 626.6MB/s  xls
+-    BM_UFlat/9               337512     337734       2073 429.5MB/s  txt1
+-    BM_UFlat/10              287269     287054       2434 415.9MB/s  txt2
+-    BM_UFlat/11              890098     890219        787 457.2MB/s  txt3
+-    BM_UFlat/12             1186593    1186863        590 387.2MB/s  txt4
+-    BM_UFlat/13              573927     573318       1000 853.7MB/s  bin
+-    BM_UFlat/14               64250      64294      10000 567.2MB/s  sum
+-    BM_UFlat/15                7301       7300      96153 552.2MB/s  man
+-    BM_UFlat/16              109617     109636       6375 1031.5MB/s  pb
+-    BM_UFlat/17              364438     364497       1921 482.3MB/s  gaviota
+-    BM_UFlatSink/0           108518     108465       6450 900.4MB/s  html
+-    BM_UFlatSink/1           991952     991997        705 675.0MB/s  urls
+-    BM_UFlatSink/2            26815      26798      26065 4.4GB/s  jpg
+-    BM_UFlatSink/3            49127      49122      14255 1.8GB/s  pdf
+-    BM_UFlatSink/4           436674     436731       1604 894.4MB/s  html4
+-    BM_UFlatSink/5            39738      39733      17345 590.5MB/s  cp
+-    BM_UFlatSink/6            18413      18416      37962 577.4MB/s  c
+-    BM_UFlatSink/7             5677       5676     100000 625.2MB/s  lsp
+-    BM_UFlatSink/8          1552175    1551026        451 633.2MB/s  xls
+-    BM_UFlatSink/9           338526     338489       2065 428.5MB/s  txt1
+-    BM_UFlatSink/10          289387     289307       2420 412.6MB/s  txt2
+-    BM_UFlatSink/11          893803     893706        783 455.4MB/s  txt3
+-    BM_UFlatSink/12         1195919    1195459        586 384.4MB/s  txt4
+-    BM_UFlatSink/13          559637     559779       1000 874.3MB/s  bin
+-    BM_UFlatSink/14           65073      65094      10000 560.2MB/s  sum
+-    BM_UFlatSink/15            7618       7614      92823 529.5MB/s  man
+-    BM_UFlatSink/16          110085     110121       6352 1027.0MB/s  pb
+-    BM_UFlatSink/17          369196     368915       1896 476.5MB/s  gaviota
+-    BM_UValidate/0            46954      46957      14899 2.0GB/s  html
+-    BM_UValidate/1           500621     500868       1000 1.3GB/s  urls
+-    BM_UValidate/2              283        283    2481447 417.2GB/s  jpg
+-    BM_UValidate/3            16230      16228      43137 5.4GB/s  pdf
+-    BM_UValidate/4           189129     189193       3701 2.0GB/s  html4
+-    
+-    A=uday
+-    R=sanjay
+-
+-commit b2ad96006741d40935db2f73194a3e489b467338
+-Author: Steinar H. Gunderson <sesse@google.com>
+-Date:   Mon Jun 22 15:48:29 2015 +0200
+-
+-    Changes to eliminate compiler warnings on MSVC
+-    
+-    This code was not compiling under Visual Studio 2013 with warnings being treated
+-    as errors. Specifically:
+-    
+-    1. Changed int -> size_t to eliminate signed/unsigned mismatch warning.
+-    2. Added some missing return values to functions.
+-    3. Inserting character instead of integer literals into strings to avoid type
+-       conversions.
+-    
+-    A=cmumford
+-    R=jeff
+-
+-commit e7a897e187e90b33f87bd9e64872cf561de9ebca
+-Author: Steinar H. Gunderson <sesse@google.com>
+-Date:   Mon Jun 22 15:45:11 2015 +0200
+-
+-    Fixed unit tests to compile under MSVC.
+-    
+-    1. Including config.h in test.
+-    2. Including windows.h before zippy-test.h.
+-    3. Removed definition of WIN32_LEAN_AND_MEAN. This caused problems in
+-       build environments that define WIN32_LEAN_AND_MEAN as our
+-       definition didn't check for prior existence. This constant is old
+-       and no longer needed anyhow.
+-    4. Disable MSVC warning 4722 since ~LogMessageCrash() never returns.
+-    
+-    A=cmumford
+-    R=jeff
+-
+-commit 86eb8b152bdb065ad11bf331a9f7d65b72616acf
+-Author: Steinar H. Gunderson <sesse@google.com>
+-Date:   Mon Jun 22 15:41:30 2015 +0200
+-
+-    Change a few branch annotations that profiling found to be wrong.
+-    Overall performance is neutral or slightly positive.
+-    
+-    Westmere (64-bit, opt):
+-    
+-    Benchmark               Base (ns)  New (ns)                                Improvement
+-    --------------------------------------------------------------------------------------
+-    BM_UFlat/0                  73798     71464  1.3GB/s  html                    +3.3%
+-    BM_UFlat/1                 715223    704318  953.5MB/s  urls                  +1.5%
+-    BM_UFlat/2                   8137      8871  13.0GB/s  jpg                    -8.3%
+-    BM_UFlat/3                    200       204  935.5MB/s  jpg_200               -2.0%
+-    BM_UFlat/4                  21627     21281  4.5GB/s  pdf                     +1.6%
+-    BM_UFlat/5                 302806    290350  1.3GB/s  html4                   +4.3%
+-    BM_UFlat/6                 218920    219017  664.1MB/s  txt1                  -0.0%
+-    BM_UFlat/7                 190437    191212  626.1MB/s  txt2                  -0.4%
+-    BM_UFlat/8                 584192    580484  703.4MB/s  txt3                  +0.6%
+-    BM_UFlat/9                 776537    779055  591.6MB/s  txt4                  -0.3%
+-    BM_UFlat/10                 76056     72606  1.5GB/s  pb                      +4.8%
+-    BM_UFlat/11                235962    239043  737.4MB/s  gaviota               -1.3%
+-    BM_UFlat/12                 28049     28000  840.1MB/s  cp                    +0.2%
+-    BM_UFlat/13                 12225     12021  886.9MB/s  c                     +1.7%
+-    BM_UFlat/14                  3362      3544  1004.0MB/s  lsp                  -5.1%
+-    BM_UFlat/15                937015    939206  1048.9MB/s  xls                  -0.2%
+-    BM_UFlat/16                   236       233  823.1MB/s  xls_200               +1.3%
+-    BM_UFlat/17                373170    361947  1.3GB/s  bin                     +3.1%
+-    BM_UFlat/18                   264       264  725.5MB/s  bin_200               +0.0%
+-    BM_UFlat/19                 42834     43577  839.2MB/s  sum                   -1.7%
+-    BM_UFlat/20                  4770      4736  853.6MB/s  man                   +0.7%
+-    BM_UValidate/0              39671     39944  2.4GB/s  html                    -0.7%
+-    BM_UValidate/1             443391    443391  1.5GB/s  urls                    +0.0%
+-    BM_UValidate/2                163       163  703.3GB/s  jpg                   +0.0%
+-    BM_UValidate/3                113       112  1.7GB/s  jpg_200                 +0.9%
+-    BM_UValidate/4               7555      7608  12.6GB/s  pdf                    -0.7%
+-    BM_ZFlat/0                 157616    157568  621.5MB/s  html (22.31 %)        +0.0%
+-    BM_ZFlat/1                1997290   2014486  333.4MB/s  urls (47.77 %)        -0.9%
+-    BM_ZFlat/2                  23035     22237  5.2GB/s  jpg (99.95 %)           +3.6%
+-    BM_ZFlat/3                    539       540  354.5MB/s  jpg_200 (73.00 %)     -0.2%
+-    BM_ZFlat/4                  80709     81369  1.2GB/s  pdf (81.85 %)           -0.8%
+-    BM_ZFlat/5                 639059    639220  613.0MB/s  html4 (22.51 %)       -0.0%
+-    BM_ZFlat/6                 577203    583370  249.3MB/s  txt1 (57.87 %)        -1.1%
+-    BM_ZFlat/7                 510887    516094  232.0MB/s  txt2 (61.93 %)        -1.0%
+-    BM_ZFlat/8                1535843   1556973  262.2MB/s  txt3 (54.92 %)        -1.4%
+-    BM_ZFlat/9                2070068   2102380  219.3MB/s  txt4 (66.22 %)        -1.5%
+-    BM_ZFlat/10                152396    152148  745.5MB/s  pb (19.64 %)          +0.2%
+-    BM_ZFlat/11                447367    445859  395.4MB/s  gaviota (37.72 %)     +0.3%
+-    BM_ZFlat/12                 76375     76797  306.3MB/s  cp (48.12 %)          -0.5%
+-    BM_ZFlat/13                 31518     31987  333.3MB/s  c (42.40 %)           -1.5%
+-    BM_ZFlat/14                 10598     10827  328.6MB/s  lsp (48.37 %)         -2.1%
+-    BM_ZFlat/15               1782243   1802728  546.5MB/s  xls (41.23 %)         -1.1%
+-    BM_ZFlat/16                   526       539  355.0MB/s  xls_200 (78.00 %)     -2.4%
+-    BM_ZFlat/17                598141    597311  822.1MB/s  bin (18.11 %)         +0.1%
+-    BM_ZFlat/18                   121       120  1.6GB/s  bin_200 (7.50 %)        +0.8%
+-    BM_ZFlat/19                109981    112173  326.0MB/s  sum (48.96 %)         -2.0%
+-    BM_ZFlat/20                 14355     14575  277.4MB/s  man (59.36 %)         -1.5%
+-    Sum of all benchmarks    33882722  33879325                                   +0.0%
+-    
+-    Sandy Bridge (64-bit, opt):
+-    
+-    Benchmark               Base (ns)  New (ns)                                Improvement
+-    --------------------------------------------------------------------------------------
+-    BM_UFlat/0                  43764     41600  2.3GB/s  html                    +5.2%
+-    BM_UFlat/1                 517990    507058  1.3GB/s  urls                    +2.2%
+-    BM_UFlat/2                   6625      5529  20.8GB/s  jpg                   +19.8%
+-    BM_UFlat/3                    154       155  1.2GB/s  jpg_200                 -0.6%
+-    BM_UFlat/4                  12795     11747  8.1GB/s  pdf                     +8.9%
+-    BM_UFlat/5                 200335    193413  2.0GB/s  html4                   +3.6%
+-    BM_UFlat/6                 156574    156426  929.2MB/s  txt1                  +0.1%
+-    BM_UFlat/7                 137574    137464  870.4MB/s  txt2                  +0.1%
+-    BM_UFlat/8                 422551    421603  967.4MB/s  txt3                  +0.2%
+-    BM_UFlat/9                 577749    578985  795.6MB/s  txt4                  -0.2%
+-    BM_UFlat/10                 42329     39362  2.8GB/s  pb                      +7.5%
+-    BM_UFlat/11                170615    169751  1037.9MB/s  gaviota              +0.5%
+-    BM_UFlat/12                 12800     12719  1.8GB/s  cp                      +0.6%
+-    BM_UFlat/13                  6585      6579  1.6GB/s  c                       +0.1%
+-    BM_UFlat/14                  2066      2044  1.7GB/s  lsp                     +1.1%
+-    BM_UFlat/15                750861    746911  1.3GB/s  xls                     +0.5%
+-    BM_UFlat/16                   188       192  996.0MB/s  xls_200               -2.1%
+-    BM_UFlat/17                271622    264333  1.8GB/s  bin                     +2.8%
+-    BM_UFlat/18                   208       207  923.6MB/s  bin_200               +0.5%
+-    BM_UFlat/19                 24667     24845  1.4GB/s  sum                     -0.7%
+-    BM_UFlat/20                  2663      2662  1.5GB/s  man                     +0.0%
+-    BM_ZFlat/0                 115173    115624  846.5MB/s  html (22.31 %)        -0.4%
+-    BM_ZFlat/1                1530331   1537769  436.5MB/s  urls (47.77 %)        -0.5%
+-    BM_ZFlat/2                  17503     17013  6.8GB/s  jpg (99.95 %)           +2.9%
+-    BM_ZFlat/3                    385       385  496.3MB/s  jpg_200 (73.00 %)     +0.0%
+-    BM_ZFlat/4                  61753     61540  1.6GB/s  pdf (81.85 %)           +0.3%
+-    BM_ZFlat/5                 484806    483356  810.1MB/s  html4 (22.51 %)       +0.3%
+-    BM_ZFlat/6                 464143    467609  310.9MB/s  txt1 (57.87 %)        -0.7%
+-    BM_ZFlat/7                 410315    413319  289.5MB/s  txt2 (61.93 %)        -0.7%
+-    BM_ZFlat/8                1244082   1249381  326.5MB/s  txt3 (54.92 %)        -0.4%
+-    BM_ZFlat/9                1696914   1709685  269.4MB/s  txt4 (66.22 %)        -0.7%
+-    BM_ZFlat/10                104148    103372  1096.7MB/s  pb (19.64 %)         +0.8%
+-    BM_ZFlat/11                363522    359722  489.8MB/s  gaviota (37.72 %)     +1.1%
+-    BM_ZFlat/12                 47021     50095  469.3MB/s  cp (48.12 %)          -6.1%
+-    BM_ZFlat/13                 16888     16985  627.4MB/s  c (42.40 %)           -0.6%
+-    BM_ZFlat/14                  5496      5469  650.3MB/s  lsp (48.37 %)         +0.5%
+-    BM_ZFlat/15               1460713   1448760  679.5MB/s  xls (41.23 %)         +0.8%
+-    BM_ZFlat/16                   387       393  486.8MB/s  xls_200 (78.00 %)     -1.5%
+-    BM_ZFlat/17                457654    451462  1086.6MB/s  bin (18.11 %)        +1.4%
+-    BM_ZFlat/18                    97        87  2.1GB/s  bin_200 (7.50 %)       +11.5%
+-    BM_ZFlat/19                 77904     80924  451.7MB/s  sum (48.96 %)         -3.7%
+-    BM_ZFlat/20                  7648      7663  527.1MB/s  man (59.36 %)         -0.2%
+-    Sum of all benchmarks    25493635  25482069                                   +0.0%
+-    
+-    A=dehao
+-    R=sesse
+-
+-commit 11ccdfb868387e56d845766d89ddab9d489c4128
+-Author: Steinar H. Gunderson <sesse@google.com>
+-Date:   Mon Jun 22 16:07:58 2015 +0200
+-
+-    Sync with various Google-internal changes.
+-    
+-    Should not mean much for the open-source version.
+-
+-commit 22acaf438ed93ab21a2ff1919d173206798b996e
+-Author: Steinar H. Gunderson <sesse@google.com>
+-Date:   Mon Jun 22 15:39:08 2015 +0200
+-
+-    Change some internal path names.
+-    
+-    This is mostly to sync up with some changes from Google's internal
+-    repositories; it does not affect the open-source distribution in itself.
+-
+-commit 1ff9be9b8fafc8528ca9e055646f5932aa5db9c4
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Feb 28 11:18:07 2014 +0000
+-
+-    Release Snappy 1.1.2.
+-    
+-    R=jeff
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@84 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 19690d78e83f8963f497585031efa3d9ca66b807
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Feb 19 10:31:49 2014 +0000
+-
+-    Fix public issue 82: Stop distributing benchmark data files that have
+-    unclear or unsuitable licensing.
+-    
+-    In general, we replace the files we can with liberally licensed data,
+-    and remove all the others (in particular all the parts of the Canterbury
+-    corpus that are not clearly in the public domain). The replacements
+-    do not always have the exact same characteristics as the original ones,
+-    but they are more than good enough to be useful for benchmarking.
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@83 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit f82bff66afe0de4c9ae22f8c4ef84e3c2233e799
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Oct 25 13:31:27 2013 +0000
+-
+-    Add support for padding in the Snappy framed format.
+-    
+-    This is specifically motivated by DICOM's demands that embedded data
+-    must be of an even number of bytes, but could in principle be used for
+-    any sort of padding/alignment needed.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@82 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit eeead8dc38ea359f027fb6e89f345448e8e9d723
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Oct 15 15:21:31 2013 +0000
+-
+-    Release Snappy 1.1.1.
+-    
+-    R=jeff
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@81 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 6bc39e24c76adbbff26ae629fafbf7dfc795f554
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Aug 13 12:55:00 2013 +0000
+-
+-    Add autoconf tests for size_t and ssize_t. Sort-of resolves public issue 79;
+-    it would solve the problem if MSVC typically used autoconf. However, it gives
+-    a natural place (config.h) to put the typedef even for MSVC.
+-    
+-    R=jsbell
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@80 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 7c3c01df77e191ad1f8377448961fe88db2802e9
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Mon Jul 29 11:06:44 2013 +0000
+-
+-    When we compare the number of bytes produced with the offset for a
+-    backreference, make the signedness of the bytes produced clear,
+-    by sticking it into a size_t. This avoids a signed/unsigned compare
+-    warning from MSVC (public issue 71), and also is slightly clearer.
+-    
+-    Since the line is now so long the explanatory comment about the -1u
+-    trick has to go somewhere else anyway, I used the opportunity to
+-    explain it in slightly more detail.
+-    
+-    This is a purely stylistic change; the emitted assembler from GCC
+-    is identical.
+-    
+-    R=jeff
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@79 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 2f0aaf8631d8fb2475ca1a6687c181efb14ed286
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Sun Jun 30 19:24:03 2013 +0000
+-
+-    In the fast path for decompressing literals, instead of checking
+-    whether there's 16 bytes free and then checking right afterwards
+-    (when having subtracted the literal size) that there are now
+-    5 bytes free, just check once for 21 bytes. This skips a compare
+-    and a branch; although it is easily predictable, it is still
+-    a few cycles on a fast path that we would like to get rid of.
+-    
+-    Benchmarking this yields very confusing results. On open-source
+-    GCC 4.8.1 on Haswell, we get exactly the expected results; the
+-    benchmarks where we hit the fast path for literals (in particular
+-    the two HTML benchmarks and the protobuf benchmark) give very nice
+-    speedups, and the others are not really affected.
+-    
+-    However, benchmarks with Google's GCC branch on other hardware
+-    is much less clear. It seems that we have a weak loss in some cases
+-    (and the win for the “typical” win cases are not nearly as clear),
+-    but that it depends on microarchitecture and plain luck in how we run
+-    the benchmark. Looking at the generated assembler, it seems that
+-    the removal of the if causes other large-scale changes in how the
+-    function is laid out, which makes it likely that this is just bad luck.
+-    
+-    Thus, we should keep this change, even though its exact current impact is
+-    unclear; it's a sensible change per se, and dropping it on the basis of
+-    microoptimization for a given compiler (or even branch of a compiler)
+-    would seem like a bad strategy in the long run.
+-    
+-    Microbenchmark results (all in 64-bit, opt mode):
+-    
+-      Nehalem, Google GCC:
+-    
+-      Benchmark                Base (ns)  New (ns)                       Improvement
+-      ------------------------------------------------------------------------------
+-      BM_UFlat/0                   76747     75591  1.3GB/s  html           +1.5%
+-      BM_UFlat/1                  765756    757040  886.3MB/s  urls         +1.2%
+-      BM_UFlat/2                   10867     10893  10.9GB/s  jpg           -0.2%
+-      BM_UFlat/3                     124       131  1.4GB/s  jpg_200        -5.3%
+-      BM_UFlat/4                   31663     31596  2.8GB/s  pdf            +0.2%
+-      BM_UFlat/5                  314162    308176  1.2GB/s  html4          +1.9%
+-      BM_UFlat/6                   29668     29746  790.6MB/s  cp           -0.3%
+-      BM_UFlat/7                   12958     13386  796.4MB/s  c            -3.2%
+-      BM_UFlat/8                    3596      3682  966.0MB/s  lsp          -2.3%
+-      BM_UFlat/9                 1019193   1033493  953.3MB/s  xls          -1.4%
+-      BM_UFlat/10                    239       247  775.3MB/s  xls_200      -3.2%
+-      BM_UFlat/11                 236411    240271  606.9MB/s  txt1         -1.6%
+-      BM_UFlat/12                 206639    209768  571.2MB/s  txt2         -1.5%
+-      BM_UFlat/13                 627803    635722  641.4MB/s  txt3         -1.2%
+-      BM_UFlat/14                 845932    857816  538.2MB/s  txt4         -1.4%
+-      BM_UFlat/15                 402107    391670  1.2GB/s  bin            +2.7%
+-      BM_UFlat/16                    283       279  683.6MB/s  bin_200      +1.4%
+-      BM_UFlat/17                  46070     46815  781.5MB/s  sum          -1.6%
+-      BM_UFlat/18                   5053      5163  782.0MB/s  man          -2.1%
+-      BM_UFlat/19                  79721     76581  1.4GB/s  pb             +4.1%
+-      BM_UFlat/20                 251158    252330  697.5MB/s  gaviota      -0.5%
+-      Sum of all benchmarks      4966150   4980396                          -0.3%
+-    
+-    
+-      Sandy Bridge, Google GCC:
+-    
+-      Benchmark                Base (ns)  New (ns)                       Improvement
+-      ------------------------------------------------------------------------------
+-      BM_UFlat/0                   42850     42182  2.3GB/s  html           +1.6%
+-      BM_UFlat/1                  525660    515816  1.3GB/s  urls           +1.9%
+-      BM_UFlat/2                    7173      7283  16.3GB/s  jpg           -1.5%
+-      BM_UFlat/3                      92        91  2.1GB/s  jpg_200        +1.1%
+-      BM_UFlat/4                   15147     14872  5.9GB/s  pdf            +1.8%
+-      BM_UFlat/5                  199936    192116  2.0GB/s  html4          +4.1%
+-      BM_UFlat/6                   12796     12443  1.8GB/s  cp             +2.8%
+-      BM_UFlat/7                    6588      6400  1.6GB/s  c              +2.9%
+-      BM_UFlat/8                    2010      1951  1.8GB/s  lsp            +3.0%
+-      BM_UFlat/9                  761124    763049  1.3GB/s  xls            -0.3%
+-      BM_UFlat/10                    186       189  1016.1MB/s  xls_200     -1.6%
+-      BM_UFlat/11                 159354    158460  918.6MB/s  txt1         +0.6%
+-      BM_UFlat/12                 139732    139950  856.1MB/s  txt2         -0.2%
+-      BM_UFlat/13                 429917    425027  961.7MB/s  txt3         +1.2%
+-      BM_UFlat/14                 585255    587324  785.8MB/s  txt4         -0.4%
+-      BM_UFlat/15                 276186    266173  1.8GB/s  bin            +3.8%
+-      BM_UFlat/16                    205       207  925.5MB/s  bin_200      -1.0%
+-      BM_UFlat/17                  24925     24935  1.4GB/s  sum            -0.0%
+-      BM_UFlat/18                   2632      2576  1.5GB/s  man            +2.2%
+-      BM_UFlat/19                  40546     39108  2.8GB/s  pb             +3.7%
+-      BM_UFlat/20                 175803    168209  1048.9MB/s  gaviota     +4.5%
+-      Sum of all benchmarks      3408117   3368361                          +1.2%
+-    
+-    
+-      Haswell, upstream GCC 4.8.1:
+-    
+-      Benchmark                Base (ns)  New (ns)                       Improvement
+-      ------------------------------------------------------------------------------
+-      BM_UFlat/0                   46308     40641  2.3GB/s  html          +13.9%
+-      BM_UFlat/1                  513385    514706  1.3GB/s  urls           -0.3%
+-      BM_UFlat/2                    6197      6151  19.2GB/s  jpg           +0.7%
+-      BM_UFlat/3                      61        61  3.0GB/s  jpg_200        +0.0%
+-      BM_UFlat/4                   13551     13429  6.5GB/s  pdf            +0.9%
+-      BM_UFlat/5                  198317    190243  2.0GB/s  html4          +4.2%
+-      BM_UFlat/6                   14768     12560  1.8GB/s  cp            +17.6%
+-      BM_UFlat/7                    6453      6447  1.6GB/s  c              +0.1%
+-      BM_UFlat/8                    1991      1980  1.8GB/s  lsp            +0.6%
+-      BM_UFlat/9                  766947    770424  1.2GB/s  xls            -0.5%
+-      BM_UFlat/10                    170       169  1.1GB/s  xls_200        +0.6%
+-      BM_UFlat/11                 164350    163554  888.7MB/s  txt1         +0.5%
+-      BM_UFlat/12                 145444    143830  832.1MB/s  txt2         +1.1%
+-      BM_UFlat/13                 437849    438413  929.2MB/s  txt3         -0.1%
+-      BM_UFlat/14                 603587    605309  759.8MB/s  txt4         -0.3%
+-      BM_UFlat/15                 249799    248067  1.9GB/s  bin            +0.7%
+-      BM_UFlat/16                    191       188  1011.4MB/s  bin_200     +1.6%
+-      BM_UFlat/17                  26064     24778  1.4GB/s  sum            +5.2%
+-      BM_UFlat/18                   2620      2601  1.5GB/s  man            +0.7%
+-      BM_UFlat/19                  44551     37373  3.0GB/s  pb            +19.2%
+-      BM_UFlat/20                 165408    164584  1.0GB/s  gaviota        +0.5%
+-      Sum of all benchmarks      3408011   3385508                          +0.7%
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@78 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 062bf544a61107db730b6d08cb0b159c4dd9b24c
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Jun 14 21:42:26 2013 +0000
+-
+-    Make the two IncrementalCopy* functions take in an ssize_t instead of a len,
+-    in order to avoid having to do 32-to-64-bit signed conversions on a hot path
+-    during decompression. (Also fixes some MSVC warnings, mentioned in public
+-    issue 75, but more of those remain.) They cannot be size_t because we expect
+-    them to go negative and test for that.
+-    
+-    This saves a few movzwl instructions, yielding ~2% speedup in decompression.
+-    
+-    
+-    Sandy Bridge:
+-    
+-    Benchmark                          Base (ns)  New (ns)                                Improvement
+-    -------------------------------------------------------------------------------------------------
+-    BM_UFlat/0                             48009     41283  2.3GB/s  html                   +16.3%
+-    BM_UFlat/1                            531274    513419  1.3GB/s  urls                    +3.5%
+-    BM_UFlat/2                              7378      7062  16.8GB/s  jpg                    +4.5%
+-    BM_UFlat/3                                92        92  2.0GB/s  jpg_200                 +0.0%
+-    BM_UFlat/4                             15057     14974  5.9GB/s  pdf                     +0.6%
+-    BM_UFlat/5                            204323    193140  2.0GB/s  html4                   +5.8%
+-    BM_UFlat/6                             13282     12611  1.8GB/s  cp                      +5.3%
+-    BM_UFlat/7                              6511      6504  1.6GB/s  c                       +0.1%
+-    BM_UFlat/8                              2014      2030  1.7GB/s  lsp                     -0.8%
+-    BM_UFlat/9                            775909    768336  1.3GB/s  xls                     +1.0%
+-    BM_UFlat/10                              182       184  1043.2MB/s  xls_200              -1.1%
+-    BM_UFlat/11                           167352    161630  901.2MB/s  txt1                  +3.5%
+-    BM_UFlat/12                           147393    142246  842.8MB/s  txt2                  +3.6%
+-    BM_UFlat/13                           449960    432853  944.4MB/s  txt3                  +4.0%
+-    BM_UFlat/14                           620497    594845  775.9MB/s  txt4                  +4.3%
+-    BM_UFlat/15                           265610    267356  1.8GB/s  bin                     -0.7%
+-    BM_UFlat/16                              206       205  932.7MB/s  bin_200               +0.5%
+-    BM_UFlat/17                            25561     24730  1.4GB/s  sum                     +3.4%
+-    BM_UFlat/18                             2620      2644  1.5GB/s  man                     -0.9%
+-    BM_UFlat/19                            45766     38589  2.9GB/s  pb                     +18.6%
+-    BM_UFlat/20                           171107    169832  1039.5MB/s  gaviota              +0.8%
+-    Sum of all benchmarks                3500103   3394565                                   +3.1%
+-    
+-    
+-    Westmere:
+-    
+-    Benchmark                          Base (ns)  New (ns)                                Improvement
+-    -------------------------------------------------------------------------------------------------
+-    BM_UFlat/0                             72624     71526  1.3GB/s  html                    +1.5%
+-    BM_UFlat/1                            735821    722917  930.8MB/s  urls                  +1.8%
+-    BM_UFlat/2                             10450     10172  11.7GB/s  jpg                    +2.7%
+-    BM_UFlat/3                               117       117  1.6GB/s  jpg_200                 +0.0%
+-    BM_UFlat/4                             29817     29648  3.0GB/s  pdf                     +0.6%
+-    BM_UFlat/5                            297126    293073  1.3GB/s  html4                   +1.4%
+-    BM_UFlat/6                             28252     27994  842.0MB/s  cp                    +0.9%
+-    BM_UFlat/7                             12672     12391  862.1MB/s  c                     +2.3%
+-    BM_UFlat/8                              3507      3425  1040.9MB/s  lsp                  +2.4%
+-    BM_UFlat/9                           1004268    969395  1018.0MB/s  xls                  +3.6%
+-    BM_UFlat/10                              233       227  844.8MB/s  xls_200               +2.6%
+-    BM_UFlat/11                           230054    224981  647.8MB/s  txt1                  +2.3%
+-    BM_UFlat/12                           201229    196447  610.5MB/s  txt2                  +2.4%
+-    BM_UFlat/13                           609547    596761  685.3MB/s  txt3                  +2.1%
+-    BM_UFlat/14                           824362    804821  573.8MB/s  txt4                  +2.4%
+-    BM_UFlat/15                           371095    374899  1.3GB/s  bin                     -1.0%
+-    BM_UFlat/16                              267       267  717.8MB/s  bin_200               +0.0%
+-    BM_UFlat/17                            44623     43828  835.9MB/s  sum                   +1.8%
+-    BM_UFlat/18                             5077      4815  841.0MB/s  man                   +5.4%
+-    BM_UFlat/19                            74964     73210  1.5GB/s  pb                      +2.4%
+-    BM_UFlat/20                           237987    236745  746.0MB/s  gaviota               +0.5%
+-    Sum of all benchmarks                4794092   4697659                                   +2.1%
+-    
+-    
+-    Istanbul:
+-    
+-    Benchmark                          Base (ns)  New (ns)                                Improvement
+-    -------------------------------------------------------------------------------------------------
+-    BM_UFlat/0                             98614     96376  1020.4MB/s  html                 +2.3%
+-    BM_UFlat/1                            963740    953241  707.2MB/s  urls                  +1.1%
+-    BM_UFlat/2                             25042     24769  4.8GB/s  jpg                     +1.1%
+-    BM_UFlat/3                               180       180  1065.6MB/s  jpg_200              +0.0%
+-    BM_UFlat/4                             45942     45403  1.9GB/s  pdf                     +1.2%
+-    BM_UFlat/5                            400135    390226  1008.2MB/s  html4                +2.5%
+-    BM_UFlat/6                             37768     37392  631.9MB/s  cp                    +1.0%
+-    BM_UFlat/7                             18585     18200  588.2MB/s  c                     +2.1%
+-    BM_UFlat/8                              5751      5690  627.7MB/s  lsp                   +1.1%
+-    BM_UFlat/9                           1543154   1542209  641.4MB/s  xls                   +0.1%
+-    BM_UFlat/10                              381       388  494.6MB/s  xls_200               -1.8%
+-    BM_UFlat/11                           339715    331973  440.1MB/s  txt1                  +2.3%
+-    BM_UFlat/12                           294807    289418  415.4MB/s  txt2                  +1.9%
+-    BM_UFlat/13                           906160    884094  463.3MB/s  txt3                  +2.5%
+-    BM_UFlat/14                          1224221   1198435  386.1MB/s  txt4                  +2.2%
+-    BM_UFlat/15                           516277    502923  979.5MB/s  bin                   +2.7%
+-    BM_UFlat/16                              405       402  477.2MB/s  bin_200               +0.7%
+-    BM_UFlat/17                            61640     60621  605.6MB/s  sum                   +1.7%
+-    BM_UFlat/18                             7326      7383  549.5MB/s  man                   -0.8%
+-    BM_UFlat/19                            94720     92653  1.2GB/s  pb                      +2.2%
+-    BM_UFlat/20                           360435    346687  510.6MB/s  gaviota               +4.0%
+-    Sum of all benchmarks                6944998   6828663                                   +1.7%
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@77 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 328aafa1980824a9afdcd50edc30d9d5157e417f
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Jun 13 16:19:52 2013 +0000
+-
+-    Add support for uncompressing to iovecs (scatter I/O).
+-    Windows does not have struct iovec defined anywhere,
+-    so we define our own version that's equal to what UNIX
+-    typically has.
+-    
+-    The bulk of this patch was contributed by Mohit Aron.
+-    
+-    R=jeff
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@76 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit cd92eb0852e2339187b693eef3595a07d2276c1d
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Jun 12 19:51:15 2013 +0000
+-
+-    Some code reorganization needed for an internal change.
+-    
+-    R=fikes
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@75 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit a3e928d62bbd61b523b988c07b560253950cf73b
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Apr 9 15:33:30 2013 +0000
+-
+-    Supports truncated test data in zippy benchmark.
+-    
+-    R=sesse
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@74 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit bde324c0169763688f35ee44630a26ad1f49eec3
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Feb 5 14:36:15 2013 +0000
+-
+-    Release Snappy 1.1.0.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@73 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 8168446c7eaaa0594e1f4ca923376dcf3a2846fa
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Feb 5 14:30:05 2013 +0000
+-
+-    Make ./snappy_unittest pass without "srcdir" being defined.
+-    
+-    Previously, snappy_unittests would read from an absolute path /testdata/..;
+-    convert it to use a relative path instead.
+-    
+-    Patch from Marc-Antoine Ruel.
+-    
+-    R=maruel
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@72 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 27a0cc394950ebdad2e8d67322f0862835b10bd9
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Jan 18 12:16:36 2013 +0000
+-
+-    Increase the Zippy block size from 32 kB to 64 kB, winning ~3% density
+-    while being effectively performance neutral.
+-    
+-    The longer story about density is that we win 3-6% density on the benchmarks
+-    where this has any effect at all; many of the benchmarks (cp, c, lsp, man)
+-    are smaller than 32 kB and thus will have no effect. Binary data also seems
+-    to win little or nothing; of course, the already-compressed data wins nothing.
+-    The protobuf benchmark wins as much as ~18% depending on architecture,
+-    but I wouldn't be too sure that this is representative of protobuf data in
+-    general.
+-    
+-    As for performance, we lose a tiny amount since we get more tags (e.g., a long
+-    literal might be broken up into literal-copy-literal), but we win it back with
+-    less clearing of the hash table, and more opportunities to skip incompressible
+-    data (e.g. in the jpg benchmark). Decompression seems to get ever so slightly
+-    slower, again due to more tags. The total net change is about as close to zero
+-    as we can get, so the end effect seems to be simply more density and no
+-    real performance change.
+-    
+-    The comment about not changing kBlockSize, scary as it is, is not really
+-    relevant, since we're never going to have a block-level decompressor without
+-    explicitly marked blocks. Replace it with something more appropriate.
+-    
+-    This affects the framing format, but it's okay to change it since it basically
+-    has no users yet.
+-    
+-    
+-    Density (note that cp, c, lsp and man are all smaller than 32 kB):
+-    
+-       Benchmark         Description   Base (%)  New (%)  Improvement
+-       --------------------------------------------------------------
+-       ZFlat/0           html            22.57    22.31     +5.6%
+-       ZFlat/1           urls            50.89    47.77     +6.5%
+-       ZFlat/2           jpg             99.88    99.87     +0.0%
+-       ZFlat/3           pdf             82.13    82.07     +0.1%
+-       ZFlat/4           html4           23.55    22.51     +4.6%
+-       ZFlat/5           cp              48.12    48.12     +0.0%
+-       ZFlat/6           c               42.40    42.40     +0.0%
+-       ZFlat/7           lsp             48.37    48.37     +0.0%
+-       ZFlat/8           xls             41.34    41.23     +0.3%
+-       ZFlat/9           txt1            59.81    57.87     +3.4%
+-       ZFlat/10          txt2            64.07    61.93     +3.5%
+-       ZFlat/11          txt3            57.11    54.92     +4.0%
+-       ZFlat/12          txt4            68.35    66.22     +3.2%
+-       ZFlat/13          bin             18.21    18.11     +0.6%
+-       ZFlat/14          sum             51.88    48.96     +6.0%
+-       ZFlat/15          man             59.36    59.36     +0.0%
+-       ZFlat/16          pb              23.15    19.64    +17.9%
+-       ZFlat/17          gaviota         38.27    37.72     +1.5%
+-       Geometric mean                    45.51    44.15     +3.1%
+-    
+-    
+-    Microbenchmarks (64-bit, opt):
+-    
+-    Westmere 2.8 GHz:
+-    
+-       Benchmark                          Base (ns)  New (ns)                                Improvement
+-       -------------------------------------------------------------------------------------------------
+-       BM_UFlat/0                             75342     75027  1.3GB/s  html                    +0.4%
+-       BM_UFlat/1                            723767    744269  899.6MB/s  urls                  -2.8%
+-       BM_UFlat/2                             10072     10072  11.7GB/s  jpg                    +0.0%
+-       BM_UFlat/3                             30747     30388  2.9GB/s  pdf                     +1.2%
+-       BM_UFlat/4                            307353    306063  1.2GB/s  html4                   +0.4%
+-       BM_UFlat/5                             28593     28743  816.3MB/s  cp                    -0.5%
+-       BM_UFlat/6                             12958     12998  818.1MB/s  c                     -0.3%
+-       BM_UFlat/7                              3700      3792  935.8MB/s  lsp                   -2.4%
+-       BM_UFlat/8                            999685    999905  982.1MB/s  xls                   -0.0%
+-       BM_UFlat/9                            232954    230079  630.4MB/s  txt1                  +1.2%
+-       BM_UFlat/10                           200785    201468  592.6MB/s  txt2                  -0.3%
+-       BM_UFlat/11                           617267    610968  666.1MB/s  txt3                  +1.0%
+-       BM_UFlat/12                           821595    822475  558.7MB/s  txt4                  -0.1%
+-       BM_UFlat/13                           377097    377632  1.3GB/s  bin                     -0.1%
+-       BM_UFlat/14                            45476     45260  805.8MB/s  sum                   +0.5%
+-       BM_UFlat/15                             4985      5003  805.7MB/s  man                   -0.4%
+-       BM_UFlat/16                            80813     77494  1.4GB/s  pb                      +4.3%
+-       BM_UFlat/17                           251792    241553  727.7MB/s  gaviota               +4.2%
+-       BM_UValidate/0                         40343     40354  2.4GB/s  html                    -0.0%
+-       BM_UValidate/1                        426890    451574  1.4GB/s  urls                    -5.5%
+-       BM_UValidate/2                           187       179  661.9GB/s  jpg                   +4.5%
+-       BM_UValidate/3                         13783     13827  6.4GB/s  pdf                     -0.3%
+-       BM_UValidate/4                        162393    163335  2.3GB/s  html4                   -0.6%
+-       BM_UDataBuffer/0                       93756     93302  1046.7MB/s  html                 +0.5%
+-       BM_UDataBuffer/1                      886714    916292  730.7MB/s  urls                  -3.2%
+-       BM_UDataBuffer/2                       15861     16401  7.2GB/s  jpg                     -3.3%
+-       BM_UDataBuffer/3                       38934     39224  2.2GB/s  pdf                     -0.7%
+-       BM_UDataBuffer/4                      381008    379428  1029.5MB/s  html4                +0.4%
+-       BM_UCord/0                             92528     91098  1072.0MB/s  html                 +1.6%
+-       BM_UCord/1                            858421    885287  756.3MB/s  urls                  -3.0%
+-       BM_UCord/2                             13140     13464  8.8GB/s  jpg                     -2.4%
+-       BM_UCord/3                             39012     37773  2.3GB/s  pdf                     +3.3%
+-       BM_UCord/4                            376869    371267  1052.1MB/s  html4                +1.5%
+-       BM_UCordString/0                       75810     75303  1.3GB/s  html                    +0.7%
+-       BM_UCordString/1                      735290    753841  888.2MB/s  urls                  -2.5%
+-       BM_UCordString/2                       11945     13113  9.0GB/s  jpg                     -8.9%
+-       BM_UCordString/3                       33901     32562  2.7GB/s  pdf                     +4.1%
+-       BM_UCordString/4                      310985    309390  1.2GB/s  html4                   +0.5%
+-       BM_UCordValidate/0                     40952     40450  2.4GB/s  html                    +1.2%
+-       BM_UCordValidate/1                    433842    456531  1.4GB/s  urls                    -5.0%
+-       BM_UCordValidate/2                      1179      1173  100.8GB/s  jpg                   +0.5%
+-       BM_UCordValidate/3                     14481     14392  6.1GB/s  pdf                     +0.6%
+-       BM_UCordValidate/4                    164364    164151  2.3GB/s  html4                   +0.1%
+-       BM_ZFlat/0                            160610    156601  623.6MB/s  html (22.31 %)        +2.6%
+-       BM_ZFlat/1                           1995238   1993582  335.9MB/s  urls (47.77 %)        +0.1%
+-       BM_ZFlat/2                             30133     24983  4.7GB/s  jpg (99.87 %)          +20.6%
+-       BM_ZFlat/3                             74453     73128  1.2GB/s  pdf (82.07 %)           +1.8%
+-       BM_ZFlat/4                            647674    633729  616.4MB/s  html4 (22.51 %)       +2.2%
+-       BM_ZFlat/5                             76259     76090  308.4MB/s  cp (48.12 %)          +0.2%
+-       BM_ZFlat/6                             31106     31084  342.1MB/s  c (42.40 %)           +0.1%
+-       BM_ZFlat/7                             10507     10443  339.8MB/s  lsp (48.37 %)         +0.6%
+-       BM_ZFlat/8                           1811047   1793325  547.6MB/s  xls (41.23 %)         +1.0%
+-       BM_ZFlat/9                            597903    581793  249.3MB/s  txt1 (57.87 %)        +2.8%
+-       BM_ZFlat/10                           525320    514522  232.0MB/s  txt2 (61.93 %)        +2.1%
+-       BM_ZFlat/11                          1596591   1551636  262.3MB/s  txt3 (54.92 %)        +2.9%
+-       BM_ZFlat/12                          2134523   2094033  219.5MB/s  txt4 (66.22 %)        +1.9%
+-       BM_ZFlat/13                           593024    587869  832.6MB/s  bin (18.11 %)         +0.9%
+-       BM_ZFlat/14                           114746    110666  329.5MB/s  sum (48.96 %)         +3.7%
+-       BM_ZFlat/15                            14376     14485  278.3MB/s  man (59.36 %)         -0.8%
+-       BM_ZFlat/16                           167908    150070  753.6MB/s  pb (19.64 %)         +11.9%
+-       BM_ZFlat/17                           460228    442253  397.5MB/s  gaviota (37.72 %)     +4.1%
+-       BM_ZCord/0                            164896    160241  609.4MB/s  html                  +2.9%
+-       BM_ZCord/1                           2070239   2043492  327.7MB/s  urls                  +1.3%
+-       BM_ZCord/2                             54402     47002  2.5GB/s  jpg                    +15.7%
+-       BM_ZCord/3                             85871     83832  1073.1MB/s  pdf                  +2.4%
+-       BM_ZCord/4                            664078    648825  602.0MB/s  html4                 +2.4%
+-       BM_ZDataBuffer/0                      174874    172549  566.0MB/s  html                  +1.3%
+-       BM_ZDataBuffer/1                     2134410   2139173  313.0MB/s  urls                  -0.2%
+-       BM_ZDataBuffer/2                       71911     69551  1.7GB/s  jpg                     +3.4%
+-       BM_ZDataBuffer/3                       98236     99727  902.1MB/s  pdf                   -1.5%
+-       BM_ZDataBuffer/4                      710776    699104  558.8MB/s  html4                 +1.7%
+-       Sum of all benchmarks               27358908  27200688                                   +0.6%
+-    
+-    
+-    Sandy Bridge 2.6 GHz:
+-    
+-       Benchmark                          Base (ns)  New (ns)                                Improvement
+-       -------------------------------------------------------------------------------------------------
+-       BM_UFlat/0                             49356     49018  1.9GB/s  html                    +0.7%
+-       BM_UFlat/1                            516764    531955  1.2GB/s  urls                    -2.9%
+-       BM_UFlat/2                              6982      7304  16.2GB/s  jpg                    -4.4%
+-       BM_UFlat/3                             15285     15598  5.6GB/s  pdf                     -2.0%
+-       BM_UFlat/4                            206557    206669  1.8GB/s  html4                   -0.1%
+-       BM_UFlat/5                             13681     13567  1.7GB/s  cp                      +0.8%
+-       BM_UFlat/6                              6571      6592  1.6GB/s  c                       -0.3%
+-       BM_UFlat/7                              2008      1994  1.7GB/s  lsp                     +0.7%
+-       BM_UFlat/8                            775700    773286  1.2GB/s  xls                     +0.3%
+-       BM_UFlat/9                            165578    164480  881.8MB/s  txt1                  +0.7%
+-       BM_UFlat/10                           143707    144139  828.2MB/s  txt2                  -0.3%
+-       BM_UFlat/11                           443026    436281  932.8MB/s  txt3                  +1.5%
+-       BM_UFlat/12                           603129    595856  771.2MB/s  txt4                  +1.2%
+-       BM_UFlat/13                           271682    270450  1.8GB/s  bin                     +0.5%
+-       BM_UFlat/14                            26200     25666  1.4GB/s  sum                     +2.1%
+-       BM_UFlat/15                             2620      2608  1.5GB/s  man                     +0.5%
+-       BM_UFlat/16                            48908     47756  2.3GB/s  pb                      +2.4%
+-       BM_UFlat/17                           174638    170346  1031.9MB/s  gaviota              +2.5%
+-       BM_UValidate/0                         31922     31898  3.0GB/s  html                    +0.1%
+-       BM_UValidate/1                        341265    363554  1.8GB/s  urls                    -6.1%
+-       BM_UValidate/2                           160       151  782.8GB/s  jpg                   +6.0%
+-       BM_UValidate/3                         10402     10380  8.5GB/s  pdf                     +0.2%
+-       BM_UValidate/4                        129490    130587  2.9GB/s  html4                   -0.8%
+-       BM_UDataBuffer/0                       59383     58736  1.6GB/s  html                    +1.1%
+-       BM_UDataBuffer/1                      619222    637786  1049.8MB/s  urls                 -2.9%
+-       BM_UDataBuffer/2                       10775     11941  9.9GB/s  jpg                     -9.8%
+-       BM_UDataBuffer/3                       18002     17930  4.9GB/s  pdf                     +0.4%
+-       BM_UDataBuffer/4                      259182    259306  1.5GB/s  html4                   -0.0%
+-       BM_UCord/0                             59379     57814  1.6GB/s  html                    +2.7%
+-       BM_UCord/1                            598456    615162  1088.4MB/s  urls                 -2.7%
+-       BM_UCord/2                              8519      8628  13.7GB/s  jpg                    -1.3%
+-       BM_UCord/3                             18123     17537  5.0GB/s  pdf                     +3.3%
+-       BM_UCord/4                            252375    252331  1.5GB/s  html4                   +0.0%
+-       BM_UCordString/0                       49494     49790  1.9GB/s  html                    -0.6%
+-       BM_UCordString/1                      524659    541803  1.2GB/s  urls                    -3.2%
+-       BM_UCordString/2                        8206      8354  14.2GB/s  jpg                    -1.8%
+-       BM_UCordString/3                       17235     16537  5.3GB/s  pdf                     +4.2%
+-       BM_UCordString/4                      210188    211072  1.8GB/s  html4                   -0.4%
+-       BM_UCordValidate/0                     31956     31587  3.0GB/s  html                    +1.2%
+-       BM_UCordValidate/1                    340828    362141  1.8GB/s  urls                    -5.9%
+-       BM_UCordValidate/2                       783       744  158.9GB/s  jpg                   +5.2%
+-       BM_UCordValidate/3                     10543     10462  8.4GB/s  pdf                     +0.8%
+-       BM_UCordValidate/4                    130150    129789  2.9GB/s  html4                   +0.3%
+-       BM_ZFlat/0                            113873    111200  878.2MB/s  html (22.31 %)        +2.4%
+-       BM_ZFlat/1                           1473023   1489858  449.4MB/s  urls (47.77 %)        -1.1%
+-       BM_ZFlat/2                             23569     19486  6.1GB/s  jpg (99.87 %)          +21.0%
+-       BM_ZFlat/3                             49178     48046  1.8GB/s  pdf (82.07 %)           +2.4%
+-       BM_ZFlat/4                            475063    469394  832.2MB/s  html4 (22.51 %)       +1.2%
+-       BM_ZFlat/5                             46910     46816  501.2MB/s  cp (48.12 %)          +0.2%
+-       BM_ZFlat/6                             16883     16916  628.6MB/s  c (42.40 %)           -0.2%
+-       BM_ZFlat/7                              5381      5447  651.5MB/s  lsp (48.37 %)         -1.2%
+-       BM_ZFlat/8                           1466870   1473861  666.3MB/s  xls (41.23 %)         -0.5%
+-       BM_ZFlat/9                            468006    464101  312.5MB/s  txt1 (57.87 %)        +0.8%
+-       BM_ZFlat/10                           408157    408957  291.9MB/s  txt2 (61.93 %)        -0.2%
+-       BM_ZFlat/11                          1253348   1232910  330.1MB/s  txt3 (54.92 %)        +1.7%
+-       BM_ZFlat/12                          1702373   1702977  269.8MB/s  txt4 (66.22 %)        -0.0%
+-       BM_ZFlat/13                           439792    438557  1116.0MB/s  bin (18.11 %)        +0.3%
+-       BM_ZFlat/14                            80766     78851  462.5MB/s  sum (48.96 %)         +2.4%
+-       BM_ZFlat/15                             7420      7542  534.5MB/s  man (59.36 %)         -1.6%
+-       BM_ZFlat/16                           112043    100126  1.1GB/s  pb (19.64 %)           +11.9%
+-       BM_ZFlat/17                           368877    357703  491.4MB/s  gaviota (37.72 %)     +3.1%
+-       BM_ZCord/0                            116402    113564  859.9MB/s  html                  +2.5%
+-       BM_ZCord/1                           1507156   1519911  440.5MB/s  urls                  -0.8%
+-       BM_ZCord/2                             39860     33686  3.5GB/s  jpg                    +18.3%
+-       BM_ZCord/3                             56211     54694  1.6GB/s  pdf                     +2.8%
+-       BM_ZCord/4                            485594    479212  815.1MB/s  html4                 +1.3%
+-       BM_ZDataBuffer/0                      123185    121572  803.3MB/s  html                  +1.3%
+-       BM_ZDataBuffer/1                     1569111   1589380  421.3MB/s  urls                  -1.3%
+-       BM_ZDataBuffer/2                       53143     49556  2.4GB/s  jpg                     +7.2%
+-       BM_ZDataBuffer/3                       65725     66826  1.3GB/s  pdf                     -1.6%
+-       BM_ZDataBuffer/4                      517871    514750  758.9MB/s  html4                 +0.6%
+-       Sum of all benchmarks               20258879  20315484                                   -0.3%
+-    
+-    
+-    AMD Istanbul 2.4 GHz:
+-    
+-       Benchmark                          Base (ns)  New (ns)                                Improvement
+-       -------------------------------------------------------------------------------------------------
+-       BM_UFlat/0                             97120     96585  1011.1MB/s  html                 +0.6%
+-       BM_UFlat/1                            917473    948016  706.3MB/s  urls                  -3.2%
+-       BM_UFlat/2                             21496     23938  4.9GB/s  jpg                    -10.2%
+-       BM_UFlat/3                             44751     45639  1.9GB/s  pdf                     -1.9%
+-       BM_UFlat/4                            391950    391413  998.0MB/s  html4                 +0.1%
+-       BM_UFlat/5                             37366     37201  630.7MB/s  cp                    +0.4%
+-       BM_UFlat/6                             18350     18318  580.5MB/s  c                     +0.2%
+-       BM_UFlat/7                              5672      5661  626.9MB/s  lsp                   +0.2%
+-       BM_UFlat/8                           1533390   1529441  642.1MB/s  xls                   +0.3%
+-       BM_UFlat/9                            335477    336553  431.0MB/s  txt1                  -0.3%
+-       BM_UFlat/10                           285140    292080  408.7MB/s  txt2                  -2.4%
+-       BM_UFlat/11                           888507    894758  454.9MB/s  txt3                  -0.7%
+-       BM_UFlat/12                          1187643   1210928  379.5MB/s  txt4                  -1.9%
+-       BM_UFlat/13                           493717    507447  964.5MB/s  bin                   -2.7%
+-       BM_UFlat/14                            61740     60870  599.1MB/s  sum                   +1.4%
+-       BM_UFlat/15                             7211      7187  560.9MB/s  man                   +0.3%
+-       BM_UFlat/16                            97435     93100  1.2GB/s  pb                      +4.7%
+-       BM_UFlat/17                           362662    356395  493.2MB/s  gaviota               +1.8%
+-       BM_UValidate/0                         47475     47118  2.0GB/s  html                    +0.8%
+-       BM_UValidate/1                        501304    529741  1.2GB/s  urls                    -5.4%
+-       BM_UValidate/2                           276       243  486.2GB/s  jpg                  +13.6%
+-       BM_UValidate/3                         16361     16261  5.4GB/s  pdf                     +0.6%
+-       BM_UValidate/4                        190741    190353  2.0GB/s  html4                   +0.2%
+-       BM_UDataBuffer/0                      111080    109771  889.6MB/s  html                  +1.2%
+-       BM_UDataBuffer/1                     1051035   1085999  616.5MB/s  urls                  -3.2%
+-       BM_UDataBuffer/2                       25801     25463  4.6GB/s  jpg                     +1.3%
+-       BM_UDataBuffer/3                       50493     49946  1.8GB/s  pdf                     +1.1%
+-       BM_UDataBuffer/4                      447258    444138  879.5MB/s  html4                 +0.7%
+-       BM_UCord/0                            109350    107909  905.0MB/s  html                  +1.3%
+-       BM_UCord/1                           1023396   1054964  634.7MB/s  urls                  -3.0%
+-       BM_UCord/2                             25292     24371  4.9GB/s  jpg                     +3.8%
+-       BM_UCord/3                             48955     49736  1.8GB/s  pdf                     -1.6%
+-       BM_UCord/4                            440452    437331  893.2MB/s  html4                 +0.7%
+-       BM_UCordString/0                       98511     98031  996.2MB/s  html                  +0.5%
+-       BM_UCordString/1                      933230    963495  694.9MB/s  urls                  -3.1%
+-       BM_UCordString/2                       23311     24076  4.9GB/s  jpg                     -3.2%
+-       BM_UCordString/3                       45568     46196  1.9GB/s  pdf                     -1.4%
+-       BM_UCordString/4                      397791    396934  984.1MB/s  html4                 +0.2%
+-       BM_UCordValidate/0                     47537     46921  2.0GB/s  html                    +1.3%
+-       BM_UCordValidate/1                    505071    532716  1.2GB/s  urls                    -5.2%
+-       BM_UCordValidate/2                      1663      1621  72.9GB/s  jpg                    +2.6%
+-       BM_UCordValidate/3                     16890     16926  5.2GB/s  pdf                     -0.2%
+-       BM_UCordValidate/4                    192365    191984  2.0GB/s  html4                   +0.2%
+-       BM_ZFlat/0                            184708    179103  545.3MB/s  html (22.31 %)        +3.1%
+-       BM_ZFlat/1                           2293864   2302950  290.7MB/s  urls (47.77 %)        -0.4%
+-       BM_ZFlat/2                             52852     47618  2.5GB/s  jpg (99.87 %)          +11.0%
+-       BM_ZFlat/3                            100766     96179  935.3MB/s  pdf (82.07 %)         +4.8%
+-       BM_ZFlat/4                            741220    727977  536.6MB/s  html4 (22.51 %)       +1.8%
+-       BM_ZFlat/5                             85402     85418  274.7MB/s  cp (48.12 %)          -0.0%
+-       BM_ZFlat/6                             36558     36494  291.4MB/s  c (42.40 %)           +0.2%
+-       BM_ZFlat/7                             12706     12507  283.7MB/s  lsp (48.37 %)         +1.6%
+-       BM_ZFlat/8                           2336823   2335688  420.5MB/s  xls (41.23 %)         +0.0%
+-       BM_ZFlat/9                            701804    681153  212.9MB/s  txt1 (57.87 %)        +3.0%
+-       BM_ZFlat/10                           606700    597194  199.9MB/s  txt2 (61.93 %)        +1.6%
+-       BM_ZFlat/11                          1852283   1803238  225.7MB/s  txt3 (54.92 %)        +2.7%
+-       BM_ZFlat/12                          2475527   2443354  188.1MB/s  txt4 (66.22 %)        +1.3%
+-       BM_ZFlat/13                           694497    696654  702.6MB/s  bin (18.11 %)         -0.3%
+-       BM_ZFlat/14                           136929    129855  280.8MB/s  sum (48.96 %)         +5.4%
+-       BM_ZFlat/15                            17172     17124  235.4MB/s  man (59.36 %)         +0.3%
+-       BM_ZFlat/16                           190364    171763  658.4MB/s  pb (19.64 %)         +10.8%
+-       BM_ZFlat/17                           567285    555190  316.6MB/s  gaviota (37.72 %)     +2.2%
+-       BM_ZCord/0                            193490    187031  522.1MB/s  html                  +3.5%
+-       BM_ZCord/1                           2427537   2415315  277.2MB/s  urls                  +0.5%
+-       BM_ZCord/2                             85378     81412  1.5GB/s  jpg                     +4.9%
+-       BM_ZCord/3                            121898    119419  753.3MB/s  pdf                   +2.1%
+-       BM_ZCord/4                            779564    762961  512.0MB/s  html4                 +2.2%
+-       BM_ZDataBuffer/0                      213820    207272  471.1MB/s  html                  +3.2%
+-       BM_ZDataBuffer/1                     2589010   2586495  258.9MB/s  urls                  +0.1%
+-       BM_ZDataBuffer/2                      121871    118885  1018.4MB/s  jpg                  +2.5%
+-       BM_ZDataBuffer/3                      145382    145986  616.2MB/s  pdf                   -0.4%
+-       BM_ZDataBuffer/4                      868117    852754  458.1MB/s  html4                 +1.8%
+-       Sum of all benchmarks               33771833  33744763                                   +0.1%
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@71 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 81f34784b7b812dcda956ee489dfdc74ec2da990
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Sun Jan 6 19:21:26 2013 +0000
+-
+-    Adjust the Snappy open-source distribution for the changes in Google's
+-    internal file API.
+-    
+-    R=sanjay
+-    
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@70 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 698af469b47fe809905e2ed173ad84241de5800f
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Jan 4 11:54:20 2013 +0000
+-
+-    Change a few ORs to additions where they don't matter. This helps the compiler
+-    use the LEA instruction more efficiently, since e.g. a + (b << 2) can be encoded
+-    as one instruction. Even more importantly, it can constant-fold the
+-    COPY_* enums together with the shifted negative constants, which also saves
+-    some instructions. (We don't need it for LITERAL, since it happens to be 0.)
+-    
+-    I am unsure why the compiler couldn't do this itself, but the theory is that
+-    it cannot prove that len-1 and len-4 cannot underflow/wrap, and thus can't
+-    do the optimization safely.
+-    
+-    The gains are small but measurable; 0.5-1.0% over the BM_Z* benchmarks
+-    (measured on Westmere, Sandy Bridge and Istanbul).
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@69 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 55209f9b92efd97e0a61be28ed94210de04c3bfc
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Mon Oct 8 11:37:16 2012 +0000
+-
+-    Stop giving -Werror to automake, due to an incompatibility between current
+-    versions of libtool and automake on non-GNU platforms (e.g. Mac OS X).
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@68 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit b86e81c8b3426a62d8ab3a7674c2506e9e678740
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Aug 17 13:54:47 2012 +0000
+-
+-    Fix public issue 66: Document GetUncompressedLength better, in particular that
+-    it leaves the source in a state that's not appropriate for RawUncompress.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@67 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 2e225ba821b420ae28e1d427075d5589c1e892d9
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Jul 31 11:44:44 2012 +0000
+-
+-    Fix public issue 64: Check for <sys/time.h> at configure time,
+-    since MSVC seemingly does not have it.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@66 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit e89f20ab46ee11050760c6d57f05c2a3825a911c
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Jul 4 09:34:48 2012 +0000
+-
+-    Handle the case where gettimeofday() goes backwards or returns the same value
+-    twice; it could cause division by zero in the unit test framework.
+-    (We already had one fix for this in place, but it was incomplete.)
+-    
+-    This could in theory happen on any system, since there are few guarantees
+-    about gettimeofday(), but seems to only happen in practice on GNU/Hurd, where
+-    gettimeofday() is cached and only updated ever so often.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@65 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 3ec60ac9878de5d0317ad38fc545080a4bfaa74f
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Jul 4 09:28:33 2012 +0000
+-
+-    Mark ARMv4 as not supporting unaligned accesses (not just ARMv5 and ARMv6);
+-    apparently Debian still targets these by default, giving us segfaults on
+-    armel.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@64 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit be80d6f74f9d82220e952a54f3f129aae1f13f95
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue May 22 09:46:05 2012 +0000
+-
+-    Fix public bug #62: Remove an extraneous comma at the end of an enum list,
+-    causing compile errors when embedded in Mozilla on OpenBSD.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@63 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 8b95464146dddab1c7068f879162db9a885cdafe
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue May 22 09:32:50 2012 +0000
+-
+-    Snappy library no longer depends on iostream.
+-    
+-    Achieved by moving logging macro definitions to a test-only
+-    header file, and by changing non-test code to use assert,
+-    fprintf, and abort instead of LOG/CHECK macros.
+-    
+-    R=sesse
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@62 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit fc723b212d6972af7051261754770b3f70a7dc03
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Feb 24 15:46:37 2012 +0000
+-
+-    Release Snappy 1.0.5.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@61 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit dc63e0ad9693e13390ba31b00d92ecccaf7605c3
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Feb 23 17:00:36 2012 +0000
+-
+-    For 32-bit platforms, do not try to accelerate multiple neighboring
+-    32-bit loads with a 64-bit load during compression (it's not a win).
+-    
+-    The main target for this optimization is ARM, but 32-bit x86 gets
+-    a small gain, too, although there is noise in the microbenchmarks.
+-    It's a no-op for 64-bit x86. It does not affect decompression.
+-    
+-    Microbenchmark results on a Cortex-A9 1GHz, using g++ 4.6.2 (from
+-    Ubuntu/Linaro), -O2 -DNDEBUG -Wa,-march=armv7a -mtune=cortex-a9
+-    -mthumb-interwork, minimum 1000 iterations:
+-    
+-      Benchmark            Time(ns)    CPU(ns) Iterations
+-      ---------------------------------------------------
+-      BM_ZFlat/0            1158277    1160000       1000 84.2MB/s  html (23.57 %)    [ +4.3%]
+-      BM_ZFlat/1           14861782   14860000       1000 45.1MB/s  urls (50.89 %)    [ +1.1%]
+-      BM_ZFlat/2             393595     390000       1000 310.5MB/s  jpg (99.88 %)    [ +0.0%]
+-      BM_ZFlat/3             650583     650000       1000 138.4MB/s  pdf (82.13 %)    [ +3.1%]
+-      BM_ZFlat/4            4661480    4660000       1000 83.8MB/s  html4 (23.55 %)   [ +4.3%]
+-      BM_ZFlat/5             491973     490000       1000 47.9MB/s  cp (48.12 %)      [ +2.0%]
+-      BM_ZFlat/6             193575     192678       1038 55.2MB/s  c (42.40 %)       [ +9.0%]
+-      BM_ZFlat/7              62343      62754       3187 56.5MB/s  lsp (48.37 %)     [ +2.6%]
+-      BM_ZFlat/8           17708468   17710000       1000 55.5MB/s  xls (41.34 %)     [ -0.3%]
+-      BM_ZFlat/9            3755345    3760000       1000 38.6MB/s  txt1 (59.81 %)    [ +8.2%]
+-      BM_ZFlat/10           3324217    3320000       1000 36.0MB/s  txt2 (64.07 %)    [ +4.2%]
+-      BM_ZFlat/11          10139932   10140000       1000 40.1MB/s  txt3 (57.11 %)    [ +6.4%]
+-      BM_ZFlat/12          13532109   13530000       1000 34.0MB/s  txt4 (68.35 %)    [ +5.0%]
+-      BM_ZFlat/13           4690847    4690000       1000 104.4MB/s  bin (18.21 %)    [ +4.1%]
+-      BM_ZFlat/14            830682     830000       1000 43.9MB/s  sum (51.88 %)     [ +1.2%]
+-      BM_ZFlat/15             84784      85011       2235 47.4MB/s  man (59.36 %)     [ +1.1%]
+-      BM_ZFlat/16           1293254    1290000       1000 87.7MB/s  pb (23.15 %)      [ +2.3%]
+-      BM_ZFlat/17           2775155    2780000       1000 63.2MB/s  gaviota (38.27 %) [+12.2%]
+-    
+-    Core i7 in 32-bit mode (only one run and 100 iterations, though, so noisy):
+-    
+-      Benchmark            Time(ns)    CPU(ns) Iterations
+-      ---------------------------------------------------
+-      BM_ZFlat/0             227582     223464       3043 437.0MB/s  html (23.57 %)    [ +7.4%]
+-      BM_ZFlat/1            2982430    2918455        233 229.4MB/s  urls (50.89 %)    [ +2.9%]
+-      BM_ZFlat/2              46967      46658      15217 2.5GB/s  jpg (99.88 %)       [ +0.0%]
+-      BM_ZFlat/3             115298     114864       5833 783.2MB/s  pdf (82.13 %)     [ +1.5%]
+-      BM_ZFlat/4             913440     899743        778 434.2MB/s  html4 (23.55 %)   [ +0.3%]
+-      BM_ZFlat/5             110302     108571       7000 216.1MB/s  cp (48.12 %)      [ +0.0%]
+-      BM_ZFlat/6              44409      43372      15909 245.2MB/s  c (42.40 %)       [ +0.8%]
+-      BM_ZFlat/7              15713      15643      46667 226.9MB/s  lsp (48.37 %)     [ +2.7%]
+-      BM_ZFlat/8            2625539    2602230        269 377.4MB/s  xls (41.34 %)     [ +1.4%]
+-      BM_ZFlat/9             808884     811429        875 178.8MB/s  txt1 (59.81 %)    [ -3.9%]
+-      BM_ZFlat/10            709532     700000       1000 170.5MB/s  txt2 (64.07 %)    [ +0.0%]
+-      BM_ZFlat/11           2177682    2162162        333 188.2MB/s  txt3 (57.11 %)    [ -1.4%]
+-      BM_ZFlat/12           2849640    2840000        250 161.8MB/s  txt4 (68.35 %)    [ -1.4%]
+-      BM_ZFlat/13            849760     835476        778 585.8MB/s  bin (18.21 %)     [ +1.2%]
+-      BM_ZFlat/14            165940     164571       4375 221.6MB/s  sum (51.88 %)     [ +1.4%]
+-      BM_ZFlat/15             20939      20571      35000 196.0MB/s  man (59.36 %)     [ +2.1%]
+-      BM_ZFlat/16            239209     236544       2917 478.1MB/s  pb (23.15 %)      [ +4.2%]
+-      BM_ZFlat/17            616206     610000       1000 288.2MB/s  gaviota (38.27 %) [ -1.6%]
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@60 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit f8829ea39d51432ba4e6a26ddaec57acea779f4c
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Feb 21 17:02:17 2012 +0000
+-
+-    Enable the use of unaligned loads and stores for ARM-based architectures
+-    where they are available (ARMv7 and higher). This gives a significant
+-    speed boost on ARM, both for compression and decompression.
+-    It should not affect x86 at all.
+-    
+-    There are more changes possible to speed up ARM, but it might not be
+-    that easy to do without hurting x86 or making the code uglier.
+-    Also, we do not try to use NEON yet.
+-    
+-    Microbenchmark results on a Cortex-A9 1GHz, using g++ 4.6.2 (from Ubuntu/Linaro),
+-    -O2 -DNDEBUG -Wa,-march=armv7a -mtune=cortex-a9 -mthumb-interwork:
+-    
+-    Benchmark            Time(ns)    CPU(ns) Iterations
+-    ---------------------------------------------------
+-    BM_UFlat/0             524806     529100        378 184.6MB/s  html            [+33.6%]
+-    BM_UFlat/1            5139790    5200000        100 128.8MB/s  urls            [+28.8%]
+-    BM_UFlat/2              86540      84166       1901 1.4GB/s  jpg               [ +0.6%]
+-    BM_UFlat/3             215351     210176        904 428.0MB/s  pdf             [+29.8%]
+-    BM_UFlat/4            2144490    2100000        100 186.0MB/s  html4           [+33.3%]
+-    BM_UFlat/5             194482     190000       1000 123.5MB/s  cp              [+36.2%]
+-    BM_UFlat/6              91843      90175       2107 117.9MB/s  c               [+38.6%]
+-    BM_UFlat/7              28535      28426       6684 124.8MB/s  lsp             [+34.7%]
+-    BM_UFlat/8            9206600    9200000        100 106.7MB/s  xls             [+42.4%]
+-    BM_UFlat/9            1865273    1886792        106 76.9MB/s  txt1             [+32.5%]
+-    BM_UFlat/10           1576809    1587301        126 75.2MB/s  txt2             [+32.3%]
+-    BM_UFlat/11           4968450    4900000        100 83.1MB/s  txt3             [+32.7%]
+-    BM_UFlat/12           6673970    6700000        100 68.6MB/s  txt4             [+32.8%]
+-    BM_UFlat/13           2391470    2400000        100 203.9MB/s  bin             [+29.2%]
+-    BM_UFlat/14            334601     344827        522 105.8MB/s  sum             [+30.6%]
+-    BM_UFlat/15             37404      38080       5252 105.9MB/s  man             [+33.8%]
+-    BM_UFlat/16            535470     540540        370 209.2MB/s  pb              [+31.2%]
+-    BM_UFlat/17           1875245    1886792        106 93.2MB/s  gaviota          [+37.8%]
+-    BM_UValidate/0         178425     179533       1114 543.9MB/s  html            [ +2.7%]
+-    BM_UValidate/1        2100450    2000000        100 334.8MB/s  urls            [ +5.0%]
+-    BM_UValidate/2           1039       1044     172413 113.3GB/s  jpg             [ +3.4%]
+-    BM_UValidate/3          59423      59470       3363 1.5GB/s  pdf               [ +7.8%]
+-    BM_UValidate/4         760716     766283        261 509.8MB/s  html4           [ +6.5%]
+-    BM_ZFlat/0            1204632    1204819        166 81.1MB/s  html (23.57 %)   [+32.8%]
+-    BM_ZFlat/1           15656190   15600000        100 42.9MB/s  urls (50.89 %)   [+27.6%]
+-    BM_ZFlat/2             403336     410677        487 294.8MB/s  jpg (99.88 %)   [+16.5%]
+-    BM_ZFlat/3             664073     671140        298 134.0MB/s  pdf (82.13 %)   [+28.4%]
+-    BM_ZFlat/4            4961940    4900000        100 79.7MB/s  html4 (23.55 %)  [+30.6%]
+-    BM_ZFlat/5             500664     501253        399 46.8MB/s  cp (48.12 %)     [+33.4%]
+-    BM_ZFlat/6             217276     215982        926 49.2MB/s  c (42.40 %)      [+25.0%]
+-    BM_ZFlat/7              64122      65487       3054 54.2MB/s  lsp (48.37 %)    [+36.1%]
+-    BM_ZFlat/8           18045730   18000000        100 54.6MB/s  xls (41.34 %)    [+34.4%]
+-    BM_ZFlat/9            4051530    4000000        100 36.3MB/s  txt1 (59.81 %)   [+25.0%]
+-    BM_ZFlat/10           3451800    3500000        100 34.1MB/s  txt2 (64.07 %)   [+25.7%]
+-    BM_ZFlat/11          11052340   11100000        100 36.7MB/s  txt3 (57.11 %)   [+24.3%]
+-    BM_ZFlat/12          14538690   14600000        100 31.5MB/s  txt4 (68.35 %)   [+24.7%]
+-    BM_ZFlat/13           5041850    5000000        100 97.9MB/s  bin (18.21 %)    [+32.0%]
+-    BM_ZFlat/14            908840     909090        220 40.1MB/s  sum (51.88 %)    [+22.2%]
+-    BM_ZFlat/15             86921      86206       1972 46.8MB/s  man (59.36 %)    [+42.2%]
+-    BM_ZFlat/16           1312315    1315789        152 86.0MB/s  pb (23.15 %)     [+34.5%]
+-    BM_ZFlat/17           3173120    3200000        100 54.9MB/s  gaviota (38.27%) [+28.1%]
+-    
+-    
+-    The move from 64-bit to 32-bit operations for the copies also affected 32-bit x86;
+-    positive on the decompression side, and slightly negative on the compression side
+-    (unless that is noise; I only ran once):
+-    
+-    Benchmark              Time(ns)    CPU(ns) Iterations
+-    -----------------------------------------------------
+-    BM_UFlat/0                86279      86140       7778 1.1GB/s  html             [ +7.5%]
+-    BM_UFlat/1               839265     822622        778 813.9MB/s  urls           [ +9.4%]
+-    BM_UFlat/2                 9180       9143      87500 12.9GB/s  jpg             [ +1.2%]
+-    BM_UFlat/3                35080      35000      20000 2.5GB/s  pdf              [+10.1%]
+-    BM_UFlat/4               350318     345000       2000 1.1GB/s  html4            [ +7.0%]
+-    BM_UFlat/5                33808      33472      21212 701.0MB/s  cp             [ +9.0%]
+-    BM_UFlat/6                15201      15214      46667 698.9MB/s  c              [+14.9%]
+-    BM_UFlat/7                 4652       4651     159091 762.9MB/s  lsp            [ +7.5%]
+-    BM_UFlat/8              1285551    1282528        538 765.7MB/s  xls            [+10.7%]
+-    BM_UFlat/9               282510     281690       2414 514.9MB/s  txt1           [+13.6%]
+-    BM_UFlat/10              243494     239286       2800 498.9MB/s  txt2           [+14.4%]
+-    BM_UFlat/11              743625     740000       1000 550.0MB/s  txt3           [+14.3%]
+-    BM_UFlat/12              999441     989717        778 464.3MB/s  txt4           [+16.1%]
+-    BM_UFlat/13              412402     410076       1707 1.2GB/s  bin              [ +7.3%]
+-    BM_UFlat/14               54876      54000      10000 675.3MB/s  sum            [+13.0%]
+-    BM_UFlat/15                6146       6100     100000 660.8MB/s  man            [+14.8%]
+-    BM_UFlat/16               90496      90286       8750 1.2GB/s  pb               [ +4.0%]
+-    BM_UFlat/17              292650     292000       2500 602.0MB/s  gaviota        [+18.1%]
+-    BM_UValidate/0            49620      49699      14286 1.9GB/s  html             [ +0.0%]
+-    BM_UValidate/1           501371     500000       1000 1.3GB/s  urls             [ +0.0%]
+-    BM_UValidate/2              232        227    3043478 521.5GB/s  jpg            [ +1.3%]
+-    BM_UValidate/3            17250      17143      43750 5.1GB/s  pdf              [ -1.3%]
+-    BM_UValidate/4           198643     200000       3500 1.9GB/s  html4            [ -0.9%]
+-    BM_ZFlat/0               227128     229415       3182 425.7MB/s  html (23.57 %) [ -1.4%]
+-    BM_ZFlat/1              2970089    2960000        250 226.2MB/s  urls (50.89 %) [ -1.9%]
+-    BM_ZFlat/2                45683      44999      15556 2.6GB/s  jpg (99.88 %)    [ +2.2%]
+-    BM_ZFlat/3               114661     113136       6364 795.1MB/s  pdf (82.13 %)  [ -1.5%]
+-    BM_ZFlat/4               919702     914286        875 427.2MB/s  html4 (23.55%) [ -1.3%]
+-    BM_ZFlat/5               108189     108422       6364 216.4MB/s  cp (48.12 %)   [ -1.2%]
+-    BM_ZFlat/6                44525      44000      15909 241.7MB/s  c (42.40 %)    [ -2.9%]
+-    BM_ZFlat/7                15973      15857      46667 223.8MB/s  lsp (48.37 %)  [ +0.0%]
+-    BM_ZFlat/8              2677888    2639405        269 372.1MB/s  xls (41.34 %)  [ -1.4%]
+-    BM_ZFlat/9               800715     780000       1000 186.0MB/s  txt1 (59.81 %) [ -0.4%]
+-    BM_ZFlat/10              700089     700000       1000 170.5MB/s  txt2 (64.07 %) [ -2.9%]
+-    BM_ZFlat/11             2159356    2138365        318 190.3MB/s  txt3 (57.11 %) [ -0.3%]
+-    BM_ZFlat/12             2796143    2779923        259 165.3MB/s  txt4 (68.35 %) [ -1.4%]
+-    BM_ZFlat/13              856458     835476        778 585.8MB/s  bin (18.21 %)  [ -0.1%]
+-    BM_ZFlat/14              166908     166857       4375 218.6MB/s  sum (51.88 %)  [ -1.4%]
+-    BM_ZFlat/15               21181      20857      35000 193.3MB/s  man (59.36 %)  [ -0.8%]
+-    BM_ZFlat/16              244009     239973       2917 471.3MB/s  pb (23.15 %)   [ -1.4%]
+-    BM_ZFlat/17              596362     590000       1000 297.9MB/s  gaviota (38.27%) [ +0.0%]
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@59 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit f2e184f638bdc7905f26c24faaf10fc0f5d33403
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Sat Feb 11 22:11:22 2012 +0000
+-
+-    Lower the size allocated in the "corrupted input" unit test from 256 MB
+-    to 2 MB. This fixes issues with running the unit test on platforms with
+-    little RAM (e.g. some ARM boards).
+-    
+-    Also, reactivate the 2 MB test for 64-bit platforms; there's no good
+-    reason why it shouldn't be.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@58 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit e750dc0f054ba74b0ce76dd2013e6728cc7a41c5
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Sun Jan 8 17:55:48 2012 +0000
+-
+-    Minor refactoring to accommodate changes in Google's internal code tree.
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@57 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit d9068ee301bdf893a4d8cb7c6518eacc44c4c1f2
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Jan 4 13:10:46 2012 +0000
+-
+-    Fix public issue r57: Fix most warnings with -Wall, mostly signed/unsigned
+-    warnings. There are still some in the unit test, but the main .cc file should
+-    be clean. We haven't enabled -Wall for the default build, since the unit test
+-    is still not clean.
+-    
+-    This also fixes a real bug in the open-source implementation of
+-    ReadFileToStringOrDie(); it would not detect errors correctly.
+-    
+-    I had to go through some pains to avoid performance loss as the types
+-    were changed; I think there might still be some with 32-bit if and only if LFS
+-    is enabled (ie., size_t is 64-bit), but for regular 32-bit and 64-bit I can't
+-    see any losses, and I've diffed the generated GCC assembler between the old and
+-    new code without seeing any significant changes. If anything, it's ever so
+-    slightly faster.
+-    
+-    This may or may not enable compression of very large blocks (>2^32 bytes)
+-    when size_t is 64-bit, but I haven't checked, and it is still not a supported
+-    case.
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@56 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 0755c815197dacc77d8971ae917c86d7aa96bf8e
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Jan 4 10:46:39 2012 +0000
+-
+-    Add a framing format description. We do not have any implementation of this at
+-    the current point, but there seems to be enough of a general interest in the
+-    topic (cf. public bug #34).
+-    
+-    R=csilvers,sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@55 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit d7eb2dc4133794b62cba691f9be40d1549bc32e2
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Mon Dec 5 21:27:26 2011 +0000
+-
+-    Speed up decompression by moving the refill check to the end of the loop.
+-    
+-    This seems to work because in most of the branches, the compiler can evaluate
+-    “ip_limit_ - ip” in a more efficient way than reloading ip_limit_ from memory
+-    (either by already having the entire expression in a register, or reconstructing
+-    it from “avail”, or something else). Memory loads, even from L1, are seemingly
+-    costly in the big picture at the current decompression speeds.
+-    
+-    Microbenchmarks (64-bit, opt mode):
+-    
+-    Westmere (Intel Core i7):
+-    
+-      Benchmark     Time(ns)    CPU(ns) Iterations
+-      --------------------------------------------
+-      BM_UFlat/0       74492      74491     187894 1.3GB/s  html      [ +5.9%]
+-      BM_UFlat/1      712268     712263      19644 940.0MB/s  urls    [ +3.8%]
+-      BM_UFlat/2       10591      10590    1000000 11.2GB/s  jpg      [ -6.8%]
+-      BM_UFlat/3       29643      29643     469915 3.0GB/s  pdf       [ +7.9%]
+-      BM_UFlat/4      304669     304667      45930 1.3GB/s  html4     [ +4.8%]
+-      BM_UFlat/5       28508      28507     490077 823.1MB/s  cp      [ +4.0%]
+-      BM_UFlat/6       12415      12415    1000000 856.5MB/s  c       [ +8.6%]
+-      BM_UFlat/7        3415       3415    4084723 1039.0MB/s  lsp    [+18.0%]
+-      BM_UFlat/8      979569     979563      14261 1002.5MB/s  xls    [ +5.8%]
+-      BM_UFlat/9      230150     230148      60934 630.2MB/s  txt1    [ +5.2%]
+-      BM_UFlat/10     197167     197166      71135 605.5MB/s  txt2    [ +4.7%]
+-      BM_UFlat/11     607394     607390      23041 670.1MB/s  txt3    [ +5.6%]
+-      BM_UFlat/12     808502     808496      17316 568.4MB/s  txt4    [ +5.0%]
+-      BM_UFlat/13     372791     372788      37564 1.3GB/s  bin       [ +3.3%]
+-      BM_UFlat/14      44541      44541     313969 818.8MB/s  sum     [ +5.7%]
+-      BM_UFlat/15       4833       4833    2898697 834.1MB/s  man     [ +4.8%]
+-      BM_UFlat/16      79855      79855     175356 1.4GB/s  pb        [ +4.8%]
+-      BM_UFlat/17     245845     245843      56838 715.0MB/s  gaviota [ +5.8%]
+-    
+-    Clovertown (Intel Core 2):
+-    
+-      Benchmark     Time(ns)    CPU(ns) Iterations
+-      --------------------------------------------
+-      BM_UFlat/0      107911     107890     100000 905.1MB/s  html    [ +2.2%]
+-      BM_UFlat/1     1011237    1011041      10000 662.3MB/s  urls    [ +2.5%]
+-      BM_UFlat/2       26775      26770     523089 4.4GB/s  jpg       [ +0.0%]
+-      BM_UFlat/3       48103      48095     290618 1.8GB/s  pdf       [ +3.4%]
+-      BM_UFlat/4      437724     437644      31937 892.6MB/s  html4   [ +2.1%]
+-      BM_UFlat/5       39607      39600     358284 592.5MB/s  cp      [ +2.4%]
+-      BM_UFlat/6       18227      18224     768191 583.5MB/s  c       [ +2.7%]
+-      BM_UFlat/7        5171       5170    2709437 686.4MB/s  lsp     [ +3.9%]
+-      BM_UFlat/8     1560291    1559989       8970 629.5MB/s  xls     [ +3.6%]
+-      BM_UFlat/9      335401     335343      41731 432.5MB/s  txt1    [ +3.0%]
+-      BM_UFlat/10     287014     286963      48758 416.0MB/s  txt2    [ +2.8%]
+-      BM_UFlat/11     888522     888356      15752 458.1MB/s  txt3    [ +2.9%]
+-      BM_UFlat/12    1186600    1186378      10000 387.3MB/s  txt4    [ +3.1%]
+-      BM_UFlat/13     572295     572188      24468 855.4MB/s  bin     [ +2.1%]
+-      BM_UFlat/14      64060      64049     218401 569.4MB/s  sum     [ +4.1%]
+-      BM_UFlat/15       7264       7263    1916168 555.0MB/s  man     [ +1.4%]
+-      BM_UFlat/16     108853     108836     100000 1039.1MB/s  pb     [ +1.7%]
+-      BM_UFlat/17     364289     364223      38419 482.6MB/s  gaviota [ +4.9%]
+-    
+-    Barcelona (AMD Opteron):
+-    
+-      Benchmark     Time(ns)    CPU(ns) Iterations
+-      --------------------------------------------
+-      BM_UFlat/0      103900     103871     100000 940.2MB/s  html    [ +8.3%]
+-      BM_UFlat/1     1000435    1000107      10000 669.5MB/s  urls    [ +6.6%]
+-      BM_UFlat/2       24659      24652     567362 4.8GB/s  jpg       [ +0.1%]
+-      BM_UFlat/3       48206      48193     291121 1.8GB/s  pdf       [ +5.0%]
+-      BM_UFlat/4      421980     421850      33174 926.0MB/s  html4   [ +7.3%]
+-      BM_UFlat/5       40368      40357     346994 581.4MB/s  cp      [ +8.7%]
+-      BM_UFlat/6       19836      19830     708695 536.2MB/s  c       [ +8.0%]
+-      BM_UFlat/7        6100       6098    2292774 581.9MB/s  lsp     [ +9.0%]
+-      BM_UFlat/8     1693093    1692514       8261 580.2MB/s  xls     [ +8.0%]
+-      BM_UFlat/9      365991     365886      38225 396.4MB/s  txt1    [ +7.1%]
+-      BM_UFlat/10     311330     311238      44950 383.6MB/s  txt2    [ +7.6%]
+-      BM_UFlat/11     975037     974737      14376 417.5MB/s  txt3    [ +6.9%]
+-      BM_UFlat/12    1303558    1303175      10000 352.6MB/s  txt4    [ +7.3%]
+-      BM_UFlat/13     517448     517290      27144 946.2MB/s  bin     [ +5.5%]
+-      BM_UFlat/14      66537      66518     210352 548.3MB/s  sum     [ +7.5%]
+-      BM_UFlat/15       7976       7974    1760383 505.6MB/s  man     [ +5.6%]
+-      BM_UFlat/16     103121     103092     100000 1097.0MB/s  pb     [ +8.7%]
+-      BM_UFlat/17     391431     391314      35733 449.2MB/s  gaviota [ +6.5%]
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@54 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 5ed51ce15fc4ff8d2f7235704eb6b0c3f762fb88
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Nov 23 11:14:17 2011 +0000
+-
+-    Speed up decompression by making the fast path for literals faster.
+-    
+-    We do the fast-path step as soon as possible; in fact, as soon as we know the
+-    literal length. Since we usually hit the fast path, we can then skip the checks
+-    for long literals and available input space (beyond what the fast path check
+-    already does).
+-    
+-    Note that this changes the decompression Writer API; however, it does not
+-    change the ABI, since writers are always templatized and as such never
+-    cross compilation units. The new API is slightly more general, in that it
+-    doesn't hard-code the value 16. Note that we also take care to check
+-    for len <= 16 first, since the other two checks almost always succeed
+-    (so we don't want to waste time checking for them until we have to).
+-    
+-    The improvements are most marked on Nehalem, but are generally positive
+-    on other platforms as well. All microbenchmarks are 64-bit, opt.
+-    
+-    Clovertown (Core 2):
+-    
+-      Benchmark     Time(ns)    CPU(ns) Iterations
+-      --------------------------------------------
+-      BM_UFlat/0      110226     110224     100000 886.0MB/s  html    [ +1.5%]
+-      BM_UFlat/1     1036523    1036508      10000 646.0MB/s  urls    [ -0.8%]
+-      BM_UFlat/2       26775      26775     522570 4.4GB/s  jpg       [ +0.0%]
+-      BM_UFlat/3       49738      49737     280974 1.8GB/s  pdf       [ +0.3%]
+-      BM_UFlat/4      446790     446792      31334 874.3MB/s  html4   [ +0.8%]
+-      BM_UFlat/5       40561      40562     350424 578.5MB/s  cp      [ +1.3%]
+-      BM_UFlat/6       18722      18722     746903 568.0MB/s  c       [ +1.4%]
+-      BM_UFlat/7        5373       5373    2608632 660.5MB/s  lsp     [ +8.3%]
+-      BM_UFlat/8     1615716    1615718       8670 607.8MB/s  xls     [ +2.0%]
+-      BM_UFlat/9      345278     345281      40481 420.1MB/s  txt1    [ +1.4%]
+-      BM_UFlat/10     294855     294855      47452 404.9MB/s  txt2    [ +1.6%]
+-      BM_UFlat/11     914263     914263      15316 445.2MB/s  txt3    [ +1.1%]
+-      BM_UFlat/12    1222694    1222691      10000 375.8MB/s  txt4    [ +1.4%]
+-      BM_UFlat/13     584495     584489      23954 837.4MB/s  bin     [ -0.6%]
+-      BM_UFlat/14      66662      66662     210123 547.1MB/s  sum     [ +1.2%]
+-      BM_UFlat/15       7368       7368    1881856 547.1MB/s  man     [ +4.0%]
+-      BM_UFlat/16     110727     110726     100000 1021.4MB/s  pb     [ +2.3%]
+-      BM_UFlat/17     382138     382141      36616 460.0MB/s  gaviota [ -0.7%]
+-    
+-    Westmere (Core i7):
+-    
+-      Benchmark     Time(ns)    CPU(ns) Iterations
+-      --------------------------------------------
+-      BM_UFlat/0       78861      78853     177703 1.2GB/s  html      [ +2.1%]
+-      BM_UFlat/1      739560     739491      18912 905.4MB/s  urls    [ +3.4%]
+-      BM_UFlat/2        9867       9866    1419014 12.0GB/s  jpg      [ +3.4%]
+-      BM_UFlat/3       31989      31986     438385 2.7GB/s  pdf       [ +0.2%]
+-      BM_UFlat/4      319406     319380      43771 1.2GB/s  html4     [ +1.9%]
+-      BM_UFlat/5       29639      29636     472862 791.7MB/s  cp      [ +5.2%]
+-      BM_UFlat/6       13478      13477    1000000 789.0MB/s  c       [ +2.3%]
+-      BM_UFlat/7        4030       4029    3475364 880.7MB/s  lsp     [ +8.7%]
+-      BM_UFlat/8     1036585    1036492      10000 947.5MB/s  xls     [ +6.9%]
+-      BM_UFlat/9      242127     242105      57838 599.1MB/s  txt1    [ +3.0%]
+-      BM_UFlat/10     206499     206480      67595 578.2MB/s  txt2    [ +3.4%]
+-      BM_UFlat/11     641635     641570      21811 634.4MB/s  txt3    [ +2.4%]
+-      BM_UFlat/12     848847     848769      16443 541.4MB/s  txt4    [ +3.1%]
+-      BM_UFlat/13     384968     384938      36366 1.2GB/s  bin       [ +0.3%]
+-      BM_UFlat/14      47106      47101     297770 774.3MB/s  sum     [ +4.4%]
+-      BM_UFlat/15       5063       5063    2772202 796.2MB/s  man     [ +7.7%]
+-      BM_UFlat/16      83663      83656     167697 1.3GB/s  pb        [ +1.8%]
+-      BM_UFlat/17     260224     260198      53823 675.6MB/s  gaviota [ -0.5%]
+-    
+-    Barcelona (Opteron):
+-    
+-      Benchmark     Time(ns)    CPU(ns) Iterations
+-      --------------------------------------------
+-      BM_UFlat/0      112490     112457     100000 868.4MB/s  html    [ -0.4%]
+-      BM_UFlat/1     1066719    1066339      10000 627.9MB/s  urls    [ +1.0%]
+-      BM_UFlat/2       24679      24672     563802 4.8GB/s  jpg       [ +0.7%]
+-      BM_UFlat/3       50603      50589     277285 1.7GB/s  pdf       [ +2.6%]
+-      BM_UFlat/4      452982     452849      30900 862.6MB/s  html4   [ -0.2%]
+-      BM_UFlat/5       43860      43848     319554 535.1MB/s  cp      [ +1.2%]
+-      BM_UFlat/6       21419      21413     653573 496.6MB/s  c       [ +1.0%]
+-      BM_UFlat/7        6646       6645    2105405 534.1MB/s  lsp     [ +0.3%]
+-      BM_UFlat/8     1828487    1827886       7658 537.3MB/s  xls     [ +2.6%]
+-      BM_UFlat/9      391824     391714      35708 370.3MB/s  txt1    [ +2.2%]
+-      BM_UFlat/10     334913     334816      41885 356.6MB/s  txt2    [ +1.7%]
+-      BM_UFlat/11    1042062    1041674      10000 390.7MB/s  txt3    [ +1.1%]
+-      BM_UFlat/12    1398902    1398456      10000 328.6MB/s  txt4    [ +1.7%]
+-      BM_UFlat/13     545706     545530      25669 897.2MB/s  bin     [ -0.4%]
+-      BM_UFlat/14      71512      71505     196035 510.0MB/s  sum     [ +1.4%]
+-      BM_UFlat/15       8422       8421    1665036 478.7MB/s  man     [ +2.6%]
+-      BM_UFlat/16     112053     112048     100000 1009.3MB/s  pb     [ -0.4%]
+-      BM_UFlat/17     416723     416713      33612 421.8MB/s  gaviota [ -2.0%]
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@53 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
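The literal fast path described in the commit above (test the common len <= 16 case first, then the rarer input/output space conditions) can be sketched roughly as follows. This is an illustrative C++ outline, not snappy's actual decompressor; Writer, TryEmitLiteralFast and the unconditional 16-byte copy are stand-ins chosen for the example.

```cpp
#include <cstddef>
#include <cstring>

// Minimal stand-in for the decompressor's output state (not snappy's real class).
struct Writer {
  char* op;        // current output position
  char* op_limit;  // end of the output buffer
};

// Returns true if the literal could be emitted via one cheap, unconditional
// 16-byte copy; the caller falls back to a bounds-checked slow path otherwise.
bool TryEmitLiteralFast(Writer* w, const char* ip, const char* ip_limit,
                        size_t len) {
  // Check the overwhelmingly common condition (len <= 16) before the rarer
  // "enough input left" and "enough output room" conditions.
  if (len <= 16 &&
      static_cast<size_t>(ip_limit - ip) >= 16 &&
      static_cast<size_t>(w->op_limit - w->op) >= 16) {
    std::memcpy(w->op, ip, 16);  // may copy a few bytes past len; later output overwrites them
    w->op += len;
    return true;
  }
  return false;  // long literal or near a buffer end: take the careful path
}
```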
+-commit 0c1b9c3904430f5b399bd057d76de4bc36b7a123
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Nov 8 14:46:39 2011 +0000
+-
+-    Fix public issue #53: Update the README to the API we actually open-sourced
+-    with.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@52 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit b61134bc0a6a904b41522b4e5c9e80874c730cef
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Oct 5 12:27:12 2011 +0000
+-
+-    In the format description, use a clearer example to emphasize that varints are
+-    stored in little-endian. Patch from Christian von Roques.
+-    
+-    R=csilvers
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@51 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
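For the little-endian varint convention referred to in the commit above: each byte carries seven payload bits, least-significant group first, and the high bit marks continuation. A minimal decoding sketch (the function name and error handling are illustrative, not the library's actual parser):

```cpp
#include <cstddef>
#include <cstdint>

// Decodes a little-endian base-128 varint into *value.
// Returns the number of bytes consumed, or 0 on truncated/over-long input.
size_t DecodeVarint32(const uint8_t* p, size_t n, uint32_t* value) {
  uint32_t result = 0;
  for (size_t i = 0; i < n && i < 5; ++i) {
    const uint8_t byte = p[i];
    result |= static_cast<uint32_t>(byte & 0x7f) << (7 * i);  // low groups first
    if ((byte & 0x80) == 0) {  // continuation bit clear: this was the last byte
      *value = result;
      return i + 1;
    }
  }
  return 0;
}
```

For instance, the byte sequence 0xFE 0xFF 0x7F decodes to 126 + 127·2^7 + 127·2^14 = 2097150.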
+-commit 21a2e4f55758e759302cd84ad0f3580affcba7d9
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Sep 15 19:34:06 2011 +0000
+-
+-    Release Snappy 1.0.4.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@50 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit e2e303286813c759c5b1cdb46dad63c494f0a061
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Sep 15 09:50:05 2011 +0000
+-
+-    Fix public issue #50: Include generic byteswap macros.
+-    Also include Solaris 10 and FreeBSD versions.
+-    
+-    R=csilvers
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@49 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
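The generic byteswap fallback mentioned in the commit above has the usual portable shape sketched below (the function name is illustrative); such a fallback is only used when no platform header like <byteswap.h> or <sys/endian.h> provides an optimized macro:

```cpp
#include <cstdint>

// Portable 32-bit byte swap: reverses the order of the four bytes.
inline uint32_t bswap_32_generic(uint32_t x) {
  return ((x & 0x000000ffu) << 24) |
         ((x & 0x0000ff00u) << 8)  |
         ((x & 0x00ff0000u) >> 8)  |
         ((x & 0xff000000u) >> 24);
}
```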
+-commit 593002da3c051f4721312869f816b41485bad3b7
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Aug 10 18:57:27 2011 +0000
+-
+-    Partially fix public issue 50: Remove an extra comma from the end of some
+-    enum declarations, as it seems the Sun compiler does not like it.
+-    
+-    Based on patch by Travis Vitek.
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@48 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit f1063a5dc43891eed37f0586bfea57b84dddd756
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Aug 10 18:44:16 2011 +0000
+-
+-    Use the right #ifdef test for sys/mman.h.
+-    
+-    Based on patch by Travis Vitek.
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@47 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 41c827a2fa9ce048202d941187f211180feadde4
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Aug 10 01:22:09 2011 +0000
+-
+-    Fix public issue #47: Small comment cleanups in the unit test.
+-    
+-    Originally based on a patch by Patrick Pelletier.
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@46 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 59aeffa6049b5c2a3a467e7602c1f93630b870e7
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Aug 10 01:14:43 2011 +0000
+-
+-    Fix public issue #46: Format description said "3-byte offset"
+-    instead of "4-byte offset" for the longest copies.
+-    
+-    Also fix an inconsistency in the heading for section 2.2.3.
+-    Both patches by Patrick Pelletier.
+-    
+-    R=csilvers
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@45 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 57e7cd72559cb022ef32856f2252a4c4585e562e
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Jun 28 11:40:25 2011 +0000
+-
+-    Fix public issue #44: Make the definition and declaration of CompressFragment
+-    identical, even regarding cv-qualifiers.
+-    
+-    This is required to work around a bug in the Solaris Studio C++ compiler
+-    (it does not properly disregard cv-qualifiers when doing name mangling).
+-    
+-    R=sanjay
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@44 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 13c4a449a8ea22139c9aa441e8024eebc9dbdf6e
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Sat Jun 4 10:19:05 2011 +0000
+-
+-    Correct an inaccuracy in the Snappy format description.
+-    (I stumbled into this when changing the way we decompress literals.)
+-    
+-    R=csilvers
+-    
+-    Revision created by MOE tool push_codebase.
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@43 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit f5406737403119e1483a71d2084d17728663a114
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Jun 3 20:53:06 2011 +0000
+-
+-    Speed up decompression by removing a fast-path attempt.
+-    
+-    Whenever we try to enter a copy fast-path, there is a certain cost in checking
+-    that all the preconditions are in place, but it's normally offset by the fact
+-    that we can usually take the cheaper path. However, in a certain path we've
+-    already established that "avail < literal_length", which usually means that
+-    either the available space is small, or the literal is big. Both will disqualify
+-    us from taking the fast path, and thus we take the hit from the precondition
+-    checking without gaining much from having a fast path. Thus, simply don't try
+-    the fast path in this situation -- we're already on a slow path anyway
+-    (one where we need to refill more data from the reader).
+-    
+-    I'm a bit surprised at how much this gained; it could be that this path is
+-    more common than I thought, or that the simpler structure somehow makes the
+-    compiler happier. I haven't looked at the assembler, but it's a win across
+-    the board on both Core 2, Core i7 and Opteron, at least for the cases we
+-    typically care about. The gains seem to be the largest on Core i7, though.
+-    Results from my Core i7 workstation:
+-    
+-    
+-      Benchmark            Time(ns)    CPU(ns) Iterations
+-      ---------------------------------------------------
+-      BM_UFlat/0              73337      73091     190996 1.3GB/s  html      [ +1.7%]
+-      BM_UFlat/1             696379     693501      20173 965.5MB/s  urls    [ +2.7%]
+-      BM_UFlat/2               9765       9734    1472135 12.1GB/s  jpg      [ +0.7%]
+-      BM_UFlat/3              29720      29621     472973 3.0GB/s  pdf       [ +1.8%]
+-      BM_UFlat/4             294636     293834      47782 1.3GB/s  html4     [ +2.3%]
+-      BM_UFlat/5              28399      28320     494700 828.5MB/s  cp      [ +3.5%]
+-      BM_UFlat/6              12795      12760    1000000 833.3MB/s  c       [ +1.2%]
+-      BM_UFlat/7               3984       3973    3526448 893.2MB/s  lsp     [ +5.7%]
+-      BM_UFlat/8             991996     989322      14141 992.6MB/s  xls     [ +3.3%]
+-      BM_UFlat/9             228620     227835      61404 636.6MB/s  txt1    [ +4.0%]
+-      BM_UFlat/10            197114     196494      72165 607.5MB/s  txt2    [ +3.5%]
+-      BM_UFlat/11            605240     603437      23217 674.4MB/s  txt3    [ +3.7%]
+-      BM_UFlat/12            804157     802016      17456 573.0MB/s  txt4    [ +3.9%]
+-      BM_UFlat/13            347860     346998      40346 1.4GB/s  bin       [ +1.2%]
+-      BM_UFlat/14             44684      44559     315315 818.4MB/s  sum     [ +2.3%]
+-      BM_UFlat/15              5120       5106    2739726 789.4MB/s  man     [ +3.3%]
+-      BM_UFlat/16             76591      76355     183486 1.4GB/s  pb        [ +2.8%]
+-      BM_UFlat/17            238564     237828      58824 739.1MB/s  gaviota [ +1.6%]
+-      BM_UValidate/0          42194      42060     333333 2.3GB/s  html      [ -0.1%]
+-      BM_UValidate/1         433182     432005      32407 1.5GB/s  urls      [ -0.1%]
+-      BM_UValidate/2            197        196   71428571 603.3GB/s  jpg     [ +0.5%]
+-      BM_UValidate/3          14494      14462     972222 6.1GB/s  pdf       [ +0.5%]
+-      BM_UValidate/4         168444     167836      83832 2.3GB/s  html4     [ +0.1%]
+-    
+-    R=jeff
+-    
+-    Revision created by MOE tool push_codebase.
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@42 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 197f3ee9f9397e98c9abf07f9da875fbcb725dba
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Jun 3 20:47:14 2011 +0000
+-
+-    Speed up decompression by not needing a lookup table for literal items.
+-    
+-    Looking up into and decoding the values from char_table has long shown up as a
+-    hotspot in the decompressor. While it turns out that it's hard to make a more
+-    efficient decoder for the copy ops, the literals are simple enough that we can
+-    decode them without needing a table lookup. (This means that 1/4 of the table
+-    is now unused, although that in itself doesn't buy us anything.)
+-    
+-    The gains are small, but definitely present; some tests win as much as 10%,
+-    but 1-4% is more typical. These results are from Core i7, in 64-bit mode;
+-    Core 2 and Opteron show similar results. (I've run with more iterations
+-    than unusual to make sure the smaller gains don't drown entirely in noise.)
+-    
+-      Benchmark            Time(ns)    CPU(ns) Iterations
+-      ---------------------------------------------------
+-      BM_UFlat/0              74665      74428     182055 1.3GB/s  html      [ +3.1%]
+-      BM_UFlat/1             714106     711997      19663 940.4MB/s  urls    [ +4.4%]
+-      BM_UFlat/2               9820       9789    1427115 12.1GB/s  jpg      [ -1.2%]
+-      BM_UFlat/3              30461      30380     465116 2.9GB/s  pdf       [ +0.8%]
+-      BM_UFlat/4             301445     300568      46512 1.3GB/s  html4     [ +2.2%]
+-      BM_UFlat/5              29338      29263     479452 801.8MB/s  cp      [ +1.6%]
+-      BM_UFlat/6              13004      12970    1000000 819.9MB/s  c       [ +2.1%]
+-      BM_UFlat/7               4180       4168    3349282 851.4MB/s  lsp     [ +1.3%]
+-      BM_UFlat/8            1026149    1024000      10000 959.0MB/s  xls     [+10.7%]
+-      BM_UFlat/9             237441     236830      59072 612.4MB/s  txt1    [ +0.3%]
+-      BM_UFlat/10            203966     203298      69307 587.2MB/s  txt2    [ +0.8%]
+-      BM_UFlat/11            627230     625000      22400 651.2MB/s  txt3    [ +0.7%]
+-      BM_UFlat/12            836188     833979      16787 551.0MB/s  txt4    [ +1.3%]
+-      BM_UFlat/13            351904     350750      39886 1.4GB/s  bin       [ +3.8%]
+-      BM_UFlat/14             45685      45562     308370 800.4MB/s  sum     [ +5.9%]
+-      BM_UFlat/15              5286       5270    2656546 764.9MB/s  man     [ +1.5%]
+-      BM_UFlat/16             78774      78544     178117 1.4GB/s  pb        [ +4.3%]
+-      BM_UFlat/17            242270     241345      58091 728.3MB/s  gaviota [ +1.2%]
+-      BM_UValidate/0          42149      42000     333333 2.3GB/s  html      [ -3.0%]
+-      BM_UValidate/1         432741     431303      32483 1.5GB/s  urls      [ +7.8%]
+-      BM_UValidate/2            198        197   71428571 600.7GB/s  jpg     [+16.8%]
+-      BM_UValidate/3          14560      14521     965517 6.1GB/s  pdf       [ -4.1%]
+-      BM_UValidate/4         169065     168671      83832 2.3GB/s  html4     [ -2.9%]
+-    
+-    R=jeff
+-    
+-    Revision created by MOE tool push_codebase.
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@41 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 8efa2639e885ac467e7b11c662975c5844019fb9
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Jun 2 22:57:41 2011 +0000
+-
+-    Release Snappy 1.0.3.
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@40 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 2e12124bd87f39296709decc65195fa5bfced538
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Jun 2 18:06:54 2011 +0000
+-
+-    Remove an unneeded goto in the decompressor; it turns out that the
+-    state of ip_ after decompression (or attempted decompression) is
+-    completely irrelevant, so we don't need the trailer.
+-    
+-    Performance is, as expected, mostly flat -- there's a curious ~3-5%
+-    loss in the "lsp" test, but that test case is so short it is hard to say
+-    anything definitive about why (most likely, it's some sort of
+-    unrelated effect).
+-    
+-    R=jeff
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@39 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit c266bbf32103f8ed4a83e2272ed3d8828d5b8b34
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Jun 2 17:59:40 2011 +0000
+-
+-    Speed up decompression by caching ip_.
+-    
+-    It is seemingly hard for the compiler to understand that ip_, the current input
+-    pointer into the compressed data stream, can not alias on anything else, and
+-    thus using it directly will incur memory traffic as it cannot be kept in a
+-    register. The code already knew about this and cached it into a local
+-    variable, but since Step() only decoded one tag, it had to move ip_ back into
+-    place between every tag. This seems to have cost us a significant amount of
+-    performance, so Step() was changed into a function that decodes as much as it can
+-    before it saves ip_ back and returns. (Note that Step() was already inlined,
+-    so it is not the manual inlining that buys the performance here.)
+-    
+-    The wins are about 3-6% for Core 2, 6-13% on Core i7 and 5-12% on Opteron
+-    (for plain array-to-array decompression, in 64-bit opt mode).
+-    
+-    There is a tiny difference in the behavior here; if an invalid literal is
+-    encountered (ie., the writer refuses the Append() operation), ip_ will now
+-    point to the byte past the tag byte, instead of where the literal was
+-    originally thought to end. However, we don't use ip_ for anything after
+-    DecompressAllTags() has returned, so this should not change external behavior
+-    in any way.
+-    
+-    Microbenchmark results for Core i7, 64-bit (Opteron results are similar):
+-    
+-    Benchmark            Time(ns)    CPU(ns) Iterations
+-    ---------------------------------------------------
+-    BM_UFlat/0              79134      79110       8835 1.2GB/s  html      [ +6.2%]
+-    BM_UFlat/1             786126     786096        891 851.8MB/s  urls    [+10.0%]
+-    BM_UFlat/2               9948       9948      69125 11.9GB/s  jpg      [ -1.3%]
+-    BM_UFlat/3              31999      31998      21898 2.7GB/s  pdf       [ +6.5%]
+-    BM_UFlat/4             318909     318829       2204 1.2GB/s  html4     [ +6.5%]
+-    BM_UFlat/5              31384      31390      22363 747.5MB/s  cp      [ +9.2%]
+-    BM_UFlat/6              14037      14034      49858 757.7MB/s  c       [+10.6%]
+-    BM_UFlat/7               4612       4612     151395 769.5MB/s  lsp     [ +9.5%]
+-    BM_UFlat/8            1203174    1203007        582 816.3MB/s  xls     [+19.3%]
+-    BM_UFlat/9             253869     253955       2757 571.1MB/s  txt1    [+11.4%]
+-    BM_UFlat/10            219292     219290       3194 544.4MB/s  txt2    [+12.1%]
+-    BM_UFlat/11            672135     672131       1000 605.5MB/s  txt3    [+11.2%]
+-    BM_UFlat/12            902512     902492        776 509.2MB/s  txt4    [+12.5%]
+-    BM_UFlat/13            372110     371998       1881 1.3GB/s  bin       [ +5.8%]
+-    BM_UFlat/14             50407      50407      10000 723.5MB/s  sum     [+13.5%]
+-    BM_UFlat/15              5699       5701     100000 707.2MB/s  man     [+12.4%]
+-    BM_UFlat/16             83448      83424       8383 1.3GB/s  pb        [ +5.7%]
+-    BM_UFlat/17            256958     256963       2723 684.1MB/s  gaviota [ +7.9%]
+-    BM_UValidate/0          42795      42796      16351 2.2GB/s  html      [+25.8%]
+-    BM_UValidate/1         490672     490622       1427 1.3GB/s  urls      [+22.7%]
+-    BM_UValidate/2            237        237    2950297 499.0GB/s  jpg     [+24.9%]
+-    BM_UValidate/3          14610      14611      47901 6.0GB/s  pdf       [+26.8%]
+-    BM_UValidate/4         171973     171990       4071 2.2GB/s  html4     [+25.7%]
+-    
+-    
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@38 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
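The pointer-caching pattern described in the commit above, reduced to its essentials (class and member names here are placeholders, not snappy's actual internals): copy the member into a local at loop entry, work only on the local, and store it back once at the end so the compiler can keep it in a register.

```cpp
// Illustrative sketch of caching a member pointer across a decode loop.
class Decoder {
 public:
  void DecodeAllTags() {
    const char* ip = ip_;          // local copy: no aliasing concerns in the loop
    while (ip < ip_limit_) {
      ip = DecodeOneTag(ip);       // advances past one tag
    }
    ip_ = ip;                      // write the cached pointer back exactly once
  }

 private:
  // Stand-in for real tag decoding; here it just consumes one byte.
  const char* DecodeOneTag(const char* ip) { return ip + 1; }

  const char* ip_ = nullptr;       // current position in the compressed input
  const char* ip_limit_ = nullptr; // end of the compressed input
};
```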
+-commit d0ee043bc50c62c5b5ff3da044f0b5567257407d
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue May 17 08:48:25 2011 +0000
+-
+-    Fix the numbering of the headlines in the Snappy format description.
+-    
+-    R=csilvers
+-    DELTA=4  (0 added, 0 deleted, 4 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1906
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@37 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 6c7053871fbdb459c9c14287a138d7f82d6d84a1
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Mon May 16 08:59:18 2011 +0000
+-
+-    Fix public issue #32: Add compressed format documentation for Snappy.
+-    This text is new, but an earlier version from Zeev Tarantov was used
+-    as reference.
+-    
+-    R=csilvers
+-    DELTA=112  (111 added, 0 deleted, 1 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1867
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@36 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit a1f9f9973d127992f341d442969c86fd9a0847c9
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Mon May 9 21:29:02 2011 +0000
+-
+-    Fix public issue #39: Pick out the median runs based on CPU time,
+-    not real time. Also, use nth_element instead of sort, since we
+-    only need one element.
+-    
+-    R=csilvers
+-    DELTA=5  (3 added, 0 deleted, 2 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1799
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@35 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
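Selecting the median run with std::nth_element, as the commit above suggests, only needs a partial ordering. A minimal sketch (the function name and the choice of the upper median for even counts are assumptions of this example):

```cpp
#include <algorithm>
#include <vector>

// Median of the per-run CPU times. std::nth_element places the mid-th element
// where a full sort would put it, in O(n) average time, which is all we need.
double MedianCpuTime(std::vector<double> cpu_times) {
  if (cpu_times.empty()) return 0.0;
  const size_t mid = cpu_times.size() / 2;
  std::nth_element(cpu_times.begin(), cpu_times.begin() + mid, cpu_times.end());
  return cpu_times[mid];
}
```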
+-commit f7b105683c074cdf233740089e245e43f63e7e55
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Mon May 9 21:28:45 2011 +0000
+-
+-    Fix public issue #38: Make the microbenchmark framework handle
+-    properly cases where gettimeofday() can return the same
+-    result twice (as sometimes on GNU/Hurd) or go backwards
+-    (as when the user adjusts the clock). We avoid a division-by-zero,
+-    and put a lower bound on the number of iterations -- the same
+-    amount as we use to calibrate.
+-    
+-    We should probably use CLOCK_MONOTONIC for platforms that support
+-    it, to be robust against clock adjustments; we already use Windows'
+-    monotonic timers. However, that's for a later changelist.
+-    
+-    R=csilvers
+-    DELTA=7  (5 added, 0 deleted, 2 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1798
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@34 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit d8d481427a05b88cdb0810c29bf400153595c423
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue May 3 23:22:52 2011 +0000
+-
+-    Fix public issue #37: Only link snappy_unittest against -lz and other autodetected
+-    libraries, not libsnappy.so (which doesn't need any such dependency).
+-    
+-    R=csilvers
+-    DELTA=20  (14 added, 0 deleted, 6 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1710
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@33 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit bcecf195c0aeb2c98144d3d54b4d8d228774f50d
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue May 3 23:22:33 2011 +0000
+-
+-    Release Snappy 1.0.2, to get the license change and various other fixes into
+-    a release.
+-    
+-    R=csilvers
+-    DELTA=239  (236 added, 0 deleted, 3 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1709
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@32 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 84d9f642025cda672dda0d94a8008f094500aaa6
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Apr 26 12:34:55 2011 +0000
+-
+-    Fix public issue #30: Stop using gettimeofday() altogether on Win32,
+-    as MSVC doesn't include it. Replace with QueryPerformanceCounter(),
+-    which is monotonic and probably reasonably high-resolution.
+-    (Some machines have traditionally had bugs in QPC, but they should
+-    be relatively rare these days, and there's really not a much better
+-    alternative that I know of.)
+-    
+-    R=csilvers
+-    DELTA=74  (55 added, 19 deleted, 0 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1556
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@31 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
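A monotonic wall-clock reading via QueryPerformanceCounter(), the replacement named in the commit above, looks roughly like this (the function name is illustrative; the benchmark framework's real code differs):

```cpp
#ifdef _WIN32
#include <windows.h>

// Seconds since an arbitrary fixed point, monotonic, high resolution.
double MonotonicSeconds() {
  LARGE_INTEGER frequency, counter;
  QueryPerformanceFrequency(&frequency);  // ticks per second
  QueryPerformanceCounter(&counter);      // current tick count
  return static_cast<double>(counter.QuadPart) /
         static_cast<double>(frequency.QuadPart);
}
#endif
```

Elapsed time for a benchmark run is then the difference between two such readings.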
+-commit 3d8e71df8d30f980d71d4c784ebfc5ff62d5b0cb
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Tue Apr 26 12:34:37 2011 +0000
+-
+-    Fix public issue #31: Don't reset PATH in autogen.sh; instead, do the trickery
+-    we need for our own build system internally.
+-    
+-    R=csilvers
+-    DELTA=16  (13 added, 1 deleted, 2 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1555
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@30 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 73987351de54c88e2fc3f5dcdeceb47708df3585
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Apr 15 22:55:56 2011 +0000
+-
+-    When including <windows.h>, define WIN32_LEAN_AND_MEAN first,
+-    so we won't pull in macro definitions of things like min() and max(),
+-    which can conflict with <algorithm>.
+-    
+-    R=csilvers
+-    DELTA=1  (1 added, 0 deleted, 0 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1485
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@29 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit fb7e0eade471a20b009720a84fea0af1552791d5
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Mon Apr 11 09:07:01 2011 +0000
+-
+-    Fix public issue #29: Write CPU timing code for Windows, based on GetProcessTimes()
+-    instead of getrusage().
+-    
+-    I thought I'd already committed this patch, so that the 1.0.1 release already
+-    would have a Windows-compatible snappy_unittest, but I'd seemingly deleted it
+-    instead, so this is a reconstruction.
+-    
+-    R=csilvers
+-    DELTA=43  (39 added, 3 deleted, 1 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1295
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@28 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
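CPU time on Windows via GetProcessTimes(), as referenced in the commit above, can be sketched like this; it sums kernel and user time, the rough counterpart of getrusage()'s ru_stime + ru_utime (the function name is illustrative):

```cpp
#ifdef _WIN32
#include <windows.h>

// Process CPU time (user + kernel) in seconds. FILETIME counts 100 ns units.
double ProcessCpuSeconds() {
  FILETIME creation, exit_time, kernel, user;
  if (!GetProcessTimes(GetCurrentProcess(), &creation, &exit_time,
                       &kernel, &user)) {
    return 0.0;
  }
  ULARGE_INTEGER k, u;
  k.LowPart = kernel.dwLowDateTime;  k.HighPart = kernel.dwHighDateTime;
  u.LowPart = user.dwLowDateTime;    u.HighPart = user.dwHighDateTime;
  return static_cast<double>(k.QuadPart + u.QuadPart) * 1e-7;
}
#endif
```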
+-commit c67fa0c755a329000da5546fff79089d62ac2f82
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Apr 8 09:51:53 2011 +0000
+-
+-    Include C bindings of Snappy, contributed by Martin Gieseking.
+-    
+-    I've made a few changes since Martin's version; mostly style nits, but also
+-    a semantic change -- most functions that return bool in the C++ version now
+-    return an enum, to better match typical C (and zlib) semantics.
+-    
+-    I've kept the copyright notice, since Martin is obviously the author here;
+-    he has signed the contributor license agreement, though, so this should not
+-    hinder Google's use in the future.
+-    
+-    We'll need to update the libtool version number to match the added interface,
+-    but as of http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
+-    I'm going to wait until public release.
+-    
+-    R=csilvers
+-    DELTA=238  (233 added, 0 deleted, 5 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1294
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@27 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 56be85cb9ae06f2e92180ae2575bdd10c012ab73
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Apr 7 16:36:43 2011 +0000
+-
+-    Replace geo.protodata with a newer version.
+-    
+-    The data compresses/decompresses slightly faster than the old data, and has
+-    similar density.
+-    
+-    R=lookingbill
+-    DELTA=1  (0 added, 0 deleted, 1 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1288
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@26 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 3dd93f3ec74df54a37f68bffabb058ac757bbe72
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 30 20:27:53 2011 +0000
+-
+-    Fix public issue #27: Add HAVE_CONFIG_H tests around the config.h
+-    inclusion in snappy-stubs-internal.h, which eases compiling outside the
+-    automake/autoconf framework.
+-    
+-    R=csilvers
+-    DELTA=5  (4 added, 1 deleted, 0 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1152
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@25 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit f67bcaa61006da8b325a7ed9909a782590971815
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 30 20:27:39 2011 +0000
+-
+-    Fix public issue #26: Take memory allocation and reallocation entirely out of the
+-    Measure() loop. This gives all algorithms a small speed boost, except Snappy which
+-    already didn't do reallocation (so the measurements were slightly biased in its
+-    favor).
+-    
+-    R=csilvers
+-    DELTA=92  (69 added, 9 deleted, 14 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1151
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@24 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit cc333c1c5cc4eabceceb9848ff3cac6c604ecbc6
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 30 20:25:09 2011 +0000
+-
+-    Renamed "namespace zippy" to "namespace snappy" to reduce
+-    the differences from the opensource code.  Will make it easier
+-    in the future to mix-and-match third-party code that uses
+-    snappy with google code.
+-    
+-    Currently, csearch shows that the only external user of
+-    "namespace zippy" is some bigtable code that accesses
+-    a TEST variable, which is temporarily kept in the zippy
+-    namespace.
+-    
+-    R=sesse
+-    DELTA=123  (18 added, 3 deleted, 102 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1150
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@23 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit f19fb07e6dc79d6857e37df572dba25ff30fc8f3
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Mon Mar 28 22:17:04 2011 +0000
+-
+-    Put back the final few lines of what was truncated during the
+-    license header change.
+-    
+-    R=csilvers
+-    DELTA=5  (4 added, 0 deleted, 1 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1094
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@22 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 7e8ca8f8315fc2ecb4eea19db695039ab2ca43a0
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Sat Mar 26 02:34:34 2011 +0000
+-
+-    Change on 2011-03-25 19:18:00-07:00 by sesse
+-    
+-    	Replace the Apache 2.0 license header by the BSD-type license header;
+-    	somehow a lot of the files were missed in the last round.
+-    
+-    	R=dannyb,csilvers
+-    	DELTA=147  (74 added, 2 deleted, 71 changed)
+-    
+-    Change on 2011-03-25 19:25:07-07:00 by sesse
+-    
+-    	Unbreak the build; the relicensing removed a bit too much (only comments
+-    	were intended, but I also accidentally removed some of the top lines of
+-    	the actual source).
+-    
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1072
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@21 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit b4bbc1041b35d844ec26fbae25f2864995361fd8
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Mar 25 16:14:41 2011 +0000
+-
+-    Change Snappy from the Apache 2.0 to a BSD-type license.
+-    
+-    R=dannyb
+-    DELTA=328  (80 added, 184 deleted, 64 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1061
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@20 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit c47640c510eb11cf8913edfa34f667bceb3a4401
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Mar 25 00:39:01 2011 +0000
+-
+-    Release Snappy 1.0.1, to soup up all the various small changes
+-    that have been made since release.
+-    
+-    R=csilvers
+-    DELTA=266  (260 added, 0 deleted, 6 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1057
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@19 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit b1dc1f643eaff897a5ce135f525799b99687b118
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Mar 24 19:15:54 2011 +0000
+-
+-    Fix a microbenchmark crash on mingw32; seemingly %lld is not universally
+-    supported on Windows, and %I64d is recommended instead.
+-    
+-    R=csilvers
+-    DELTA=6  (5 added, 0 deleted, 1 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1034
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@18 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 98004ca9afc62a3279dfe9d9a359083f61db437f
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Mar 24 19:15:27 2011 +0000
+-
+-    Fix public issue #19: Fix unit test when Google Test is installed but the
+-    gflags package isn't (Google Test is not properly initialized).
+-    
+-    Patch by Martin Gieseking.
+-    
+-    R=csilvers
+-    DELTA=2  (1 added, 0 deleted, 1 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1033
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@17 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 444a6c5f72d6f8d8f7213a5bcc08b26606eb9934
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Mar 24 19:13:57 2011 +0000
+-
+-    Make the unit test work on systems without mmap(). This is required for,
+-    among others, Windows support. For Windows in specific, we could have used
+-    CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
+-    to compiling, and is of course also relevant for embedded systems with no MMU.
+-    
+-    (Part 2/2)
+-    
+-    R=csilvers
+-    DELTA=15  (12 added, 3 deleted, 0 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1032
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@16 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 2e182e9bb840737f9cd8817e859dc17a82f2c16b
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Thu Mar 24 19:12:27 2011 +0000
+-
+-    Make the unit test work on systems without mmap(). This is required for,
+-    among others, Windows support. For Windows in specific, we could have used
+-    CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
+-    to compiling, and is of course also relevant for embedded systems with no MMU.
+-    
+-    (Part 1/2)
+-    
+-    R=csilvers
+-    DELTA=9  (8 added, 0 deleted, 1 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1031
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@15 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 48662cbb7f81533977334629790d346220084527
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 23:17:36 2011 +0000
+-
+-    Fix public issue #12: Don't keep autogenerated auto* files in Subversion;
+-    it causes problems with others sending patches etc..
+-    
+-    We can't get this 100% hermetic anyhow, due to files like lt~obsolete.m4,
+-    so we can just as well go cleanly in the other direction.
+-    
+-    R=csilvers
+-    DELTA=21038  (0 added, 21036 deleted, 2 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=1012
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@14 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 9e4717a586149c9538b353400312bab5ab5458c4
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 17:50:49 2011 +0000
+-
+-    Fix public issue tracker bug #3: Call AC_SUBST([LIBTOOL_DEPS]), or the rule
+-    to rebuild libtool in Makefile.am won't work.
+-    
+-    R=csilvers
+-    DELTA=1  (1 added, 0 deleted, 0 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=997
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@13 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 519c822a34a91a0c0eb32d98e9686ee7d9cd6651
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 11:16:39 2011 +0000
+-
+-    Fix public issue #10: Don't add GTEST_CPPFLAGS to snappy_unittest_CXXFLAGS;
+-    it's not needed (CPPFLAGS are always included when compiling).
+-    
+-    R=csilvers
+-    DELTA=1  (0 added, 1 deleted, 0 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=994
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@12 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit ea6b936378583cba730c33c8a53776edc1782208
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 11:16:18 2011 +0000
+-
+-    Fix public issue #9: Add -Wall -Werror to automake flags.
+-    (This concerns automake itself, not the C++ compiler.)
+-    
+-    R=csilvers
+-    DELTA=4  (3 added, 0 deleted, 1 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=993
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@11 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit e3ca06af253094b1c3a8eae508cd97accf077535
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 11:13:37 2011 +0000
+-
+-    Fix a typo in the Snappy README file.
+-    
+-    R=csilvers
+-    DELTA=1  (0 added, 0 deleted, 1 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=992
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@10 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 39d27bea23873abaa663e884261386b17b058f20
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 11:13:13 2011 +0000
+-
+-    Fix public issue #6: Add a --with-gflags for disabling gflags autodetection
+-    and using a manually given setting (use/don't use) instead.
+-    
+-    R=csilvers
+-    DELTA=16  (13 added, 0 deleted, 3 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=991
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@9 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 60add43d99c1c31aeecd895cb555ad6f6520608e
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 11:12:44 2011 +0000
+-
+-    Fix public issue #5: Replace the EXTRA_LIBSNAPPY_LDFLAGS setup with something
+-    slightly more standard, that also doesn't leak libtool command-line into
+-    configure.ac.
+-    
+-    R=csilvers
+-    DELTA=7  (0 added, 4 deleted, 3 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=990
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@8 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit a8dd1700879ad646106742aa0e9c3a48dc07b01d
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 11:12:22 2011 +0000
+-
+-    Fix public issue #4: Properly quote all macro arguments in configure.ac.
+-    
+-    R=csilvers
+-    DELTA=16  (0 added, 0 deleted, 16 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=989
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@7 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 79752dd7033658e28dc894de55012bdf2c9afca3
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 11:11:54 2011 +0000
+-
+-    Fix public issue #7: Don't use internal variables named ac_*, as those belong
+-    to autoconf's namespace.
+-    
+-    R=csilvers
+-    DELTA=6  (0 added, 0 deleted, 6 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=988
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@6 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 46e39fb20c297129494b969ac4ea64fcd04b4fa0
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 11:11:09 2011 +0000
+-
+-    Add missing licensing headers to a few files. (Part 2/2.)
+-    
+-    R=csilvers
+-    DELTA=12  (12 added, 0 deleted, 0 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=987
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@5 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 3e764216fc8edaafca480443b90e55c14eaae2c2
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 11:10:39 2011 +0000
+-
+-    Add mising licensing headers to a few files. (Part 1/2.)
+-    
+-    R=csilvers
+-    DELTA=24  (24 added, 0 deleted, 0 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=986
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@4 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 9a59f183c8ffec62dcdabd3499d0d515e44e4ef0
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Wed Mar 23 11:10:04 2011 +0000
+-
+-    Use the correct license file for the Apache 2.0 license;
+-    spotted by Florian Weimer.
+-    
+-    R=csilvers
+-    DELTA=202  (174 added, 0 deleted, 28 changed)
+-    
+-    
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=985
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@3 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 28a64402392c791905d6e1384ea1b48a5cb0b281
+-Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Mar 18 17:14:15 2011 +0000
+-
+-    Revision created by MOE tool push_codebase.
+-    MOE_MIGRATION=
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@2 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+-
+-commit 7c3c6077b72b4ae2237267a20f640b55e9a90569
+-Author: sesse@google.com <sesse@google.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+-Date:   Fri Mar 18 17:13:52 2011 +0000
+-
+-    Create trunk directory.
+-    
+-    
+-    git-svn-id: https://snappy.googlecode.com/svn/trunk@1 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+diff --git a/other-licenses/snappy/src/NEWS b/other-licenses/snappy/src/NEWS
+--- a/other-licenses/snappy/src/NEWS
++++ b/other-licenses/snappy/src/NEWS
+@@ -1,8 +1,56 @@
++Snappy v1.1.8, January 15th 2020:
++
++  * Small performance improvements.
++
++  * Removed snappy::string alias for std::string.
++
++  * Improved CMake configuration.
++
++Snappy v1.1.7, August 24th 2017:
++
++  * Improved CMake build support for 64-bit Linux distributions.
++
++  * MSVC builds now use MSVC-specific intrinsics that map to clzll.
++
++  * ARM64 (AArch64) builds use the code paths optimized for 64-bit processors.
++
++Snappy v1.1.6, July 12th 2017:
++
++This is a re-release of v1.1.5 with proper SONAME / SOVERSION values.
++
++Snappy v1.1.5, June 28th 2017:
++
++This release has broken SONAME / SOVERSION values. Users of snappy as a shared
++library should avoid 1.1.5 and use 1.1.6 instead. SONAME / SOVERSION errors will
++manifest as the dynamic library loader complaining that it cannot find snappy's
++shared library file (libsnappy.so / libsnappy.dylib), or that the library it
++found does not have the required version. 1.1.6 has the same code as 1.1.5, but
++carries build configuration fixes for the issues above.
++
++  * Add CMake build support. The autoconf build support is now deprecated, and
++    will be removed in the next release.
++
++  * Add AppVeyor configuration, for Windows CI coverage.
++
++  * Small performance improvement on little-endian PowerPC.
++
++  * Small performance improvement on LLVM with position-independent executables.
++
++  * Fix a few issues with various build environments.
++
++Snappy v1.1.4, January 25th 2017:
++
++  * Fix a 1% performance regression when snappy is used in PIE executables.
++
++  * Improve compression performance by 5%.
++
++  * Improve decompression performance by 20%.
++
+ Snappy v1.1.3, July 6th 2015:
+ 
+ This is the first release to be done from GitHub, which means that
+ some minor things like the ChangeLog format has changed (git log
+ format instead of svn log).
+ 
+   * Add support for Uncompress() from a Source to a Sink.
+ 
+diff --git a/other-licenses/snappy/src/README b/other-licenses/snappy/src/README.md
+rename from other-licenses/snappy/src/README
+rename to other-licenses/snappy/src/README.md
+--- a/other-licenses/snappy/src/README
++++ b/other-licenses/snappy/src/README.md
+@@ -29,63 +29,78 @@ and the like.
+ 
+ Performance
+ ===========
+ 
+ Snappy is intended to be fast. On a single core of a Core i7 processor
+ in 64-bit mode, it compresses at about 250 MB/sec or more and decompresses at
+ about 500 MB/sec or more. (These numbers are for the slowest inputs in our
+ benchmark suite; others are much faster.) In our tests, Snappy usually
+-is faster than algorithms in the same class (e.g. LZO, LZF, FastLZ, QuickLZ,
++is faster than algorithms in the same class (e.g. LZO, LZF, QuickLZ,
+ etc.) while achieving comparable compression ratios.
+ 
+ Typical compression ratios (based on the benchmark suite) are about 1.5-1.7x
+ for plain text, about 2-4x for HTML, and of course 1.0x for JPEGs, PNGs and
+ other already-compressed data. Similar numbers for zlib in its fastest mode
+ are 2.6-2.8x, 3-7x and 1.0x, respectively. More sophisticated algorithms are
+ capable of achieving yet higher compression rates, although usually at the
+ expense of speed. Of course, compression ratio will vary significantly with
+ the input.
+ 
+ Although Snappy should be fairly portable, it is primarily optimized
+ for 64-bit x86-compatible processors, and may run slower in other environments.
+ In particular:
+ 
+  - Snappy uses 64-bit operations in several places to process more data at
+    once than would otherwise be possible.
+- - Snappy assumes unaligned 32- and 64-bit loads and stores are cheap.
+-   On some platforms, these must be emulated with single-byte loads 
++ - Snappy assumes unaligned 32 and 64-bit loads and stores are cheap.
++   On some platforms, these must be emulated with single-byte loads
+    and stores, which is much slower.
+  - Snappy assumes little-endian throughout, and needs to byte-swap data in
+    several places if running on a big-endian platform.
+ 
+ Experience has shown that even heavily tuned code can be improved.
+ Performance optimizations, whether for 64-bit x86 or other platforms,
+ are of course most welcome; see "Contact", below.
+ 
+ 
++Building
++========
++
++You need the CMake version specified in [CMakeLists.txt](./CMakeLists.txt)
++or later to build:
++
++```bash
++mkdir build
++cd build && cmake ../ && make
++```
++
+ Usage
+ =====
+ 
+ Note that Snappy, both the implementation and the main interface,
+ is written in C++. However, several third-party bindings to other languages
+-are available; see the home page at http://google.github.io/snappy/
+-for more information. Also, if you want to use Snappy from C code, you can
+-use the included C bindings in snappy-c.h.
++are available; see the [home page](docs/README.md) for more information.
++Also, if you want to use Snappy from C code, you can use the included C
++bindings in snappy-c.h.
+ 
+ To use Snappy from your own C++ program, include the file "snappy.h" from
+ your calling file, and link against the compiled library.
+ 
+ There are many ways to call Snappy, but the simplest possible is
+ 
+-  snappy::Compress(input.data(), input.size(), &output);
++```c++
++snappy::Compress(input.data(), input.size(), &output);
++```
+ 
+ and similarly
+ 
+-  snappy::Uncompress(input.data(), input.size(), &output);
++```c++
++snappy::Uncompress(input.data(), input.size(), &output);
++```
+ 
+ where "input" and "output" are both instances of std::string.
+ 
+ There are other interfaces that are more flexible in various ways, including
+ support for custom (non-array) input sources. See the header file for more
+ information.
+ 
+ 
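As a quick illustration of the string-based interface shown in the Usage hunk above (not part of the patch itself; it assumes snappy is built and linked, and uses IsValidCompressedBuffer from snappy.h), a minimal round trip could look like this:

```c++
#include <cassert>
#include <string>

#include <snappy.h>

int main() {
  const std::string input(10000, 'a');  // highly compressible input
  std::string compressed, restored;

  snappy::Compress(input.data(), input.size(), &compressed);
  assert(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
  assert(compressed.size() < input.size());

  snappy::Uncompress(compressed.data(), compressed.size(), &restored);
  assert(restored == input);
  return 0;
}
```

Against an installed copy of the library this should build with something like `g++ roundtrip.cc -lsnappy`.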
+@@ -97,43 +112,37 @@ library itself. You do not need it to us
+ but it contains several useful components for Snappy development.
+ 
+ First of all, it contains unit tests, verifying correctness on your machine in
+ various scenarios. If you want to change or optimize Snappy, please run the
+ tests to verify you have not broken anything. Note that if you have the
+ Google Test library installed, unit test behavior (especially failures) will be
+ significantly more user-friendly. You can find Google Test at
+ 
+-  http://github.com/google/googletest
++  https://github.com/google/googletest
+ 
+ You probably also want the gflags library for handling of command-line flags;
+ you can find it at
+ 
+-  http://gflags.github.io/gflags/
++  https://gflags.github.io/gflags/
+ 
+ In addition to the unit tests, snappy contains microbenchmarks used to
+ tune compression and decompression performance. These are automatically run
+ before the unit tests, but you can disable them using the flag
+ --run_microbenchmarks=false if you have gflags installed (otherwise you will
+ need to edit the source).
+ 
+ Finally, snappy can benchmark Snappy against a few other compression libraries
+-(zlib, LZO, LZF, FastLZ and QuickLZ), if they were detected at configure time.
++(zlib, LZO, LZF, and QuickLZ), if they were detected at configure time.
+ To benchmark using a given file, give the compression algorithm you want to test
+ Snappy against (e.g. --zlib) and then a list of one or more file names on the
+ command line. The testdata/ directory contains the files used by the
+ microbenchmark, which should provide a reasonably balanced starting point for
+ benchmarking. (Note that baddata[1-3].snappy are not intended as benchmarks; they
+ are used to verify correctness in the presence of corrupted data in the unit
+ test.)
+ 
+ 
+ Contact
+ =======
+ 
+ Snappy is distributed through GitHub. For the latest version, a bug tracker,
+-and other information, see
+-
+-  http://google.github.io/snappy/
+-
+-or the repository at
+-
+-  https://github.com/google/snappy
++and other information, see https://github.com/google/snappy.
+diff --git a/other-licenses/snappy/src/snappy-internal.h b/other-licenses/snappy/src/snappy-internal.h
+--- a/other-licenses/snappy/src/snappy-internal.h
++++ b/other-licenses/snappy/src/snappy-internal.h
+@@ -31,31 +31,40 @@
+ #ifndef THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
+ #define THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
+ 
+ #include "snappy-stubs-internal.h"
+ 
+ namespace snappy {
+ namespace internal {
+ 
++// Working memory performs a single allocation to hold all scratch space
++// required for compression.
+ class WorkingMemory {
+  public:
+-  WorkingMemory() : large_table_(NULL) { }
+-  ~WorkingMemory() { delete[] large_table_; }
++  explicit WorkingMemory(size_t input_size);
++  ~WorkingMemory();
+ 
+   // Allocates and clears a hash table using memory in "*this",
+   // stores the number of buckets in "*table_size" and returns a pointer to
+   // the base of the hash table.
+-  uint16* GetHashTable(size_t input_size, int* table_size);
++  uint16* GetHashTable(size_t fragment_size, int* table_size) const;
++  char* GetScratchInput() const { return input_; }
++  char* GetScratchOutput() const { return output_; }
+ 
+  private:
+-  uint16 small_table_[1<<10];    // 2KB
+-  uint16* large_table_;          // Allocated only when needed
++  char* mem_;      // the allocated memory, never nullptr
++  size_t size_;    // the size of the allocated memory, never 0
++  uint16* table_;  // the pointer to the hashtable
++  char* input_;    // the pointer to the input scratch buffer
++  char* output_;   // the pointer to the output scratch buffer
+ 
+-  DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
++  // No copying
++  WorkingMemory(const WorkingMemory&);
++  void operator=(const WorkingMemory&);
+ };
+ 
+ // Flat array compression that does not emit the "uncompressed length"
+ // prefix. Compresses "input" string to the "*op" buffer.
+ //
+ // REQUIRES: "input_length <= kBlockSize"
+ // REQUIRES: "op" points to an array of memory that is at least
+ // "MaxCompressedLength(input_length)" in size.
+@@ -65,67 +74,82 @@ class WorkingMemory {
+ // Returns an "end" pointer into "op" buffer.
+ // "end - op" is the compressed size of "input".
+ char* CompressFragment(const char* input,
+                        size_t input_length,
+                        char* op,
+                        uint16* table,
+                        const int table_size);
+ 
+-// Return the largest n such that
++// Find the largest n such that
+ //
+ //   s1[0,n-1] == s2[0,n-1]
+ //   and n <= (s2_limit - s2).
+ //
++// Return make_pair(n, n < 8).
+ // Does not read *s2_limit or beyond.
+ // Does not read *(s1 + (s2_limit - s2)) or beyond.
+ // Requires that s2_limit >= s2.
+ //
+-// Separate implementation for x86_64, for speed.  Uses the fact that
+-// x86_64 is little endian.
+-#if defined(ARCH_K8)
+-static inline int FindMatchLength(const char* s1,
+-                                  const char* s2,
+-                                  const char* s2_limit) {
++// Separate implementation for 64-bit, little-endian cpus.
++#if !defined(SNAPPY_IS_BIG_ENDIAN) && \
++    (defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM))
++static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
++                                                      const char* s2,
++                                                      const char* s2_limit) {
+   assert(s2_limit >= s2);
+-  int matched = 0;
++  size_t matched = 0;
++
++  // This block isn't necessary for correctness; we could just start looping
++  // immediately.  As an optimization though, it is useful.  It creates some not
++  // uncommon code paths that determine, without extra effort, whether the match
++  // length is less than 8.  In short, we are hoping to avoid a conditional
++  // branch, and perhaps get better code layout from the C++ compiler.
++  if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
++    uint64 a1 = UNALIGNED_LOAD64(s1);
++    uint64 a2 = UNALIGNED_LOAD64(s2);
++    if (a1 != a2) {
++      return std::pair<size_t, bool>(Bits::FindLSBSetNonZero64(a1 ^ a2) >> 3,
++                                     true);
++    } else {
++      matched = 8;
++      s2 += 8;
++    }
++  }
+ 
+   // Find out how long the match is. We loop over the data 64 bits at a
+   // time until we find a 64-bit block that doesn't match; then we find
+   // the first non-matching bit and use that to calculate the total
+   // length of the match.
+-  while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
++  while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
+     if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
+       s2 += 8;
+       matched += 8;
+     } else {
+-      // On current (mid-2008) Opteron models there is a 3% more
+-      // efficient code sequence to find the first non-matching byte.
+-      // However, what follows is ~10% better on Intel Core 2 and newer,
+-      // and we expect AMD's bsf instruction to improve.
+       uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
+       int matching_bits = Bits::FindLSBSetNonZero64(x);
+       matched += matching_bits >> 3;
+-      return matched;
++      assert(matched >= 8);
++      return std::pair<size_t, bool>(matched, false);
+     }
+   }
+-  while (PREDICT_TRUE(s2 < s2_limit)) {
++  while (SNAPPY_PREDICT_TRUE(s2 < s2_limit)) {
+     if (s1[matched] == *s2) {
+       ++s2;
+       ++matched;
+     } else {
+-      return matched;
++      return std::pair<size_t, bool>(matched, matched < 8);
+     }
+   }
+-  return matched;
++  return std::pair<size_t, bool>(matched, matched < 8);
+ }
+ #else
+-static inline int FindMatchLength(const char* s1,
+-                                  const char* s2,
+-                                  const char* s2_limit) {
++static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
++                                                      const char* s2,
++                                                      const char* s2_limit) {
+   // Implementation based on the x86-64 version, above.
+   assert(s2_limit >= s2);
+   int matched = 0;
+ 
+   while (s2 <= s2_limit - 4 &&
+          UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
+     s2 += 4;
+     matched += 4;
+@@ -135,36 +159,31 @@ static inline int FindMatchLength(const 
+     int matching_bits = Bits::FindLSBSetNonZero(x);
+     matched += matching_bits >> 3;
+   } else {
+     while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+       ++s2;
+       ++matched;
+     }
+   }
+-  return matched;
++  return std::pair<size_t, bool>(matched, matched < 8);
+ }
+ #endif
+ 
+ // Lookup tables for decompression code.  Give --snappy_dump_decompression_table
+ // to the unit test to recompute char_table.
+ 
+ enum {
+   LITERAL = 0,
+   COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
+   COPY_2_BYTE_OFFSET = 2,
+   COPY_4_BYTE_OFFSET = 3
+ };
+ static const int kMaximumTagLength = 5;  // COPY_4_BYTE_OFFSET plus the actual offset.
+ 
+-// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
+-static const uint32 wordmask[] = {
+-  0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
+-};
+-
+ // Data stored per entry in lookup table:
+ //      Range   Bits-used       Description
+ //      ------------------------------------
+ //      1..64   0..7            Literal/copy length encoded in opcode byte
+ //      0..7    8..10           Copy offset encoded in opcode byte / 256
+ //      0..4    11..13          Extra bytes after opcode
+ //
+ // We use eight bits for the length even though 7 would have sufficed
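The FindMatchLength rewrite above returns both the match length and a "shorter than 8 bytes" flag, locating the first differing byte by XOR-ing eight bytes at a time and counting trailing zero bits. A standalone sketch of the same idea, assuming a little-endian host and the GCC/Clang __builtin_ctzll builtin (illustrative only, not the patched snappy code):

```c++
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

// Compare 8 bytes at a time; the XOR of two mismatching words has its lowest
// set bit inside the first differing byte, so ctz(x) >> 3 counts the bytes
// that still match (little-endian assumption).
static std::pair<size_t, bool> MatchLength(const char* s1, const char* s2,
                                           const char* s2_limit) {
  size_t matched = 0;
  while (s2_limit - s2 >= 8) {
    std::uint64_t a, b;
    std::memcpy(&a, s1 + matched, 8);
    std::memcpy(&b, s2, 8);
    if (a != b) {
      matched += __builtin_ctzll(a ^ b) >> 3;
      return {matched, matched < 8};
    }
    matched += 8;
    s2 += 8;
  }
  while (s2 < s2_limit && s1[matched] == *s2) {
    ++matched;
    ++s2;
  }
  return {matched, matched < 8};
}

int main() {
  const char* a = "abcdefgh1234WXYZ";
  const char* b = "abcdefgh1234wxyz";
  std::pair<size_t, bool> r = MatchLength(a, b, b + 16);
  assert(r.first == 12 && !r.second);  // 12 matching bytes, not a short match
  return 0;
}
```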
+diff --git a/other-licenses/snappy/src/snappy-stubs-internal.cc b/other-licenses/snappy/src/snappy-stubs-internal.cc
+--- a/other-licenses/snappy/src/snappy-stubs-internal.cc
++++ b/other-licenses/snappy/src/snappy-stubs-internal.cc
+@@ -28,15 +28,15 @@
+ 
+ #include <algorithm>
+ #include <string>
+ 
+ #include "snappy-stubs-internal.h"
+ 
+ namespace snappy {
+ 
+-void Varint::Append32(string* s, uint32 value) {
++void Varint::Append32(std::string* s, uint32 value) {
+   char buf[Varint::kMax32];
+   const char* p = Varint::Encode32(buf, value);
+   s->append(buf, p - buf);
+ }
+ 
+ }  // namespace snappy
+diff --git a/other-licenses/snappy/src/snappy-stubs-internal.h b/other-licenses/snappy/src/snappy-stubs-internal.h
+--- a/other-licenses/snappy/src/snappy-stubs-internal.h
++++ b/other-licenses/snappy/src/snappy-stubs-internal.h
+@@ -40,49 +40,73 @@
+ #include <assert.h>
+ #include <stdlib.h>
+ #include <string.h>
+ 
+ #ifdef HAVE_SYS_MMAN_H
+ #include <sys/mman.h>
+ #endif
+ 
++#ifdef HAVE_UNISTD_H
++#include <unistd.h>
++#endif
++
++#if defined(_MSC_VER)
++#include <intrin.h>
++#endif  // defined(_MSC_VER)
++
++#ifndef __has_feature
++#define __has_feature(x) 0
++#endif
++
++#if __has_feature(memory_sanitizer)
++#include <sanitizer/msan_interface.h>
++#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
++    __msan_unpoison((address), (size))
++#else
++#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */
++#endif  // __has_feature(memory_sanitizer)
++
+ #include "snappy-stubs-public.h"
+ 
+ #if defined(__x86_64__)
+ 
+ // Enable 64-bit optimized versions of some routines.
+ #define ARCH_K8 1
+ 
++#elif defined(__ppc64__)
++
++#define ARCH_PPC 1
++
++#elif defined(__aarch64__)
++
++#define ARCH_ARM 1
++
+ #endif
+ 
+ // Needed by OS X, among others.
+ #ifndef MAP_ANONYMOUS
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif
+ 
+-// Pull in std::min, std::ostream, and the likes. This is safe because this
+-// header file is never used from any public header files.
+-using namespace std;
+-
+ // The size of an array, if known at compile-time.
+ // Will give unexpected results if used on a pointer.
+ // We undefine it first, since some compilers already have a definition.
+ #ifdef ARRAYSIZE
+ #undef ARRAYSIZE
+ #endif
+ #define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+ 
+ // Static prediction hints.
+ #ifdef HAVE_BUILTIN_EXPECT
+-#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
+-#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
++#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
++#define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+ #else
+-#define PREDICT_FALSE(x) x
+-#define PREDICT_TRUE(x) x
++#define SNAPPY_PREDICT_FALSE(x) x
++#define SNAPPY_PREDICT_TRUE(x) x
+ #endif
+ 
+ // This is only used for recomputing the tag byte table used during
+ // decompression; for simplicity we just remove it from the open-source
+ // version (anyone who wants to regenerate it can just do the call
+ // themselves within main()).
+ #define DEFINE_bool(flag_name, default_value, description) \
+   bool FLAGS_ ## flag_name = default_value
+@@ -91,19 +115,20 @@ using namespace std;
+ 
+ namespace snappy {
+ 
+ static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
+ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
+ 
+ // Potentially unaligned loads and stores.
+ 
+-// x86 and PowerPC can simply do these loads and stores native.
++// x86, PowerPC, and ARM64 can simply do these loads and stores native.
+ 
+-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
++#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
++    defined(__aarch64__)
+ 
+ #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+ #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+ #define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+ 
+ #define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+ #define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+ #define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+@@ -169,17 +194,17 @@ struct Unaligned32Struct {
+ 
+ #define UNALIGNED_STORE16(_p, _val) \
+     ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
+          (_val))
+ #define UNALIGNED_STORE32(_p, _val) \
+     ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
+          (_val))
+ 
+-// TODO(user): NEON supports unaligned 64-bit loads and stores.
++// TODO: NEON supports unaligned 64-bit loads and stores.
+ // See if that would be more efficient on platforms supporting it,
+ // at least for copies.
+ 
+ inline uint64 UNALIGNED_LOAD64(const void *p) {
+   uint64 t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+@@ -220,32 +245,18 @@ inline void UNALIGNED_STORE32(void *p, u
+ }
+ 
+ inline void UNALIGNED_STORE64(void *p, uint64 v) {
+   memcpy(p, &v, sizeof v);
+ }
+ 
+ #endif
+ 
+-// This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
+-// on some platforms, in particular ARM.
+-inline void UnalignedCopy64(const void *src, void *dst) {
+-  if (sizeof(void *) == 8) {
+-    UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
+-  } else {
+-    const char *src_char = reinterpret_cast<const char *>(src);
+-    char *dst_char = reinterpret_cast<char *>(dst);
+-
+-    UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
+-    UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
+-  }
+-}
+-
+ // The following guarantees declaration of the byte swap functions.
+-#ifdef WORDS_BIGENDIAN
++#if defined(SNAPPY_IS_BIG_ENDIAN)
+ 
+ #ifdef HAVE_SYS_BYTEORDER_H
+ #include <sys/byteorder.h>
+ #endif
+ 
+ #ifdef HAVE_SYS_ENDIAN_H
+ #include <sys/endian.h>
+ #endif
+@@ -292,51 +303,51 @@ inline uint32 bswap_32(uint32 x) {
+ inline uint64 bswap_64(uint64 x) {
+   x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
+   x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
+   return (x >> 32) | (x << 32);
+ }
+ 
+ #endif
+ 
+-#endif  // WORDS_BIGENDIAN
++#endif  // defined(SNAPPY_IS_BIG_ENDIAN)
+ 
+ // Convert to little-endian storage, opposite of network format.
+ // Convert x from host to little endian: x = LittleEndian.FromHost(x);
+ // convert x from little endian to host: x = LittleEndian.ToHost(x);
+ //
+ //  Store values into unaligned memory converting to little endian order:
+ //    LittleEndian.Store16(p, x);
+ //
+ //  Load unaligned values stored in little endian converting to host order:
+ //    x = LittleEndian.Load16(p);
+ class LittleEndian {
+  public:
+   // Conversion functions.
+-#ifdef WORDS_BIGENDIAN
++#if defined(SNAPPY_IS_BIG_ENDIAN)
+ 
+   static uint16 FromHost16(uint16 x) { return bswap_16(x); }
+   static uint16 ToHost16(uint16 x) { return bswap_16(x); }
+ 
+   static uint32 FromHost32(uint32 x) { return bswap_32(x); }
+   static uint32 ToHost32(uint32 x) { return bswap_32(x); }
+ 
+   static bool IsLittleEndian() { return false; }
+ 
+-#else  // !defined(WORDS_BIGENDIAN)
++#else  // !defined(SNAPPY_IS_BIG_ENDIAN)
+ 
+   static uint16 FromHost16(uint16 x) { return x; }
+   static uint16 ToHost16(uint16 x) { return x; }
+ 
+   static uint32 FromHost32(uint32 x) { return x; }
+   static uint32 ToHost32(uint32 x) { return x; }
+ 
+   static bool IsLittleEndian() { return true; }
+ 
+-#endif  // !defined(WORDS_BIGENDIAN)
++#endif  // !defined(SNAPPY_IS_BIG_ENDIAN)
+ 
+   // Functions to do unaligned loads and stores in little-endian order.
+   static uint16 Load16(const void *p) {
+     return ToHost16(UNALIGNED_LOAD16(p));
+   }
+ 
+   static void Store16(void *p, uint16 v) {
+     UNALIGNED_STORE16(p, FromHost16(v));
+@@ -349,85 +360,152 @@ class LittleEndian {
+   static void Store32(void *p, uint32 v) {
+     UNALIGNED_STORE32(p, FromHost32(v));
+   }
+ };
+ 
+ // Some bit-manipulation functions.
+ class Bits {
+  public:
++  // Return floor(log2(n)) for positive integer n.
++  static int Log2FloorNonZero(uint32 n);
++
+   // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
+   static int Log2Floor(uint32 n);
+ 
+   // Return the first set least / most significant bit, 0-indexed.  Returns an
+   // undefined value if n == 0.  FindLSBSetNonZero() is similar to ffs() except
+   // that it's 0-indexed.
+   static int FindLSBSetNonZero(uint32 n);
++
++#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+   static int FindLSBSetNonZero64(uint64 n);
++#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+ 
+  private:
+-  DISALLOW_COPY_AND_ASSIGN(Bits);
++  // No copying
++  Bits(const Bits&);
++  void operator=(const Bits&);
+ };
+ 
+ #ifdef HAVE_BUILTIN_CTZ
+ 
++inline int Bits::Log2FloorNonZero(uint32 n) {
++  assert(n != 0);
++  // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
++  // represents subtraction in base 2 and observes that there's no carry.
++  //
++  // GCC and Clang represent __builtin_clz on x86 as 31 ^ _bit_scan_reverse(x).
++  // Using "31 ^" here instead of "31 -" allows the optimizer to strip the
++  // function body down to _bit_scan_reverse(x).
++  return 31 ^ __builtin_clz(n);
++}
++
+ inline int Bits::Log2Floor(uint32 n) {
+-  return n == 0 ? -1 : 31 ^ __builtin_clz(n);
++  return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
+ }
+ 
+ inline int Bits::FindLSBSetNonZero(uint32 n) {
++  assert(n != 0);
+   return __builtin_ctz(n);
+ }
+ 
++#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+ inline int Bits::FindLSBSetNonZero64(uint64 n) {
++  assert(n != 0);
+   return __builtin_ctzll(n);
+ }
++#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
++
++#elif defined(_MSC_VER)
++
++inline int Bits::Log2FloorNonZero(uint32 n) {
++  assert(n != 0);
++  unsigned long where;
++  _BitScanReverse(&where, n);
++  return static_cast<int>(where);
++}
++
++inline int Bits::Log2Floor(uint32 n) {
++  unsigned long where;
++  if (_BitScanReverse(&where, n))
++    return static_cast<int>(where);
++  return -1;
++}
++
++inline int Bits::FindLSBSetNonZero(uint32 n) {
++  assert(n != 0);
++  unsigned long where;
++  if (_BitScanForward(&where, n))
++    return static_cast<int>(where);
++  return 32;
++}
++
++#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
++inline int Bits::FindLSBSetNonZero64(uint64 n) {
++  assert(n != 0);
++  unsigned long where;
++  if (_BitScanForward64(&where, n))
++    return static_cast<int>(where);
++  return 64;
++}
++#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+ 
+ #else  // Portable versions.
+ 
+-inline int Bits::Log2Floor(uint32 n) {
+-  if (n == 0)
+-    return -1;
++inline int Bits::Log2FloorNonZero(uint32 n) {
++  assert(n != 0);
++
+   int log = 0;
+   uint32 value = n;
+   for (int i = 4; i >= 0; --i) {
+     int shift = (1 << i);
+     uint32 x = value >> shift;
+     if (x != 0) {
+       value = x;
+       log += shift;
+     }
+   }
+   assert(value == 1);
+   return log;
+ }
+ 
++inline int Bits::Log2Floor(uint32 n) {
++  return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
++}
++
+ inline int Bits::FindLSBSetNonZero(uint32 n) {
++  assert(n != 0);
++
+   int rc = 31;
+   for (int i = 4, shift = 1 << 4; i >= 0; --i) {
+     const uint32 x = n << shift;
+     if (x != 0) {
+       n = x;
+       rc -= shift;
+     }
+     shift >>= 1;
+   }
+   return rc;
+ }
+ 
++#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+ // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
+ inline int Bits::FindLSBSetNonZero64(uint64 n) {
++  assert(n != 0);
++
+   const uint32 bottombits = static_cast<uint32>(n);
+   if (bottombits == 0) {
+     // Bottom bits are zero, so scan in top bits
+     return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+   } else {
+     return FindLSBSetNonZero(bottombits);
+   }
+ }
++#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+ 
+ #endif  // End portable versions.
+ 
+ // Variable-length integer encoding.
+ class Varint {
+  public:
+   // Maximum lengths of varint encoding of uint32.
+   static const int kMax32 = 5;
+@@ -441,17 +519,17 @@ class Varint {
+                                       uint32* OUTPUT);
+ 
+   // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
+   // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
+   //            byte just past the last encoded byte.
+   static char* Encode32(char* ptr, uint32 v);
+ 
+   // EFFECTS    Appends the varint representation of "value" to "*s".
+-  static void Append32(string* s, uint32 value);
++  static void Append32(std::string* s, uint32 value);
+ };
+ 
+ inline const char* Varint::Parse32WithLimit(const char* p,
+                                             const char* l,
+                                             uint32* OUTPUT) {
+   const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
+   const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
+   uint32 b, result;
+@@ -498,31 +576,31 @@ inline char* Varint::Encode32(char* sptr
+   }
+   return reinterpret_cast<char*>(ptr);
+ }
+ 
+ // If you know the internal layout of the std::string in use, you can
+ // replace this function with one that resizes the string without
+ // filling the new space with zeros (if applicable) --
+ // it will be non-portable but faster.
+-inline void STLStringResizeUninitialized(string* s, size_t new_size) {
++inline void STLStringResizeUninitialized(std::string* s, size_t new_size) {
+   s->resize(new_size);
+ }
+ 
+ // Return a mutable char* pointing to a string's internal buffer,
+ // which may not be null-terminated. Writing through this pointer will
+ // modify the string.
+ //
+ // string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
+ // next call to a string method that invalidates iterators.
+ //
+ // As of 2006-04, there is no standard-blessed way of getting a
+ // mutable reference to a string's internal buffer. However, issue 530
+ // (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
+ // proposes this as the method. It will officially be part of the standard
+ // for C++0x. This should already work on all current implementations.
+-inline char* string_as_array(string* str) {
++inline char* string_as_array(std::string* str) {
+   return str->empty() ? NULL : &*str->begin();
+ }
+ 
+ }  // namespace snappy
+ 
+ #endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
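The `31 ^ __builtin_clz(n)` identity introduced in Bits::Log2FloorNonZero above can be sanity-checked in isolation (GCC/Clang builtin assumed; illustrative only):

```c++
#include <cassert>
#include <cstdint>

// For n != 0, __builtin_clz(n) is in [0, 31], and on that range 31 ^ x equals
// 31 - x (no borrow in base 2), so this computes floor(log2(n)).
static int Log2FloorNonZero(std::uint32_t n) {
  return 31 ^ __builtin_clz(n);
}

int main() {
  assert(Log2FloorNonZero(1) == 0);
  assert(Log2FloorNonZero(2) == 1);
  assert(Log2FloorNonZero(3) == 1);
  assert(Log2FloorNonZero(1u << 10) == 10);
  assert(Log2FloorNonZero(0xFFFFFFFFu) == 31);
  return 0;
}
```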
+diff --git a/other-licenses/snappy/src/snappy-stubs-public.h.in b/other-licenses/snappy/src/snappy-stubs-public.h.in
+--- a/other-licenses/snappy/src/snappy-stubs-public.h.in
++++ b/other-licenses/snappy/src/snappy-stubs-public.h.in
+@@ -1,10 +1,9 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+-// Author: sesse@google.com (Steinar H. Gunderson)
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ //     * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ //     * Redistributions in binary form must reproduce the above
+@@ -31,70 +30,45 @@
+ //
+ // This file cannot include config.h, as it is included from snappy.h,
+ // which is a public header. Instead, snappy-stubs-public.h is generated by
+ // from snappy-stubs-public.h.in at configure time.
+ 
+ #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+ #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+ 
+-#if @ac_cv_have_stdint_h@
+-#include <stdint.h>
+-#endif
+-
+-#if @ac_cv_have_stddef_h@
+-#include <stddef.h>
+-#endif
++#include <cstddef>
++#include <cstdint>
++#include <string>
+ 
+-#if @ac_cv_have_sys_uio_h@
++#if ${HAVE_SYS_UIO_H_01}  // HAVE_SYS_UIO_H
+ #include <sys/uio.h>
+-#endif
++#endif  // HAVE_SYS_UIO_H
+ 
+-#define SNAPPY_MAJOR @SNAPPY_MAJOR@
+-#define SNAPPY_MINOR @SNAPPY_MINOR@
+-#define SNAPPY_PATCHLEVEL @SNAPPY_PATCHLEVEL@
++#define SNAPPY_MAJOR ${PROJECT_VERSION_MAJOR}
++#define SNAPPY_MINOR ${PROJECT_VERSION_MINOR}
++#define SNAPPY_PATCHLEVEL ${PROJECT_VERSION_PATCH}
+ #define SNAPPY_VERSION \
+     ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
+ 
+-#include <string>
+-
+ namespace snappy {
+ 
+-#if @ac_cv_have_stdint_h@
+-typedef int8_t int8;
+-typedef uint8_t uint8;
+-typedef int16_t int16;
+-typedef uint16_t uint16;
+-typedef int32_t int32;
+-typedef uint32_t uint32;
+-typedef int64_t int64;
+-typedef uint64_t uint64;
+-#else
+-typedef signed char int8;
+-typedef unsigned char uint8;
+-typedef short int16;
+-typedef unsigned short uint16;
+-typedef int int32;
+-typedef unsigned int uint32;
+-typedef long long int64;
+-typedef unsigned long long uint64;
+-#endif
++using int8 = std::int8_t;
++using uint8 = std::uint8_t;
++using int16 = std::int16_t;
++using uint16 = std::uint16_t;
++using int32 = std::int32_t;
++using uint32 = std::uint32_t;
++using int64 = std::int64_t;
++using uint64 = std::uint64_t;
+ 
+-typedef std::string string;
+-
+-#ifndef DISALLOW_COPY_AND_ASSIGN
+-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+-  TypeName(const TypeName&);               \
+-  void operator=(const TypeName&)
+-#endif
+-
+-#if !@ac_cv_have_sys_uio_h@
++#if !${HAVE_SYS_UIO_H_01}  // !HAVE_SYS_UIO_H
+ // Windows does not have an iovec type, yet the concept is universally useful.
+ // It is simple to define it ourselves, so we put it inside our own namespace.
+ struct iovec {
+-	void* iov_base;
+-	size_t iov_len;
++  void* iov_base;
++  size_t iov_len;
+ };
+-#endif
++#endif  // !HAVE_SYS_UIO_H
+ 
+ }  // namespace snappy
+ 
+ #endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
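For what it's worth, the version-packing formula above evaluates as follows for the 1.1.8 release mentioned in the NEWS update (values simply plugged into the macro; illustrative only):

```c++
#include <cassert>

int main() {
  constexpr int kMajor = 1, kMinor = 1, kPatch = 8;
  constexpr int kVersion = (kMajor << 16) | (kMinor << 8) | kPatch;
  assert(kVersion == 0x010108);  // 65800 in decimal
  return 0;
}
```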
+diff --git a/other-licenses/snappy/src/snappy-test.cc b/other-licenses/snappy/src/snappy-test.cc
+--- a/other-licenses/snappy/src/snappy-test.cc
++++ b/other-licenses/snappy/src/snappy-test.cc
+@@ -28,60 +28,63 @@
+ //
+ // Various stubs for the unit tests for the open-source version of Snappy.
+ 
+ #ifdef HAVE_CONFIG_H
+ #include "config.h"
+ #endif
+ 
+ #ifdef HAVE_WINDOWS_H
++// Needed to be able to use std::max without workarounds in the source code.
++// https://support.microsoft.com/en-us/help/143208/prb-using-stl-in-windows-program-can-cause-min-max-conflicts
++#define NOMINMAX
+ #include <windows.h>
+ #endif
+ 
+ #include "snappy-test.h"
+ 
+ #include <algorithm>
+ 
+ DEFINE_bool(run_microbenchmarks, true,
+             "Run microbenchmarks before doing anything else.");
+ 
+ namespace snappy {
+ 
+-string ReadTestDataFile(const string& base, size_t size_limit) {
+-  string contents;
++std::string ReadTestDataFile(const std::string& base, size_t size_limit) {
++  std::string contents;
+   const char* srcdir = getenv("srcdir");  // This is set by Automake.
+-  string prefix;
++  std::string prefix;
+   if (srcdir) {
+-    prefix = string(srcdir) + "/";
++    prefix = std::string(srcdir) + "/";
+   }
+   file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults()
+       ).CheckSuccess();
+   if (size_limit > 0) {
+     contents = contents.substr(0, size_limit);
+   }
+   return contents;
+ }
+ 
+-string ReadTestDataFile(const string& base) {
++std::string ReadTestDataFile(const std::string& base) {
+   return ReadTestDataFile(base, 0);
+ }
+ 
+-string StringPrintf(const char* format, ...) {
++std::string StrFormat(const char* format, ...) {
+   char buf[4096];
+   va_list ap;
+   va_start(ap, format);
+   vsnprintf(buf, sizeof(buf), format, ap);
+   va_end(ap);
+   return buf;
+ }
+ 
+ bool benchmark_running = false;
+ int64 benchmark_real_time_us = 0;
+ int64 benchmark_cpu_time_us = 0;
+-string *benchmark_label = NULL;
++std::string* benchmark_label = nullptr;
+ int64 benchmark_bytes_processed = 0;
+ 
+ void ResetBenchmarkTiming() {
+   benchmark_real_time_us = 0;
+   benchmark_cpu_time_us = 0;
+ }
+ 
+ #ifdef WIN32
+@@ -155,21 +158,21 @@ void StopBenchmarkTiming() {
+                                       benchmark_start_cpu.ru_utime.tv_sec);
+   benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
+                             benchmark_start_cpu.ru_utime.tv_usec);
+ #endif  // WIN32
+ 
+   benchmark_running = false;
+ }
+ 
+-void SetBenchmarkLabel(const string& str) {
++void SetBenchmarkLabel(const std::string& str) {
+   if (benchmark_label) {
+     delete benchmark_label;
+   }
+-  benchmark_label = new string(str);
++  benchmark_label = new std::string(str);
+ }
+ 
+ void SetBenchmarkBytesProcessed(int64 bytes) {
+   benchmark_bytes_processed = bytes;
+ }
+ 
+ struct BenchmarkRun {
+   int64 real_time_us;
+@@ -196,53 +199,54 @@ void Benchmark::Run() {
+     // as we used to calibrate.
+     // Run five times and pick the median.
+     const int kNumRuns = 5;
+     const int kMedianPos = kNumRuns / 2;
+     int num_iterations = 0;
+     if (benchmark_real_time_us > 0) {
+       num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
+     }
+-    num_iterations = max(num_iterations, kCalibrateIterations);
++    num_iterations = std::max(num_iterations, kCalibrateIterations);
+     BenchmarkRun benchmark_runs[kNumRuns];
+ 
+     for (int run = 0; run < kNumRuns; ++run) {
+       ResetBenchmarkTiming();
+       StartBenchmarkTiming();
+       (*function_)(num_iterations, test_case_num);
+       StopBenchmarkTiming();
+ 
+       benchmark_runs[run].real_time_us = benchmark_real_time_us;
+       benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
+     }
+ 
+-    string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
+-    string human_readable_speed;
++    std::string heading = StrFormat("%s/%d", name_.c_str(), test_case_num);
++    std::string human_readable_speed;
+ 
+-    nth_element(benchmark_runs,
+-                benchmark_runs + kMedianPos,
+-                benchmark_runs + kNumRuns,
+-                BenchmarkCompareCPUTime());
++    std::nth_element(benchmark_runs,
++                     benchmark_runs + kMedianPos,
++                     benchmark_runs + kNumRuns,
++                     BenchmarkCompareCPUTime());
+     int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
+     int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
+     if (cpu_time_us <= 0) {
+       human_readable_speed = "?";
+     } else {
+       int64 bytes_per_second =
+           benchmark_bytes_processed * 1000000 / cpu_time_us;
+       if (bytes_per_second < 1024) {
+-        human_readable_speed = StringPrintf("%dB/s", bytes_per_second);
++        human_readable_speed =
++            StrFormat("%dB/s", static_cast<int>(bytes_per_second));
+       } else if (bytes_per_second < 1024 * 1024) {
+-        human_readable_speed = StringPrintf(
++        human_readable_speed = StrFormat(
+             "%.1fkB/s", bytes_per_second / 1024.0f);
+       } else if (bytes_per_second < 1024 * 1024 * 1024) {
+-        human_readable_speed = StringPrintf(
++        human_readable_speed = StrFormat(
+             "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
+       } else {
+-        human_readable_speed = StringPrintf(
++        human_readable_speed = StrFormat(
+             "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
+       }
+     }
+ 
+     fprintf(stderr,
+ #ifdef WIN32
+             "%-18s %10I64d %10I64d %10d %s  %s\n",
+ #else
+@@ -518,18 +522,18 @@ int ZLib::UncompressAtMostOrAll(Bytef *d
+ 
+   if ((err == Z_STREAM_END || err == Z_OK)  // everything went ok
+              && uncomp_stream_.avail_in == 0) {    // and we read it all
+     ;
+   } else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
+     LOG(WARNING)
+       << "UncompressChunkOrAll: Received some extra data, bytes total: "
+       << uncomp_stream_.avail_in << " bytes: "
+-      << string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
+-                min(int(uncomp_stream_.avail_in), 20));
++      << std::string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
++                     std::min(int(uncomp_stream_.avail_in), 20));
+     UncompressErrorInit();
+     return Z_DATA_ERROR;       // what's the extra data for?
+   } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
+     // an error happened
+     LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
+                  << " avail_out: " << uncomp_stream_.avail_out;
+     UncompressErrorInit();
+     return err;
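The benchmark driver above runs each case five times and reports the run at the median position, letting std::nth_element pick it without a full sort. A minimal sketch of that pattern (illustrative, not part of the harness):

```c++
#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<long> cpu_time_us = {120, 95, 110, 400, 99};  // 5 runs, one outlier
  const size_t kMedianPos = cpu_time_us.size() / 2;
  std::nth_element(cpu_time_us.begin(), cpu_time_us.begin() + kMedianPos,
                   cpu_time_us.end());
  assert(cpu_time_us[kMedianPos] == 110);  // the outlier does not skew the result
  return 0;
}
```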
+diff --git a/other-licenses/snappy/src/snappy-test.h b/other-licenses/snappy/src/snappy-test.h
+--- a/other-licenses/snappy/src/snappy-test.h
++++ b/other-licenses/snappy/src/snappy-test.h
+@@ -50,18 +50,16 @@
+ #ifdef HAVE_SYS_TIME_H
+ #include <sys/time.h>
+ #endif
+ 
+ #ifdef HAVE_WINDOWS_H
+ #include <windows.h>
+ #endif
+ 
+-#include <string>
+-
+ #ifdef HAVE_GTEST
+ 
+ #include <gtest/gtest.h>
+ #undef TYPED_TEST
+ #define TYPED_TEST TEST
+ #define INIT_GTEST(argc, argv) ::testing::InitGoogleTest(argc, *argv)
+ 
+ #else
+@@ -105,70 +103,52 @@
+ #ifdef HAVE_LIBZ
+ #include "zlib.h"
+ #endif
+ 
+ #ifdef HAVE_LIBLZO2
+ #include "lzo/lzo1x.h"
+ #endif
+ 
+-#ifdef HAVE_LIBLZF
+-extern "C" {
+-#include "lzf.h"
+-}
+-#endif
+-
+-#ifdef HAVE_LIBFASTLZ
+-#include "fastlz.h"
+-#endif
+-
+-#ifdef HAVE_LIBQUICKLZ
+-#include "quicklz.h"
+-#endif
+-
+ namespace {
+ 
+-namespace File {
+-  void Init() { }
+-}  // namespace File
+-
+ namespace file {
+   int Defaults() { return 0; }
+ 
+   class DummyStatus {
+    public:
+     void CheckSuccess() { }
+   };
+ 
+-  DummyStatus GetContents(const string& filename, string* data, int unused) {
++  DummyStatus GetContents(
++      const std::string& filename, std::string* data, int unused) {
+     FILE* fp = fopen(filename.c_str(), "rb");
+     if (fp == NULL) {
+       perror(filename.c_str());
+       exit(1);
+     }
+ 
+     data->clear();
+     while (!feof(fp)) {
+       char buf[4096];
+       size_t ret = fread(buf, 1, 4096, fp);
+       if (ret == 0 && ferror(fp)) {
+         perror("fread");
+         exit(1);
+       }
+-      data->append(string(buf, ret));
++      data->append(std::string(buf, ret));
+     }
+ 
+     fclose(fp);
+ 
+     return DummyStatus();
+   }
+ 
+-  DummyStatus SetContents(const string& filename,
+-                          const string& str,
+-                          int unused) {
++  inline DummyStatus SetContents(
++      const std::string& filename, const std::string& str, int unused) {
+     FILE* fp = fopen(filename.c_str(), "wb");
+     if (fp == NULL) {
+       perror(filename.c_str());
+       exit(1);
+     }
+ 
+     int ret = fwrite(str.data(), str.size(), 1, fp);
+     if (ret != 1) {
+@@ -182,87 +162,37 @@ namespace file {
+   }
+ }  // namespace file
+ 
+ }  // namespace
+ 
+ namespace snappy {
+ 
+ #define FLAGS_test_random_seed 301
+-typedef string TypeParam;
++using TypeParam = std::string;
+ 
+ void Test_CorruptedTest_VerifyCorrupted();
+ void Test_Snappy_SimpleTests();
+ void Test_Snappy_MaxBlowup();
+ void Test_Snappy_RandomData();
+ void Test_Snappy_FourByteOffset();
+ void Test_SnappyCorruption_TruncatedVarint();
+ void Test_SnappyCorruption_UnterminatedVarint();
+ void Test_SnappyCorruption_OverflowingVarint();
+ void Test_Snappy_ReadPastEndOfBuffer();
+ void Test_Snappy_FindMatchLength();
+ void Test_Snappy_FindMatchLengthRandom();
+ 
+-string ReadTestDataFile(const string& base, size_t size_limit);
++std::string ReadTestDataFile(const std::string& base, size_t size_limit);
+ 
+-string ReadTestDataFile(const string& base);
++std::string ReadTestDataFile(const std::string& base);
+ 
+ // A sprintf() variant that returns a std::string.
+ // Not safe for general use due to truncation issues.
+-string StringPrintf(const char* format, ...);
+-
+-// A simple, non-cryptographically-secure random generator.
+-class ACMRandom {
+- public:
+-  explicit ACMRandom(uint32 seed) : seed_(seed) {}
+-
+-  int32 Next();
+-
+-  int32 Uniform(int32 n) {
+-    return Next() % n;
+-  }
+-  uint8 Rand8() {
+-    return static_cast<uint8>((Next() >> 1) & 0x000000ff);
+-  }
+-  bool OneIn(int X) { return Uniform(X) == 0; }
+-
+-  // Skewed: pick "base" uniformly from range [0,max_log] and then
+-  // return "base" random bits.  The effect is to pick a number in the
+-  // range [0,2^max_log-1] with bias towards smaller numbers.
+-  int32 Skewed(int max_log);
+-
+- private:
+-  static const uint32 M = 2147483647L;   // 2^31-1
+-  uint32 seed_;
+-};
+-
+-inline int32 ACMRandom::Next() {
+-  static const uint64 A = 16807;  // bits 14, 8, 7, 5, 2, 1, 0
+-  // We are computing
+-  //       seed_ = (seed_ * A) % M,    where M = 2^31-1
+-  //
+-  // seed_ must not be zero or M, or else all subsequent computed values
+-  // will be zero or M respectively.  For all other values, seed_ will end
+-  // up cycling through every number in [1,M-1]
+-  uint64 product = seed_ * A;
+-
+-  // Compute (product % M) using the fact that ((x << 31) % M) == x.
+-  seed_ = (product >> 31) + (product & M);
+-  // The first reduction may overflow by 1 bit, so we may need to repeat.
+-  // mod == M is not possible; using > allows the faster sign-bit-based test.
+-  if (seed_ > M) {
+-    seed_ -= M;
+-  }
+-  return seed_;
+-}
+-
+-inline int32 ACMRandom::Skewed(int max_log) {
+-  const int32 base = (Next() - 1) % (max_log+1);
+-  return (Next() - 1) & ((1u << base)-1);
+-}
++std::string StrFormat(const char* format, ...);
+ 
+ // A wall-time clock. This stub is not super-accurate, nor resistant to the
+ // system time changing.
+ class CycleTimer {
+  public:
+   CycleTimer() : real_time_us_(0) {}
+ 
+   void Start() {
+@@ -306,45 +236,47 @@ class CycleTimer {
+ };
+ 
+ // Minimalistic microbenchmark framework.
+ 
+ typedef void (*BenchmarkFunction)(int, int);
+ 
+ class Benchmark {
+  public:
+-  Benchmark(const string& name, BenchmarkFunction function) :
+-      name_(name), function_(function) {}
++  Benchmark(const std::string& name, BenchmarkFunction function)
++      : name_(name), function_(function) {}
+ 
+   Benchmark* DenseRange(int start, int stop) {
+     start_ = start;
+     stop_ = stop;
+     return this;
+   }
+ 
+   void Run();
+ 
+  private:
+-  const string name_;
++  const std::string name_;
+   const BenchmarkFunction function_;
+   int start_, stop_;
+ };
+ #define BENCHMARK(benchmark_name) \
+   Benchmark* Benchmark_ ## benchmark_name = \
+           (new Benchmark(#benchmark_name, benchmark_name))
+ 
+ extern Benchmark* Benchmark_BM_UFlat;
+ extern Benchmark* Benchmark_BM_UIOVec;
+ extern Benchmark* Benchmark_BM_UValidate;
+ extern Benchmark* Benchmark_BM_ZFlat;
++extern Benchmark* Benchmark_BM_ZFlatAll;
++extern Benchmark* Benchmark_BM_ZFlatIncreasingTableSize;
+ 
+ void ResetBenchmarkTiming();
+ void StartBenchmarkTiming();
+ void StopBenchmarkTiming();
+-void SetBenchmarkLabel(const string& str);
++void SetBenchmarkLabel(const std::string& str);
+ void SetBenchmarkBytesProcessed(int64 bytes);
+ 
+ #ifdef HAVE_LIBZ
+ 
+ // Object-oriented wrapper around zlib.
+ class ZLib {
+  public:
+   ZLib();
+@@ -462,17 +394,17 @@ class ZLib {
+ };
+ 
+ #endif  // HAVE_LIBZ
+ 
+ }  // namespace snappy
+ 
+ DECLARE_bool(run_microbenchmarks);
+ 
+-static void RunSpecifiedBenchmarks() {
++static inline void RunSpecifiedBenchmarks() {
+   if (!FLAGS_run_microbenchmarks) {
+     return;
+   }
+ 
+   fprintf(stderr, "Running microbenchmarks.\n");
+ #ifndef NDEBUG
+   fprintf(stderr, "WARNING: Compiled with assertions enabled, will be slow.\n");
+ #endif
+@@ -481,16 +413,18 @@ static void RunSpecifiedBenchmarks() {
+ #endif
+   fprintf(stderr, "Benchmark            Time(ns)    CPU(ns) Iterations\n");
+   fprintf(stderr, "---------------------------------------------------\n");
+ 
+   snappy::Benchmark_BM_UFlat->Run();
+   snappy::Benchmark_BM_UIOVec->Run();
+   snappy::Benchmark_BM_UValidate->Run();
+   snappy::Benchmark_BM_ZFlat->Run();
++  snappy::Benchmark_BM_ZFlatAll->Run();
++  snappy::Benchmark_BM_ZFlatIncreasingTableSize->Run();
+ 
+   fprintf(stderr, "\n");
+ }
+ 
+ #ifndef HAVE_GTEST
+ 
+ static inline int RUN_ALL_TESTS() {
+   fprintf(stderr, "Running correctness tests.\n");
+@@ -510,62 +444,58 @@ static inline int RUN_ALL_TESTS() {
+   return 0;
+ }
+ 
+ #endif  // HAVE_GTEST
+ 
+ // For main().
+ namespace snappy {
+ 
+-static void CompressFile(const char* fname);
+-static void UncompressFile(const char* fname);
+-static void MeasureFile(const char* fname);
+-
+ // Logging.
+ 
+ #define LOG(level) LogMessage()
+ #define VLOG(level) true ? (void)0 : \
+     snappy::LogMessageVoidify() & snappy::LogMessage()
+ 
+ class LogMessage {
+  public:
+   LogMessage() { }
+   ~LogMessage() {
+-    cerr << endl;
++    std::cerr << std::endl;
+   }
+ 
+   LogMessage& operator<<(const std::string& msg) {
+-    cerr << msg;
++    std::cerr << msg;
+     return *this;
+   }
+   LogMessage& operator<<(int x) {
+-    cerr << x;
++    std::cerr << x;
+     return *this;
+   }
+ };
+ 
+ // Asserts, both versions activated in debug mode only,
+ // and ones that are always active.
+ 
+ #define CRASH_UNLESS(condition) \
+-    PREDICT_TRUE(condition) ? (void)0 : \
++    SNAPPY_PREDICT_TRUE(condition) ? (void)0 : \
+     snappy::LogMessageVoidify() & snappy::LogMessageCrash()
+ 
+ #ifdef _MSC_VER
+ // ~LogMessageCrash calls abort() and therefore never exits. This is by design
+ // so temporarily disable warning C4722.
+ #pragma warning(push)
+ #pragma warning(disable:4722)
+ #endif
+ 
+ class LogMessageCrash : public LogMessage {
+  public:
+   LogMessageCrash() { }
+   ~LogMessageCrash() {
+-    cerr << endl;
++    std::cerr << std::endl;
+     abort();
+   }
+ };
+ 
+ #ifdef _MSC_VER
+ #pragma warning(pop)
+ #endif
+ 
+@@ -585,15 +515,11 @@ class LogMessageVoidify {
+ #define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
+ #define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
+ #define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
+ #define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
+ #define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
+ #define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
+ #define CHECK_OK(cond) (cond).CheckSuccess()
+ 
+-}  // namespace
+-
+-using snappy::CompressFile;
+-using snappy::UncompressFile;
+-using snappy::MeasureFile;
++}  // namespace snappy
+ 
+ #endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
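The LOG/CRASH_UNLESS macros above use the usual "voidify" idiom: the conditional keeps the macro usable as a single expression, and an operator whose precedence sits below `<<` but above `?:` turns the streamed message into void, so the message is only evaluated when the check fails. A stripped-down sketch (illustrative, not the patched header):

```c++
#include <iostream>

class LogMessage {
 public:
  ~LogMessage() { std::cerr << std::endl; }
  LogMessage& operator<<(const char* msg) {
    std::cerr << msg;
    return *this;
  }
};

// operator& binds more loosely than << but more tightly than ?:, so it can
// turn the streamed expression into void inside the conditional below.
class LogMessageVoidify {
 public:
  void operator&(const LogMessage&) {}
};

#define CHECK(condition) \
  (condition) ? (void)0 : LogMessageVoidify() & LogMessage()

int main() {
  CHECK(2 + 2 == 4) << "only evaluated (and printed) if the check fails";
  return 0;
}
```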
+diff --git a/other-licenses/snappy/src/snappy.cc b/other-licenses/snappy/src/snappy.cc
+--- a/other-licenses/snappy/src/snappy.cc
++++ b/other-licenses/snappy/src/snappy.cc
+@@ -25,32 +25,67 @@
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ 
+ #include "snappy.h"
+ #include "snappy-internal.h"
+ #include "snappy-sinksource.h"
+ 
++#if !defined(SNAPPY_HAVE_SSSE3)
++// __SSSE3__ is defined by GCC and Clang. Visual Studio doesn't target SIMD
++// support between SSE2 and AVX (so SSSE3 instructions require AVX support), and
++// defines __AVX__ when AVX support is available.
++#if defined(__SSSE3__) || defined(__AVX__)
++#define SNAPPY_HAVE_SSSE3 1
++#else
++#define SNAPPY_HAVE_SSSE3 0
++#endif
++#endif  // !defined(SNAPPY_HAVE_SSSE3)
++
++#if !defined(SNAPPY_HAVE_BMI2)
++// __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2
++// specifically, but it does define __AVX2__ when AVX2 support is available.
++// Fortunately, AVX2 was introduced in Haswell, just like BMI2.
++//
++// BMI2 is not defined as a subset of AVX2 (unlike SSSE3 and AVX above). So,
++// GCC and Clang can build code with AVX2 enabled but BMI2 disabled, in which
++// case issuing BMI2 instructions results in a compiler error.
++#if defined(__BMI2__) || (defined(_MSC_VER) && defined(__AVX2__))
++#define SNAPPY_HAVE_BMI2 1
++#else
++#define SNAPPY_HAVE_BMI2 0
++#endif
++#endif  // !defined(SNAPPY_HAVE_BMI2)
++
++#if SNAPPY_HAVE_SSSE3
++// Please do not replace with <x86intrin.h>. or with headers that assume more
++// advanced SSE versions without checking with all the OWNERS.
++#include <tmmintrin.h>
++#endif
++
++#if SNAPPY_HAVE_BMI2
++// Please do not replace with <x86intrin.h>. or with headers that assume more
++// advanced SSE versions without checking with all the OWNERS.
++#include <immintrin.h>
++#endif
++
+ #include <stdio.h>
+ 
+ #include <algorithm>
+ #include <string>
+ #include <vector>
+ 
+-
+ namespace snappy {
+ 
+ using internal::COPY_1_BYTE_OFFSET;
+ using internal::COPY_2_BYTE_OFFSET;
+-using internal::COPY_4_BYTE_OFFSET;
+ using internal::LITERAL;
+ using internal::char_table;
+ using internal::kMaximumTagLength;
+-using internal::wordmask;
+ 
+ // Any hash function will produce a valid compressed bitstream, but a good
+ // hash function reduces the number of collisions and thus yields better
+ // compression for compressible input, and more speed for incompressible
+ // input. Of course, it doesn't hurt if the hash function is reasonably fast
+ // either, as it gets called a lot.
+ static inline uint32 HashBytes(uint32 bytes, int shift) {
+   uint32 kMul = 0x1e35a7bd;
+@@ -79,200 +114,374 @@ size_t MaxCompressedLength(size_t source
+   // enough, it will take 5 bytes to encode the copy op.  Therefore the
+   // worst case here is a one-byte literal followed by a five-byte copy.
+   // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
+   //
+   // This last factor dominates the blowup, so the final estimate is:
+   return 32 + source_len + source_len/6;
+ }
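To make the worst-case bound above concrete: six incompressible input bytes can become seven output bytes (a one-byte literal followed by a five-byte copy), which is where the source_len/6 term comes from, on top of the fixed 32 bytes of slack in the formula. A quick arithmetic check (illustrative only):

```c++
#include <cassert>
#include <cstddef>

static std::size_t MaxCompressedLength(std::size_t source_len) {
  return 32 + source_len + source_len / 6;
}

int main() {
  // 6000 worst-case bytes -> at most 1000 bytes of copy/literal overhead
  // plus the fixed slack.
  assert(MaxCompressedLength(6000) == 32 + 6000 + 1000);
  return 0;
}
```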
+ 
+-// Copy "len" bytes from "src" to "op", one byte at a time.  Used for
+-// handling COPY operations where the input and output regions may
+-// overlap.  For example, suppose:
+-//    src    == "ab"
+-//    op     == src + 2
+-//    len    == 20
+-// After IncrementalCopy(src, op, len), the result will have
+-// eleven copies of "ab"
++namespace {
++
++void UnalignedCopy64(const void* src, void* dst) {
++  char tmp[8];
++  memcpy(tmp, src, 8);
++  memcpy(dst, tmp, 8);
++}
++
++void UnalignedCopy128(const void* src, void* dst) {
++  // memcpy gets vectorized when the appropriate compiler options are used.
++  // For example, x86 compilers targeting SSE2+ will optimize to an SSE2 load
++  // and store.
++  char tmp[16];
++  memcpy(tmp, src, 16);
++  memcpy(dst, tmp, 16);
++}
++
++// Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) a byte at a time. Used
++// for handling COPY operations where the input and output regions may overlap.
++// For example, suppose:
++//    src       == "ab"
++//    op        == src + 2
++//    op_limit  == op + 20
++// After IncrementalCopySlow(src, op, op_limit), the result will have eleven
++// copies of "ab"
+ //    ababababababababababab
+-// Note that this does not match the semantics of either memcpy()
+-// or memmove().
+-static inline void IncrementalCopy(const char* src, char* op, ssize_t len) {
+-  assert(len > 0);
+-  do {
++// Note that this does not match the semantics of either memcpy() or memmove().
++inline char* IncrementalCopySlow(const char* src, char* op,
++                                 char* const op_limit) {
++  // TODO: Remove pragma when LLVM is aware this
++  // function is only called in cold regions and when cold regions don't get
++  // vectorized or unrolled.
++#ifdef __clang__
++#pragma clang loop unroll(disable)
++#endif
++  while (op < op_limit) {
+     *op++ = *src++;
+-  } while (--len > 0);
++  }
++  return op_limit;
+ }
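// Illustrative sketch (not part of the recorded patch): the overlapping-copy
// semantics described above. Copying 20 bytes from "ab" to a destination two
// bytes ahead keeps re-reading what was just written, tiling the pattern.
#include <cassert>

int main() {
  char buf[32] = "ab";
  const char* src = buf;
  char* op = buf + 2;
  char* const op_limit = op + 20;
  while (op < op_limit) *op++ = *src++;  // same loop as IncrementalCopySlow
  for (int i = 0; i < 22; ++i) assert(buf[i] == "ab"[i % 2]);  // 11 copies of "ab"
}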
+ 
+-// Equivalent to IncrementalCopy except that it can write up to ten extra
+-// bytes after the end of the copy, and that it is faster.
+-//
+-// The main part of this loop is a simple copy of eight bytes at a time until
+-// we've copied (at least) the requested amount of bytes.  However, if op and
+-// src are less than eight bytes apart (indicating a repeating pattern of
+-// length < 8), we first need to expand the pattern in order to get the correct
+-// results. For instance, if the buffer looks like this, with the eight-byte
+-// <src> and <op> patterns marked as intervals:
+-//
+-//    abxxxxxxxxxxxx
+-//    [------]           src
+-//      [------]         op
+-//
+-// a single eight-byte copy from <src> to <op> will repeat the pattern once,
+-// after which we can move <op> two bytes without moving <src>:
+-//
+-//    ababxxxxxxxxxx
+-//    [------]           src
+-//        [------]       op
+-//
+-// and repeat the exercise until the two no longer overlap.
+-//
+-// This allows us to do very well in the special case of one single byte
+-// repeated many times, without taking a big hit for more general cases.
+-//
+-// The worst case of extra writing past the end of the match occurs when
+-// op - src == 1 and len == 1; the last copy will read from byte positions
+-// [0..7] and write to [4..11], whereas it was only supposed to write to
+-// position 1. Thus, ten excess bytes.
++#if SNAPPY_HAVE_SSSE3
++
++// This is a table of shuffle control masks that can be used as the source
++// operand for PSHUFB to permute the contents of the destination XMM register
++// into a repeating byte pattern.
++alignas(16) const char pshufb_fill_patterns[7][16] = {
++  {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
++  {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1},
++  {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0},
++  {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},
++  {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0},
++  {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3},
++  {0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1},
++};
++
++#endif  // SNAPPY_HAVE_SSSE3
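// Illustrative sketch (not part of the recorded patch): the scalar effect of
// one PSHUFB with the masks above. Row (pattern_size - 1) holds byte indices
// that tile the first pattern_size bytes of the source across 16 output bytes.
#include <cassert>
#include <cstddef>

int main() {
  const char src[] = "abc";  // pattern_size == 3, third row of the table
  const char mask[16] = {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0};
  char pattern[16];
  for (size_t i = 0; i < 16; ++i) pattern[i] = src[mask[i]];
  for (size_t i = 0; i < 16; ++i) assert(pattern[i] == "abc"[i % 3]);
}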
++
++// Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than
++// IncrementalCopySlow. buf_limit is the address past the end of the writable
++// region of the buffer.
++inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
++                             char* const buf_limit) {
++  // Terminology:
++  //
++  // slop = buf_limit - op
++  // pat  = op - src
++  // len  = limit - op
++  assert(src < op);
++  assert(op <= op_limit);
++  assert(op_limit <= buf_limit);
++  // NOTE: The compressor always emits 4 <= len <= 64. It is ok to assume that
++  // to optimize this function but we have to also handle other cases in case
++  // the input does not satisfy these conditions.
++
++  size_t pattern_size = op - src;
++  // The cases are split into different branches to allow the branch predictor,
++  // FDO, and static prediction hints to work better. For each input we list the
++  // ratio of invocations that match each condition.
++  //
++  // input        slop < 16   pat < 8  len > 16
++  // ------------------------------------------
++  // html|html4|cp   0%         1.01%    27.73%
++  // urls            0%         0.88%    14.79%
++  // jpg             0%        64.29%     7.14%
++  // pdf             0%         2.56%    58.06%
++  // txt[1-4]        0%         0.23%     0.97%
++  // pb              0%         0.96%    13.88%
++  // bin             0.01%     22.27%    41.17%
++  //
++  // It is very rare that we don't have enough slop for doing block copies. It
++  // is also rare that we need to expand a pattern. Small patterns are common
++  // for incompressible formats and for those we are plenty fast already.
++  // Lengths are normally not greater than 16 but they vary depending on the
++  // input. In general if we always predict len <= 16 it would be an ok
++  // prediction.
++  //
++  // In order to be fast we want a pattern >= 8 bytes and an unrolled loop
++  // copying 2x 8 bytes at a time.
+ 
+-namespace {
+-
+-const int kMaxIncrementCopyOverflow = 10;
++  // Handle the uncommon case where pattern is less than 8 bytes.
++  if (SNAPPY_PREDICT_FALSE(pattern_size < 8)) {
++#if SNAPPY_HAVE_SSSE3
++    // Load the first eight bytes into an 128-bit XMM register, then use PSHUFB
++    // to permute the register's contents in-place into a repeating sequence of
++    // the first "pattern_size" bytes.
++    // For example, suppose:
++    //    src       == "abc"
++    //    op        == src + 3
++    // After _mm_shuffle_epi8(), "pattern" will have five copies of "abc"
++    // followed by one byte of slop: abcabcabcabcabca.
++    //
++    // The non-SSE fallback implementation suffers from store-forwarding stalls
++    // because its loads and stores partly overlap. By expanding the pattern
++    // in-place, we avoid the penalty.
++    if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 16)) {
++      const __m128i shuffle_mask = _mm_load_si128(
++          reinterpret_cast<const __m128i*>(pshufb_fill_patterns)
++          + pattern_size - 1);
++      const __m128i pattern = _mm_shuffle_epi8(
++          _mm_loadl_epi64(reinterpret_cast<const __m128i*>(src)), shuffle_mask);
++      // Uninitialized bytes are masked out by the shuffle mask.
++      // TODO: remove annotation and macro defs once MSan is fixed.
++      SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(&pattern, sizeof(pattern));
++      pattern_size *= 16 / pattern_size;
++      char* op_end = std::min(op_limit, buf_limit - 15);
++      while (op < op_end) {
++        _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
++        op += pattern_size;
++      }
++      if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
++    }
++    return IncrementalCopySlow(src, op, op_limit);
++#else  // !SNAPPY_HAVE_SSSE3
++    // If plenty of buffer space remains, expand the pattern to at least 8
++    // bytes. The way the following loop is written, we need 8 bytes of buffer
++    // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10
++    // bytes if pattern_size is 2.  Precisely encoding that is probably not
++    // worthwhile; instead, invoke the slow path if we cannot write 11 bytes
++    // (because 11 are required in the worst case).
++    if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 11)) {
++      while (pattern_size < 8) {
++        UnalignedCopy64(src, op);
++        op += pattern_size;
++        pattern_size *= 2;
++      }
++      if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
++    } else {
++      return IncrementalCopySlow(src, op, op_limit);
++    }
++#endif  // SNAPPY_HAVE_SSSE3
++  }
++  assert(pattern_size >= 8);
+ 
+-inline void IncrementalCopyFastPath(const char* src, char* op, ssize_t len) {
+-  while (PREDICT_FALSE(op - src < 8)) {
++  // Copy 2x 8 bytes at a time. Because op - src can be < 16, a single
++  // UnalignedCopy128 might overwrite data in op. UnalignedCopy64 is safe
++  // because expanding the pattern to at least 8 bytes guarantees that
++  // op - src >= 8.
++  //
++  // Typically, the op_limit is the gating factor so try to simplify the loop
++  // based on that.
++  if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 16)) {
++    // There are between one and four 16-byte blocks. Writing four
++    // conditionals instead of a loop allows FDO to lay out the code with respect
++    // to the actual probabilities of each length.
++    // TODO: Replace with loop with trip count hint.
+     UnalignedCopy64(src, op);
+-    len -= op - src;
+-    op += op - src;
++    UnalignedCopy64(src + 8, op + 8);
++
++    if (op + 16 < op_limit) {
++      UnalignedCopy64(src + 16, op + 16);
++      UnalignedCopy64(src + 24, op + 24);
++    }
++    if (op + 32 < op_limit) {
++      UnalignedCopy64(src + 32, op + 32);
++      UnalignedCopy64(src + 40, op + 40);
++    }
++    if (op + 48 < op_limit) {
++      UnalignedCopy64(src + 48, op + 48);
++      UnalignedCopy64(src + 56, op + 56);
++    }
++    return op_limit;
+   }
+-  while (len > 0) {
++
++  // Fall back to doing as much as we can with the available slop in the
++  // buffer. This code path is relatively cold however so we save code size by
++  // avoiding unrolling and vectorizing.
++  //
++  // TODO: Remove pragma when cold regions don't get vectorized
++  // or unrolled.
++#ifdef __clang__
++#pragma clang loop unroll(disable)
++#endif
++  for (char *op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
++    UnalignedCopy64(src, op);
++    UnalignedCopy64(src + 8, op + 8);
++  }
++  if (op >= op_limit)
++    return op_limit;
++
++  // We only take this branch if we didn't have enough slop and we can do a
++  // single 8 byte copy.
++  if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) {
+     UnalignedCopy64(src, op);
+     src += 8;
+     op += 8;
+-    len -= 8;
+   }
++  return IncrementalCopySlow(src, op, op_limit);
+ }
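// Illustrative sketch (not part of the recorded patch): the non-SSSE3
// expansion loop above, in isolation. A 3-byte pattern is doubled in place
// until it is at least 8 bytes long; after that op - src >= 8, so each 8-byte
// read stays behind the bytes being written and UnalignedCopy64 pairs are safe.
#include <cassert>
#include <cstddef>
#include <cstring>

static void UnalignedCopy64Sketch(const void* src, void* dst) {
  char tmp[8];
  std::memcpy(tmp, src, 8);
  std::memcpy(dst, tmp, 8);
}

int main() {
  char buf[32] = "abc";        // needs 11 bytes of slop past op, as noted above
  const char* src = buf;
  char* op = buf + 3;
  size_t pattern_size = 3;
  while (pattern_size < 8) {   // same shape as the loop above
    UnalignedCopy64Sketch(src, op);
    op += pattern_size;
    pattern_size *= 2;
  }
  assert(op - src >= 8);
  for (int i = 0; i < 12; ++i) assert(buf[i] == "abc"[i % 3]);
}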
+ 
+ }  // namespace
+ 
++template <bool allow_fast_path>
+ static inline char* EmitLiteral(char* op,
+                                 const char* literal,
+-                                int len,
+-                                bool allow_fast_path) {
+-  int n = len - 1;      // Zero-length literals are disallowed
+-  if (n < 60) {
++                                int len) {
++  // The vast majority of copies are below 16 bytes, for which a
++  // call to memcpy is overkill. This fast path can sometimes
++  // copy up to 15 bytes too much, but that is okay in the
++  // main loop, since we have a bit to go on for both sides:
++  //
++  //   - The input will always have kInputMarginBytes = 15 extra
++  //     available bytes, as long as we're in the main loop, and
++  //     if not, allow_fast_path = false.
++  //   - The output will always have 32 spare bytes (see
++  //     MaxCompressedLength).
++  assert(len > 0);      // Zero-length literals are disallowed
++  int n = len - 1;
++  if (allow_fast_path && len <= 16) {
+     // Fits in tag byte
+     *op++ = LITERAL | (n << 2);
+ 
+-    // The vast majority of copies are below 16 bytes, for which a
+-    // call to memcpy is overkill. This fast path can sometimes
+-    // copy up to 15 bytes too much, but that is okay in the
+-    // main loop, since we have a bit to go on for both sides:
+-    //
+-    //   - The input will always have kInputMarginBytes = 15 extra
+-    //     available bytes, as long as we're in the main loop, and
+-    //     if not, allow_fast_path = false.
+-    //   - The output will always have 32 spare bytes (see
+-    //     MaxCompressedLength).
+-    if (allow_fast_path && len <= 16) {
+-      UnalignedCopy64(literal, op);
+-      UnalignedCopy64(literal + 8, op + 8);
+-      return op + len;
+-    }
++    UnalignedCopy128(literal, op);
++    return op + len;
++  }
++
++  if (n < 60) {
++    // Fits in tag byte
++    *op++ = LITERAL | (n << 2);
+   } else {
+-    // Encode in upcoming bytes
+-    char* base = op;
+-    int count = 0;
+-    op++;
+-    while (n > 0) {
+-      *op++ = n & 0xff;
+-      n >>= 8;
+-      count++;
+-    }
++    int count = (Bits::Log2Floor(n) >> 3) + 1;
+     assert(count >= 1);
+     assert(count <= 4);
+-    *base = LITERAL | ((59+count) << 2);
++    *op++ = LITERAL | ((59 + count) << 2);
++    // Encode in upcoming bytes.
++    // Write 4 bytes, though we may care about only 1 of them. The output buffer
++    // is guaranteed to have at least 3 more spaces left as 'len >= 61' holds
++    // here and there is a memcpy of size 'len' below.
++    LittleEndian::Store32(op, n);
++    op += count;
+   }
+   memcpy(op, literal, len);
+   return op + len;
+ }
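// Illustrative sketch (not part of the recorded patch): the literal header
// bytes produced above. LITERAL is tag type 0; short literals store
// "length - 1" in the tag's upper six bits, longer ones store 59 + the byte
// count of "length - 1" there and append that value little-endian.
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint8_t> EmitLiteralHeaderSketch(uint32_t len) {
  assert(len > 0);
  uint32_t n = len - 1;
  if (n < 60) return {static_cast<uint8_t>(n << 2)};
  std::vector<uint8_t> out(1);
  uint8_t count = 0;
  for (uint32_t v = n; v > 0; v >>= 8) {
    out.push_back(static_cast<uint8_t>(v & 0xff));
    ++count;
  }
  out[0] = static_cast<uint8_t>((59 + count) << 2);
  return out;
}

int main() {
  assert(EmitLiteralHeaderSketch(16) == (std::vector<uint8_t>{15 << 2}));
  // 1000-byte literal: n = 999 = 0x03e7 needs two bytes, so the tag is 61 << 2.
  assert(EmitLiteralHeaderSketch(1000) ==
         (std::vector<uint8_t>{61 << 2, 0xe7, 0x03}));
}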
+ 
+-static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
++template <bool len_less_than_12>
++static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
+   assert(len <= 64);
+   assert(len >= 4);
+   assert(offset < 65536);
++  assert(len_less_than_12 == (len < 12));
+ 
+-  if ((len < 12) && (offset < 2048)) {
+-    size_t len_minus_4 = len - 4;
+-    assert(len_minus_4 < 8);            // Must fit in 3 bits
+-    *op++ = COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8) << 5);
++  if (len_less_than_12 && SNAPPY_PREDICT_TRUE(offset < 2048)) {
++    // offset fits in 11 bits.  The 3 highest go in the top of the first byte,
++    // and the rest go in the second byte.
++    *op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0);
+     *op++ = offset & 0xff;
+   } else {
+-    *op++ = COPY_2_BYTE_OFFSET + ((len-1) << 2);
+-    LittleEndian::Store16(op, offset);
+-    op += 2;
++    // Write 4 bytes, though we only care about 3 of them.  The output buffer
++    // is required to have some slack, so the extra byte won't overrun it.
++    uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
++    LittleEndian::Store32(op, u);
++    op += 3;
+   }
+   return op;
+ }
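// Illustrative sketch (not part of the recorded patch): the two copy
// encodings produced above, with the Snappy tag-type constants written out
// (COPY_1_BYTE_OFFSET == 1, COPY_2_BYTE_OFFSET == 2).
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint8_t> EmitCopySketch(uint32_t offset, uint32_t len) {
  assert(len >= 4 && len <= 64 && offset < 65536);
  if (len < 12 && offset < 2048) {
    // 2 bytes: 3 length bits and the top 3 of 11 offset bits share the tag.
    return {static_cast<uint8_t>(1 + ((len - 4) << 2) + ((offset >> 3) & 0xe0)),
            static_cast<uint8_t>(offset & 0xff)};
  }
  // 3 bytes: 6 length bits in the tag, 16-bit little-endian offset after it.
  return {static_cast<uint8_t>(2 + ((len - 1) << 2)),
          static_cast<uint8_t>(offset & 0xff),
          static_cast<uint8_t>(offset >> 8)};
}

int main() {
  // offset 700 = 0b010'1011'1100: top 3 bits are 010, i.e. 0x40 in the tag.
  assert(EmitCopySketch(700, 7) ==
         (std::vector<uint8_t>{1 + (3 << 2) + 0x40, 0xbc}));
  assert(EmitCopySketch(3000, 40) ==
         (std::vector<uint8_t>{2 + (39 << 2), 0xb8, 0x0b}));
}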
+ 
+-static inline char* EmitCopy(char* op, size_t offset, int len) {
+-  // Emit 64 byte copies but make sure to keep at least four bytes reserved
+-  while (PREDICT_FALSE(len >= 68)) {
+-    op = EmitCopyLessThan64(op, offset, 64);
+-    len -= 64;
+-  }
++template <bool len_less_than_12>
++static inline char* EmitCopy(char* op, size_t offset, size_t len) {
++  assert(len_less_than_12 == (len < 12));
++  if (len_less_than_12) {
++    return EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
++  } else {
++    // A special case for len <= 64 might help, but so far measurements suggest
++    // it's in the noise.
++
++    // Emit 64 byte copies but make sure to keep at least four bytes reserved.
++    while (SNAPPY_PREDICT_FALSE(len >= 68)) {
++      op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 64);
++      len -= 64;
++    }
+ 
+-  // Emit an extra 60 byte copy if have too much data to fit in one copy
+-  if (len > 64) {
+-    op = EmitCopyLessThan64(op, offset, 60);
+-    len -= 60;
+-  }
++    // One or two copies will now finish the job.
++    if (len > 64) {
++      op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 60);
++      len -= 60;
++    }
+ 
+-  // Emit remainder
+-  op = EmitCopyLessThan64(op, offset, len);
+-  return op;
++    // Emit remainder.
++    if (len < 12) {
++      op = EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
++    } else {
++      op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, len);
++    }
++    return op;
++  }
+ }
+ 
+-
+ bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
+   uint32 v = 0;
+   const char* limit = start + n;
+   if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
+     *result = v;
+     return true;
+   } else {
+     return false;
+   }
+ }
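// Illustrative sketch (not part of the recorded patch): the length prefix
// parsed above is a little-endian base-128 varint, 7 payload bits per byte
// with the high bit set on every byte except the last.
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint8_t> EncodeVarint32Sketch(uint32_t v) {
  std::vector<uint8_t> out;
  while (v >= 0x80) {
    out.push_back(static_cast<uint8_t>((v & 0x7f) | 0x80));
    v >>= 7;
  }
  out.push_back(static_cast<uint8_t>(v));
  return out;
}

int main() {
  assert(EncodeVarint32Sketch(64) == (std::vector<uint8_t>{0x40}));
  // 2097150 = 0x1ffffe encodes as three bytes, lowest 7-bit group first.
  assert(EncodeVarint32Sketch(2097150) ==
         (std::vector<uint8_t>{0xfe, 0xff, 0x7f}));
}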
+ 
+-namespace internal {
+-uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
+-  // Use smaller hash table when input.size() is smaller, since we
+-  // fill the table, incurring O(hash table size) overhead for
+-  // compression, and if the input is short, we won't need that
+-  // many hash table entries anyway.
+-  assert(kMaxHashTableSize >= 256);
+-  size_t htsize = 256;
+-  while (htsize < kMaxHashTableSize && htsize < input_size) {
+-    htsize <<= 1;
++namespace {
++uint32 CalculateTableSize(uint32 input_size) {
++  static_assert(
++      kMaxHashTableSize >= kMinHashTableSize,
++      "kMaxHashTableSize should be greater or equal to kMinHashTableSize.");
++  if (input_size > kMaxHashTableSize) {
++    return kMaxHashTableSize;
+   }
++  if (input_size < kMinHashTableSize) {
++    return kMinHashTableSize;
++  }
++  // This is equivalent to Log2Ceiling(input_size), assuming input_size > 1.
++  // 2 << Log2Floor(x - 1) is equivalent to 1 << (1 + Log2Floor(x - 1)).
++  return 2u << Bits::Log2Floor(input_size - 1);
++}
++}  // namespace
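// Illustrative sketch (not part of the recorded patch): the sizing rule above,
// spelled out. Sizes are clamped to [kMinHashTableSize, kMaxHashTableSize] =
// [256, 16384]; anything in between is rounded up to the next power of two.
#include <cassert>
#include <cstdint>

uint32_t CalculateTableSizeSketch(uint32_t input_size) {
  if (input_size > 16384) return 16384;   // kMaxHashTableSize
  if (input_size < 256) return 256;       // kMinHashTableSize
  uint32_t size = 256;
  while (size < input_size) size <<= 1;   // next power of two >= input_size
  return size;
}

int main() {
  assert(CalculateTableSizeSketch(100) == 256);
  assert(CalculateTableSizeSketch(5000) == 8192);
  assert(CalculateTableSizeSketch(8192) == 8192);
  assert(CalculateTableSizeSketch(70000) == 16384);
}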
+ 
+-  uint16* table;
+-  if (htsize <= ARRAYSIZE(small_table_)) {
+-    table = small_table_;
+-  } else {
+-    if (large_table_ == NULL) {
+-      large_table_ = new uint16[kMaxHashTableSize];
+-    }
+-    table = large_table_;
+-  }
++namespace internal {
++WorkingMemory::WorkingMemory(size_t input_size) {
++  const size_t max_fragment_size = std::min(input_size, kBlockSize);
++  const size_t table_size = CalculateTableSize(max_fragment_size);
++  size_ = table_size * sizeof(*table_) + max_fragment_size +
++          MaxCompressedLength(max_fragment_size);
++  mem_ = std::allocator<char>().allocate(size_);
++  table_ = reinterpret_cast<uint16*>(mem_);
++  input_ = mem_ + table_size * sizeof(*table_);
++  output_ = input_ + max_fragment_size;
++}
+ 
++WorkingMemory::~WorkingMemory() {
++  std::allocator<char>().deallocate(mem_, size_);
++}
++
++uint16* WorkingMemory::GetHashTable(size_t fragment_size,
++                                    int* table_size) const {
++  const size_t htsize = CalculateTableSize(fragment_size);
++  memset(table_, 0, htsize * sizeof(*table_));
+   *table_size = htsize;
+-  memset(table, 0, htsize * sizeof(*table));
+-  return table;
++  return table_;
+ }
+ }  // end namespace internal
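// Illustrative sketch (not part of the recorded patch): the single allocation
// WorkingMemory makes above, worked through for an input of at least one full
// kBlockSize fragment. table_ uses two bytes per uint16 slot; the output
// scratch is sized by MaxCompressedLength.
#include <cstddef>

constexpr size_t kTableBytesSketch  = 16384 * 2;    // CalculateTableSize(65536) slots
constexpr size_t kInputBytesSketch  = 65536;        // one kBlockSize fragment
constexpr size_t kOutputBytesSketch = 32 + 65536 + 65536 / 6;  // MaxCompressedLength(65536)
static_assert(kTableBytesSketch + kInputBytesSketch + kOutputBytesSketch == 174794,
              "size_ for inputs of 64 KiB or more");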
+ 
+ // For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
+ // equal UNALIGNED_LOAD32(p + offset).  Motivation: On x86-64 hardware we have
+ // empirically found that overlapping loads such as
+ //  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
+ // are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
+@@ -329,27 +538,27 @@ namespace internal {
+ char* CompressFragment(const char* input,
+                        size_t input_size,
+                        char* op,
+                        uint16* table,
+                        const int table_size) {
+   // "ip" is the input pointer, and "op" is the output pointer.
+   const char* ip = input;
+   assert(input_size <= kBlockSize);
+-  assert((table_size & (table_size - 1)) == 0); // table must be power of two
++  assert((table_size & (table_size - 1)) == 0);  // table must be power of two
+   const int shift = 32 - Bits::Log2Floor(table_size);
+   assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
+   const char* ip_end = input + input_size;
+   const char* base_ip = ip;
+   // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
+   // [next_emit, ip_end) after the main loop.
+   const char* next_emit = ip;
+ 
+   const size_t kInputMarginBytes = 15;
+-  if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
++  if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
+     const char* ip_limit = input + input_size - kInputMarginBytes;
+ 
+     for (uint32 next_hash = Hash(++ip, shift); ; ) {
+       assert(next_emit < ip);
+       // The body of this loop calls EmitLiteral once and then EmitCopy one or
+       // more times.  (The exception is that when we're close to exhausting
+       // the input we goto emit_remainder.)
+       //
+@@ -380,85 +589,96 @@ char* CompressFragment(const char* input
+       const char* candidate;
+       do {
+         ip = next_ip;
+         uint32 hash = next_hash;
+         assert(hash == Hash(ip, shift));
+         uint32 bytes_between_hash_lookups = skip >> 5;
+         skip += bytes_between_hash_lookups;
+         next_ip = ip + bytes_between_hash_lookups;
+-        if (PREDICT_FALSE(next_ip > ip_limit)) {
++        if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
+           goto emit_remainder;
+         }
+         next_hash = Hash(next_ip, shift);
+         candidate = base_ip + table[hash];
+         assert(candidate >= base_ip);
+         assert(candidate < ip);
+ 
+         table[hash] = ip - base_ip;
+-      } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
+-                            UNALIGNED_LOAD32(candidate)));
++      } while (SNAPPY_PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
++                                 UNALIGNED_LOAD32(candidate)));
+ 
+       // Step 2: A 4-byte match has been found.  We'll later see if more
+       // than 4 bytes match.  But, prior to the match, input
+       // bytes [next_emit, ip) are unmatched.  Emit them as "literal bytes."
+       assert(next_emit + 16 <= ip_end);
+-      op = EmitLiteral(op, next_emit, ip - next_emit, true);
++      op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, ip - next_emit);
+ 
+       // Step 3: Call EmitCopy, and then see if another EmitCopy could
+       // be our next move.  Repeat until we find no match for the
+       // input immediately after what was consumed by the last EmitCopy call.
+       //
+       // If we exit this loop normally then we need to call EmitLiteral next,
+       // though we don't yet know how big the literal will be.  We handle that
+       // by proceeding to the next iteration of the main loop.  We also can exit
+       // this loop via goto if we get close to exhausting the input.
+       EightBytesReference input_bytes;
+       uint32 candidate_bytes = 0;
+ 
+       do {
+         // We have a 4-byte match at ip, and no need to emit any
+         // "literal bytes" prior to ip.
+         const char* base = ip;
+-        int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
++        std::pair<size_t, bool> p =
++            FindMatchLength(candidate + 4, ip + 4, ip_end);
++        size_t matched = 4 + p.first;
+         ip += matched;
+         size_t offset = base - candidate;
+         assert(0 == memcmp(base, candidate, matched));
+-        op = EmitCopy(op, offset, matched);
+-        // We could immediately start working at ip now, but to improve
+-        // compression we first update table[Hash(ip - 1, ...)].
+-        const char* insert_tail = ip - 1;
++        if (p.second) {
++          op = EmitCopy</*len_less_than_12=*/true>(op, offset, matched);
++        } else {
++          op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched);
++        }
+         next_emit = ip;
+-        if (PREDICT_FALSE(ip >= ip_limit)) {
++        if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
+           goto emit_remainder;
+         }
+-        input_bytes = GetEightBytesAt(insert_tail);
++        // We are now looking for a 4-byte match again.  We read
++        // table[Hash(ip, shift)] for that.  To improve compression,
++        // we also update table[Hash(ip - 1, shift)] and table[Hash(ip, shift)].
++        input_bytes = GetEightBytesAt(ip - 1);
+         uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
+         table[prev_hash] = ip - base_ip - 1;
+         uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
+         candidate = base_ip + table[cur_hash];
+         candidate_bytes = UNALIGNED_LOAD32(candidate);
+         table[cur_hash] = ip - base_ip;
+       } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
+ 
+       next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
+       ++ip;
+     }
+   }
+ 
+  emit_remainder:
+   // Emit the remaining bytes as a literal
+   if (next_emit < ip_end) {
+-    op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
++    op = EmitLiteral</*allow_fast_path=*/false>(op, next_emit,
++                                                ip_end - next_emit);
+   }
+ 
+   return op;
+ }
+ }  // end namespace internal
+ 
++// Called back at every compression call to trace parameters and sizes.
++static inline void Report(const char *algorithm, size_t compressed_size,
++                          size_t uncompressed_size) {}
++
+ // Signature of output types needed by decompression code.
+ // The decompression code is templatized on a type that obeys this
+ // signature so that we do not pay virtual function call overhead in
+ // the middle of a tight decompression loop.
+ //
+ // class DecompressionWriter {
+ //  public:
+ //   // Called before decompression
+@@ -489,16 +709,38 @@ char* CompressFragment(const char* input
+ //   //    if <length> is 61 or more, as in this case the literal length is not
+ //   //    decoded fully. In practice, this should not be a big problem,
+ //   //    as it is unlikely that one would implement a fast path accepting
+ //   //    this much data.
+ //   //
+ //   bool TryFastAppend(const char* ip, size_t available, size_t length);
+ // };
+ 
++static inline uint32 ExtractLowBytes(uint32 v, int n) {
++  assert(n >= 0);
++  assert(n <= 4);
++#if SNAPPY_HAVE_BMI2
++  return _bzhi_u32(v, 8 * n);
++#else
++  // This needs to be wider than uint32 otherwise `mask << 32` will be
++  // undefined.
++  uint64 mask = 0xffffffff;
++  return v & ~(mask << (8 * n));
++#endif
++}
++
++static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
++  assert(shift < 32);
++  static const uint8 masks[] = {
++      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
++      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
++      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
++      0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe};
++  return (value & masks[shift]) != 0;
++}
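// Illustrative sketch (not part of the recorded patch): portable restatements
// of the two helpers above. ExtractLowBytes keeps the low n bytes of a 32-bit
// load; LeftShiftOverflows asks whether (value << shift) would lose bits above
// bit 31, which the varint length parser below uses to reject oversized input.
#include <cassert>
#include <cstdint>

uint32_t ExtractLowBytesSketch(uint32_t v, int n) {  // 0 <= n <= 4
  uint64_t mask = 0xffffffff;
  return v & ~static_cast<uint32_t>(mask << (8 * n));
}

bool LeftShiftOverflowsSketch(uint8_t value, uint32_t shift) {  // shift < 32
  return shift > 24 && (value >> (32 - shift)) != 0;
}

int main() {
  assert(ExtractLowBytesSketch(0xdeadbeef, 2) == 0xbeef);
  assert(ExtractLowBytesSketch(0xdeadbeef, 0) == 0);
  assert(!LeftShiftOverflowsSketch(0x7f, 21));  // any 7-bit group fits at shift 21
  assert(LeftShiftOverflowsSketch(0x10, 28));   // bit 4 would land at bit 32
}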
+ 
+ // Helper class for decompression
+ class SnappyDecompressor {
+  private:
+   Source*       reader_;         // Underlying source of bytes to decompress
+   const char*   ip_;             // Points to next buffered byte
+   const char*   ip_limit_;       // Points just past buffered bytes
+   uint32        peeked_;         // Bytes peeked from reader (need to skip)
+@@ -527,77 +769,108 @@ class SnappyDecompressor {
+   }
+ 
+   // Returns true iff we have hit the end of the input without an error.
+   bool eof() const {
+     return eof_;
+   }
+ 
+   // Read the uncompressed length stored at the start of the compressed data.
+-  // On succcess, stores the length in *result and returns true.
++  // On success, stores the length in *result and returns true.
+   // On failure, returns false.
+   bool ReadUncompressedLength(uint32* result) {
+     assert(ip_ == NULL);       // Must not have read anything yet
+     // Length is encoded in 1..5 bytes
+     *result = 0;
+     uint32 shift = 0;
+     while (true) {
+       if (shift >= 32) return false;
+       size_t n;
+       const char* ip = reader_->Peek(&n);
+       if (n == 0) return false;
+       const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+       reader_->Skip(1);
+       uint32 val = c & 0x7f;
+-      if (((val << shift) >> shift) != val) return false;
++      if (LeftShiftOverflows(static_cast<uint8>(val), shift)) return false;
+       *result |= val << shift;
+       if (c < 128) {
+         break;
+       }
+       shift += 7;
+     }
+     return true;
+   }
+ 
+   // Process the next item found in the input.
+   // Returns true if successful, false on error or end of input.
+   template <class Writer>
++#if defined(__GNUC__) && defined(__x86_64__)
++  __attribute__((aligned(32)))
++#endif
+   void DecompressAllTags(Writer* writer) {
++    // In x86, pad the function body to start 16 bytes later. This function has
++    // a couple of hotspots that are highly sensitive to alignment: we have
++    // observed regressions by more than 20% in some metrics just by moving the
++    // exact same code to a different position in the benchmark binary.
++    //
++    // Putting this code on a 32-byte-aligned boundary + 16 bytes makes us hit
++    // the "lucky" case consistently. Unfortunately, this is a very brittle
++    // workaround, and future differences in code generation may reintroduce
++    // this regression. If you experience a big, difficult to explain, benchmark
++    // performance regression here, first try removing this hack.
++#if defined(__GNUC__) && defined(__x86_64__)
++    // Two 8-byte "NOP DWORD ptr [EAX + EAX*1 + 00000000H]" instructions.
++    asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
++    asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
++#endif
++
+     const char* ip = ip_;
+-
+     // We could have put this refill fragment only at the beginning of the loop.
+     // However, duplicating it at the end of each branch gives the compiler more
+     // scope to optimize the <ip_limit_ - ip> expression based on the local
+     // context, which overall increases speed.
+     #define MAYBE_REFILL() \
+         if (ip_limit_ - ip < kMaximumTagLength) { \
+           ip_ = ip; \
+           if (!RefillTag()) return; \
+           ip = ip_; \
+         }
+ 
+     MAYBE_REFILL();
+     for ( ;; ) {
+       const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
+ 
+-      if ((c & 0x3) == LITERAL) {
++      // Ratio of iterations that have LITERAL vs non-LITERAL for different
++      // inputs.
++      //
++      // input          LITERAL  NON_LITERAL
++      // -----------------------------------
++      // html|html4|cp   23%        77%
++      // urls            36%        64%
++      // jpg             47%        53%
++      // pdf             19%        81%
++      // txt[1-4]        25%        75%
++      // pb              24%        76%
++      // bin             24%        76%
++      if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
+         size_t literal_length = (c >> 2) + 1u;
+         if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
+           assert(literal_length < 61);
+           ip += literal_length;
+-          // NOTE(user): There is no MAYBE_REFILL() here, as TryFastAppend()
++          // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
+           // will not return true unless there's already at least five spare
+           // bytes in addition to the literal.
+           continue;
+         }
+-        if (PREDICT_FALSE(literal_length >= 61)) {
++        if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
+           // Long literal.
+           const size_t literal_length_length = literal_length - 60;
+           literal_length =
+-              (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
++              ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) +
++              1;
+           ip += literal_length_length;
+         }
+ 
+         size_t avail = ip_limit_ - ip;
+         while (avail < literal_length) {
+           if (!writer->Append(ip, avail)) return;
+           literal_length -= avail;
+           reader_->Skip(peeked_);
+@@ -609,25 +882,26 @@ class SnappyDecompressor {
+           ip_limit_ = ip + avail;
+         }
+         if (!writer->Append(ip, literal_length)) {
+           return;
+         }
+         ip += literal_length;
+         MAYBE_REFILL();
+       } else {
+-        const uint32 entry = char_table[c];
+-        const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
+-        const uint32 length = entry & 0xff;
++        const size_t entry = char_table[c];
++        const size_t trailer =
++            ExtractLowBytes(LittleEndian::Load32(ip), entry >> 11);
++        const size_t length = entry & 0xff;
+         ip += entry >> 11;
+ 
+         // copy_offset/256 is encoded in bits 8..10.  By just fetching
+         // those bits, we get copy_offset (since the bit-field starts at
+         // bit 8).
+-        const uint32 copy_offset = entry & 0x700;
++        const size_t copy_offset = entry & 0x700;
+         if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
+           return;
+         }
+         MAYBE_REFILL();
+       }
+     }
+ 
+ #undef MAYBE_REFILL
+@@ -637,20 +911,18 @@ class SnappyDecompressor {
+ bool SnappyDecompressor::RefillTag() {
+   const char* ip = ip_;
+   if (ip == ip_limit_) {
+     // Fetch a new fragment from the reader
+     reader_->Skip(peeked_);   // All peeked bytes are used up
+     size_t n;
+     ip = reader_->Peek(&n);
+     peeked_ = n;
+-    if (n == 0) {
+-      eof_ = true;
+-      return false;
+-    }
++    eof_ = (n == 0);
++    if (eof_) return false;
+     ip_limit_ = ip + n;
+   }
+ 
+   // Read the tag character
+   assert(ip < ip_limit_);
+   const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+   const uint32 entry = char_table[c];
+   const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
+@@ -665,17 +937,17 @@ bool SnappyDecompressor::RefillTag() {
+     // read more than we need.
+     memmove(scratch_, ip, nbuf);
+     reader_->Skip(peeked_);  // All peeked bytes are used up
+     peeked_ = 0;
+     while (nbuf < needed) {
+       size_t length;
+       const char* src = reader_->Peek(&length);
+       if (length == 0) return false;
+-      uint32 to_add = min<uint32>(needed - nbuf, length);
++      uint32 to_add = std::min<uint32>(needed - nbuf, length);
+       memcpy(scratch_ + nbuf, src, to_add);
+       nbuf += to_add;
+       reader_->Skip(to_add);
+     }
+     assert(nbuf == needed);
+     ip_ = scratch_;
+     ip_limit_ = scratch_ + needed;
+   } else if (nbuf < kMaximumTagLength) {
+@@ -694,75 +966,73 @@ bool SnappyDecompressor::RefillTag() {
+ }
+ 
+ template <typename Writer>
+ static bool InternalUncompress(Source* r, Writer* writer) {
+   // Read the uncompressed length from the front of the compressed input
+   SnappyDecompressor decompressor(r);
+   uint32 uncompressed_len = 0;
+   if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
+-  return InternalUncompressAllTags(&decompressor, writer, uncompressed_len);
++
++  return InternalUncompressAllTags(&decompressor, writer, r->Available(),
++                                   uncompressed_len);
+ }
+ 
+ template <typename Writer>
+ static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
+                                       Writer* writer,
++                                      uint32 compressed_len,
+                                       uint32 uncompressed_len) {
++  Report("snappy_uncompress", compressed_len, uncompressed_len);
++
+   writer->SetExpectedLength(uncompressed_len);
+ 
+   // Process the entire input
+   decompressor->DecompressAllTags(writer);
+   writer->Flush();
+   return (decompressor->eof() && writer->CheckLength());
+ }
+ 
+ bool GetUncompressedLength(Source* source, uint32* result) {
+   SnappyDecompressor decompressor(source);
+   return decompressor.ReadUncompressedLength(result);
+ }
+ 
+ size_t Compress(Source* reader, Sink* writer) {
+   size_t written = 0;
+   size_t N = reader->Available();
++  const size_t uncompressed_size = N;
+   char ulength[Varint::kMax32];
+   char* p = Varint::Encode32(ulength, N);
+   writer->Append(ulength, p-ulength);
+   written += (p - ulength);
+ 
+-  internal::WorkingMemory wmem;
+-  char* scratch = NULL;
+-  char* scratch_output = NULL;
++  internal::WorkingMemory wmem(N);
+ 
+   while (N > 0) {
+     // Get next block to compress (without copying if possible)
+     size_t fragment_size;
+     const char* fragment = reader->Peek(&fragment_size);
+     assert(fragment_size != 0);  // premature end of input
+-    const size_t num_to_read = min(N, kBlockSize);
++    const size_t num_to_read = std::min(N, kBlockSize);
+     size_t bytes_read = fragment_size;
+ 
+     size_t pending_advance = 0;
+     if (bytes_read >= num_to_read) {
+       // Buffer returned by reader is large enough
+       pending_advance = num_to_read;
+       fragment_size = num_to_read;
+     } else {
+-      // Read into scratch buffer
+-      if (scratch == NULL) {
+-        // If this is the last iteration, we want to allocate N bytes
+-        // of space, otherwise the max possible kBlockSize space.
+-        // num_to_read contains exactly the correct value
+-        scratch = new char[num_to_read];
+-      }
++      char* scratch = wmem.GetScratchInput();
+       memcpy(scratch, fragment, bytes_read);
+       reader->Skip(bytes_read);
+ 
+       while (bytes_read < num_to_read) {
+         fragment = reader->Peek(&fragment_size);
+-        size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
++        size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
+         memcpy(scratch + bytes_read, fragment, n);
+         bytes_read += n;
+         reader->Skip(n);
+       }
+       assert(bytes_read == num_to_read);
+       fragment = scratch;
+       fragment_size = num_to_read;
+     }
+@@ -772,193 +1042,203 @@ size_t Compress(Source* reader, Sink* wr
+     int table_size;
+     uint16* table = wmem.GetHashTable(num_to_read, &table_size);
+ 
+     // Compress input_fragment and append to dest
+     const int max_output = MaxCompressedLength(num_to_read);
+ 
+     // Need a scratch buffer for the output, in case the byte sink doesn't
+     // have room for us directly.
+-    if (scratch_output == NULL) {
+-      scratch_output = new char[max_output];
+-    } else {
+-      // Since we encode kBlockSize regions followed by a region
+-      // which is <= kBlockSize in length, a previously allocated
+-      // scratch_output[] region is big enough for this iteration.
+-    }
+-    char* dest = writer->GetAppendBuffer(max_output, scratch_output);
+-    char* end = internal::CompressFragment(fragment, fragment_size,
+-                                           dest, table, table_size);
++
++    // Since we encode kBlockSize regions followed by a region
++    // which is <= kBlockSize in length, a previously allocated
++    // scratch_output[] region is big enough for this iteration.
++    char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput());
++    char* end = internal::CompressFragment(fragment, fragment_size, dest, table,
++                                           table_size);
+     writer->Append(dest, end - dest);
+     written += (end - dest);
+ 
+     N -= num_to_read;
+     reader->Skip(pending_advance);
+   }
+ 
+-  delete[] scratch;
+-  delete[] scratch_output;
++  Report("snappy_compress", written, uncompressed_size);
+ 
+   return written;
+ }
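// Illustrative sketch (not part of the recorded patch): a round trip through
// the string-based entry points declared in snappy.h, which funnel into the
// Compress()/InternalUncompress() machinery in this file.
#include <cassert>
#include <string>
#include "snappy.h"

int main() {
  const std::string original(10000, 'x');  // highly compressible input
  std::string compressed, restored;
  snappy::Compress(original.data(), original.size(), &compressed);
  assert(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
  assert(snappy::Uncompress(compressed.data(), compressed.size(), &restored));
  assert(restored == original);
}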
+ 
+ // -----------------------------------------------------------------------
+ // IOVec interfaces
+ // -----------------------------------------------------------------------
+ 
+ // A type that writes to an iovec.
+ // Note that this is not a "ByteSink", but a type that matches the
+ // Writer template argument to SnappyDecompressor::DecompressAllTags().
+ class SnappyIOVecWriter {
+  private:
++  // output_iov_end_ is set to iov + count and used to determine when
++  // the end of the iovs is reached.
++  const struct iovec* output_iov_end_;
++
++#if !defined(NDEBUG)
+   const struct iovec* output_iov_;
+-  const size_t output_iov_count_;
++#endif  // !defined(NDEBUG)
+ 
+-  // We are currently writing into output_iov_[curr_iov_index_].
+-  size_t curr_iov_index_;
++  // Current iov that is being written into.
++  const struct iovec* curr_iov_;
+ 
+-  // Bytes written to output_iov_[curr_iov_index_] so far.
+-  size_t curr_iov_written_;
++  // Pointer to current iov's write location.
++  char* curr_iov_output_;
++
++  // Remaining bytes to write into curr_iov_output.
++  size_t curr_iov_remaining_;
+ 
+   // Total bytes decompressed into output_iov_ so far.
+   size_t total_written_;
+ 
+   // Maximum number of bytes that will be decompressed into output_iov_.
+   size_t output_limit_;
+ 
+-  inline char* GetIOVecPointer(size_t index, size_t offset) {
+-    return reinterpret_cast<char*>(output_iov_[index].iov_base) +
+-        offset;
++  static inline char* GetIOVecPointer(const struct iovec* iov, size_t offset) {
++    return reinterpret_cast<char*>(iov->iov_base) + offset;
+   }
+ 
+  public:
+   // Does not take ownership of iov. iov must be valid during the
+   // entire lifetime of the SnappyIOVecWriter.
+   inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
+-      : output_iov_(iov),
+-        output_iov_count_(iov_count),
+-        curr_iov_index_(0),
+-        curr_iov_written_(0),
++      : output_iov_end_(iov + iov_count),
++#if !defined(NDEBUG)
++        output_iov_(iov),
++#endif  // !defined(NDEBUG)
++        curr_iov_(iov),
++        curr_iov_output_(iov_count ? reinterpret_cast<char*>(iov->iov_base)
++                                   : nullptr),
++        curr_iov_remaining_(iov_count ? iov->iov_len : 0),
+         total_written_(0),
+-        output_limit_(-1) {
+-  }
++        output_limit_(-1) {}
+ 
+   inline void SetExpectedLength(size_t len) {
+     output_limit_ = len;
+   }
+ 
+   inline bool CheckLength() const {
+     return total_written_ == output_limit_;
+   }
+ 
+   inline bool Append(const char* ip, size_t len) {
+     if (total_written_ + len > output_limit_) {
+       return false;
+     }
+ 
++    return AppendNoCheck(ip, len);
++  }
++
++  inline bool AppendNoCheck(const char* ip, size_t len) {
+     while (len > 0) {
+-      assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
+-      if (curr_iov_written_ >= output_iov_[curr_iov_index_].iov_len) {
++      if (curr_iov_remaining_ == 0) {
+         // This iovec is full. Go to the next one.
+-        if (curr_iov_index_ + 1 >= output_iov_count_) {
++        if (curr_iov_ + 1 >= output_iov_end_) {
+           return false;
+         }
+-        curr_iov_written_ = 0;
+-        ++curr_iov_index_;
++        ++curr_iov_;
++        curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
++        curr_iov_remaining_ = curr_iov_->iov_len;
+       }
+ 
+-      const size_t to_write = std::min(
+-          len, output_iov_[curr_iov_index_].iov_len - curr_iov_written_);
+-      memcpy(GetIOVecPointer(curr_iov_index_, curr_iov_written_),
+-             ip,
+-             to_write);
+-      curr_iov_written_ += to_write;
++      const size_t to_write = std::min(len, curr_iov_remaining_);
++      memcpy(curr_iov_output_, ip, to_write);
++      curr_iov_output_ += to_write;
++      curr_iov_remaining_ -= to_write;
+       total_written_ += to_write;
+       ip += to_write;
+       len -= to_write;
+     }
+ 
+     return true;
+   }
+ 
+   inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+     const size_t space_left = output_limit_ - total_written_;
+     if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
+-        output_iov_[curr_iov_index_].iov_len - curr_iov_written_ >= 16) {
++        curr_iov_remaining_ >= 16) {
+       // Fast path, used for the majority (about 95%) of invocations.
+-      char* ptr = GetIOVecPointer(curr_iov_index_, curr_iov_written_);
+-      UnalignedCopy64(ip, ptr);
+-      UnalignedCopy64(ip + 8, ptr + 8);
+-      curr_iov_written_ += len;
++      UnalignedCopy128(ip, curr_iov_output_);
++      curr_iov_output_ += len;
++      curr_iov_remaining_ -= len;
+       total_written_ += len;
+       return true;
+     }
+ 
+     return false;
+   }
+ 
+   inline bool AppendFromSelf(size_t offset, size_t len) {
+-    if (offset > total_written_ || offset == 0) {
++    // See SnappyArrayWriter::AppendFromSelf for an explanation of
++    // the "offset - 1u" trick.
++    if (offset - 1u >= total_written_) {
+       return false;
+     }
+     const size_t space_left = output_limit_ - total_written_;
+     if (len > space_left) {
+       return false;
+     }
+ 
+     // Locate the iovec from which we need to start the copy.
+-    size_t from_iov_index = curr_iov_index_;
+-    size_t from_iov_offset = curr_iov_written_;
++    const iovec* from_iov = curr_iov_;
++    size_t from_iov_offset = curr_iov_->iov_len - curr_iov_remaining_;
+     while (offset > 0) {
+       if (from_iov_offset >= offset) {
+         from_iov_offset -= offset;
+         break;
+       }
+ 
+       offset -= from_iov_offset;
+-      assert(from_iov_index > 0);
+-      --from_iov_index;
+-      from_iov_offset = output_iov_[from_iov_index].iov_len;
++      --from_iov;
++#if !defined(NDEBUG)
++      assert(from_iov >= output_iov_);
++#endif  // !defined(NDEBUG)
++      from_iov_offset = from_iov->iov_len;
+     }
+ 
+     // Copy <len> bytes starting from the iovec pointed to by from_iov_index to
+     // the current iovec.
+     while (len > 0) {
+-      assert(from_iov_index <= curr_iov_index_);
+-      if (from_iov_index != curr_iov_index_) {
+-        const size_t to_copy = std::min(
+-            output_iov_[from_iov_index].iov_len - from_iov_offset,
+-            len);
+-        Append(GetIOVecPointer(from_iov_index, from_iov_offset), to_copy);
++      assert(from_iov <= curr_iov_);
++      if (from_iov != curr_iov_) {
++        const size_t to_copy =
++            std::min(from_iov->iov_len - from_iov_offset, len);
++        AppendNoCheck(GetIOVecPointer(from_iov, from_iov_offset), to_copy);
+         len -= to_copy;
+         if (len > 0) {
+-          ++from_iov_index;
++          ++from_iov;
+           from_iov_offset = 0;
+         }
+       } else {
+-        assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
+-        size_t to_copy = std::min(output_iov_[curr_iov_index_].iov_len -
+-                                      curr_iov_written_,
+-                                  len);
++        size_t to_copy = curr_iov_remaining_;
+         if (to_copy == 0) {
+           // This iovec is full. Go to the next one.
+-          if (curr_iov_index_ + 1 >= output_iov_count_) {
++          if (curr_iov_ + 1 >= output_iov_end_) {
+             return false;
+           }
+-          ++curr_iov_index_;
+-          curr_iov_written_ = 0;
++          ++curr_iov_;
++          curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
++          curr_iov_remaining_ = curr_iov_->iov_len;
+           continue;
+         }
+         if (to_copy > len) {
+           to_copy = len;
+         }
+-        IncrementalCopy(GetIOVecPointer(from_iov_index, from_iov_offset),
+-                        GetIOVecPointer(curr_iov_index_, curr_iov_written_),
+-                        to_copy);
+-        curr_iov_written_ += to_copy;
++
++        IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset),
++                        curr_iov_output_, curr_iov_output_ + to_copy,
++                        curr_iov_output_ + curr_iov_remaining_);
++        curr_iov_output_ += to_copy;
++        curr_iov_remaining_ -= to_copy;
+         from_iov_offset += to_copy;
+         total_written_ += to_copy;
+         len -= to_copy;
+       }
+     }
+ 
+     return true;
+   }
+@@ -1017,76 +1297,57 @@ class SnappyArrayWriter {
+     return true;
+   }
+ 
+   inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+     char* op = op_;
+     const size_t space_left = op_limit_ - op;
+     if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
+       // Fast path, used for the majority (about 95%) of invocations.
+-      UnalignedCopy64(ip, op);
+-      UnalignedCopy64(ip + 8, op + 8);
++      UnalignedCopy128(ip, op);
+       op_ = op + len;
+       return true;
+     } else {
+       return false;
+     }
+   }
+ 
+   inline bool AppendFromSelf(size_t offset, size_t len) {
+-    char* op = op_;
+-    const size_t space_left = op_limit_ - op;
++    char* const op_end = op_ + len;
+ 
+     // Check if we try to append from before the start of the buffer.
+     // Normally this would just be a check for "produced < offset",
+     // but "produced <= offset - 1u" is equivalent for every case
+     // except the one where offset==0, where the right side will wrap around
+     // to a very big number. This is convenient, as offset==0 is another
+     // invalid case that we also want to catch, so that we do not go
+     // into an infinite loop.
+-    assert(op >= base_);
+-    size_t produced = op - base_;
+-    if (produced <= offset - 1u) {
+-      return false;
+-    }
+-    if (len <= 16 && offset >= 8 && space_left >= 16) {
+-      // Fast path, used for the majority (70-80%) of dynamic invocations.
+-      UnalignedCopy64(op - offset, op);
+-      UnalignedCopy64(op - offset + 8, op + 8);
+-    } else {
+-      if (space_left >= len + kMaxIncrementCopyOverflow) {
+-        IncrementalCopyFastPath(op - offset, op, len);
+-      } else {
+-        if (space_left < len) {
+-          return false;
+-        }
+-        IncrementalCopy(op - offset, op, len);
+-      }
+-    }
++    if (Produced() <= offset - 1u || op_end > op_limit_) return false;
++    op_ = IncrementalCopy(op_ - offset, op_, op_end, op_limit_);
+ 
+-    op_ = op + len;
+     return true;
+   }
+   inline size_t Produced() const {
++    assert(op_ >= base_);
+     return op_ - base_;
+   }
+   inline void Flush() {}
+ };
+ 
+ bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
+   ByteArraySource reader(compressed, n);
+   return RawUncompress(&reader, uncompressed);
+ }
+ 
+ bool RawUncompress(Source* compressed, char* uncompressed) {
+   SnappyArrayWriter output(uncompressed);
+   return InternalUncompress(compressed, &output);
+ }
+ 
+-bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
++bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) {
+   size_t ulength;
+   if (!GetUncompressedLength(compressed, n, &ulength)) {
+     return false;
+   }
+   // On 32-bit builds: max_size() < kuint32max.  Check for that instead
+   // of crashing (e.g., consider externally specified compressed data).
+   if (ulength > uncompressed->max_size()) {
+     return false;
+@@ -1144,19 +1405,20 @@ void RawCompress(const char* input,
+   ByteArraySource reader(input, input_length);
+   UncheckedByteArraySink writer(compressed);
+   Compress(&reader, &writer);
+ 
+   // Compute how many bytes were added
+   *compressed_length = (writer.CurrentDestination() - compressed);
+ }
+ 
+-size_t Compress(const char* input, size_t input_length, string* compressed) {
++size_t Compress(const char* input, size_t input_length,
++                std::string* compressed) {
+   // Pre-grow the buffer to the max length of the compressed output
+-  compressed->resize(MaxCompressedLength(input_length));
++  STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length));
+ 
+   size_t compressed_length;
+   RawCompress(input, input_length, string_as_array(compressed),
+               &compressed_length);
+   compressed->resize(compressed_length);
+   return compressed_length;
+ }
+ 
+@@ -1169,17 +1431,17 @@ size_t Compress(const char* input, size_
+ // allocates a buffer of "size" and appends that to the destination.
+ template <typename Allocator>
+ class SnappyScatteredWriter {
+   Allocator allocator_;
+ 
+   // We need random access into the data generated so far.  Therefore
+   // we keep track of all of the generated data as an array of blocks.
+   // All of the blocks except the last have length kBlockSize.
+-  vector<char*> blocks_;
++  std::vector<char*> blocks_;
+   size_t expected_;
+ 
+   // Total size of all fully generated blocks so far
+   size_t full_size_;
+ 
+   // Pointer into current output block
+   char* op_base_;       // Base of output block
+   char* op_ptr_;        // Pointer to next unfilled byte in block
+@@ -1228,36 +1490,33 @@ class SnappyScatteredWriter {
+   }
+ 
+   inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
+     char* op = op_ptr_;
+     const int space_left = op_limit_ - op;
+     if (length <= 16 && available >= 16 + kMaximumTagLength &&
+         space_left >= 16) {
+       // Fast path, used for the majority (about 95%) of invocations.
+-      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
+-      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
++      UnalignedCopy128(ip, op);
+       op_ptr_ = op + length;
+       return true;
+     } else {
+       return false;
+     }
+   }
+ 
+   inline bool AppendFromSelf(size_t offset, size_t len) {
++    char* const op_end = op_ptr_ + len;
+     // See SnappyArrayWriter::AppendFromSelf for an explanation of
+     // the "offset - 1u" trick.
+-    if (offset - 1u < op_ptr_ - op_base_) {
+-      const size_t space_left = op_limit_ - op_ptr_;
+-      if (space_left >= len + kMaxIncrementCopyOverflow) {
+-        // Fast path: src and dst in current block.
+-        IncrementalCopyFastPath(op_ptr_ - offset, op_ptr_, len);
+-        op_ptr_ += len;
+-        return true;
+-      }
++    if (SNAPPY_PREDICT_TRUE(offset - 1u < op_ptr_ - op_base_ &&
++                          op_end <= op_limit_)) {
++      // Fast path: src and dst in current block.
++      op_ptr_ = IncrementalCopy(op_ptr_ - offset, op_ptr_, op_end, op_limit_);
++      return true;
+     }
+     return SlowAppendFromSelf(offset, len);
+   }
+ 
+   // Called at the end of the decompress. We ask the allocator
+   // write all blocks to the sink.
+   inline void Flush() { allocator_.Flush(Produced()); }
+ };
+@@ -1275,17 +1534,17 @@ bool SnappyScatteredWriter<Allocator>::S
+     ip += avail;
+ 
+     // Bounds check
+     if (full_size_ + len > expected_) {
+       return false;
+     }
+ 
+     // Make new block
+-    size_t bsize = min<size_t>(kBlockSize, expected_ - full_size_);
++    size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
+     op_base_ = allocator_.Allocate(bsize);
+     op_ptr_ = op_base_;
+     op_limit_ = op_base_ + bsize;
+     blocks_.push_back(op_base_);
+     avail = bsize;
+   }
+ 
+   memcpy(op_ptr_, ip, len);
+@@ -1332,17 +1591,17 @@ class SnappySinkAllocator {
+   // random access to the blocks and once we hand the
+   // block over to the sink, we can't access it anymore.
+   // Also we don't write more than has been actually written
+   // to the blocks.
+   void Flush(size_t size) {
+     size_t size_written = 0;
+     size_t block_size;
+     for (int i = 0; i < blocks_.size(); ++i) {
+-      block_size = min<size_t>(blocks_[i].size, size - size_written);
++      block_size = std::min<size_t>(blocks_[i].size, size - size_written);
+       dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
+                                     &SnappySinkAllocator::Deleter, NULL);
+       size_written += block_size;
+     }
+     blocks_.clear();
+   }
+ 
+  private:
+@@ -1352,17 +1611,17 @@ class SnappySinkAllocator {
+     Datablock(char* p, size_t s) : data(p), size(s) {}
+   };
+ 
+   static void Deleter(void* arg, const char* bytes, size_t size) {
+     delete[] bytes;
+   }
+ 
+   Sink* dest_;
+-  vector<Datablock> blocks_;
++  std::vector<Datablock> blocks_;
+ 
+   // Note: copying this object is allowed
+ };
+ 
+ size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
+   SnappySinkAllocator allocator(uncompressed);
+   SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
+   InternalUncompress(compressed, &writer);
+@@ -1377,24 +1636,26 @@ bool Uncompress(Source* compressed, Sink
+     return false;
+   }
+ 
+   char c;
+   size_t allocated_size;
+   char* buf = uncompressed->GetAppendBufferVariable(
+       1, uncompressed_len, &c, 1, &allocated_size);
+ 
++  const size_t compressed_len = compressed->Available();
+   // If we can get a flat buffer, then use it, otherwise do block by block
+   // uncompression
+   if (allocated_size >= uncompressed_len) {
+     SnappyArrayWriter writer(buf);
+-    bool result = InternalUncompressAllTags(
+-        &decompressor, &writer, uncompressed_len);
++    bool result = InternalUncompressAllTags(&decompressor, &writer,
++                                            compressed_len, uncompressed_len);
+     uncompressed->Append(buf, writer.Produced());
+     return result;
+   } else {
+     SnappySinkAllocator allocator(uncompressed);
+     SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
+-    return InternalUncompressAllTags(&decompressor, &writer, uncompressed_len);
++    return InternalUncompressAllTags(&decompressor, &writer, compressed_len,
++                                     uncompressed_len);
+   }
+ }
+ 
+-} // end namespace snappy
++}  // namespace snappy
+diff --git a/other-licenses/snappy/src/snappy.h b/other-licenses/snappy/src/snappy.h
+--- a/other-licenses/snappy/src/snappy.h
++++ b/other-licenses/snappy/src/snappy.h
+@@ -34,17 +34,17 @@
+ // with long repeated sequences or compressing data that is similar to
+ // other data, while still compressing fast, you might look at first
+ // using BMDiff and then compressing the output of BMDiff with
+ // Snappy.
+ 
+ #ifndef THIRD_PARTY_SNAPPY_SNAPPY_H__
+ #define THIRD_PARTY_SNAPPY_SNAPPY_H__
+ 
+-#include <stddef.h>
++#include <cstddef>
+ #include <string>
+ 
+ #include "snappy-stubs-public.h"
+ 
+ namespace snappy {
+   class Source;
+   class Sink;
+ 
+@@ -64,30 +64,31 @@ namespace snappy {
+   // further operations, such as RawUncompress(). You will need to rewind
+   // or recreate the source yourself before attempting any further calls.
+   bool GetUncompressedLength(Source* source, uint32* result);
+ 
+   // ------------------------------------------------------------------------
+   // Higher-level string based routines (should be sufficient for most users)
+   // ------------------------------------------------------------------------
+ 
+-  // Sets "*output" to the compressed version of "input[0,input_length-1]".
+-  // Original contents of *output are lost.
++  // Sets "*compressed" to the compressed version of "input[0,input_length-1]".
++  // Original contents of *compressed are lost.
+   //
+-  // REQUIRES: "input[]" is not an alias of "*output".
+-  size_t Compress(const char* input, size_t input_length, string* output);
++  // REQUIRES: "input[]" is not an alias of "*compressed".
++  size_t Compress(const char* input, size_t input_length,
++                  std::string* compressed);
+ 
+   // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
+   // Original contents of "*uncompressed" are lost.
+   //
+   // REQUIRES: "compressed[]" is not an alias of "*uncompressed".
+   //
+   // returns false if the message is corrupted and could not be decompressed
+   bool Uncompress(const char* compressed, size_t compressed_length,
+-                  string* uncompressed);
++                  std::string* uncompressed);
+ 
+   // Decompresses "compressed" to "*uncompressed".
+   //
+   // returns false if the message is corrupted and could not be decompressed
+   bool Uncompress(Source* compressed, Sink* uncompressed);
+ 
+   // This routine uncompresses as much of the "compressed" as possible
+   // into sink.  It returns the number of valid bytes added to sink
+@@ -188,16 +189,19 @@ namespace snappy {
+   // code assumes that kBlockSize <= 65536; in particular, the hash table
+   // can only store 16-bit offsets, and EmitCopy() also assumes the offset
+   // is 65535 bytes or less. Note also that if you change this, it will
+   // affect the framing format (see framing_format.txt).
+   //
+   // Note that there might be older data around that is compressed with larger
+   // block sizes, so the decompression code should not rely on the
+   // non-existence of long backreferences.
+-  static const int kBlockLog = 16;
+-  static const size_t kBlockSize = 1 << kBlockLog;
++  static constexpr int kBlockLog = 16;
++  static constexpr size_t kBlockSize = 1 << kBlockLog;
+ 
+-  static const int kMaxHashTableBits = 14;
+-  static const size_t kMaxHashTableSize = 1 << kMaxHashTableBits;
++  static constexpr int kMinHashTableBits = 8;
++  static constexpr size_t kMinHashTableSize = 1 << kMinHashTableBits;
++
++  static constexpr int kMaxHashTableBits = 14;
++  static constexpr size_t kMaxHashTableSize = 1 << kMaxHashTableBits;
+ }  // end namespace snappy
+ 
+ #endif  // THIRD_PARTY_SNAPPY_SNAPPY_H__
+diff --git a/other-licenses/snappy/src/snappy-stubs-internal.cc b/other-licenses/snappy/src/snappy_compress_fuzzer.cc
+copy from other-licenses/snappy/src/snappy-stubs-internal.cc
+copy to other-licenses/snappy/src/snappy_compress_fuzzer.cc
+--- a/other-licenses/snappy/src/snappy-stubs-internal.cc
++++ b/other-licenses/snappy/src/snappy_compress_fuzzer.cc
+@@ -1,9 +1,9 @@
+-// Copyright 2011 Google Inc. All Rights Reserved.
++// Copyright 2019 Google Inc. All Rights Reserved.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ //     * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ //     * Redistributions in binary form must reproduce the above
+@@ -20,23 +20,40 @@
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++//
++// libFuzzer harness for fuzzing snappy compression code.
+ 
+-#include <algorithm>
++#include <cassert>
++#include <cstddef>
++#include <cstdint>
+ #include <string>
+ 
+-#include "snappy-stubs-internal.h"
++#include "snappy.h"
+ 
+-namespace snappy {
++// Entry point for LibFuzzer.
++extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
++  std::string input(reinterpret_cast<const char*>(data), size);
++
++  std::string compressed;
++  size_t compressed_size =
++      snappy::Compress(input.data(), input.size(), &compressed);
+ 
+-void Varint::Append32(string* s, uint32 value) {
+-  char buf[Varint::kMax32];
+-  const char* p = Varint::Encode32(buf, value);
+-  s->append(buf, p - buf);
++  (void)compressed_size;  // Variable only used in debug builds.
++  assert(compressed_size == compressed.size());
++  assert(compressed.size() <= snappy::MaxCompressedLength(input.size()));
++  assert(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
++
++  std::string uncompressed_after_compress;
++  bool uncompress_succeeded = snappy::Uncompress(
++      compressed.data(), compressed.size(), &uncompressed_after_compress);
++
++  (void)uncompress_succeeded;  // Variable only used in debug builds.
++  assert(uncompress_succeeded);
++  assert(input == uncompressed_after_compress);
++  return 0;
+ }
+-
+-}  // namespace snappy
+diff --git a/other-licenses/snappy/src/snappy-stubs-internal.cc b/other-licenses/snappy/src/snappy_uncompress_fuzzer.cc
+copy from other-licenses/snappy/src/snappy-stubs-internal.cc
+copy to other-licenses/snappy/src/snappy_uncompress_fuzzer.cc
+--- a/other-licenses/snappy/src/snappy-stubs-internal.cc
++++ b/other-licenses/snappy/src/snappy_uncompress_fuzzer.cc
+@@ -1,9 +1,9 @@
+-// Copyright 2011 Google Inc. All Rights Reserved.
++// Copyright 2019 Google Inc. All Rights Reserved.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ //     * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ //     * Redistributions in binary form must reproduce the above
+@@ -20,23 +20,38 @@
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++//
++// libFuzzer harness for fuzzing snappy's decompression code.
+ 
+-#include <algorithm>
++#include <cassert>
++#include <cstddef>
++#include <cstdint>
+ #include <string>
+ 
+-#include "snappy-stubs-internal.h"
++#include "snappy.h"
+ 
+-namespace snappy {
++// Entry point for LibFuzzer.
++extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
++  std::string input(reinterpret_cast<const char*>(data), size);
+ 
+-void Varint::Append32(string* s, uint32 value) {
+-  char buf[Varint::kMax32];
+-  const char* p = Varint::Encode32(buf, value);
+-  s->append(buf, p - buf);
++  // Avoid self-crafted decompression bombs.
++  size_t uncompressed_size;
++  constexpr size_t kMaxUncompressedSize = 1 << 20;
++  bool get_uncompressed_length_succeeded = snappy::GetUncompressedLength(
++      input.data(), input.size(), &uncompressed_size);
++  if (!get_uncompressed_length_succeeded ||
++      (uncompressed_size > kMaxUncompressedSize)) {
++    return 0;
++  }
++
++  std::string uncompressed;
++  // The return value of snappy::Uncompress() is ignored because decompression
++  // will fail on invalid inputs.
++  snappy::Uncompress(input.data(), input.size(), &uncompressed);
++  return 0;
+ }
+-
+-}  // namespace snappy
+diff --git a/other-licenses/snappy/src/snappy_unittest.cc b/other-licenses/snappy/src/snappy_unittest.cc
+--- a/other-licenses/snappy/src/snappy_unittest.cc
++++ b/other-licenses/snappy/src/snappy_unittest.cc
+@@ -24,19 +24,20 @@
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ 
+ #include <math.h>
+ #include <stdlib.h>
+ 
+-
+ #include <algorithm>
++#include <random>
+ #include <string>
++#include <utility>
+ #include <vector>
+ 
+ #include "snappy.h"
+ #include "snappy-internal.h"
+ #include "snappy-test.h"
+ #include "snappy-sinksource.h"
+ 
+ DEFINE_int32(start_len, -1,
+@@ -45,48 +46,40 @@ DEFINE_int32(end_len, -1,
+              "Starting prefix size for testing (-1: just full file contents)");
+ DEFINE_int32(bytes, 10485760,
+              "How many bytes to compress/uncompress per file for timing");
+ 
+ DEFINE_bool(zlib, false,
+             "Run zlib compression (http://www.zlib.net)");
+ DEFINE_bool(lzo, false,
+             "Run LZO compression (http://www.oberhumer.com/opensource/lzo/)");
+-DEFINE_bool(quicklz, false,
+-            "Run quickLZ compression (http://www.quicklz.com/)");
+-DEFINE_bool(liblzf, false,
+-            "Run libLZF compression "
+-            "(http://www.goof.com/pcg/marc/liblzf.html)");
+-DEFINE_bool(fastlz, false,
+-            "Run FastLZ compression (http://www.fastlz.org/");
+ DEFINE_bool(snappy, true, "Run snappy compression");
+ 
+ DEFINE_bool(write_compressed, false,
+             "Write compressed versions of each file to <file>.comp");
+ DEFINE_bool(write_uncompressed, false,
+             "Write uncompressed versions of each file to <file>.uncomp");
+ 
+ DEFINE_bool(snappy_dump_decompression_table, false,
+             "If true, we print the decompression table during tests.");
+ 
+ namespace snappy {
+ 
+-
+-#ifdef HAVE_FUNC_MMAP
++#if defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF)
+ 
+ // To test against code that reads beyond its input, this class copies a
+ // string to a newly allocated group of pages, the last of which
+ // is made unreadable via mprotect. Note that we need to allocate the
+ // memory with mmap(), as POSIX allows mprotect() only on memory allocated
+ // with mmap(), and some malloc/posix_memalign implementations expect to
+ // be able to read previously allocated memory while doing heap allocations.
+ class DataEndingAtUnreadablePage {
+  public:
+-  explicit DataEndingAtUnreadablePage(const string& s) {
+-    const size_t page_size = getpagesize();
++  explicit DataEndingAtUnreadablePage(const std::string& s) {
++    const size_t page_size = sysconf(_SC_PAGESIZE);
+     const size_t size = s.size();
+     // Round up space for string to a multiple of page_size.
+     size_t space_for_string = (size + page_size - 1) & ~(page_size - 1);
+     alloc_size_ = space_for_string + page_size;
+     mem_ = mmap(NULL, alloc_size_,
+                 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+     CHECK_NE(MAP_FAILED, mem_);
+     protected_page_ = reinterpret_cast<char*>(mem_) + space_for_string;
+@@ -94,75 +87,61 @@ class DataEndingAtUnreadablePage {
+     memcpy(dst, s.data(), size);
+     data_ = dst;
+     size_ = size;
+     // Make guard page unreadable.
+     CHECK_EQ(0, mprotect(protected_page_, page_size, PROT_NONE));
+   }
+ 
+   ~DataEndingAtUnreadablePage() {
++    const size_t page_size = sysconf(_SC_PAGESIZE);
+     // Undo the mprotect.
+-    CHECK_EQ(0, mprotect(protected_page_, getpagesize(), PROT_READ|PROT_WRITE));
++    CHECK_EQ(0, mprotect(protected_page_, page_size, PROT_READ|PROT_WRITE));
+     CHECK_EQ(0, munmap(mem_, alloc_size_));
+   }
+ 
+   const char* data() const { return data_; }
+   size_t size() const { return size_; }
+ 
+  private:
+   size_t alloc_size_;
+   void* mem_;
+   char* protected_page_;
+   const char* data_;
+   size_t size_;
+ };
+ 
+-#else  // HAVE_FUNC_MMAP
++#else  // defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF)
+ 
+ // Fallback for systems without mmap.
+-typedef string DataEndingAtUnreadablePage;
++using DataEndingAtUnreadablePage = std::string;
+ 
+ #endif
+ 
+ enum CompressorType {
+-  ZLIB, LZO, LIBLZF, QUICKLZ, FASTLZ, SNAPPY
++  ZLIB, LZO, SNAPPY
+ };
+ 
+ const char* names[] = {
+-  "ZLIB", "LZO", "LIBLZF", "QUICKLZ", "FASTLZ", "SNAPPY"
++  "ZLIB", "LZO", "SNAPPY"
+ };
+ 
+ static size_t MinimumRequiredOutputSpace(size_t input_size,
+                                          CompressorType comp) {
+   switch (comp) {
+ #ifdef ZLIB_VERSION
+     case ZLIB:
+       return ZLib::MinCompressbufSize(input_size);
+ #endif  // ZLIB_VERSION
+ 
+ #ifdef LZO_VERSION
+     case LZO:
+       return input_size + input_size/64 + 16 + 3;
+ #endif  // LZO_VERSION
+ 
+-#ifdef LZF_VERSION
+-    case LIBLZF:
+-      return input_size;
+-#endif  // LZF_VERSION
+-
+-#ifdef QLZ_VERSION_MAJOR
+-    case QUICKLZ:
+-      return input_size + 36000;  // 36000 is used for scratch.
+-#endif  // QLZ_VERSION_MAJOR
+-
+-#ifdef FASTLZ_VERSION
+-    case FASTLZ:
+-      return max(static_cast<int>(ceil(input_size * 1.05)), 66);
+-#endif  // FASTLZ_VERSION
+-
+     case SNAPPY:
+       return snappy::MaxCompressedLength(input_size);
+ 
+     default:
+       LOG(FATAL) << "Unknown compression type number " << comp;
+       return 0;
+   }
+ }
+@@ -170,17 +149,17 @@ static size_t MinimumRequiredOutputSpace
+ // Returns true if we successfully compressed, false otherwise.
+ //
+ // If compressed_is_preallocated is set, do not resize the compressed buffer.
+ // This is typically what you want for a benchmark, in order to not spend
+ // time in the memory allocator. If you do set this flag, however,
+ // "compressed" must be preinitialized to at least MinCompressbufSize(comp)
+ // number of bytes, and may contain junk bytes at the end after return.
+ static bool Compress(const char* input, size_t input_size, CompressorType comp,
+-                     string* compressed, bool compressed_is_preallocated) {
++                     std::string* compressed, bool compressed_is_preallocated) {
+   if (!compressed_is_preallocated) {
+     compressed->resize(MinimumRequiredOutputSpace(input_size, comp));
+   }
+ 
+   switch (comp) {
+ #ifdef ZLIB_VERSION
+     case ZLIB: {
+       ZLib zlib;
+@@ -212,68 +191,16 @@ static bool Compress(const char* input, 
+       delete[] mem;
+       if (!compressed_is_preallocated) {
+         compressed->resize(destlen);
+       }
+       break;
+     }
+ #endif  // LZO_VERSION
+ 
+-#ifdef LZF_VERSION
+-    case LIBLZF: {
+-      int destlen = lzf_compress(input,
+-                                 input_size,
+-                                 string_as_array(compressed),
+-                                 input_size);
+-      if (destlen == 0) {
+-        // lzf *can* cause lots of blowup when compressing, so they
+-        // recommend to limit outsize to insize, and just not compress
+-        // if it's bigger.  Ideally, we'd just swap input and output.
+-        compressed->assign(input, input_size);
+-        destlen = input_size;
+-      }
+-      if (!compressed_is_preallocated) {
+-        compressed->resize(destlen);
+-      }
+-      break;
+-    }
+-#endif  // LZF_VERSION
+-
+-#ifdef QLZ_VERSION_MAJOR
+-    case QUICKLZ: {
+-      qlz_state_compress *state_compress = new qlz_state_compress;
+-      int destlen = qlz_compress(input,
+-                                 string_as_array(compressed),
+-                                 input_size,
+-                                 state_compress);
+-      delete state_compress;
+-      CHECK_NE(0, destlen);
+-      if (!compressed_is_preallocated) {
+-        compressed->resize(destlen);
+-      }
+-      break;
+-    }
+-#endif  // QLZ_VERSION_MAJOR
+-
+-#ifdef FASTLZ_VERSION
+-    case FASTLZ: {
+-      // Use level 1 compression since we mostly care about speed.
+-      int destlen = fastlz_compress_level(
+-          1,
+-          input,
+-          input_size,
+-          string_as_array(compressed));
+-      if (!compressed_is_preallocated) {
+-        compressed->resize(destlen);
+-      }
+-      CHECK_NE(destlen, 0);
+-      break;
+-    }
+-#endif  // FASTLZ_VERSION
+-
+     case SNAPPY: {
+       size_t destlen;
+       snappy::RawCompress(input, input_size,
+                           string_as_array(compressed),
+                           &destlen);
+       CHECK_LE(destlen, snappy::MaxCompressedLength(input_size));
+       if (!compressed_is_preallocated) {
+         compressed->resize(destlen);
+@@ -283,18 +210,18 @@ static bool Compress(const char* input, 
+ 
+     default: {
+       return false;     // the asked-for library wasn't compiled in
+     }
+   }
+   return true;
+ }
+ 
+-static bool Uncompress(const string& compressed, CompressorType comp,
+-                       int size, string* output) {
++static bool Uncompress(const std::string& compressed, CompressorType comp,
++                       int size, std::string* output) {
+   switch (comp) {
+ #ifdef ZLIB_VERSION
+     case ZLIB: {
+       output->resize(size);
+       ZLib zlib;
+       uLongf destlen = output->size();
+       int ret = zlib.Uncompress(
+           reinterpret_cast<Bytef*>(string_as_array(output)),
+@@ -318,59 +245,16 @@ static bool Uncompress(const string& com
+           &destlen,
+           NULL);
+       CHECK_EQ(LZO_E_OK, ret);
+       CHECK_EQ(static_cast<lzo_uint>(size), destlen);
+       break;
+     }
+ #endif  // LZO_VERSION
+ 
+-#ifdef LZF_VERSION
+-    case LIBLZF: {
+-      output->resize(size);
+-      int destlen = lzf_decompress(compressed.data(),
+-                                   compressed.size(),
+-                                   string_as_array(output),
+-                                   output->size());
+-      if (destlen == 0) {
+-        // This error probably means we had decided not to compress,
+-        // and thus have stored input in output directly.
+-        output->assign(compressed.data(), compressed.size());
+-        destlen = compressed.size();
+-      }
+-      CHECK_EQ(destlen, size);
+-      break;
+-    }
+-#endif  // LZF_VERSION
+-
+-#ifdef QLZ_VERSION_MAJOR
+-    case QUICKLZ: {
+-      output->resize(size);
+-      qlz_state_decompress *state_decompress = new qlz_state_decompress;
+-      int destlen = qlz_decompress(compressed.data(),
+-                                   string_as_array(output),
+-                                   state_decompress);
+-      delete state_decompress;
+-      CHECK_EQ(destlen, size);
+-      break;
+-    }
+-#endif  // QLZ_VERSION_MAJOR
+-
+-#ifdef FASTLZ_VERSION
+-    case FASTLZ: {
+-      output->resize(size);
+-      int destlen = fastlz_decompress(compressed.data(),
+-                                      compressed.length(),
+-                                      string_as_array(output),
+-                                      size);
+-      CHECK_EQ(destlen, size);
+-      break;
+-    }
+-#endif  // FASTLZ_VERSION
+-
+     case SNAPPY: {
+       snappy::RawUncompress(compressed.data(), compressed.size(),
+                             string_as_array(output));
+       break;
+     }
+ 
+     default: {
+       return false;     // the asked-for library wasn't compiled in
+@@ -388,23 +272,23 @@ static void Measure(const char* data,
+   static const int kRuns = 5;
+   double ctime[kRuns];
+   double utime[kRuns];
+   int compressed_size = 0;
+ 
+   {
+     // Chop the input into blocks
+     int num_blocks = (length + block_size - 1) / block_size;
+-    vector<const char*> input(num_blocks);
+-    vector<size_t> input_length(num_blocks);
+-    vector<string> compressed(num_blocks);
+-    vector<string> output(num_blocks);
++    std::vector<const char*> input(num_blocks);
++    std::vector<size_t> input_length(num_blocks);
++    std::vector<std::string> compressed(num_blocks);
++    std::vector<std::string> output(num_blocks);
+     for (int b = 0; b < num_blocks; b++) {
+       int input_start = b * block_size;
+-      int input_limit = min<int>((b+1)*block_size, length);
++      int input_limit = std::min<int>((b+1)*block_size, length);
+       input[b] = data+input_start;
+       input_length[b] = input_limit-input_start;
+ 
+       // Pre-grow the output buffer so we don't measure string append time.
+       compressed[b].resize(MinimumRequiredOutputSpace(block_size, comp));
+     }
+ 
+     // First, try one trial compression to make sure the code is compiled in
+@@ -449,210 +333,213 @@ static void Measure(const char* data,
+     }
+ 
+     compressed_size = 0;
+     for (size_t i = 0; i < compressed.size(); i++) {
+       compressed_size += compressed[i].size();
+     }
+   }
+ 
+-  sort(ctime, ctime + kRuns);
+-  sort(utime, utime + kRuns);
++  std::sort(ctime, ctime + kRuns);
++  std::sort(utime, utime + kRuns);
+   const int med = kRuns/2;
+ 
+   float comp_rate = (length / ctime[med]) * repeats / 1048576.0;
+   float uncomp_rate = (length / utime[med]) * repeats / 1048576.0;
+-  string x = names[comp];
++  std::string x = names[comp];
+   x += ":";
+-  string urate = (uncomp_rate >= 0)
+-                 ? StringPrintf("%.1f", uncomp_rate)
+-                 : string("?");
++  std::string urate = (uncomp_rate >= 0) ? StrFormat("%.1f", uncomp_rate)
++                                         : std::string("?");
+   printf("%-7s [b %dM] bytes %6d -> %6d %4.1f%%  "
+          "comp %5.1f MB/s  uncomp %5s MB/s\n",
+          x.c_str(),
+          block_size/(1<<20),
+          static_cast<int>(length), static_cast<uint32>(compressed_size),
+-         (compressed_size * 100.0) / max<int>(1, length),
++         (compressed_size * 100.0) / std::max<int>(1, length),
+          comp_rate,
+          urate.c_str());
+ }
+ 
+-static int VerifyString(const string& input) {
+-  string compressed;
++static int VerifyString(const std::string& input) {
++  std::string compressed;
+   DataEndingAtUnreadablePage i(input);
+   const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+   CHECK_EQ(written, compressed.size());
+   CHECK_LE(compressed.size(),
+            snappy::MaxCompressedLength(input.size()));
+   CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ 
+-  string uncompressed;
++  std::string uncompressed;
+   DataEndingAtUnreadablePage c(compressed);
+   CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
+   CHECK_EQ(uncompressed, input);
+   return uncompressed.size();
+ }
+ 
+-static void VerifyStringSink(const string& input) {
+-  string compressed;
++static void VerifyStringSink(const std::string& input) {
++  std::string compressed;
+   DataEndingAtUnreadablePage i(input);
+   const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+   CHECK_EQ(written, compressed.size());
+   CHECK_LE(compressed.size(),
+            snappy::MaxCompressedLength(input.size()));
+   CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ 
+-  string uncompressed;
++  std::string uncompressed;
+   uncompressed.resize(input.size());
+   snappy::UncheckedByteArraySink sink(string_as_array(&uncompressed));
+   DataEndingAtUnreadablePage c(compressed);
+   snappy::ByteArraySource source(c.data(), c.size());
+   CHECK(snappy::Uncompress(&source, &sink));
+   CHECK_EQ(uncompressed, input);
+ }
+ 
+-static void VerifyIOVec(const string& input) {
+-  string compressed;
++static void VerifyIOVec(const std::string& input) {
++  std::string compressed;
+   DataEndingAtUnreadablePage i(input);
+   const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+   CHECK_EQ(written, compressed.size());
+   CHECK_LE(compressed.size(),
+            snappy::MaxCompressedLength(input.size()));
+   CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ 
+   // Try uncompressing into an iovec containing a random number of entries
+   // ranging from 1 to 10.
+   char* buf = new char[input.size()];
+-  ACMRandom rnd(input.size());
+-  size_t num = rnd.Next() % 10 + 1;
++  std::minstd_rand0 rng(input.size());
++  std::uniform_int_distribution<size_t> uniform_1_to_10(1, 10);
++  size_t num = uniform_1_to_10(rng);
+   if (input.size() < num) {
+     num = input.size();
+   }
+   struct iovec* iov = new iovec[num];
+   int used_so_far = 0;
++  std::bernoulli_distribution one_in_five(1.0 / 5);
+   for (size_t i = 0; i < num; ++i) {
++    assert(used_so_far < input.size());
+     iov[i].iov_base = buf + used_so_far;
+     if (i == num - 1) {
+       iov[i].iov_len = input.size() - used_so_far;
+     } else {
+       // Randomly choose to insert a 0 byte entry.
+-      if (rnd.OneIn(5)) {
++      if (one_in_five(rng)) {
+         iov[i].iov_len = 0;
+       } else {
+-        iov[i].iov_len = rnd.Uniform(input.size());
++        std::uniform_int_distribution<size_t> uniform_not_used_so_far(
++            0, input.size() - used_so_far - 1);
++        iov[i].iov_len = uniform_not_used_so_far(rng);
+       }
+     }
+     used_so_far += iov[i].iov_len;
+   }
+   CHECK(snappy::RawUncompressToIOVec(
+       compressed.data(), compressed.size(), iov, num));
+   CHECK(!memcmp(buf, input.data(), input.size()));
+   delete[] iov;
+   delete[] buf;
+ }
+ 
+ // Test that data compressed by a compressor that does not
+ // obey block sizes is uncompressed properly.
+-static void VerifyNonBlockedCompression(const string& input) {
++static void VerifyNonBlockedCompression(const std::string& input) {
+   if (input.length() > snappy::kBlockSize) {
+     // We cannot test larger blocks than the maximum block size, obviously.
+     return;
+   }
+ 
+-  string prefix;
++  std::string prefix;
+   Varint::Append32(&prefix, input.size());
+ 
+   // Setup compression table
+-  snappy::internal::WorkingMemory wmem;
++  snappy::internal::WorkingMemory wmem(input.size());
+   int table_size;
+   uint16* table = wmem.GetHashTable(input.size(), &table_size);
+ 
+   // Compress entire input in one shot
+-  string compressed;
++  std::string compressed;
+   compressed += prefix;
+   compressed.resize(prefix.size()+snappy::MaxCompressedLength(input.size()));
+   char* dest = string_as_array(&compressed) + prefix.size();
+   char* end = snappy::internal::CompressFragment(input.data(), input.size(),
+                                                 dest, table, table_size);
+   compressed.resize(end - compressed.data());
+ 
+-  // Uncompress into string
+-  string uncomp_str;
++  // Uncompress into std::string
++  std::string uncomp_str;
+   CHECK(snappy::Uncompress(compressed.data(), compressed.size(), &uncomp_str));
+   CHECK_EQ(uncomp_str, input);
+ 
+   // Uncompress using source/sink
+-  string uncomp_str2;
++  std::string uncomp_str2;
+   uncomp_str2.resize(input.size());
+   snappy::UncheckedByteArraySink sink(string_as_array(&uncomp_str2));
+   snappy::ByteArraySource source(compressed.data(), compressed.size());
+   CHECK(snappy::Uncompress(&source, &sink));
+   CHECK_EQ(uncomp_str2, input);
+ 
+   // Uncompress into iovec
+   {
+     static const int kNumBlocks = 10;
+     struct iovec vec[kNumBlocks];
+     const int block_size = 1 + input.size() / kNumBlocks;
+-    string iovec_data(block_size * kNumBlocks, 'x');
++    std::string iovec_data(block_size * kNumBlocks, 'x');
+     for (int i = 0; i < kNumBlocks; i++) {
+       vec[i].iov_base = string_as_array(&iovec_data) + i * block_size;
+       vec[i].iov_len = block_size;
+     }
+     CHECK(snappy::RawUncompressToIOVec(compressed.data(), compressed.size(),
+                                        vec, kNumBlocks));
+-    CHECK_EQ(string(iovec_data.data(), input.size()), input);
++    CHECK_EQ(std::string(iovec_data.data(), input.size()), input);
+   }
+ }
+ 
+ // Expand the input so that it is at least K times as big as block size
+-static string Expand(const string& input) {
++static std::string Expand(const std::string& input) {
+   static const int K = 3;
+-  string data = input;
++  std::string data = input;
+   while (data.size() < K * snappy::kBlockSize) {
+     data += input;
+   }
+   return data;
+ }
+ 
+-static int Verify(const string& input) {
++static int Verify(const std::string& input) {
+   VLOG(1) << "Verifying input of size " << input.size();
+ 
+   // Compress using string based routines
+   const int result = VerifyString(input);
+ 
+   // Verify using sink based routines
+   VerifyStringSink(input);
+ 
+   VerifyNonBlockedCompression(input);
+   VerifyIOVec(input);
+   if (!input.empty()) {
+-    const string expanded = Expand(input);
++    const std::string expanded = Expand(input);
+     VerifyNonBlockedCompression(expanded);
+     VerifyIOVec(input);
+   }
+ 
+   return result;
+ }
+ 
+-
+-static bool IsValidCompressedBuffer(const string& c) {
++static bool IsValidCompressedBuffer(const std::string& c) {
+   return snappy::IsValidCompressedBuffer(c.data(), c.size());
+ }
+-static bool Uncompress(const string& c, string* u) {
++static bool Uncompress(const std::string& c, std::string* u) {
+   return snappy::Uncompress(c.data(), c.size(), u);
+ }
+ 
+ // This test checks to ensure that snappy doesn't coredump if it gets
+ // corrupted data.
+ TEST(CorruptedTest, VerifyCorrupted) {
+-  string source = "making sure we don't crash with corrupted input";
++  std::string source = "making sure we don't crash with corrupted input";
+   VLOG(1) << source;
+-  string dest;
+-  string uncmp;
++  std::string dest;
++  std::string uncmp;
+   snappy::Compress(source.data(), source.size(), &dest);
+ 
+   // Mess around with the data. It's hard to simulate all possible
+   // corruptions; this is just one example ...
+   CHECK_GT(dest.size(), 3);
+   dest[1]--;
+   dest[3]++;
+   // this really ought to fail.
+@@ -689,19 +576,19 @@ TEST(CorruptedTest, VerifyCorrupted) {
+   // This decodes to about 2 MB; much smaller, but should still fail.
+   dest[0] = dest[1] = dest[2] = '\xff';
+   dest[3] = 0x00;
+   CHECK(!IsValidCompressedBuffer(dest));
+   CHECK(!Uncompress(dest, &uncmp));
+ 
+   // try reading stuff in from a bad file.
+   for (int i = 1; i <= 3; ++i) {
+-    string data = ReadTestDataFile(StringPrintf("baddata%d.snappy", i).c_str(),
+-                                   0);
+-    string uncmp;
++    std::string data =
++        ReadTestDataFile(StrFormat("baddata%d.snappy", i).c_str(), 0);
++    std::string uncmp;
+     // check that we don't return a crazy length
+     size_t ulen;
+     CHECK(!snappy::GetUncompressedLength(data.data(), data.size(), &ulen)
+           || (ulen < (1<<20)));
+     uint32 ulen2;
+     snappy::ByteArraySource source(data.data(), data.size());
+     CHECK(!snappy::GetUncompressedLength(&source, &ulen2) ||
+           (ulen2 < (1<<20)));
+@@ -709,37 +596,37 @@ TEST(CorruptedTest, VerifyCorrupted) {
+     CHECK(!Uncompress(data, &uncmp));
+   }
+ }
+ 
+ // Helper routines to construct arbitrary compressed strings.
+ // These mirror the compression code in snappy.cc, but are copied
+ // here so that we can bypass some limitations in the how snappy.cc
+ // invokes these routines.
+-static void AppendLiteral(string* dst, const string& literal) {
++static void AppendLiteral(std::string* dst, const std::string& literal) {
+   if (literal.empty()) return;
+   int n = literal.size() - 1;
+   if (n < 60) {
+     // Fit length in tag byte
+     dst->push_back(0 | (n << 2));
+   } else {
+     // Encode in upcoming bytes
+     char number[4];
+     int count = 0;
+     while (n > 0) {
+       number[count++] = n & 0xff;
+       n >>= 8;
+     }
+     dst->push_back(0 | ((59+count) << 2));
+-    *dst += string(number, count);
++    *dst += std::string(number, count);
+   }
+   *dst += literal;
+ }
+ 
+-static void AppendCopy(string* dst, int offset, int length) {
++static void AppendCopy(std::string* dst, int offset, int length) {
+   while (length > 0) {
+     // Figure out how much to copy in one shot
+     int to_copy;
+     if (length >= 68) {
+       to_copy = 64;
+     } else if (length > 64) {
+       to_copy = 60;
+     } else {
+@@ -766,96 +653,112 @@ static void AppendCopy(string* dst, int 
+ }
+ 
+ TEST(Snappy, SimpleTests) {
+   Verify("");
+   Verify("a");
+   Verify("ab");
+   Verify("abc");
+ 
+-  Verify("aaaaaaa" + string(16, 'b') + string("aaaaa") + "abc");
+-  Verify("aaaaaaa" + string(256, 'b') + string("aaaaa") + "abc");
+-  Verify("aaaaaaa" + string(2047, 'b') + string("aaaaa") + "abc");
+-  Verify("aaaaaaa" + string(65536, 'b') + string("aaaaa") + "abc");
+-  Verify("abcaaaaaaa" + string(65536, 'b') + string("aaaaa") + "abc");
++  Verify("aaaaaaa" + std::string(16, 'b') + std::string("aaaaa") + "abc");
++  Verify("aaaaaaa" + std::string(256, 'b') + std::string("aaaaa") + "abc");
++  Verify("aaaaaaa" + std::string(2047, 'b') + std::string("aaaaa") + "abc");
++  Verify("aaaaaaa" + std::string(65536, 'b') + std::string("aaaaa") + "abc");
++  Verify("abcaaaaaaa" + std::string(65536, 'b') + std::string("aaaaa") + "abc");
+ }
+ 
+ // Verify max blowup (lots of four-byte copies)
+ TEST(Snappy, MaxBlowup) {
+-  string input;
+-  for (int i = 0; i < 20000; i++) {
+-    ACMRandom rnd(i);
+-    uint32 bytes = static_cast<uint32>(rnd.Next());
+-    input.append(reinterpret_cast<char*>(&bytes), sizeof(bytes));
+-  }
+-  for (int i = 19999; i >= 0; i--) {
+-    ACMRandom rnd(i);
+-    uint32 bytes = static_cast<uint32>(rnd.Next());
+-    input.append(reinterpret_cast<char*>(&bytes), sizeof(bytes));
++  std::mt19937 rng;
++  std::uniform_int_distribution<int> uniform_byte(0, 255);
++  std::string input;
++  for (int i = 0; i < 80000; ++i)
++    input.push_back(static_cast<char>(uniform_byte(rng)));
++
++  for (int i = 0; i < 80000; i += 4) {
++    std::string four_bytes(input.end() - i - 4, input.end() - i);
++    input.append(four_bytes);
+   }
+   Verify(input);
+ }
+ 
+ TEST(Snappy, RandomData) {
+-  ACMRandom rnd(FLAGS_test_random_seed);
++  std::minstd_rand0 rng(FLAGS_test_random_seed);
++  std::uniform_int_distribution<int> uniform_0_to_3(0, 3);
++  std::uniform_int_distribution<int> uniform_0_to_8(0, 8);
++  std::uniform_int_distribution<int> uniform_byte(0, 255);
++  std::uniform_int_distribution<size_t> uniform_4k(0, 4095);
++  std::uniform_int_distribution<size_t> uniform_64k(0, 65535);
++  std::bernoulli_distribution one_in_ten(1.0 / 10);
+ 
+-  const int num_ops = 20000;
++  constexpr int num_ops = 20000;
+   for (int i = 0; i < num_ops; i++) {
+     if ((i % 1000) == 0) {
+       VLOG(0) << "Random op " << i << " of " << num_ops;
+     }
+ 
+-    string x;
+-    size_t len = rnd.Uniform(4096);
++    std::string x;
++    size_t len = uniform_4k(rng);
+     if (i < 100) {
+-      len = 65536 + rnd.Uniform(65536);
++      len = 65536 + uniform_64k(rng);
+     }
+     while (x.size() < len) {
+       int run_len = 1;
+-      if (rnd.OneIn(10)) {
+-        run_len = rnd.Skewed(8);
++      if (one_in_ten(rng)) {
++        int skewed_bits = uniform_0_to_8(rng);
++        // int is guaranteed to hold at least 16 bits, this uses at most 8 bits.
++        std::uniform_int_distribution<int> skewed_low(0,
++                                                      (1 << skewed_bits) - 1);
++        run_len = skewed_low(rng);
+       }
+-      char c = (i < 100) ? rnd.Uniform(256) : rnd.Skewed(3);
++      char c = static_cast<char>(uniform_byte(rng));
++      if (i >= 100) {
++        int skewed_bits = uniform_0_to_3(rng);
++        // int is guaranteed to hold at least 16 bits, this uses at most 3 bits.
++        std::uniform_int_distribution<int> skewed_low(0,
++                                                      (1 << skewed_bits) - 1);
++        c = static_cast<char>(skewed_low(rng));
++      }
+       while (run_len-- > 0 && x.size() < len) {
+-        x += c;
++        x.push_back(c);
+       }
+     }
+ 
+     Verify(x);
+   }
+ }
+ 
+ TEST(Snappy, FourByteOffset) {
+   // The new compressor cannot generate four-byte offsets since
+   // it chops up the input into 32KB pieces.  So we hand-emit the
+   // copy manually.
+ 
+   // The two fragments that make up the input string.
+-  string fragment1 = "012345689abcdefghijklmnopqrstuvwxyz";
+-  string fragment2 = "some other string";
++  std::string fragment1 = "012345689abcdefghijklmnopqrstuvwxyz";
++  std::string fragment2 = "some other string";
+ 
+   // How many times each fragment is emitted.
+   const int n1 = 2;
+   const int n2 = 100000 / fragment2.size();
+   const int length = n1 * fragment1.size() + n2 * fragment2.size();
+ 
+-  string compressed;
++  std::string compressed;
+   Varint::Append32(&compressed, length);
+ 
+   AppendLiteral(&compressed, fragment1);
+-  string src = fragment1;
++  std::string src = fragment1;
+   for (int i = 0; i < n2; i++) {
+     AppendLiteral(&compressed, fragment2);
+     src += fragment2;
+   }
+   AppendCopy(&compressed, src.size(), fragment1.size());
+   src += fragment1;
+   CHECK_EQ(length, src.size());
+ 
+-  string uncompressed;
++  std::string uncompressed;
+   CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+   CHECK(snappy::Uncompress(compressed.data(), compressed.size(),
+                            &uncompressed));
+   CHECK_EQ(uncompressed, src);
+ }
+ 
+ TEST(Snappy, IOVecEdgeCases) {
+   // Test some tricky edge cases in the iovec output that are not necessarily
+@@ -867,17 +770,17 @@ TEST(Snappy, IOVecEdgeCases) {
+   static const int kLengths[] = { 2, 1, 4, 8, 128 };
+ 
+   struct iovec iov[ARRAYSIZE(kLengths)];
+   for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+     iov[i].iov_base = new char[kLengths[i]];
+     iov[i].iov_len = kLengths[i];
+   }
+ 
+-  string compressed;
++  std::string compressed;
+   Varint::Append32(&compressed, 22);
+ 
+   // A literal whose output crosses three blocks.
+   // [ab] [c] [123 ] [        ] [        ]
+   AppendLiteral(&compressed, "abc123");
+ 
+   // A copy whose output crosses two blocks (source and destination
+   // segments marked).
+@@ -928,17 +831,17 @@ TEST(Snappy, IOVecLiteralOverflow) {
+   static const int kLengths[] = { 3, 4 };
+ 
+   struct iovec iov[ARRAYSIZE(kLengths)];
+   for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+     iov[i].iov_base = new char[kLengths[i]];
+     iov[i].iov_len = kLengths[i];
+   }
+ 
+-  string compressed;
++  std::string compressed;
+   Varint::Append32(&compressed, 8);
+ 
+   AppendLiteral(&compressed, "12345678");
+ 
+   CHECK(!snappy::RawUncompressToIOVec(
+       compressed.data(), compressed.size(), iov, ARRAYSIZE(iov)));
+ 
+   for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+@@ -950,94 +853,94 @@ TEST(Snappy, IOVecCopyOverflow) {
+   static const int kLengths[] = { 3, 4 };
+ 
+   struct iovec iov[ARRAYSIZE(kLengths)];
+   for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+     iov[i].iov_base = new char[kLengths[i]];
+     iov[i].iov_len = kLengths[i];
+   }
+ 
+-  string compressed;
++  std::string compressed;
+   Varint::Append32(&compressed, 8);
+ 
+   AppendLiteral(&compressed, "123");
+   AppendCopy(&compressed, 3, 5);
+ 
+   CHECK(!snappy::RawUncompressToIOVec(
+       compressed.data(), compressed.size(), iov, ARRAYSIZE(iov)));
+ 
+   for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+     delete[] reinterpret_cast<char *>(iov[i].iov_base);
+   }
+ }
+ 
+-static bool CheckUncompressedLength(const string& compressed,
++static bool CheckUncompressedLength(const std::string& compressed,
+                                     size_t* ulength) {
+   const bool result1 = snappy::GetUncompressedLength(compressed.data(),
+                                                      compressed.size(),
+                                                      ulength);
+ 
+   snappy::ByteArraySource source(compressed.data(), compressed.size());
+   uint32 length;
+   const bool result2 = snappy::GetUncompressedLength(&source, &length);
+   CHECK_EQ(result1, result2);
+   return result1;
+ }
+ 
+ TEST(SnappyCorruption, TruncatedVarint) {
+-  string compressed, uncompressed;
++  std::string compressed, uncompressed;
+   size_t ulength;
+   compressed.push_back('\xf0');
+   CHECK(!CheckUncompressedLength(compressed, &ulength));
+   CHECK(!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+   CHECK(!snappy::Uncompress(compressed.data(), compressed.size(),
+                             &uncompressed));
+ }
+ 
+ TEST(SnappyCorruption, UnterminatedVarint) {
+-  string compressed, uncompressed;
++  std::string compressed, uncompressed;
+   size_t ulength;
+   compressed.push_back('\x80');
+   compressed.push_back('\x80');
+   compressed.push_back('\x80');
+   compressed.push_back('\x80');
+   compressed.push_back('\x80');
+   compressed.push_back(10);
+   CHECK(!CheckUncompressedLength(compressed, &ulength));
+   CHECK(!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+   CHECK(!snappy::Uncompress(compressed.data(), compressed.size(),
+                             &uncompressed));
+ }
+ 
+ TEST(SnappyCorruption, OverflowingVarint) {
+-  string compressed, uncompressed;
++  std::string compressed, uncompressed;
+   size_t ulength;
+   compressed.push_back('\xfb');
+   compressed.push_back('\xff');
+   compressed.push_back('\xff');
+   compressed.push_back('\xff');
+   compressed.push_back('\x7f');
+   CHECK(!CheckUncompressedLength(compressed, &ulength));
+   CHECK(!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+   CHECK(!snappy::Uncompress(compressed.data(), compressed.size(),
+                             &uncompressed));
+ }
+ 
+ TEST(Snappy, ReadPastEndOfBuffer) {
+   // Check that we do not read past end of input
+ 
+   // Make a compressed string that ends with a single-byte literal
+-  string compressed;
++  std::string compressed;
+   Varint::Append32(&compressed, 1);
+   AppendLiteral(&compressed, "x");
+ 
+-  string uncompressed;
++  std::string uncompressed;
+   DataEndingAtUnreadablePage c(compressed);
+   CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
+-  CHECK_EQ(uncompressed, string("x"));
++  CHECK_EQ(uncompressed, std::string("x"));
+ }
+ 
+ // Check for an infinite loop caused by a copy with offset==0
+ TEST(Snappy, ZeroOffsetCopy) {
+   const char* compressed = "\x40\x12\x00\x00";
+   //  \x40              Length (must be > kMaxIncrementCopyOverflow)
+   //  \x12\x00\x00      Copy with offset==0, length==5
+   char uncompressed[100];
+@@ -1049,17 +952,20 @@ TEST(Snappy, ZeroOffsetCopyValidation) {
+   //  \x05              Length
+   //  \x12\x00\x00      Copy with offset==0, length==5
+   EXPECT_FALSE(snappy::IsValidCompressedBuffer(compressed, 4));
+ }
+ 
+ namespace {
+ 
+ int TestFindMatchLength(const char* s1, const char *s2, unsigned length) {
+-  return snappy::internal::FindMatchLength(s1, s2, s2 + length);
++  std::pair<size_t, bool> p =
++      snappy::internal::FindMatchLength(s1, s2, s2 + length);
++  CHECK_EQ(p.first < 8, p.second);
++  return p.first;
+ }
+ 
+ }  // namespace
+ 
+ TEST(Snappy, FindMatchLength) {
+   // Exercise all different code paths through the function.
+   // 64-bit version:
+ 
+@@ -1145,32 +1051,34 @@ TEST(Snappy, FindMatchLength) {
+   // Same, but edge cases.
+   EXPECT_EQ(10, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd?123", 14));
+   EXPECT_EQ(11, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd0?23", 14));
+   EXPECT_EQ(12, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd0132", 14));
+   EXPECT_EQ(13, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd012?", 14));
+ }
+ 
+ TEST(Snappy, FindMatchLengthRandom) {
+-  const int kNumTrials = 10000;
+-  const int kTypicalLength = 10;
+-  ACMRandom rnd(FLAGS_test_random_seed);
++  constexpr int kNumTrials = 10000;
++  constexpr int kTypicalLength = 10;
++  std::minstd_rand0 rng(FLAGS_test_random_seed);
++  std::uniform_int_distribution<int> uniform_byte(0, 255);
++  std::bernoulli_distribution one_in_two(1.0 / 2);
++  std::bernoulli_distribution one_in_typical_length(1.0 / kTypicalLength);
+ 
+   for (int i = 0; i < kNumTrials; i++) {
+-    string s, t;
+-    char a = rnd.Rand8();
+-    char b = rnd.Rand8();
+-    while (!rnd.OneIn(kTypicalLength)) {
+-      s.push_back(rnd.OneIn(2) ? a : b);
+-      t.push_back(rnd.OneIn(2) ? a : b);
++    std::string s, t;
++    char a = static_cast<char>(uniform_byte(rng));
++    char b = static_cast<char>(uniform_byte(rng));
++    while (!one_in_typical_length(rng)) {
++      s.push_back(one_in_two(rng) ? a : b);
++      t.push_back(one_in_two(rng) ? a : b);
+     }
+     DataEndingAtUnreadablePage u(s);
+     DataEndingAtUnreadablePage v(t);
+-    int matched = snappy::internal::FindMatchLength(
+-        u.data(), v.data(), v.data() + t.size());
++    int matched = TestFindMatchLength(u.data(), v.data(), t.size());
+     if (matched == t.size()) {
+       EXPECT_EQ(s, t);
+     } else {
+       EXPECT_NE(s[matched], t[matched]);
+       for (int j = 0; j < matched; j++) {
+         EXPECT_EQ(s[j], t[j]);
+       }
+     }
+@@ -1190,17 +1098,16 @@ static uint16 MakeEntry(unsigned int ext
+ // Check that the decompression table is correct, and optionally print out
+ // the computed one.
+ TEST(Snappy, VerifyCharTable) {
+   using snappy::internal::LITERAL;
+   using snappy::internal::COPY_1_BYTE_OFFSET;
+   using snappy::internal::COPY_2_BYTE_OFFSET;
+   using snappy::internal::COPY_4_BYTE_OFFSET;
+   using snappy::internal::char_table;
+-  using snappy::internal::wordmask;
+ 
+   uint16 dst[256];
+ 
+   // Place invalid entries in all places to detect missing initialization
+   int assigned = 0;
+   for (int i = 0; i < 256; i++) {
+     dst[i] = 0xffff;
+   }
+@@ -1267,59 +1174,56 @@ TEST(Snappy, VerifyCharTable) {
+ 
+   // Check that computed table matched recorded table.
+   for (int i = 0; i < 256; i++) {
+     EXPECT_EQ(dst[i], char_table[i]) << "Mismatch in byte " << i;
+   }
+ }
+ 
+ static void CompressFile(const char* fname) {
+-  string fullinput;
++  std::string fullinput;
+   CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults()));
+ 
+-  string compressed;
++  std::string compressed;
+   Compress(fullinput.data(), fullinput.size(), SNAPPY, &compressed, false);
+ 
+-  CHECK_OK(file::SetContents(string(fname).append(".comp"), compressed,
++  CHECK_OK(file::SetContents(std::string(fname).append(".comp"), compressed,
+                              file::Defaults()));
+ }
+ 
+ static void UncompressFile(const char* fname) {
+-  string fullinput;
++  std::string fullinput;
+   CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults()));
+ 
+   size_t uncompLength;
+   CHECK(CheckUncompressedLength(fullinput, &uncompLength));
+ 
+-  string uncompressed;
++  std::string uncompressed;
+   uncompressed.resize(uncompLength);
+   CHECK(snappy::Uncompress(fullinput.data(), fullinput.size(), &uncompressed));
+ 
+-  CHECK_OK(file::SetContents(string(fname).append(".uncomp"), uncompressed,
++  CHECK_OK(file::SetContents(std::string(fname).append(".uncomp"), uncompressed,
+                              file::Defaults()));
+ }
+ 
+ static void MeasureFile(const char* fname) {
+-  string fullinput;
++  std::string fullinput;
+   CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults()));
+   printf("%-40s :\n", fname);
+ 
+   int start_len = (FLAGS_start_len < 0) ? fullinput.size() : FLAGS_start_len;
+   int end_len = fullinput.size();
+   if (FLAGS_end_len >= 0) {
+-    end_len = min<int>(fullinput.size(), FLAGS_end_len);
++    end_len = std::min<int>(fullinput.size(), FLAGS_end_len);
+   }
+   for (int len = start_len; len <= end_len; len++) {
+     const char* const input = fullinput.data();
+     int repeats = (FLAGS_bytes + len) / (len + 1);
+     if (FLAGS_zlib)     Measure(input, len, ZLIB, repeats, 1024<<10);
+     if (FLAGS_lzo)      Measure(input, len, LZO, repeats, 1024<<10);
+-    if (FLAGS_liblzf)   Measure(input, len, LIBLZF, repeats, 1024<<10);
+-    if (FLAGS_quicklz)  Measure(input, len, QUICKLZ, repeats, 1024<<10);
+-    if (FLAGS_fastlz)   Measure(input, len, FASTLZ, repeats, 1024<<10);
+     if (FLAGS_snappy)    Measure(input, len, SNAPPY, repeats, 4096<<10);
+ 
+     // For block-size based measurements
+     if (0 && FLAGS_snappy) {
+       Measure(input, len, SNAPPY, repeats, 8<<10);
+       Measure(input, len, SNAPPY, repeats, 16<<10);
+       Measure(input, len, SNAPPY, repeats, 32<<10);
+       Measure(input, len, SNAPPY, repeats, 64<<10);
+@@ -1349,20 +1253,20 @@ static struct {
+ };
+ 
+ static void BM_UFlat(int iters, int arg) {
+   StopBenchmarkTiming();
+ 
+   // Pick file to process based on "arg"
+   CHECK_GE(arg, 0);
+   CHECK_LT(arg, ARRAYSIZE(files));
+-  string contents = ReadTestDataFile(files[arg].filename,
+-                                     files[arg].size_limit);
++  std::string contents =
++      ReadTestDataFile(files[arg].filename, files[arg].size_limit);
+ 
+-  string zcontents;
++  std::string zcontents;
+   snappy::Compress(contents.data(), contents.size(), &zcontents);
+   char* dst = new char[contents.size()];
+ 
+   SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+                              static_cast<int64>(contents.size()));
+   SetBenchmarkLabel(files[arg].label);
+   StartBenchmarkTiming();
+   while (iters-- > 0) {
+@@ -1375,20 +1279,20 @@ static void BM_UFlat(int iters, int arg)
+ BENCHMARK(BM_UFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
+ 
+ static void BM_UValidate(int iters, int arg) {
+   StopBenchmarkTiming();
+ 
+   // Pick file to process based on "arg"
+   CHECK_GE(arg, 0);
+   CHECK_LT(arg, ARRAYSIZE(files));
+-  string contents = ReadTestDataFile(files[arg].filename,
+-                                     files[arg].size_limit);
++  std::string contents =
++      ReadTestDataFile(files[arg].filename, files[arg].size_limit);
+ 
+-  string zcontents;
++  std::string zcontents;
+   snappy::Compress(contents.data(), contents.size(), &zcontents);
+ 
+   SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+                              static_cast<int64>(contents.size()));
+   SetBenchmarkLabel(files[arg].label);
+   StartBenchmarkTiming();
+   while (iters-- > 0) {
+     CHECK(snappy::IsValidCompressedBuffer(zcontents.data(), zcontents.size()));
+@@ -1398,20 +1302,20 @@ static void BM_UValidate(int iters, int 
+ BENCHMARK(BM_UValidate)->DenseRange(0, 4);
+ 
+ static void BM_UIOVec(int iters, int arg) {
+   StopBenchmarkTiming();
+ 
+   // Pick file to process based on "arg"
+   CHECK_GE(arg, 0);
+   CHECK_LT(arg, ARRAYSIZE(files));
+-  string contents = ReadTestDataFile(files[arg].filename,
+-                                     files[arg].size_limit);
++  std::string contents =
++      ReadTestDataFile(files[arg].filename, files[arg].size_limit);
+ 
+-  string zcontents;
++  std::string zcontents;
+   snappy::Compress(contents.data(), contents.size(), &zcontents);
+ 
+   // Uncompress into an iovec containing ten entries.
+   const int kNumEntries = 10;
+   struct iovec iov[kNumEntries];
+   char *dst = new char[contents.size()];
+   int used_so_far = 0;
+   for (int i = 0; i < kNumEntries; ++i) {
+@@ -1444,86 +1348,159 @@ static void BM_UIOVec(int iters, int arg
+ BENCHMARK(BM_UIOVec)->DenseRange(0, 4);
+ 
+ static void BM_UFlatSink(int iters, int arg) {
+   StopBenchmarkTiming();
+ 
+   // Pick file to process based on "arg"
+   CHECK_GE(arg, 0);
+   CHECK_LT(arg, ARRAYSIZE(files));
+-  string contents = ReadTestDataFile(files[arg].filename,
+-                                     files[arg].size_limit);
++  std::string contents =
++      ReadTestDataFile(files[arg].filename, files[arg].size_limit);
+ 
+-  string zcontents;
++  std::string zcontents;
+   snappy::Compress(contents.data(), contents.size(), &zcontents);
+   char* dst = new char[contents.size()];
+ 
+   SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+                              static_cast<int64>(contents.size()));
+   SetBenchmarkLabel(files[arg].label);
+   StartBenchmarkTiming();
+   while (iters-- > 0) {
+     snappy::ByteArraySource source(zcontents.data(), zcontents.size());
+     snappy::UncheckedByteArraySink sink(dst);
+     CHECK(snappy::Uncompress(&source, &sink));
+   }
+   StopBenchmarkTiming();
+ 
+-  string s(dst, contents.size());
++  std::string s(dst, contents.size());
+   CHECK_EQ(contents, s);
+ 
+   delete[] dst;
+ }
+ 
+ BENCHMARK(BM_UFlatSink)->DenseRange(0, ARRAYSIZE(files) - 1);
+ 
+ static void BM_ZFlat(int iters, int arg) {
+   StopBenchmarkTiming();
+ 
+   // Pick file to process based on "arg"
+   CHECK_GE(arg, 0);
+   CHECK_LT(arg, ARRAYSIZE(files));
+-  string contents = ReadTestDataFile(files[arg].filename,
+-                                     files[arg].size_limit);
++  std::string contents =
++      ReadTestDataFile(files[arg].filename, files[arg].size_limit);
+ 
+   char* dst = new char[snappy::MaxCompressedLength(contents.size())];
+ 
+   SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+                              static_cast<int64>(contents.size()));
+   StartBenchmarkTiming();
+ 
+   size_t zsize = 0;
+   while (iters-- > 0) {
+     snappy::RawCompress(contents.data(), contents.size(), dst, &zsize);
+   }
+   StopBenchmarkTiming();
+   const double compression_ratio =
+       static_cast<double>(zsize) / std::max<size_t>(1, contents.size());
+-  SetBenchmarkLabel(StringPrintf("%s (%.2f %%)",
+-                                 files[arg].label, 100.0 * compression_ratio));
+-  VLOG(0) << StringPrintf("compression for %s: %zd -> %zd bytes",
+-                          files[arg].label, contents.size(), zsize);
++  SetBenchmarkLabel(StrFormat("%s (%.2f %%)", files[arg].label,
++                              100.0 * compression_ratio));
++  VLOG(0) << StrFormat("compression for %s: %zd -> %zd bytes",
++                       files[arg].label, static_cast<int>(contents.size()),
++                       static_cast<int>(zsize));
+   delete[] dst;
+ }
+ BENCHMARK(BM_ZFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
+ 
++static void BM_ZFlatAll(int iters, int arg) {
++  StopBenchmarkTiming();
++
++  CHECK_EQ(arg, 0);
++  const int num_files = ARRAYSIZE(files);
++
++  std::vector<std::string> contents(num_files);
++  std::vector<char*> dst(num_files);
++
++  int64 total_contents_size = 0;
++  for (int i = 0; i < num_files; ++i) {
++    contents[i] = ReadTestDataFile(files[i].filename, files[i].size_limit);
++    dst[i] = new char[snappy::MaxCompressedLength(contents[i].size())];
++    total_contents_size += contents[i].size();
++  }
++
++  SetBenchmarkBytesProcessed(static_cast<int64>(iters) * total_contents_size);
++  StartBenchmarkTiming();
++
++  size_t zsize = 0;
++  while (iters-- > 0) {
++    for (int i = 0; i < num_files; ++i) {
++      snappy::RawCompress(contents[i].data(), contents[i].size(), dst[i],
++                          &zsize);
++    }
++  }
++  StopBenchmarkTiming();
++
++  for (int i = 0; i < num_files; ++i) {
++    delete[] dst[i];
++  }
++  SetBenchmarkLabel(StrFormat("%d files", num_files));
++}
++BENCHMARK(BM_ZFlatAll)->DenseRange(0, 0);
++
++static void BM_ZFlatIncreasingTableSize(int iters, int arg) {
++  StopBenchmarkTiming();
++
++  CHECK_EQ(arg, 0);
++  CHECK_GT(ARRAYSIZE(files), 0);
++  const std::string base_content =
++      ReadTestDataFile(files[0].filename, files[0].size_limit);
++
++  std::vector<std::string> contents;
++  std::vector<char*> dst;
++  int64 total_contents_size = 0;
++  for (int table_bits = kMinHashTableBits; table_bits <= kMaxHashTableBits;
++       ++table_bits) {
++    std::string content = base_content;
++    content.resize(1 << table_bits);
++    dst.push_back(new char[snappy::MaxCompressedLength(content.size())]);
++    total_contents_size += content.size();
++    contents.push_back(std::move(content));
++  }
++
++  size_t zsize = 0;
++  SetBenchmarkBytesProcessed(static_cast<int64>(iters) * total_contents_size);
++  StartBenchmarkTiming();
++  while (iters-- > 0) {
++    for (int i = 0; i < contents.size(); ++i) {
++      snappy::RawCompress(contents[i].data(), contents[i].size(), dst[i],
++                          &zsize);
++    }
++  }
++  StopBenchmarkTiming();
++
++  for (int i = 0; i < dst.size(); ++i) {
++    delete[] dst[i];
++  }
++  SetBenchmarkLabel(StrFormat("%zd tables", contents.size()));
++}
++BENCHMARK(BM_ZFlatIncreasingTableSize)->DenseRange(0, 0);
++
+ }  // namespace snappy
+ 
+-
+ int main(int argc, char** argv) {
+   InitGoogle(argv[0], &argc, &argv, true);
+   RunSpecifiedBenchmarks();
+ 
+   if (argc >= 2) {
+     for (int arg = 1; arg < argc; arg++) {
+       if (FLAGS_write_compressed) {
+-        CompressFile(argv[arg]);
++        snappy::CompressFile(argv[arg]);
+       } else if (FLAGS_write_uncompressed) {
+-        UncompressFile(argv[arg]);
++        snappy::UncompressFile(argv[arg]);
+       } else {
+-        MeasureFile(argv[arg]);
++        snappy::MeasureFile(argv[arg]);
+       }
+     }
+     return 0;
+   }
+ 
+   return RUN_ALL_TESTS();
+ }

+ 53 - 0
mozilla-release/patches/1434513-61a1.patch

@@ -0,0 +1,53 @@
+# HG changeset patch
+# User Adam Gashlin <agashlin@mozilla.com>
+# Date 1520380160 28800
+# Node ID 660d7b66d239b2d89f0b1a4e0f9d7a866ad3af56
+# Parent  ba5bfdf2b72b21ab2bfcd1a67c54153c2dd3d85a
+Bug 1434513 - Adjust bsdiff heuristics. r=glandium
+
+diff --git a/other-licenses/bsdiff/bsdiff.c b/other-licenses/bsdiff/bsdiff.c
+--- a/other-licenses/bsdiff/bsdiff.c
++++ b/other-licenses/bsdiff/bsdiff.c
+@@ -299,39 +299,39 @@ int main(int argc,char *argv[])
+ 					0,oldsize,&pos);
+ 
+ 			for(;scsc<scan+len;scsc++)
+ 			if((scsc+lastoffset<oldsize) &&
+ 				(old[scsc+lastoffset] == newbuf[scsc]))
+ 				oldscore++;
+ 
+ 			if(((len==oldscore) && (len!=0)) || 
+-				(len>oldscore+8)) break;
++				(len>oldscore+10)) break;
+ 
+ 			if((scan+lastoffset<oldsize) &&
+ 				(old[scan+lastoffset] == newbuf[scan]))
+ 				oldscore--;
+ 		};
+ 
+ 		if((len!=oldscore) || (scan==newsize)) {
+ 			MBSPatchTriple triple;
+ 
+ 			s=0;Sf=0;lenf=0;
+ 			for(i=0;(lastscan+i<scan)&&(lastpos+i<oldsize);) {
+ 				if(old[lastpos+i]==newbuf[lastscan+i]) s++;
+ 				i++;
+-				if(s*2-i>Sf*2-lenf) { Sf=s; lenf=i; };
++				if(s*3-i*2>Sf*3-lenf*2) { Sf=s; lenf=i; };
+ 			};
+ 
+ 			lenb=0;
+ 			if(scan<newsize) {
+ 				s=0;Sb=0;
+ 				for(i=1;(scan>=lastscan+i)&&(pos>=i);i++) {
+ 					if(old[pos-i]==newbuf[scan-i]) s++;
+-					if(s*2-i>Sb*2-lenb) { Sb=s; lenb=i; };
++					if(s*3-i*2>Sb*3-lenb*2) { Sb=s; lenb=i; };
+ 				};
+ 			};
+ 
+ 			if(lastscan+lenf>scan-lenb) {
+ 				overlap=(lastscan+lenf)-(scan-lenb);
+ 				s=0;Ss=0;lens=0;
+ 				for(i=0;i<overlap;i++) {
+ 					if(newbuf[lastscan+lenf-overlap+i]==
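
For orientation, the retuned heuristic above scores matched bytes (s) against consumed bytes (i) when deciding how far to extend a match: the old weighting 2*s - i accepts roughly 50% match density, while the new 3*s - 2*i demands roughly two thirds. Below is a minimal standalone sketch of the forward-extension scoring, using a hypothetical helper name and simplified from the loop in bsdiff.c:

#include <stddef.h>

/* Pick the prefix length of the next [0, avail) bytes that maximizes the
 * weighted score 3*s - 2*i, where s counts positions at which oldbuf and
 * newbuf agree.  This mirrors the patched condition
 * s*3 - i*2 > Sf*3 - lenf*2 in bsdiff.c. */
static size_t best_forward_len(const unsigned char *oldbuf,
                               const unsigned char *newbuf, size_t avail)
{
  long s = 0, Sf = 0, i = 0, lenf = 0;
  while (i < (long)avail) {
    if (oldbuf[i] == newbuf[i])
      s++;
    i++;
    if (s * 3 - i * 2 > Sf * 3 - lenf * 2) {
      Sf = s;
      lenf = i;
    }
  }
  return (size_t)lenf;
}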

+ 31 - 0
mozilla-release/patches/1458129-64a1.patch

@@ -0,0 +1,31 @@
+# HG changeset patch
+# User June Wilde <jewilde@mozilla.com>
+# Date 1539883748 14400
+# Node ID 379d79841c5d439654d77f264f7c9d730863059e
+# Parent  0ff0b54b9ec7c3e99acf8987b63995dbdce3e64a
+Bug 1458129 - Prevent double free in mar_sign.c. r=rstrong
+
+Differential Revision: https://phabricator.services.mozilla.com/D8924
+
+diff --git a/modules/libmar/sign/mar_sign.c b/modules/libmar/sign/mar_sign.c
+--- a/modules/libmar/sign/mar_sign.c
++++ b/modules/libmar/sign/mar_sign.c
+@@ -529,16 +529,17 @@ extract_signature(const char *src, uint3
+     fprintf(stderr, "ERROR: Signature index was out of range\n");
+     goto failure;
+   }
+ 
+   /* Skip to the correct signature */
+   for (i = 0; i <= sigIndex; i++) {
+     /* Avoid leaking while skipping signatures */
+     free(extractedSignature);
++    extractedSignature = NULL;
+ 
+     /* skip past the signature algorithm ID */
+     if (fseeko(fpSrc, sizeof(uint32_t), SEEK_CUR)) {
+       fprintf(stderr, "ERROR: Could not seek past sig algorithm ID.\n");
+       goto failure;
+     }
+ 
+     /* Get the signature length */
+
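
The one-line fix above is the usual free-then-NULL discipline: the loop frees the previous iteration's buffer before re-reading, and the shared failure path frees the same pointer again, so resetting it to NULL turns that second free into a harmless free(NULL). A minimal sketch of the pattern with hypothetical names (not the mar_sign.c code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Skip (n - 1) length-prefixed records and return the n-th (n >= 1).
 * Between the free() at the top of the loop and the next allocation there
 * are reads that can fail; resetting buf right after free() keeps the
 * shared error path from freeing already-freed memory. */
static int read_nth_record(FILE *fp, unsigned n, char **out)
{
  char *buf = NULL;
  uint32_t len;
  unsigned i;

  for (i = 0; i < n; i++) {
    free(buf);
    buf = NULL;                      /* the actual fix */

    if (fread(&len, sizeof(len), 1, fp) != 1)
      goto failure;                  /* without the reset, buf would be stale here */
    buf = malloc(len ? len : 1);
    if (!buf || (len != 0 && fread(buf, len, 1, fp) != 1))
      goto failure;
  }
  *out = buf;
  return 0;

failure:
  free(buf);                         /* free(NULL) is a no-op */
  return -1;
}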

+ 119 - 0
mozilla-release/patches/1468539-65a1.patch

@@ -0,0 +1,119 @@
+# HG changeset patch
+# User June Wilde <jewilde@mozilla.com>
+# Date 1541471382 18000
+# Node ID 609b4f85c0d3da3d29c4f45581af9b2aa465f1e5
+# Parent  0874b793fc046bf31ca80c673d5dfa0bb38ea343
+Bug 1468539 - Limit libmar from reading more than a single Additional_Section. r=mhowell
+
+Only a single type of additional block has ever been defined for the MAR
+archive format and only a single block of that type is needed per file.
+Limiting ourselves to reading only that until we define more seems
+sensible.
+
+Move additionalBlockSize check before first fread
+Add MAXADDITIONALBLOCKSIZE as a constant for checking block sizes
+
+Differential Revision: https://phabricator.services.mozilla.com/D10797
+
+diff --git a/modules/libmar/src/mar_read.c b/modules/libmar/src/mar_read.c
+--- a/modules/libmar/src/mar_read.c
++++ b/modules/libmar/src/mar_read.c
+@@ -12,16 +12,22 @@
+ #include "mar.h"
+ 
+ #ifdef XP_WIN
+ #include <winsock2.h>
+ #else
+ #include <netinet/in.h>
+ #endif
+ 
++/* This block must be at most 104 bytes.
++   MAR channel name < 64 bytes, and product version < 32 bytes + 3 NULL
++   terminator bytes. We only check for 96 though because we remove 8
++   bytes above from the additionalBlockSize: We subtract
++   sizeof(additionalBlockSize) and sizeof(additionalBlockID) */
++#define MAXADDITIONALBLOCKSIZE 96
+ 
+ /* this is the same hash algorithm used by nsZipArchive.cpp */
+ static uint32_t mar_hash_name(const char *name) {
+   uint32_t val = 0;
+   unsigned char* c;
+ 
+   for (c = (unsigned char *) name; *c; ++c)
+     val = val*37 + *c;
+@@ -392,62 +398,61 @@ read_product_info_block(char *path,
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+ */
+ int
+ mar_read_product_info_block(MarFile *mar,
+                             struct ProductInformationBlock *infoBlock)
+ {
+-  uint32_t i, offsetAdditionalBlocks, numAdditionalBlocks,
++  uint32_t offsetAdditionalBlocks, numAdditionalBlocks,
+     additionalBlockSize, additionalBlockID;
+   int hasAdditionalBlocks;
+ 
+   /* The buffer size is 97 bytes because the MAR channel name < 64 bytes, and
+      product version < 32 bytes + 3 NULL terminator bytes. */
+-  char buf[97] = { '\0' };
++  char buf[MAXADDITIONALBLOCKSIZE + 1] = { '\0' };
+   if (get_mar_file_info_fp(mar->fp, NULL, NULL,
+                            &hasAdditionalBlocks,
+                            &offsetAdditionalBlocks,
+                            &numAdditionalBlocks) != 0) {
+     return -1;
+   }
+-  for (i = 0; i < numAdditionalBlocks; ++i) {
++
++  /* We only have the one additional block type and only one is expected to be
++     in a MAR file so check if any exist and process the first found */
++  if (numAdditionalBlocks > 0) {
+     /* Read the additional block size */
+     if (fread(&additionalBlockSize,
+               sizeof(additionalBlockSize),
+               1, mar->fp) != 1) {
+       return -1;
+     }
+     additionalBlockSize = ntohl(additionalBlockSize) -
+                           sizeof(additionalBlockSize) -
+                           sizeof(additionalBlockID);
+ 
++    /* Additional Block sizes should only be 96 bytes long */
++    if (additionalBlockSize > MAXADDITIONALBLOCKSIZE) {
++      return -1;
++    }
++
+     /* Read the additional block ID */
+     if (fread(&additionalBlockID,
+               sizeof(additionalBlockID),
+               1, mar->fp) != 1) {
+       return -1;
+     }
+     additionalBlockID = ntohl(additionalBlockID);
+ 
+     if (PRODUCT_INFO_BLOCK_ID == additionalBlockID) {
+       const char *location;
+       int len;
+ 
+-      /* This block must be at most 104 bytes.
+-         MAR channel name < 64 bytes, and product version < 32 bytes + 3 NULL
+-         terminator bytes. We only check for 96 though because we remove 8
+-         bytes above from the additionalBlockSize: We subtract
+-         sizeof(additionalBlockSize) and sizeof(additionalBlockID) */
+-      if (additionalBlockSize > 96) {
+-        return -1;
+-      }
+-
+-    if (fread(buf, additionalBlockSize, 1, mar->fp) != 1) {
++      if (fread(buf, additionalBlockSize, 1, mar->fp) != 1) {
+         return -1;
+       }
+ 
+       /* Extract the MAR channel name from the buffer.  For now we
+          point to the stack allocated buffer but we strdup this
+          if we are within bounds of each field's max length. */
+       location = buf;
+       len = strlen(location);
+
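
The reordering above is the general rule for length-prefixed records: validate the size field, which comes straight from the file, against a fixed cap before it is ever used as a read length. A minimal sketch of that ordering, with hypothetical names and simplified from mar_read.c:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohl(); winsock2.h on Windows */

#define MAX_BLOCK_SIZE 96

/* Read one size-prefixed block into a fixed buffer.  The size is rejected
 * before the second fread(), so an oversized field can never overrun buf. */
static int read_block(FILE *fp, char buf[MAX_BLOCK_SIZE + 1])
{
  uint32_t size;

  if (fread(&size, sizeof(size), 1, fp) != 1)
    return -1;
  size = ntohl(size);

  if (size > MAX_BLOCK_SIZE)         /* check first, read second */
    return -1;

  memset(buf, 0, MAX_BLOCK_SIZE + 1);
  if (size != 0 && fread(buf, size, 1, fp) != 1)
    return -1;
  return 0;
}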

+ 116 - 0
mozilla-release/patches/1468542-65a1.patch

@@ -0,0 +1,116 @@
+# HG changeset patch
+# User June Wilde <jewilde@mozilla.com>
+# Date 1542653870 -7200
+# Node ID 97613730a99f6156f256d351b5b7767b1d80a2b6
+# Parent  c376952fc91969b904b31cc0da568bb716cc17df
+Bug 1468542 - Restrict acceptable bounds for i in nss_secutil; r=rstrong
+
+Summary:
+Adds some missing braces on if structures
+Adds a check for i being larger or equal to nb
+
+Reviewers: rstrong
+
+Reviewed By: rstrong
+
+Bug #: 1468542
+
+Differential Revision: https://phabricator.services.mozilla.com/D12193
+
+diff --git a/modules/libmar/sign/nss_secutil.c b/modules/libmar/sign/nss_secutil.c
+--- a/modules/libmar/sign/nss_secutil.c
++++ b/modules/libmar/sign/nss_secutil.c
+@@ -102,30 +102,30 @@ GetPasswordString(void *arg, char *promp
+     phrase[PORT_Strlen(phrase)-1] = 0;
+   }
+   return (char*) PORT_Strdup(phrase);
+ }
+ 
+ char *
+ SECU_FilePasswd(PK11SlotInfo *slot, PRBool retry, void *arg)
+ {
+-  char* phrases, *phrase;
++  char *phrases, *phrase;
+   PRFileDesc *fd;
+   int32_t nb;
+   char *pwFile = arg;
+   int i;
+   const long maxPwdFileSize = 4096;
+-  char* tokenName = NULL;
++  char *tokenName = NULL;
+   int tokenLen = 0;
+ 
+   if (!pwFile)
+     return 0;
+ 
+   if (retry) {
+-    return 0;  /* no good retrying - the files contents will be the same */
++    return 0; /* no good retrying - the files contents will be the same */
+   }
+ 
+   phrases = PORT_ZAlloc(maxPwdFileSize);
+ 
+   if (!phrases) {
+     return 0; /* out of memory */
+   }
+ 
+@@ -148,43 +148,48 @@ SECU_FilePasswd(PK11SlotInfo *slot, PRBo
+ 
+   if (slot) {
+     tokenName = PK11_GetTokenName(slot);
+     if (tokenName) {
+       tokenLen = PORT_Strlen(tokenName);
+     }
+   }
+   i = 0;
+-  do
+-  {
++  do {
+     int startphrase = i;
+     int phraseLen;
+ 
+     /* handle the Windows EOL case */
+-    while (phrases[i] != '\r' && phrases[i] != '\n' && i < nb) i++;
++    while (phrases[i] != '\r' && phrases[i] != '\n' && i < nb)
++      i++;
+     /* terminate passphrase */
+-    phrases[i++] = '\0';
++    if (i < nb) {
++      phrases[i++] = '\0';
++    }
+     /* clean up any EOL before the start of the next passphrase */
+-    while ( (i<nb) && (phrases[i] == '\r' || phrases[i] == '\n')) {
++    while ((i < nb) && (phrases[i] == '\r' || phrases[i] == '\n')) {
+       phrases[i++] = '\0';
+     }
+     /* now analyze the current passphrase */
+     phrase = &phrases[startphrase];
+     if (!tokenName)
+       break;
+-    if (PORT_Strncmp(phrase, tokenName, tokenLen)) continue;
++    if (PORT_Strncmp(phrase, tokenName, tokenLen))
++      continue;
+     phraseLen = PORT_Strlen(phrase);
+-    if (phraseLen < (tokenLen+1)) continue;
+-    if (phrase[tokenLen] != ':') continue;
++    if (phraseLen < (tokenLen + 1))
++      continue;
++    if (phrase[tokenLen] != ':')
++      continue;
+     phrase = &phrase[tokenLen+1];
+     break;
+ 
+-  } while (i<nb);
++  } while (i < nb);
+ 
+-  phrase = PORT_Strdup((char*)phrase);
++  phrase = PORT_Strdup((char *)phrase);
+   PORT_Free(phrases);
+   return phrase;
+ }
+ 
+ char *
+ SECU_GetModulePassword(PK11SlotInfo *slot, PRBool retry, void *arg)
+ {
+     char prompt[255];
+
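
Formatting changes aside, the substantive fix above is that the NUL terminator is only written while the index is still inside the buffer read from the password file. A minimal sketch of a bounded scan over newline-delimited entries, with hypothetical names (not the SECU_FilePasswd code):

#include <stddef.h>

/* NUL-terminate each CR/LF-delimited entry in a buffer of nb bytes, never
 * indexing past nb; a trailing entry with no EOL is left untouched.
 * Returns the number of entries terminated. */
static int terminate_entries(char *buf, size_t nb)
{
  size_t i = 0;
  int count = 0;

  while (i < nb) {
    /* bounds check before every dereference */
    while (i < nb && buf[i] != '\r' && buf[i] != '\n')
      i++;
    if (i < nb) {                    /* only write inside the buffer */
      buf[i++] = '\0';
      count++;
    }
    /* swallow the rest of the EOL sequence */
    while (i < nb && (buf[i] == '\r' || buf[i] == '\n'))
      buf[i++] = '\0';
  }
  return count;
}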

+ 207 - 0
mozilla-release/patches/1468544-65a1.patch

@@ -0,0 +1,207 @@
+# HG changeset patch
+# User June Wilde <jewilde@mozilla.com>
+# Date 1541529261 18000
+# Node ID bfc6238f43786da1babdc8ee547ddc39ddbcb216
+# Parent  ed16256499502d0331ba5a00702f7b26168c2510
+Bug 1468544 - Replace mar_hash_name with CityHash algorithm. r=rstrong
+
+Make CityHash64, CityHash64WithSeed, and CityHash64WithSeeds usable from C code
+Remove unnecessary includes from mar_read.c as well
+Add DisableStlWrapping to mar tool's moz.build to fix linkage break when
+building in Windows with MSVC
+
+Differential Revision: https://phabricator.services.mozilla.com/D10774
+
+diff --git a/modules/libmar/src/mar_read.c b/modules/libmar/src/mar_read.c
+--- a/modules/libmar/src/mar_read.c
++++ b/modules/libmar/src/mar_read.c
+@@ -3,41 +3,29 @@
+ /* This Source Code Form is subject to the terms of the Mozilla Public
+  * License, v. 2.0. If a copy of the MPL was not distributed with this
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+ #include <sys/types.h>
+ #include <fcntl.h>
+ #include <stdlib.h>
+ #include <string.h>
++#include "city.h"
+ #include "mar_private.h"
+ #include "mar.h"
+ 
+-#ifdef XP_WIN
+-#include <winsock2.h>
+-#else
+-#include <netinet/in.h>
+-#endif
+-
+ /* This block must be at most 104 bytes.
+    MAR channel name < 64 bytes, and product version < 32 bytes + 3 NULL
+    terminator bytes. We only check for 96 though because we remove 8
+    bytes above from the additionalBlockSize: We subtract
+    sizeof(additionalBlockSize) and sizeof(additionalBlockID) */
+ #define MAXADDITIONALBLOCKSIZE 96
+ 
+-/* this is the same hash algorithm used by nsZipArchive.cpp */
+ static uint32_t mar_hash_name(const char *name) {
+-  uint32_t val = 0;
+-  unsigned char* c;
+-
+-  for (c = (unsigned char *) name; *c; ++c)
+-    val = val*37 + *c;
+-
+-  return val % TABLESIZE;
++  return CityHash64(name, strlen(name)) % TABLESIZE;
+ }
+ 
+ static int mar_insert_item(MarFile *mar, const char *name, int namelen,
+                            uint32_t offset, uint32_t length, uint32_t flags) {
+   MarItem *item, *root;
+   uint32_t hash;
+ 
+   item = (MarItem *) malloc(sizeof(MarItem) + namelen);
+diff --git a/modules/libmar/src/moz.build b/modules/libmar/src/moz.build
+--- a/modules/libmar/src/moz.build
++++ b/modules/libmar/src/moz.build
+@@ -27,12 +27,16 @@ if CONFIG['CC_TYPE'] != 'msvc':
+ Library('mar')
+ 
+ UNIFIED_SOURCES += [
+     'mar_create.c',
+     'mar_extract.c',
+     'mar_read.c',
+ ]
+ 
++LOCAL_INCLUDES += [
++    '../../../other-licenses/nsis/Contrib/CityHash/cityhash',
++]
++
+ FORCE_STATIC_LIB = True
+ 
+ if CONFIG['OS_ARCH'] == 'WINNT':
+     USE_STATIC_LIBS = True
+diff --git a/modules/libmar/tool/moz.build b/modules/libmar/tool/moz.build
+--- a/modules/libmar/tool/moz.build
++++ b/modules/libmar/tool/moz.build
+@@ -1,15 +1,16 @@
+ # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+ # vim: set filetype=python:
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ HOST_SOURCES += [
++    '/other-licenses/nsis/Contrib/CityHash/cityhash/city.cpp',
+     'mar.c',
+ ]
+ 
+ HostProgram('mar')
+ 
+ HOST_USE_LIBS += [
+     'hostmar',
+ ]
+@@ -53,16 +54,18 @@ elif CONFIG['OS_ARCH'] == 'Darwin':
+       '-framework Security',
+     ]
+ 
+ if CONFIG['HOST_OS_ARCH'] == 'WINNT':
+     HOST_OS_LIBS += [
+         'ws2_32',
+     ]
+ 
++DisableStlWrapping()
++
+ if CONFIG['CC_TYPE'] != 'msvc':
+     # C11 for static_assert
+     c11_flags = ['-std=gnu11']
+     if CONFIG['CC_TYPE'] == 'clang-cl':
+         c11_flags.insert(0, '-Xclang')
+     HOST_CFLAGS += c11_flags
+     CFLAGS += c11_flags
+ 
+diff --git a/other-licenses/nsis/Contrib/CityHash/cityhash/city.h b/other-licenses/nsis/Contrib/CityHash/cityhash/city.h
+--- a/other-licenses/nsis/Contrib/CityHash/cityhash/city.h
++++ b/other-licenses/nsis/Contrib/CityHash/cityhash/city.h
+@@ -45,44 +45,49 @@
+ 
+ #include <stdlib.h>  // for size_t.
+ #include <stdint.h>
+ 
+ typedef uint8_t uint8;
+ typedef uint32_t uint32;
+ typedef uint64_t uint64;
+ 
++#ifdef __cplusplus
+ // The standard <utility> header doesn't compile, apparently it conflicts
+ // with... some Mozilla something or other. But all that's used from it
+ // is std::pair, so we can just replace that with mozilla::Pair.
+ #ifndef MOZILLA_CLIENT
+ #include <utility>
+ typedef std::pair<uint64, uint64> uint128;
+ inline uint64 Uint128Low64(const uint128& x) { return x.first; }
+ inline uint64 Uint128High64(const uint128& x) { return x.second; }
+ #else
+ #include "mozilla/Pair.h"
+ typedef mozilla::Pair<uint64, uint64> uint128;
+ inline uint64 Uint128Low64(const uint128& x) { return x.first(); }
+ inline uint64 Uint128High64(const uint128& x) { return x.second(); }
+ #endif
+ 
++extern "C" {
++#endif
+ 
+ // Hash function for a byte array.
+ uint64 CityHash64(const char *buf, size_t len);
+ 
+ // Hash function for a byte array.  For convenience, a 64-bit seed is also
+ // hashed into the result.
+ uint64 CityHash64WithSeed(const char *buf, size_t len, uint64 seed);
+ 
+ // Hash function for a byte array.  For convenience, two seeds are also
+ // hashed into the result.
+ uint64 CityHash64WithSeeds(const char *buf, size_t len,
+                            uint64 seed0, uint64 seed1);
+ 
++#ifdef __cplusplus
++}
+ // Hash function for a byte array.
+ uint128 CityHash128(const char *s, size_t len);
+ 
+ // Hash function for a byte array.  For convenience, a 128-bit seed is also
+ // hashed into the result.
+ uint128 CityHash128WithSeed(const char *s, size_t len, uint128 seed);
+ 
+ // Hash 128 input bits down to 64 bits of output.
+@@ -92,10 +97,11 @@ inline uint64 Hash128to64(const uint128&
+   const uint64 kMul = 0x9ddfea08eb382d69;
+   uint64 a = (Uint128Low64(x) ^ Uint128High64(x)) * kMul;
+   a ^= (a >> 47);
+   uint64 b = (Uint128High64(x) ^ a) * kMul;
+   b ^= (b >> 47);
+   b *= kMul;
+   return b;
+ }
++#endif
+ 
+ #endif  // CITY_HASH_H_
+diff --git a/toolkit/mozapps/update/common/moz.build b/toolkit/mozapps/update/common/moz.build
+--- a/toolkit/mozapps/update/common/moz.build
++++ b/toolkit/mozapps/update/common/moz.build
+@@ -43,11 +43,18 @@ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'wind
+             'registrycertificates.cpp',
+         ]
+         OS_LIBS += [
+             'crypt32',
+             'wintrust',
+         ]
+ 
+ SOURCES += [
++    '/other-licenses/nsis/Contrib/CityHash/cityhash/city.cpp',
+     'readstrings.cpp',
+     'updatecommon.cpp',
+ ]
++
++LOCAL_INCLUDES += [
++    '/other-licenses/nsis/Contrib/CityHash/cityhash',
++]
++
++DisableStlWrapping()

+ 173 - 0
mozilla-release/patches/1468552-65a1.patch

@@ -0,0 +1,173 @@
+# HG changeset patch
+# User June Wilde <jewilde@mozilla.com>
+# Date 1543343430 0
+# Node ID 6798cc7aaed0765f259470eb5f7cac16d4310545
+# Parent  e54386a858606c13bd4c7c93426c8bacdab0d7b2
+Bug 1468552 - Update bspatch.cpp to match Chromium version; r=rstrong
+
+Adds bounds checking around file reads and header values
+
+Differential Revision: https://phabricator.services.mozilla.com/D12356
+
+diff --git a/toolkit/mozapps/update/updater/bspatch.cpp b/toolkit/mozapps/update/updater/bspatch.cpp
+--- a/toolkit/mozapps/update/updater/bspatch.cpp
++++ b/toolkit/mozapps/update/updater/bspatch.cpp
+@@ -66,35 +66,53 @@ MBS_ReadHeader(FILE* file, MBSPatchHeade
+   header->scrc32    = ntohl(header->scrc32);
+   header->dlen      = ntohl(header->dlen);
+   header->cblen     = ntohl(header->cblen);
+   header->difflen   = ntohl(header->difflen);
+   header->extralen  = ntohl(header->extralen);
+ 
+   struct stat hs;
+   s = fstat(fileno(file), &hs);
+-  if (s)
++  if (s != 0)
+     return READ_ERROR;
+ 
+   if (memcmp(header->tag, "MBDIFF10", 8) != 0)
+     return UNEXPECTED_BSPATCH_ERROR;
+ 
+-  if (sizeof(MBSPatchHeader) +
+-      header->cblen +
+-      header->difflen +
+-      header->extralen != uint32_t(hs.st_size))
++  if (hs.st_size > INT_MAX)
++    return UNEXPECTED_BSPATCH_ERROR;
++
++  size_t size = static_cast<size_t>(hs.st_size);
++  if (size < sizeof(MBSPatchHeader))
++    return UNEXPECTED_BSPATCH_ERROR;
++  size -= sizeof(MBSPatchHeader);
++
++  if (size < header->cblen)
++    return UNEXPECTED_BSPATCH_ERROR;
++  size -= header->cblen;
++
++  if (size < header->difflen)
++    return UNEXPECTED_BSPATCH_ERROR;
++  size -= header->difflen;
++
++  if (size < header->extralen)
++    return UNEXPECTED_BSPATCH_ERROR;
++  size -= header->extralen;
++
++  if (size != 0)
+     return UNEXPECTED_BSPATCH_ERROR;
+ 
+   return OK;
+ }
+ 
+ int
+ MBS_ApplyPatch(const MBSPatchHeader *header, FILE* patchFile,
+                unsigned char *fbuffer, FILE* file)
+ {
++  unsigned char *fbufstart = fbuffer;
+   unsigned char *fbufend = fbuffer + header->slen;
+ 
+   unsigned char *buf = (unsigned char*) malloc(header->cblen +
+                                                header->difflen +
+                                                header->extralen);
+   if (!buf)
+     return BSPATCH_MEM_ERROR;
+ 
+@@ -107,81 +125,92 @@ MBS_ApplyPatch(const MBSPatchHeader *hea
+     size_t c = fread(wb, 1, count, patchFile);
+     if (c != count) {
+       rv = READ_ERROR;
+       goto end;
+     }
+ 
+     r -= c;
+     wb += c;
++
++    if (c == 0 && r) {
++      rv = UNEXPECTED_BSPATCH_ERROR;
++      goto end;
++    }
+   }
+ 
+   {
+     MBSPatchTriple *ctrlsrc = (MBSPatchTriple*) buf;
++    if (header->cblen % sizeof(MBSPatchTriple) != 0) {
++      rv = UNEXPECTED_BSPATCH_ERROR;
++      goto end;
++    }
++
+     unsigned char *diffsrc = buf + header->cblen;
+     unsigned char *extrasrc = diffsrc + header->difflen;
+ 
+     MBSPatchTriple *ctrlend = (MBSPatchTriple*) diffsrc;
+     unsigned char *diffend = extrasrc;
+     unsigned char *extraend = extrasrc + header->extralen;
+ 
+-    do {
++    while (ctrlsrc < ctrlend) {
+       ctrlsrc->x = ntohl(ctrlsrc->x);
+       ctrlsrc->y = ntohl(ctrlsrc->y);
+       ctrlsrc->z = ntohl(ctrlsrc->z);
+ 
+ #ifdef DEBUG_bsmedberg
+       printf("Applying block:\n"
+              " x: %u\n"
+              " y: %u\n"
+              " z: %i\n",
+              ctrlsrc->x,
+              ctrlsrc->y,
+              ctrlsrc->z);
+ #endif
+ 
+       /* Add x bytes from oldfile to x bytes from the diff block */
+ 
+-      if (fbuffer + ctrlsrc->x > fbufend ||
+-          diffsrc + ctrlsrc->x > diffend) {
++      if (ctrlsrc->x > static_cast<size_t>(fbufend - fbuffer) ||
++          ctrlsrc->x > static_cast<size_t>(diffend - diffsrc)) {
+         rv = UNEXPECTED_BSPATCH_ERROR;
+         goto end;
+       }
+       for (uint32_t i = 0; i < ctrlsrc->x; ++i) {
+         diffsrc[i] += fbuffer[i];
+       }
+       if ((uint32_t) fwrite(diffsrc, 1, ctrlsrc->x, file) != ctrlsrc->x) {
+         rv = WRITE_ERROR_PATCH_FILE;
+         goto end;
+       }
+       fbuffer += ctrlsrc->x;
+       diffsrc += ctrlsrc->x;
+ 
+       /* Copy y bytes from the extra block */
+ 
+-      if (extrasrc + ctrlsrc->y > extraend) {
++      if (ctrlsrc->y > static_cast<size_t>(extraend - extrasrc)) {
+         rv = UNEXPECTED_BSPATCH_ERROR;
+         goto end;
+       }
+       if ((uint32_t) fwrite(extrasrc, 1, ctrlsrc->y, file) != ctrlsrc->y) {
+         rv = WRITE_ERROR_PATCH_FILE;
+         goto end;
+       }
+       extrasrc += ctrlsrc->y;
+ 
+       /* "seek" forwards in oldfile by z bytes */
+ 
+-      if (fbuffer + ctrlsrc->z > fbufend) {
++      if (ctrlsrc->z < fbufstart - fbuffer ||
++          ctrlsrc->z > fbufend - fbuffer) {
+         rv = UNEXPECTED_BSPATCH_ERROR;
+         goto end;
+       }
+       fbuffer += ctrlsrc->z;
+ 
+       /* and on to the next control block */
+ 
+       ++ctrlsrc;
+-    } while (ctrlsrc < ctrlend);
++    }
+   }
+ 
+ end:
+   free(buf);
+   return rv;
+ }
+
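
The header check above replaces a single summed comparison, which unsigned 32-bit arithmetic can silently wrap, with a chain of subtract-and-check steps so each declared length is validated against what actually remains of the file. The same accounting in isolation, with hypothetical field names:

#include <stddef.h>
#include <stdint.h>

/* Verify that a file of file_size bytes is exactly a fixed header followed
 * by three variable-length sections.  Every subtraction is guarded, so an
 * oversized length field is rejected instead of wrapping the total. */
static int check_layout(size_t file_size, size_t header_size,
                        uint32_t len_a, uint32_t len_b, uint32_t len_c)
{
  size_t remaining = file_size;

  if (remaining < header_size) return -1;
  remaining -= header_size;

  if (remaining < len_a) return -1;
  remaining -= len_a;

  if (remaining < len_b) return -1;
  remaining -= len_b;

  if (remaining < len_c) return -1;
  remaining -= len_c;

  return remaining == 0 ? 0 : -1;    /* no trailing bytes allowed either */
}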

+ 996 - 0
mozilla-release/patches/1468556-65a1.patch

@@ -0,0 +1,996 @@
+# HG changeset patch
+# User June Wilde <jewilde@mozilla.com>
+# Date 1543253124 0
+# Node ID 06463d10abdee48650cbe28ae63e957951430412
+# Parent  447f8ddd9bbd1c5b618025758019e01525e2cb45
+Bug 1468556 - Protect against overlapping files in libmar; r=mhowell
+
+Disallows files from referencing the same bytes in the content blocks of a MAR
+file by storing a list of structs containing a file's byte offsets and lengths.
+A list was chosen since the cap of 256 files wouldn't produce considerable
+overhead when extracting/reading/searching/etc through the archive.
+
+Removing the ability for a MAR file to reference the same content block
+repeatedly seems like a better solution than what was suggested in the BLRG
+report. (limiting the number of files or checking for overly large
+decompressed files)
+
+Allows us to prohibit this type of file bomb while only losing an attribute
+of the MAR file format that wasn't being leveraged. The fix is applied in
+mar_enum_items and mar_find_item so that the manifest the updater uses is
+equally safeguarded as the mar host tool.
+
+Differential Revision: https://phabricator.services.mozilla.com/D11706
+
+diff --git a/modules/libmar/src/mar.h b/modules/libmar/src/mar.h
+--- a/modules/libmar/src/mar.h
++++ b/modules/libmar/src/mar.h
+@@ -22,143 +22,171 @@ extern "C" {
+ */
+ #define MAX_SIGNATURES 8
+ #ifdef __cplusplus
+ static_assert(MAX_SIGNATURES <= 9, "too many signatures");
+ #else
+ MOZ_STATIC_ASSERT(MAX_SIGNATURES <= 9, "too many signatures");
+ #endif
+ 
+-struct ProductInformationBlock {
+-  const char *MARChannelID;
+-  const char *productVersion;
++struct ProductInformationBlock
++{
++  const char* MARChannelID;
++  const char* productVersion;
+ };
+ 
+ /**
+  * The MAR item data structure.
+  */
+-typedef struct MarItem_ {
+-  struct MarItem_ *next;  /* private field */
++typedef struct MarItem_
++{
++  struct MarItem_* next;  /* private field */
+   uint32_t offset;        /* offset into archive */
+   uint32_t length;        /* length of data in bytes */
+   uint32_t flags;         /* contains file mode bits */
+   char name[1];           /* file path */
+ } MarItem;
+ 
++/**
++ * File offset and length for tracking access of byte indexes
++ */
++typedef struct SeenIndex_
++{
++  struct SeenIndex_* next; /* private field */
++  uint32_t offset;         /* offset into archive */
++  uint32_t length;         /* length of the data in bytes */
++} SeenIndex;
++
+ #define TABLESIZE 256
+ 
+-struct MarFile_ {
+-  FILE *fp;
+-  MarItem *item_table[TABLESIZE];
+-  int item_table_is_valid;
++/**
++ * Mozilla ARchive (MAR) file data structure
++ */
++struct MarFile_
++{
++  FILE* fp;                       /* file pointer to the archive */
++  MarItem* item_table[TABLESIZE]; /* hash table of files in the archive */
++  SeenIndex* index_list;          /* file indexes processed */
++  int item_table_is_valid;        /* header and index validation flag */
+ };
+ 
+ typedef struct MarFile_ MarFile;
+ 
+ /**
+  * Signature of callback function passed to mar_enum_items.
+  * @param mar       The MAR file being visited.
+  * @param item      The MAR item being visited.
+  * @param data      The data parameter passed by the caller of mar_enum_items.
+  * @return          A non-zero value to stop enumerating.
+  */
+-typedef int (* MarItemCallback)(MarFile *mar, const MarItem *item, void *data);
++typedef int (*MarItemCallback)(MarFile* mar, const MarItem* item, void* data);
+ 
+ /**
+  * Open a MAR file for reading.
+  * @param path      Specifies the path to the MAR file to open.  This path must
+  *                  be compatible with fopen.
+  * @return          NULL if an error occurs.
+  */
+-MarFile *mar_open(const char *path);
++MarFile*
++mar_open(const char* path);
+ 
+ #ifdef XP_WIN
+ MarFile *mar_wopen(const wchar_t *path);
+ #endif
+ 
+ /**
+  * Close a MAR file that was opened using mar_open.
+  * @param mar       The MarFile object to close.
+  */
+-void mar_close(MarFile *mar);
++void
++mar_close(MarFile* mar);
+ 
+ /**
+  * Find an item in the MAR file by name.
+  * @param mar       The MarFile object to query.
+  * @param item      The name of the item to query.
+  * @return          A const reference to a MAR item or NULL if not found.
+  */
+-const MarItem *mar_find_item(MarFile *mar, const char *item);
++const MarItem*
++mar_find_item(MarFile* mar, const char* item);
+ 
+ /**
+  * Enumerate all MAR items via callback function.
+  * @param mar       The MAR file to enumerate.
+  * @param callback  The function to call for each MAR item.
+  * @param data      A caller specified value that is passed along to the
+  *                  callback function.
+  * @return          0 if the enumeration ran to completion.  Otherwise, any
+  *                  non-zero return value from the callback is returned.
+  */
+-int mar_enum_items(MarFile *mar, MarItemCallback callback, void *data);
++int
++mar_enum_items(MarFile* mar, MarItemCallback callback, void* data);
+ 
+ /**
+  * Read from MAR item at given offset up to bufsize bytes.
+  * @param mar       The MAR file to read.
+  * @param item      The MAR item to read.
+  * @param offset    The byte offset relative to the start of the item.
+  * @param buf       A pointer to a buffer to copy the data into.
+  * @param bufsize   The length of the buffer to copy the data into.
+  * @return          The number of bytes written or a negative value if an
+  *                  error occurs.
+  */
+-int mar_read(MarFile *mar, const MarItem *item, int offset, uint8_t *buf,
+-             int bufsize);
++int
++mar_read(MarFile* mar,
++         const MarItem* item,
++         int offset,
++         uint8_t* buf,
++         int bufsize);
+ 
+ /**
+  * Create a MAR file from a set of files.
+  * @param dest      The path to the file to create.  This path must be
+  *                  compatible with fopen.
+  * @param numfiles  The number of files to store in the archive.
+  * @param files     The list of null-terminated file paths.  Each file
+  *                  path must be compatible with fopen.
+  * @param infoBlock The information to store in the product information block.
+  * @return          A non-zero value if an error occurs.
+  */
+-int mar_create(const char *dest,
+-               int numfiles,
+-               char **files,
+-               struct ProductInformationBlock *infoBlock);
++int
++mar_create(const char* dest,
++           int numfiles,
++           char** files,
++           struct ProductInformationBlock* infoBlock);
+ 
+ /**
+  * Extract a MAR file to the current working directory.
+  * @param path      The path to the MAR file to extract.  This path must be
+  *                  compatible with fopen.
+  * @return          A non-zero value if an error occurs.
+  */
+-int mar_extract(const char *path);
++int
++mar_extract(const char* path);
+ 
+ #define MAR_MAX_CERT_SIZE (16*1024) // Way larger than necessary
+ 
+ /* Read the entire file (not a MAR file) into a newly-allocated buffer.
+  * This function does not write to stderr. Instead, the caller should
+  * write whatever error messages it sees fit. The caller must free the returned
+  * buffer using free().
+  *
+  * @param filePath The path to the file that should be read.
+  * @param maxSize  The maximum valid file size.
+  * @param data     On success, *data will point to a newly-allocated buffer
+  *                 with the file's contents in it.
+  * @param size     On success, *size will be the size of the created buffer.
+  *
+  * @return 0 on success, -1 on error
+  */
+-int mar_read_entire_file(const char * filePath,
+-                         uint32_t maxSize,
+-                         /*out*/ const uint8_t * *data,
+-                         /*out*/ uint32_t *size);
++int
++mar_read_entire_file(const char* filePath,
++                     uint32_t maxSize,
++                     /*out*/ const uint8_t** data,
++                     /*out*/ uint32_t* size);
+ 
+ /**
+  * Verifies a MAR file by verifying each signature with the corresponding
+  * certificate. That is, the first signature will be verified using the first
+  * certificate given, the second signature will be verified using the second
+  * certificate given, etc. The signature count must exactly match the number of
+  * certificates given, and all signature verifications must succeed.
+  * We do not check that the certificate was issued by any trusted authority.
+@@ -170,30 +198,31 @@ int mar_read_entire_file(const char * fi
+  *                       file data.
+  * @param certDataSizes  Pointer to the first element in an array for size of
+  *                       the cert data.
+  * @param certCount      The number of elements in certData and certDataSizes
+  * @return 0 on success
+  *         a negative number if there was an error
+  *         a positive number if the signature does not verify
+  */
+-int mar_verify_signatures(MarFile *mar,
+-                          const uint8_t * const *certData,
+-                          const uint32_t *certDataSizes,
+-                          uint32_t certCount);
++int
++mar_verify_signatures(MarFile* mar,
++                      const uint8_t* const* certData,
++                      const uint32_t* certDataSizes,
++                      uint32_t certCount);
+ 
+ /**
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+ */
+ int
+-mar_read_product_info_block(MarFile *mar,
+-                            struct ProductInformationBlock *infoBlock);
++mar_read_product_info_block(MarFile* mar,
++                            struct ProductInformationBlock* infoBlock);
+ 
+ #ifdef __cplusplus
+ }
+ #endif
+ 
+ #endif  /* MAR_H__ */
+diff --git a/modules/libmar/src/mar_read.c b/modules/libmar/src/mar_read.c
+--- a/modules/libmar/src/mar_read.c
++++ b/modules/libmar/src/mar_read.c
+@@ -14,22 +14,30 @@
+ 
+ /* This block must be at most 104 bytes.
+    MAR channel name < 64 bytes, and product version < 32 bytes + 3 NULL
+    terminator bytes. We only check for 96 though because we remove 8
+    bytes above from the additionalBlockSize: We subtract
+    sizeof(additionalBlockSize) and sizeof(additionalBlockID) */
+ #define MAXADDITIONALBLOCKSIZE 96
+ 
+-static uint32_t mar_hash_name(const char *name) {
++static uint32_t
++mar_hash_name(const char* name)
++{
+   return CityHash64(name, strlen(name)) % TABLESIZE;
+ }
+ 
+-static int mar_insert_item(MarFile *mar, const char *name, int namelen,
+-                           uint32_t offset, uint32_t length, uint32_t flags) {
++static int
++mar_insert_item(MarFile* mar,
++                const char* name,
++                int namelen,
++                uint32_t offset,
++                uint32_t length,
++                uint32_t flags)
++{
+   MarItem *item, *root;
+   uint32_t hash;
+ 
+   item = (MarItem *) malloc(sizeof(MarItem) + namelen);
+   if (!item)
+     return -1;
+   item->next = NULL;
+   item->offset = offset;
+@@ -46,17 +54,19 @@ static int mar_insert_item(MarFile *mar,
+     /* append item */
+     while (root->next)
+       root = root->next;
+     root->next = item;
+   }
+   return 0;
+ }
+ 
+-static int mar_consume_index(MarFile *mar, char **buf, const char *buf_end) {
++static int
++mar_consume_index(MarFile* mar, char** buf, const char* buf_end)
++{
+   /*
+    * Each item has the following structure:
+    *   uint32_t offset      (network byte order)
+    *   uint32_t length      (network byte order)
+    *   uint32_t flags       (network byte order)
+    *   char     name[N]     (where N >= 1)
+    *   char     null_byte;
+    */
+@@ -98,17 +108,19 @@ static int mar_consume_index(MarFile *ma
+   /* consume null byte */
+   if (*buf == buf_end)
+     return -1;
+   ++(*buf);
+ 
+   return mar_insert_item(mar, name, namelen, offset, length, flags);
+ }
+ 
+-static int mar_read_index(MarFile *mar) {
++static int
++mar_read_index(MarFile* mar)
++{
+   char id[MAR_ID_SIZE], *buf, *bufptr, *bufend;
+   uint32_t offset_to_index, size_of_index;
+ 
+   /* verify MAR ID */
+   fseek(mar->fp, 0, SEEK_SET);
+   if (fread(id, MAR_ID_SIZE, 1, mar->fp) != 1)
+     return -1;
+   if (memcmp(id, MAR_ID, MAR_ID_SIZE) != 0)
+@@ -136,79 +148,151 @@ static int mar_read_index(MarFile *mar) 
+   bufend = buf + size_of_index;
+   while (bufptr < bufend && mar_consume_index(mar, &bufptr, bufend) == 0);
+ 
+   free(buf);
+   return (bufptr == bufend) ? 0 : -1;
+ }
+ 
+ /**
++ * Adds an offset and length to the MarFile's index_list
++ * @param mar     The MarFile that owns this offset length pair
++ * @param offset  The byte offset in the archive to be marked as processed
++ * @param length  The length corresponding to this byte offset
++ * @return int    1 on success, 0 if offset has been previously processed
++ *                -1 if unable to allocate space for the SeenIndexes
++ */
++static int
++mar_insert_offset(MarFile* mar, uint32_t offset, uint32_t length)
++{
++  /* Ignore files with no length */
++  if (length == 0) {
++    return 1;
++  }
++
++  SeenIndex* index = (SeenIndex*)malloc(sizeof(SeenIndex));
++  if (!index) {
++    return -1;
++  }
++  index->next = NULL;
++  index->offset = offset;
++  index->length = length;
++  uint32_t index_end = index->offset + index->length - 1;
++
++  /* If this is our first index store it at the front */
++  if (mar->index_list == NULL) {
++    mar->index_list = index;
++    return 1;
++  }
++
++  /* Search for matching indexes in the list of those previously visited */
++  SeenIndex* previous;
++  SeenIndex* current = mar->index_list;
++  while (current != NULL) {
++    uint32_t current_end = current->offset + current->length - 1;
++
++    /* If index has collided with the front or end of current or if current has
++       collided with the front or end of index return false */
++    if ((index->offset >= current->offset && index->offset <= current_end) ||
++        (index_end >= current->offset && index_end <= current_end) ||
++        (current->offset >= index->offset && current->offset <= index_end) ||
++        (current_end >= index->offset && current_end <= index_end)) {
++      free(index);
++      return 0;
++    }
++
++    /* else move to the next in the list */
++    previous = current;
++    current = current->next;
++  }
++
++  /* These indexes are valid, track them */
++  previous->next = index;
++  return 1;
++}
++
++/**
+  * Internal shared code for mar_open and mar_wopen.
+  * On failure, will fclose(fp).
+  */
+-static MarFile *mar_fpopen(FILE *fp)
++static MarFile*
++mar_fpopen(FILE* fp)
+ {
+-  MarFile *mar;
++  MarFile* mar;
+ 
+-  mar = (MarFile *) malloc(sizeof(*mar));
++  mar = (MarFile*)malloc(sizeof(*mar));
+   if (!mar) {
+     fclose(fp);
+     return NULL;
+   }
+ 
+   mar->fp = fp;
+   mar->item_table_is_valid = 0;
+   memset(mar->item_table, 0, sizeof(mar->item_table));
++  mar->index_list = NULL;
+ 
+   return mar;
+ }
+ 
+-MarFile *mar_open(const char *path) {
++MarFile*
++mar_open(const char* path)
++{
+   FILE *fp;
+ 
+   fp = fopen(path, "rb");
+   if (!fp) {
+     fprintf(stderr, "ERROR: could not open file in mar_open()\n");
+     perror(path);
+     return NULL;
+   }
+ 
+   return mar_fpopen(fp);
+ }
+ 
+ #ifdef XP_WIN
+-MarFile *mar_wopen(const wchar_t *path) {
++MarFile*
++mar_wopen(const wchar_t* path)
++{
+   FILE *fp;
+ 
+   _wfopen_s(&fp, path, L"rb");
+   if (!fp) {
+     fprintf(stderr, "ERROR: could not open file in mar_wopen()\n");
+     _wperror(path);
+     return NULL;
+   }
+ 
+   return mar_fpopen(fp);
+ }
+ #endif
+ 
+-void mar_close(MarFile *mar) {
+-  MarItem *item;
++void
++mar_close(MarFile* mar)
++{
++  MarItem* item;
++  SeenIndex* index;
+   int i;
+ 
+   fclose(mar->fp);
+ 
+   for (i = 0; i < TABLESIZE; ++i) {
+     item = mar->item_table[i];
+     while (item) {
+-      MarItem *temp = item;
++      MarItem* temp = item;
+       item = item->next;
+       free(temp);
+     }
+   }
+ 
++  while (mar->index_list != NULL) {
++    index = mar->index_list;
++    mar->index_list = index->next;
++    free(index);
++  }
++
+   free(mar);
+ }
+ 
+ /**
+  * Determines the MAR file information.
+  *
+  * @param fp                     An opened MAR file in read mode.
+  * @param hasSignatureBlock      Optional out parameter specifying if the MAR
+@@ -220,22 +304,23 @@ void mar_close(MarFile *mar) {
+  * @param offsetAdditionalBlocks Optional out parameter for the offset to the
+  *                               first additional block. Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @param numAdditionalBlocks    Optional out parameter for the number of
+  *                               additional blocks.  Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @return 0 on success and non-zero on failure.
+  */
+-int get_mar_file_info_fp(FILE *fp,
+-                         int *hasSignatureBlock,
+-                         uint32_t *numSignatures,
+-                         int *hasAdditionalBlocks,
+-                         uint32_t *offsetAdditionalBlocks,
+-                         uint32_t *numAdditionalBlocks)
++int
++get_mar_file_info_fp(FILE* fp,
++                     int* hasSignatureBlock,
++                     uint32_t* numSignatures,
++                     int* hasAdditionalBlocks,
++                     uint32_t* offsetAdditionalBlocks,
++                     uint32_t* numAdditionalBlocks)
+ {
+   uint32_t offsetToIndex, offsetToContent, signatureCount, signatureLen, i;
+ 
+   /* One of hasSignatureBlock or hasAdditionalBlocks must be non NULL */
+   if (!hasSignatureBlock && !hasAdditionalBlocks) {
+     return -1;
+   }
+ 
+@@ -358,18 +443,17 @@ int get_mar_file_info_fp(FILE *fp,
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+ */
+ int
+-read_product_info_block(char *path,
+-                        struct ProductInformationBlock *infoBlock)
++read_product_info_block(char* path, struct ProductInformationBlock* infoBlock)
+ {
+   int rv;
+   MarFile mar;
+   mar.fp = fopen(path, "rb");
+   if (!mar.fp) {
+     fprintf(stderr, "ERROR: could not open file in read_product_info_block()\n");
+     perror(path);
+     return -1;
+@@ -383,18 +467,18 @@ read_product_info_block(char *path,
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+ */
+ int
+-mar_read_product_info_block(MarFile *mar,
+-                            struct ProductInformationBlock *infoBlock)
++mar_read_product_info_block(MarFile* mar,
++                            struct ProductInformationBlock* infoBlock)
+ {
+   uint32_t offsetAdditionalBlocks, numAdditionalBlocks,
+     additionalBlockSize, additionalBlockID;
+   int hasAdditionalBlocks;
+ 
+   /* The buffer size is 97 bytes because the MAR channel name < 64 bytes, and
+      product version < 32 bytes + 3 NULL terminator bytes. */
+   char buf[MAXADDITIONALBLOCKSIZE + 1] = { '\0' };
+@@ -471,64 +555,87 @@ mar_read_product_info_block(MarFile *mar
+       }
+     }
+   }
+ 
+   /* If we had a product info block we would have already returned */
+   return -1;
+ }
+ 
+-const MarItem *mar_find_item(MarFile *mar, const char *name) {
++const MarItem*
++mar_find_item(MarFile* mar, const char* name)
++{
+   uint32_t hash;
+-  const MarItem *item;
++  const MarItem* item;
+ 
+   if (!mar->item_table_is_valid) {
+     if (mar_read_index(mar)) {
+       return NULL;
+     } else {
+       mar->item_table_is_valid = 1;
+     }
+   }
+ 
+   hash = mar_hash_name(name);
+ 
+   item = mar->item_table[hash];
+-  while (item && strcmp(item->name, name) != 0)
++  while (item && strcmp(item->name, name) != 0) {
+     item = item->next;
++  }
+ 
+-  return item;
++  /* If this is the first time seeing this item's indexes, return it */
++  if (mar_insert_offset(mar, item->offset, item->length) == 1) {
++    return item;
++  } else {
++    fprintf(stderr, "ERROR: file content collision in mar_find_item()\n");
++    return NULL;
++  }
+ }
+ 
+-int mar_enum_items(MarFile *mar, MarItemCallback callback, void *closure) {
+-  MarItem *item;
+-  int i;
++int
++mar_enum_items(MarFile* mar, MarItemCallback callback, void* closure)
++{
++  MarItem* item;
++  int i, rv;
+ 
+   if (!mar->item_table_is_valid) {
+     if (mar_read_index(mar)) {
+       return -1;
+     } else {
+       mar->item_table_is_valid = 1;
+     }
+   }
+ 
+   for (i = 0; i < TABLESIZE; ++i) {
+     item = mar->item_table[i];
+     while (item) {
+-      int rv = callback(mar, item, closure);
+-      if (rv)
+-        return rv;
++      /* if this is the first time seeing this item's indexes, process it */
++      if (mar_insert_offset(mar, item->offset, item->length) == 1) {
++        rv = callback(mar, item, closure);
++        if (rv) {
++          return rv;
++        }
++      } else {
++        fprintf(stderr, "ERROR: file content collision in mar_enum_items()\n");
++        return 1;
++      }
+       item = item->next;
+     }
+   }
+ 
+   return 0;
+ }
+ 
+-int mar_read(MarFile *mar, const MarItem *item, int offset, uint8_t *buf,
+-             int bufsize) {
++int
++mar_read(MarFile* mar,
++         const MarItem* item,
++         int offset,
++         uint8_t* buf,
++         int bufsize)
++{
+   int nr;
+ 
+   if (offset == (int) item->length)
+     return 0;
+   if (offset > (int) item->length)
+     return -1;
+ 
+   nr = item->length - offset;
+@@ -554,22 +661,23 @@ int mar_read(MarFile *mar, const MarItem
+  * @param offsetAdditionalBlocks Optional out parameter for the offset to the
+  *                               first additional block. Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @param numAdditionalBlocks    Optional out parameter for the number of
+  *                               additional blocks.  Value is only valid if
+  *                               has_additional_blocks is not equal to 0.
+  * @return 0 on success and non-zero on failure.
+  */
+-int get_mar_file_info(const char *path,
+-                      int *hasSignatureBlock,
+-                      uint32_t *numSignatures,
+-                      int *hasAdditionalBlocks,
+-                      uint32_t *offsetAdditionalBlocks,
+-                      uint32_t *numAdditionalBlocks)
++int
++get_mar_file_info(const char* path,
++                  int* hasSignatureBlock,
++                  uint32_t* numSignatures,
++                  int* hasAdditionalBlocks,
++                  uint32_t* offsetAdditionalBlocks,
++                  uint32_t* numAdditionalBlocks)
+ {
+   int rv;
+   FILE *fp = fopen(path, "rb");
+   if (!fp) {
+     fprintf(stderr, "ERROR: could not open file in get_mar_file_info()\n");
+     perror(path);
+     return -1;
+   }
+diff --git a/modules/libmar/tests/unit/data/manipulated_backend_collision.mar b/modules/libmar/tests/unit/data/manipulated_backend_collision.mar
+new file mode 100644
+index 0000000000000000000000000000000000000000..41d4f78482848d279230c35db097f81bcf21bb44
+GIT binary patch
+literal 210
+zc%1Wf3^HV3U|7Kb0hgdOBM@hRXa`@%pm=8wM?XJTpLkCf2L>}!J%dC;25JFA1tSGx
+z1rr5R1v3S61q%gB1p|ddg(QV!g%pKUg*1h9g$#vEg)9Z2QZ*oM1Y!}OvzDY~=A_0a
+Mf=p;Y5lUhJ0KAtN(*OVf
+
+diff --git a/modules/libmar/tests/unit/data/manipulated_frontend_collision.mar b/modules/libmar/tests/unit/data/manipulated_frontend_collision.mar
+new file mode 100644
+index 0000000000000000000000000000000000000000..582af58b59b13ceebc261dd0fe142471361ce241
+GIT binary patch
+literal 210
+zc%1Wf3^HV3U|7Kb0hgdOBM@hRXa`@%pm=8wM?XJTpLkCf2L>}!J%dC;25JFA1tSGx
+z1rr5R1v3S61q%gB1p|ddg(QV!g%pKUg*1h9g$#vEg)9Z2QZ*oM0AdlKvzDY~=A_0a
+Mf=p;c5lUhJ0K9}4(*OVf
+
+diff --git a/modules/libmar/tests/unit/data/manipulated_is_contained.mar b/modules/libmar/tests/unit/data/manipulated_is_contained.mar
+new file mode 100644
+index 0000000000000000000000000000000000000000..d51b23587d0b49f6282bcaf5f77697f1e5bad3ea
+GIT binary patch
+literal 210
+zc%1Wf3^HV3U|7Kb0hgdOBM@hRXa`@%pm=8wM?XJTpLkCf2L>}!J%dC;25JFA1tSGx
+z1rr5R1v3S61q%gB1p|ddg(QV!g%pKUg*1h9g$#vEg)9Z2QZ*oM0b&uLvzDY~=A_0a
+Of=p-tVhy-Z5(5Ce@EGa<
+
+diff --git a/modules/libmar/tests/unit/data/manipulated_is_container.mar b/modules/libmar/tests/unit/data/manipulated_is_container.mar
+new file mode 100644
+index 0000000000000000000000000000000000000000..98b33ce9e5a895acf90a5ebf3ef9a68f7672f3bc
+GIT binary patch
+literal 210
+zc%1Wf3^HV3U|7Kb0hgdOBM@hRXa`@%pm=8wM?XJTpLkCf2L>}!J%dC;25JFA1tSGx
+z1rr5R1v3S61q%gB1p|ddg(QV!g%pKUg*1h9g$#vEg)9Z2QZ*oM0AdZGvzDY~=A_0a
+Of=p-xViCAd5(5Cf@EGR+
+
+diff --git a/modules/libmar/tests/unit/data/manipulated_multiple_collision.mar b/modules/libmar/tests/unit/data/manipulated_multiple_collision.mar
+new file mode 100644
+index 0000000000000000000000000000000000000000..7e0a3dd72458417ce0f7f38b44abd868a902883d
+GIT binary patch
+literal 249
+zc%1Wf3^HV3VA#U|0Y9NMBM@hRXa`@%pm=8wM?XJTpLkCf2L>}!J%dC;25JFA1tSGx
+z1rr5R1v3S61q%gB1p|ddg(QV!g%pKUg*1h9g$#vEg)9X{1qTIX1r-HVg*XK@1uX>)
+m1x*E@c1s{`0AdlKQ<tP==A_0af=r!)B9sIYYDE!BW&i*pyC2{H
+
+diff --git a/modules/libmar/tests/unit/data/manipulated_multiple_collision_first.mar b/modules/libmar/tests/unit/data/manipulated_multiple_collision_first.mar
+new file mode 100644
+index 0000000000000000000000000000000000000000..a10d3eb53b30e247f9e4fae42a40f6b2c2c7b230
+GIT binary patch
+literal 249
+zc%1Wf3^HV3VA#U|0Y9NMBM@hRXa`@%pm=8wM?XJTpLkCf2L>}!J%dC;25JFA1tSGx
+z1rr5R1v3S61q%gB1p|ddg(QV!g%pKUg*1h9g$#vEg)9X{1qTIX1r-HVg*XK@1uX>)
+m1x*E@c1s{`0AdlKQ<tP==A_0af=r!)B9sIYT8<)=%m4r++aLk}
+
+diff --git a/modules/libmar/tests/unit/data/manipulated_multiple_collision_last.mar b/modules/libmar/tests/unit/data/manipulated_multiple_collision_last.mar
+new file mode 100644
+index 0000000000000000000000000000000000000000..bfbb9ba8535739329d560a6d84b3ddbe5aef2ed9
+GIT binary patch
+literal 249
+zc%1Wf3^HV3VA#U|0Y9NMBM@hRXa`@%pm=8wM?XJTpLkCf2L>}!J%dC;25JFA1tSGx
+z1rr5R1v3S61q%gB1p|ddg(QV!g%pKUg*1h9g$#vEg)9X{1qTIX1r-HVg*XK@1uX>)
+m1x*E@c1s{`0AdlKQ<tP==A_0af=r!^B9sIYT7e>z%m4r+$RGj$
+
+diff --git a/modules/libmar/tests/unit/data/manipulated_same_offset.mar b/modules/libmar/tests/unit/data/manipulated_same_offset.mar
+new file mode 100644
+index 0000000000000000000000000000000000000000..1326d1afd8d56e0311930d6e93b529ac93765a02
+GIT binary patch
+literal 210
+zc%1Wf3^HV3U|7Kb0hgdOBM@hRXa`@%pm=8wM?XJTpLkCf2L>}!J%dC;25JFA1tSGx
+z1rr5R1v3S61q%gB1p|ddg(QV!g%pKUg*1h9g$#vEg)9Z2QZ*oM0AdlKvzDY~=A_0a
+KGGGWLF#rI(bQsbA
+
+diff --git a/modules/libmar/tests/unit/head_libmar.js b/modules/libmar/tests/unit/head_libmar.js
+--- a/modules/libmar/tests/unit/head_libmar.js
++++ b/modules/libmar/tests/unit/head_libmar.js
+@@ -1,28 +1,28 @@
+ /* Any copyright is dedicated to the Public Domain.
+    http://creativecommons.org/publicdomain/zero/1.0/ */
+ 
+-'use strict';
++"use strict";
+ 
+ const BIN_SUFFIX = mozinfo.bin_suffix;
+ const tempDir = do_get_tempdir();
+ 
+ /**
+  * Compares binary data of 2 arrays and throws if they aren't the same.
+  * Throws on mismatch, does nothing on match.
+  *
+  * @param arr1 The first array to compare
+  * @param arr2 The second array to compare
+  */
+ function compareBinaryData(arr1, arr2) {
+   Assert.equal(arr1.length, arr2.length);
+   for (let i = 0; i < arr1.length; i++) {
+     if (arr1[i] != arr2[i]) {
+-      throw "Data differs at index " + i + 
++      throw "Data differs at index " + i +
+             ", arr1: " + arr1[i] + ", arr2: " + arr2[i];
+     }
+   }
+ }
+ 
+ /**
+  * Reads a file's data and returns it
+  *
+@@ -108,17 +108,17 @@ function createMAR(outMAR, dataDir, file
+     f.permissions = 0o664;
+   }
+ 
+   // Setup the command line arguments to create the MAR.
+   let args = ["-C", dataDir.path, "-H", "\@MAR_CHANNEL_ID\@",
+               "-V", "13.0a1", "-c", outMAR.path];
+   args = args.concat(files);
+ 
+-  info('Running: ' + signmarBin.path + " " + args.join(" "));
++  info("Running: " + signmarBin.path + " " + args.join(" "));
+   process.init(signmarBin);
+   process.run(true, args, args.length);
+ 
+   // Verify signmar returned 0 for success.
+   Assert.equal(process.exitValue, 0);
+ 
+   // Verify the out MAR file actually exists.
+   Assert.ok(outMAR.exists());
+@@ -135,18 +135,17 @@ function extractMAR(mar, dataDir) {
+   let process = Cc["@mozilla.org/process/util;1"].
+                 createInstance(Ci.nsIProcess);
+   let signmarBin = do_get_file("signmar" + BIN_SUFFIX);
+ 
+   // Make sure the signmar binary exists and is an executable.
+   Assert.ok(signmarBin.exists());
+   Assert.ok(signmarBin.isExecutable());
+ 
+-  // Setup the command line arguments to create the MAR.
++  // Setup the command line arguments to extract the MAR.
+   let args = ["-C", dataDir.path, "-x", mar.path];
+ 
+-  info('Running: ' + signmarBin.path + " " + args.join(" "));
++  info("Running: " + signmarBin.path + " " + args.join(" "));
+   process.init(signmarBin);
+   process.run(true, args, args.length);
+ 
+-  // Verify signmar returned 0 for success.
+-  Assert.equal(process.exitValue, 0);
++  return process.exitValue;
+ }
+diff --git a/modules/libmar/tests/unit/test_extract.js b/modules/libmar/tests/unit/test_extract.js
+--- a/modules/libmar/tests/unit/test_extract.js
++++ b/modules/libmar/tests/unit/test_extract.js
+@@ -4,17 +4,17 @@
+ function run_test() {
+ 
+   /**
+    * Extracts a MAR and makes sure each file matches the reference files.
+    *
+    * @param marFileName The name of the MAR file to extract
+    * @param files       The files that the extracted MAR should contain
+    */
+-  function run_one_test(marFileName, files) {
++  function extract_and_compare(marFileName, files) {
+     // Get the MAR file that we will be extracting
+     let mar = do_get_file("data/" + marFileName);
+ 
+     // Get the path that we will extract to
+     let outDir = tempDir.clone();
+     outDir.append("out");
+     Assert.ok(!outDir.exists());
+     outDir.create(Ci.nsIFile.DIRECTORY_TYPE, 0o777);
+@@ -26,62 +26,114 @@ function run_test() {
+       let outFile = outDir.clone();
+       outFile.append(files[i]);
+       Assert.ok(!outFile.exists());
+ 
+       outFiles.push(outFile);
+       refFiles.push(do_get_file("data/" + files[i]));
+     }
+ 
+-    // Extract the MAR contents into the ./out dir.
+-    extractMAR(mar, outDir);
++    // Extract the MAR contents to ./out dir and verify 0 for success.
++    Assert.equal(extractMAR(mar, outDir), 0);
+ 
+     // Compare to make sure the extracted files are the same.
+     for (let i = 0; i < files.length; i++) {
+       Assert.ok(outFiles[i].exists());
+       let refFileData = getBinaryFileData(refFiles[i]);
+       let outFileData = getBinaryFileData(outFiles[i]);
+       compareBinaryData(refFileData, outFileData);
+     }
+   }
+ 
++  /**
++   * Attempts to extract a MAR and expects a failure
++   *
++   * @param marFileName The name of the MAR file to extract
++   */
++  function extract_and_fail(marFileName) {
++    // Get the MAR file that we will be extracting
++    let mar = do_get_file("data/" + marFileName);
++
++    // Get the path that we will extract to
++    let outDir = tempDir.clone();
++    outDir.append("out");
++    Assert.ok(!outDir.exists());
++    outDir.create(Ci.nsIFile.DIRECTORY_TYPE, 0o777);
++
++    // Extract the MAR contents to ./out dir and verify -1 (255 from the
++    // nsIprocess) for failure
++    Assert.equal(extractMAR(mar, outDir), 1);
++  }
++
+   // Define the unit tests to run.
+   let tests = {
+     // Test extracting a MAR file with a 0 byte file.
+     test_zero_sized: function _test_zero_sized() {
+-      return run_one_test("0_sized.mar", ["0_sized_file"]);
++      return extract_and_compare("0_sized.mar", ["0_sized_file"]);
+     },
+     // Test extracting a MAR file with a 1 byte file.
+     test_one_byte: function _test_one_byte() {
+-      return run_one_test("1_byte.mar", ["1_byte_file"]);
++      return extract_and_compare("1_byte.mar", ["1_byte_file"]);
+     },
+     // Test extracting a MAR file with binary data.
+     test_binary_data: function _test_binary_data() {
+-      return run_one_test("binary_data.mar", ["binary_data_file"]);
++      return extract_and_compare("binary_data.mar", ["binary_data_file"]);
+     },
+     // Test extracting a MAR without a product information block (PIB) which
+     // contains binary data.
+     test_no_pib: function _test_no_pib() {
+-      return run_one_test("no_pib.mar", ["binary_data_file"]);
++      return extract_and_compare("no_pib.mar", ["binary_data_file"]);
+     },
+     // Test extracting a MAR without a product information block (PIB) that is
+     // signed and which contains binary data.
+     test_no_pib_signed: function _test_no_pib_signed() {
+-      return run_one_test("signed_no_pib.mar", ["binary_data_file"]);
++      return extract_and_compare("signed_no_pib.mar", ["binary_data_file"]);
+     },
+     // Test extracting a MAR with a product information block (PIB) that is
+     // signed and which contains binary data.
+     test_pib_signed: function _test_pib_signed() {
+-      return run_one_test("signed_pib.mar", ["binary_data_file"]);
++      return extract_and_compare("signed_pib.mar", ["binary_data_file"]);
+     },
+     // Test extracting a MAR file with multiple files inside of it.
+     test_multiple_file: function _test_multiple_file() {
+-      return run_one_test("multiple_file.mar",
++      return extract_and_compare("multiple_file.mar",
+                           ["0_sized_file", "1_byte_file", "binary_data_file"]);
+     },
++    // Test collision detection where file A + B are the same offset
++    test_collision_same_offset: function test_collision_same_offset() {
++      return extract_and_fail("manipulated_same_offset.mar");
++    },
++    // Test collision detection where file A's indexes are a subset of file B's
++    test_collision_is_contained: function test_collision_is_contained() {
++      return extract_and_fail("manipulated_is_container.mar");
++    },
++    // Test collision detection where file B's indexes are a subset of file A's
++    test_collision_contained_by: function test_collision_contained_by() {
++      return extract_and_fail("manipulated_is_contained.mar");
++    },
++    // Test collision detection where file A ends in file B's indexes
++    test_collision_a_onto_b: function test_collision_a_onto_b() {
++      return extract_and_fail("manipulated_frontend_collision.mar");
++    },
++    // Test collision detection where file B ends in file A's indexes
++    test_collision_b_onto_a: function test_collision_b_onto_a()  {
++      return extract_and_fail("manipulated_backend_collision.mar");
++    },
++    // Test collision detection where file C shares indexes with both file A & B
++    test_collision_multiple: function test_collision_multiple() {
++      return extract_and_fail("manipulated_multiple_collision.mar");
++    },
++    // Test collision detection where A is the last file in the list
++    test_collision_last: function test_collision_multiple_last() {
++      return extract_and_fail("manipulated_multiple_collision_last.mar");
++    },
++    // Test collision detection where A is the first file in the list
++    test_collision_first: function test_collision_multiple_first() {
++      return extract_and_fail("manipulated_multiple_collision_first.mar");
++    },
+     // Between each test make sure the out directory and its subfiles do
+     // not exist.
+     cleanup_per_test: function _cleanup_per_test() {
+       let outDir = tempDir.clone();
+       outDir.append("out");
+       if (outDir.exists()) {
+         outDir.remove(true);
+       }
+
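
The collision tests above all expect extraction to fail with a nonzero exit code when two index entries claim overlapping byte ranges. As a rough illustration of the idea those manipulated archives exercise (a sketch only, not the extractor's actual implementation; the struct below is a stripped-down stand-in for the MarItem fields visible in mar_read.c), an overlap check can sort the index by offset and compare neighbours:

    #include <stdint.h>
    #include <stdlib.h>

    /* Minimal index entry: just the fields the overlap check needs. */
    struct IndexEntry {
      uint32_t offset;   /* start of the entry's content within the archive */
      uint32_t length;   /* number of content bytes */
    };

    static int cmp_by_offset(const void *a, const void *b) {
      const struct IndexEntry *x = (const struct IndexEntry *)a;
      const struct IndexEntry *y = (const struct IndexEntry *)b;
      return (x->offset > y->offset) - (x->offset < y->offset);
    }

    /* Returns 0 when no two entries overlap, -1 on the first collision. */
    static int check_no_collisions(struct IndexEntry *entries, size_t count) {
      size_t i;
      qsort(entries, count, sizeof(*entries), cmp_by_offset);
      for (i = 1; i < count; i++) {
        /* Sorted by offset, so it is enough that each entry ends at or
           before its neighbour starts; same-offset, contained, and partial
           front/back overlaps all fail this test. */
        uint64_t prev_end =
            (uint64_t)entries[i - 1].offset + entries[i - 1].length;
        if (prev_end > entries[i].offset) {
          return -1;
        }
      }
      return 0;
    }

Sorting first keeps the check O(n log n), and because the entries are ordered by offset, comparing only adjacent pairs is enough to catch every collision the manipulated_*.mar fixtures construct.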

+ 291 - 0
mozilla-release/patches/1484846-63a1.patch

@@ -0,0 +1,291 @@
+# HG changeset patch
+# User Dave Townsend <dtownsend@oxymoronical.com>
+# Date 1534863183 0
+#      Tue Aug 21 14:53:03 2018 +0000
+# Node ID 8ffadc97369cb92c41b1ded0c2a559f0d3a30c08
+# Parent  c849b5de60827f267e78436d226bce833f3ca9af
+Bug 1484846: Expose a unique hash for the application install directory. r=froydnj
+
+Profile-per-install uses a hash of the install directory to identify different
+installs of Firefox. This exposes the existing cityhash generated hash from
+nsXREDirProvider and makes it available on all platforms.
+
+Differential Revision: https://phabricator.services.mozilla.com/D3852
+
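
For orientation while reading the diff below: when no installer-cached hash is found in the registry, the new GetInstallHash falls back to hashing the raw UTF-16 bytes of the install directory path with CityHash64 and emitting the two 32-bit halves as uppercase hex. A standalone sketch of that fallback, written against the CityHash64(const char*, size_t) signature from city.h (the std::u16string handling and exact zero-padding here are illustrative; the patch itself builds the string with nsString::AppendInt):

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include "city.h"  // assumed include path for CityHash64

    // Hash an install path the way the fallback branch does: raw UTF-16
    // bytes in, two 32-bit halves rendered as uppercase hex out.
    std::string HashInstallPath(const std::u16string& installDir) {
      const char* bytes = reinterpret_cast<const char*>(installDir.data());
      size_t len = installDir.size() * sizeof(char16_t);
      uint64_t hash = CityHash64(bytes, len);
      char buf[20];
      std::snprintf(buf, sizeof(buf), "%X%X",
                    static_cast<unsigned>(hash >> 32),
                    static_cast<unsigned>(hash & 0xFFFFFFFFu));
      return std::string(buf);
    }

The uppercase form matters because the Windows installer writes the same hash to the registry in uppercase and the install directory may live on a case-sensitive file system, as the comment carried over in the patch notes.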
+diff --git a/other-licenses/nsis/Contrib/CityHash/cityhash/city.h b/other-licenses/nsis/Contrib/CityHash/cityhash/city.h
+--- a/other-licenses/nsis/Contrib/CityHash/cityhash/city.h
++++ b/other-licenses/nsis/Contrib/CityHash/cityhash/city.h
+@@ -38,23 +38,22 @@
+ //
+ // By the way, for some hash functions, given strings a and b, the hash
+ // of a+b is easily derived from the hashes of a and b.  This property
+ // doesn't hold for any hash functions in this file.
+ 
+ #ifndef CITY_HASH_H_
+ #define CITY_HASH_H_
+ 
+-#include "../CityHash.h" // added by moz, specific to nsis project
+-
+ #include <stdlib.h>  // for size_t.
++#include <stdint.h>
+ 
+-typedef unsigned __int8 uint8;
+-typedef unsigned __int32 uint32;
+-typedef unsigned __int64 uint64;
++typedef uint8_t uint8;
++typedef uint32_t uint32;
++typedef uint64_t uint64;
+ 
+ // The standard <utility> header doesn't compile, apparently it conflicts
+ // with... some Mozilla something or other. But all that's used from it
+ // is std::pair, so we can just replace that with mozilla::Pair.
+ #ifndef MOZILLA_CLIENT
+ #include <utility>
+ typedef std::pair<uint64, uint64> uint128;
+ inline uint64 Uint128Low64(const uint128& x) { return x.first; }
+diff --git a/toolkit/xre/moz.build b/toolkit/xre/moz.build
+--- a/toolkit/xre/moz.build
++++ b/toolkit/xre/moz.build
+@@ -40,26 +40,22 @@ EXPORTS.mozilla += [
+ if CONFIG['MOZ_INSTRUMENT_EVENT_LOOP']:
+     EXPORTS += ['EventTracer.h']
+ 
+ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+     EXPORTS.mozilla += [
+ #        'PolicyChecks.h',
+         'WinDllServices.h',
+     ]
+-    SOURCES += [
+-        '../../other-licenses/nsis/Contrib/CityHash/cityhash/city.cpp',
+-    ]
+     UNIFIED_SOURCES += [
+         'nsNativeAppSupportWin.cpp',
+         'WinDllServices.cpp',
+     ]
+     DEFINES['PROXY_PRINTING'] = 1
+     LOCAL_INCLUDES += [
+-        '../../other-licenses/nsis/Contrib/CityHash/cityhash',
+         '../components/printingui/win',
+     ]
+ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'cocoa':
+     UNIFIED_SOURCES += [
+         'MacApplicationDelegate.mm',
+         'MacAutoreleasePool.mm',
+         'MacLaunchHelper.mm',
+         'nsCommandLineServiceMac.cpp',
+@@ -101,16 +97,17 @@ UNIFIED_SOURCES += [
+     'nsSigHandlers.cpp',
+     'nsXREDirProvider.cpp',
+ ]
+ 
+ # nsAppRunner.cpp and ProfileReset.cpp cannot be built in unified mode because
+ # they pull in OS X system headers.
+ # nsEmbedFunctions.cpp cannot be built in unified mode because it pulls in X11 headers.
+ SOURCES += [
++    '../../other-licenses/nsis/Contrib/CityHash/cityhash/city.cpp',
+     'nsAppRunner.cpp',
+     'nsEmbedFunctions.cpp',
+     'ProfileReset.cpp',
+ ]
+ 
+ if CONFIG['MOZ_GL_DEFAULT_PROVIDER'] == 'GLX':
+     UNIFIED_SOURCES += [
+         'glxtest.cpp',
+@@ -162,16 +159,17 @@ DEFINES['GRE_MILESTONE'] = CONFIG['GRE_M
+ 
+ for var in ('APP_VERSION', 'APP_ID'):
+     DEFINES[var] = CONFIG['MOZ_%s' % var]
+ 
+ if CONFIG['MOZ_BUILD_APP'] == 'browser':
+     DEFINES['MOZ_BUILD_APP_IS_BROWSER'] = True
+ 
+ LOCAL_INCLUDES += [
++    '../../other-licenses/nsis/Contrib/CityHash/cityhash',
+     '../components/find',
+     '../components/printingui/ipc',
+     '../components/windowwatcher',
+     '../profile',
+     '/config',
+     '/dom/base',
+     '/dom/commandhandler',
+     '/dom/ipc',
+diff --git a/toolkit/xre/nsXREDirProvider.cpp b/toolkit/xre/nsXREDirProvider.cpp
+--- a/toolkit/xre/nsXREDirProvider.cpp
++++ b/toolkit/xre/nsXREDirProvider.cpp
+@@ -39,19 +39,19 @@
+ #include "mozilla/dom/ScriptSettings.h"
+ 
+ #include "mozilla/AutoRestore.h"
+ #include "mozilla/Services.h"
+ #include "mozilla/Omnijar.h"
+ #include "mozilla/Preferences.h"
+ 
+ #include <stdlib.h>
++#include "city.h"
+ 
+ #ifdef XP_WIN
+-#include "city.h"
+ #include <windows.h>
+ #include <shlobj.h>
+ #endif
+ #ifdef XP_MACOSX
+ #include "nsILocalFileMac.h"
+ // for chflags()
+ #include <sys/stat.h>
+ #include <unistd.h>
+@@ -1133,16 +1133,67 @@ GetCachedHash(HKEY rootKey, const nsAStr
+     cachedHash.Assign(cachedHashRaw);
+   }
+   return ERROR_SUCCESS == result;
+ }
+ 
+ #endif
+ 
+ nsresult
++nsXREDirProvider::GetInstallHash(nsAString & aPathHash)
++{
++  nsCOMPtr<nsIFile> updRoot;
++  nsCOMPtr<nsIFile> appFile;
++  bool per = false;
++  nsresult rv = GetFile(XRE_EXECUTABLE_FILE, &per, getter_AddRefs(appFile));
++  NS_ENSURE_SUCCESS(rv, rv);
++  rv = appFile->GetParent(getter_AddRefs(updRoot));
++  NS_ENSURE_SUCCESS(rv, rv);
++
++  nsAutoString appDirPath;
++  rv = updRoot->GetPath(appDirPath);
++  NS_ENSURE_SUCCESS(rv, rv);
++
++#ifdef XP_WIN
++  // Figure out where we should check for a cached hash value. If the
++  // application doesn't have the nsXREAppData vendor value defined check
++  // under SOFTWARE\Mozilla.
++  bool hasVendor = gAppData->vendor && strlen(gAppData->vendor) != 0;
++  wchar_t regPath[1024] = { L'\0' };
++  swprintf_s(regPath, mozilla::ArrayLength(regPath), L"SOFTWARE\\%S\\%S\\TaskBarIDs",
++              (hasVendor ? gAppData->vendor : "Mozilla"), MOZ_APP_BASENAME);
++
++  // If we pre-computed the hash, grab it from the registry.
++  if (GetCachedHash(HKEY_LOCAL_MACHINE, nsDependentString(regPath), appDirPath,
++                    aPathHash)) {
++    return NS_OK;
++  }
++
++  if (GetCachedHash(HKEY_CURRENT_USER, nsDependentString(regPath), appDirPath,
++                    aPathHash)) {
++    return NS_OK;
++  }
++#endif
++
++  // This should only happen when the installer isn't used (e.g. zip builds).
++  void* buffer = appDirPath.BeginWriting();
++  uint32_t length = appDirPath.Length() * sizeof(nsAutoString::char_type);
++  uint64_t hash = CityHash64(static_cast<const char*>(buffer), length);
++  aPathHash.AppendInt((int)(hash >> 32), 16);
++  aPathHash.AppendInt((int)hash, 16);
++  // The installer implementation writes the registry values that were checked
++  // in the previous block for this value in uppercase and since it is an
++  // option to have a case sensitive file system on Windows this value must
++  // also be in uppercase.
++  ToUpperCase(aPathHash);
++
++  return NS_OK;
++}
++
++nsresult
+ nsXREDirProvider::GetUpdateRootDir(nsIFile* *aResult)
+ {
+   nsCOMPtr<nsIFile> updRoot;
+   nsCOMPtr<nsIFile> appFile;
+   bool per = false;
+   nsresult rv = GetFile(XRE_EXECUTABLE_FILE, &per, getter_AddRefs(appFile));
+   NS_ENSURE_SUCCESS(rv, rv);
+   rv = appFile->GetParent(getter_AddRefs(updRoot));
+@@ -1180,59 +1231,26 @@ nsXREDirProvider::GetUpdateRootDir(nsIFi
+     return NS_ERROR_FAILURE;
+   }
+ 
+   localDir.forget(aResult);
+   return NS_OK;
+ 
+ #elif XP_WIN
+   nsAutoString pathHash;
+-  bool pathHashResult = false;
+-  bool hasVendor = gAppData->vendor && strlen(gAppData->vendor) != 0;
+-
+-  nsAutoString appDirPath;
+-  if (SUCCEEDED(updRoot->GetPath(appDirPath))) {
+-
+-    // Figure out where we should check for a cached hash value. If the
+-    // application doesn't have the nsXREAppData vendor value defined check
+-    // under SOFTWARE\Mozilla.
+-    wchar_t regPath[1024] = { L'\0' };
+-    swprintf_s(regPath, mozilla::ArrayLength(regPath), L"SOFTWARE\\%S\\%S\\TaskBarIDs",
+-               (hasVendor ? gAppData->vendor : "Mozilla"), MOZ_APP_BASENAME);
+-
+-    // If we pre-computed the hash, grab it from the registry.
+-    pathHashResult = GetCachedHash(HKEY_LOCAL_MACHINE,
+-                                   nsDependentString(regPath), appDirPath,
+-                                   pathHash);
+-    if (!pathHashResult) {
+-      pathHashResult = GetCachedHash(HKEY_CURRENT_USER,
+-                                     nsDependentString(regPath), appDirPath,
+-                                     pathHash);
+-    }
+-  }
+-
+-  if (!pathHashResult) {
+-    // This should only happen when the installer isn't used (e.g. zip builds).
+-    uint64_t hash = CityHash64(static_cast<const char *>(appDirPath.get()),
+-                               appDirPath.Length() * sizeof(nsAutoString::char_type));
+-    pathHash.AppendInt((int)(hash >> 32), 16);
+-    pathHash.AppendInt((int)hash, 16);
+-    // The installer implementation writes the registry values that were checked
+-    // in the previous block for this value in uppercase and since it is an
+-    // option to have a case sensitive file system on Windows this value must
+-    // also be in uppercase.
+-    ToUpperCase(pathHash);
+-  }
++  rv = GetInstallHash(pathHash);
++  NS_ENSURE_SUCCESS(rv, rv);
+ 
+   // As a last ditch effort, get the local app data directory and if a vendor
+   // name exists append it. If only a product name exists, append it. If neither
+   // exist fallback to old handling. We don't use the product name on purpose
+   // because we want a shared update directory for different apps run from the
+   // same path.
+   nsCOMPtr<nsIFile> localDir;
++  bool hasVendor = gAppData->vendor && strlen(gAppData->vendor) != 0;
+   if ((hasVendor || gAppData->name) &&
+       NS_SUCCEEDED(GetUserDataDirectoryHome(getter_AddRefs(localDir), true)) &&
+       NS_SUCCEEDED(localDir->AppendNative(nsDependentCString(hasVendor ?
+                                           gAppData->vendor : gAppData->name))) &&
+       NS_SUCCEEDED(localDir->Append(NS_LITERAL_STRING("updates"))) &&
+       NS_SUCCEEDED(localDir->Append(pathHash))) {
+     localDir.forget(aResult);
+     return NS_OK;
+diff --git a/toolkit/xre/nsXREDirProvider.h b/toolkit/xre/nsXREDirProvider.h
+--- a/toolkit/xre/nsXREDirProvider.h
++++ b/toolkit/xre/nsXREDirProvider.h
+@@ -75,16 +75,21 @@ public:
+   nsIFile* GetGREBinDir() { return mGREBinDir; }
+   nsIFile* GetAppDir() {
+     if (mXULAppDir)
+       return mXULAppDir;
+     return mGREDir;
+   }
+ 
+   /**
++   * Get a hash for the install directory.
++   */
++  nsresult GetInstallHash(nsAString & aPathHash);
++
++  /**
+    * Get the directory under which update directory is created.
+    * This method may be called before XPCOM is started. aResult
+    * is a clone, it may be modified.
+    */
+   nsresult GetUpdateRootDir(nsIFile* *aResult);
+ 
+   /**
+    * Get the profile startup directory as determined by this class or by

+ 23 - 0
mozilla-release/patches/1488217-64a1.patch

@@ -0,0 +1,23 @@
+# HG changeset patch
+# User Petr Sumbera <petr.sumbera@oracle.com>
+# Date 1535983810 0
+#      Mon Sep 03 14:10:10 2018 +0000
+# Node ID 809efffd8f8011137c03517d619df4a89cd9473c
+# Parent  14d56f95b51f5fd6ef7f0348ad8f24fe051b5392
+Bug 1488217 build correctly snappy on big endian platforms r=glandium
+
+diff --git a/other-licenses/snappy/moz.build b/other-licenses/snappy/moz.build
+--- a/other-licenses/snappy/moz.build
++++ b/other-licenses/snappy/moz.build
+@@ -23,8 +23,11 @@ AllowCompilerWarnings()
+ FINAL_LIBRARY = 'xul'
+ 
+ # Suppress warnings in third-party code.
+ if CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+     CXXFLAGS += [
+         '-Wno-sign-compare',
+         '-Wno-unused-function'
+     ]
++
++if CONFIG['TARGET_ENDIANNESS'] == 'big':
++    DEFINES['IS_BIG_ENDIAN'] = 1
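
The define is scoped to this directory's sources, so only snappy's own byte-order handling sees it. As a purely illustrative sketch of what such a flag typically gates (not snappy's actual code), a little-endian 32-bit load has to byte-swap when IS_BIG_ENDIAN is defined, which is exactly what the moz.build change arranges on big-endian targets:

    #include <cstdint>
    #include <cstring>

    // Load a 32-bit little-endian value from an unaligned buffer.
    static inline uint32_t LoadLE32(const void* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));
    #ifdef IS_BIG_ENDIAN
      // Big-endian target: swap the bytes so the little-endian wire
      // format is interpreted correctly.
      v = ((v & 0x000000FFu) << 24) |
          ((v & 0x0000FF00u) << 8)  |
          ((v & 0x00FF0000u) >> 8)  |
          ((v & 0xFF000000u) >> 24);
    #endif
      return v;
    }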

+ 2020 - 0
mozilla-release/patches/1489454-libmar-64a1.patch

@@ -0,0 +1,2020 @@
+# HG changeset patch
+# User Sylvestre Ledru <sledru@mozilla.com>
+# Date 1536331671 0
+#      Fri Sep 07 14:47:51 2018 +0000
+# Node ID 10d5143647cfda21649cf254adcb21d116524c5d
+# Parent  6923b7cb526e9f574099f1d503621a884d9c7392
+Bug 1489454 - Remove all trailing whitespaces (again) r=Ehsan
+
+This also includes moving some files to the regular format.
+
+Differential Revision: https://phabricator.services.mozilla.com/D5249
+
+diff --git a/modules/libmar/sign/mar_sign.c b/modules/libmar/sign/mar_sign.c
+--- a/modules/libmar/sign/mar_sign.c
++++ b/modules/libmar/sign/mar_sign.c
+@@ -21,47 +21,47 @@
+ #include <unistd.h>
+ #endif
+ 
+ #include "nss_secutil.h"
+ #include "base64.h"
+ 
+ /**
+  * Initializes the NSS context.
+- * 
++ *
+  * @param NSSConfigDir The config dir containing the private key to use
+  * @return 0 on success
+  *         -1 on error
+ */
+ int
+ NSSInitCryptoContext(const char *NSSConfigDir)
+ {
+-  SECStatus status = NSS_Initialize(NSSConfigDir, 
++  SECStatus status = NSS_Initialize(NSSConfigDir,
+                                     "", "", SECMOD_DB, NSS_INIT_READONLY);
+   if (SECSuccess != status) {
+     fprintf(stderr, "ERROR: Could not initialize NSS\n");
+     return -1;
+   }
+ 
+   return 0;
+ }
+ 
+-/** 
++/**
+  * Obtains a signing context.
+  *
+  * @param  ctx A pointer to the signing context to fill
+  * @return 0 on success
+  *         -1 on error
+ */
+ int
+-NSSSignBegin(const char *certName, 
+-             SGNContext **ctx, 
+-             SECKEYPrivateKey **privKey, 
++NSSSignBegin(const char *certName,
++             SGNContext **ctx,
++             SECKEYPrivateKey **privKey,
+              CERTCertificate **cert,
+-             uint32_t *signatureLength) 
++             uint32_t *signatureLength)
+ {
+   secuPWData pwdata = { PW_NONE, 0 };
+   if (!certName || !ctx || !privKey || !cert || !signatureLength) {
+     fprintf(stderr, "ERROR: Invalid parameter passed to NSSSignBegin\n");
+     return -1;
+   }
+ 
+   /* Get the cert and embedded public key out of the database */
+@@ -76,41 +76,41 @@ NSSSignBegin(const char *certName,
+   if (!*privKey) {
+     fprintf(stderr, "ERROR: Could not find private key\n");
+     return -1;
+   }
+ 
+   *signatureLength = PK11_SignatureLen(*privKey);
+ 
+   if (*signatureLength > BLOCKSIZE) {
+-    fprintf(stderr, 
++    fprintf(stderr,
+             "ERROR: Program must be compiled with a larger block size"
+-            " to support signing with signatures this large: %u.\n", 
++            " to support signing with signatures this large: %u.\n",
+             *signatureLength);
+     return -1;
+   }
+ 
+   /* Check that the key length is large enough for our requirements */
+   if (*signatureLength < XP_MIN_SIGNATURE_LEN_IN_BYTES) {
+-    fprintf(stderr, "ERROR: Key length must be >= %d bytes\n", 
++    fprintf(stderr, "ERROR: Key length must be >= %d bytes\n",
+             XP_MIN_SIGNATURE_LEN_IN_BYTES);
+     return -1;
+   }
+ 
+   *ctx = SGN_NewContext(SEC_OID_PKCS1_SHA384_WITH_RSA_ENCRYPTION, *privKey);
+   if (!*ctx) {
+     fprintf(stderr, "ERROR: Could not create signature context\n");
+     return -1;
+   }
+-  
++
+   if (SGN_Begin(*ctx) != SECSuccess) {
+     fprintf(stderr, "ERROR: Could not begin signature\n");
+     return -1;
+   }
+-  
++
+   return 0;
+ }
+ 
+ /**
+  * Writes the passed buffer to the file fp and updates the signature contexts.
+  *
+  * @param  fpDest   The file pointer to write to.
+  * @param  buffer   The buffer to write.
+@@ -125,17 +125,17 @@ NSSSignBegin(const char *certName,
+ */
+ int
+ WriteAndUpdateSignatures(FILE *fpDest, void *buffer,
+                          uint32_t size, SGNContext **ctxs,
+                          uint32_t ctxCount,
+                          const char *err)
+ {
+   uint32_t k;
+-  if (!size) { 
++  if (!size) {
+     return 0;
+   }
+ 
+   if (fwrite(buffer, size, 1, fpDest) != 1) {
+     fprintf(stderr, "ERROR: Could not write %s\n", err);
+     return -2;
+   }
+ 
+@@ -143,34 +143,34 @@ WriteAndUpdateSignatures(FILE *fpDest, v
+     if (SGN_Update(ctxs[k], buffer, size) != SECSuccess) {
+       fprintf(stderr, "ERROR: Could not update signature context for %s\n", err);
+       return -3;
+     }
+   }
+   return 0;
+ }
+ 
+-/** 
+- * Adjusts each entry's content offset in the the passed in index by the 
++/**
++ * Adjusts each entry's content offset in the the passed in index by the
+  * specified amount.
+  *
+  * @param indexBuf     A buffer containing the MAR index
+  * @param indexLength  The length of the MAR index
+  * @param offsetAmount The amount to adjust each index entry by
+ */
+ void
+-AdjustIndexContentOffsets(char *indexBuf, uint32_t indexLength, uint32_t offsetAmount) 
++AdjustIndexContentOffsets(char *indexBuf, uint32_t indexLength, uint32_t offsetAmount)
+ {
+   uint32_t *offsetToContent;
+   char *indexBufLoc = indexBuf;
+ 
+   /* Consume the index and adjust each index by the specified amount */
+   while (indexBufLoc != (indexBuf + indexLength)) {
+     /* Adjust the offset */
+-    offsetToContent = (uint32_t *)indexBufLoc; 
++    offsetToContent = (uint32_t *)indexBufLoc;
+     *offsetToContent = ntohl(*offsetToContent);
+     *offsetToContent += offsetAmount;
+     *offsetToContent = htonl(*offsetToContent);
+     /* Skip past the offset, length, and flags */
+     indexBufLoc += 3 * sizeof(uint32_t);
+     indexBufLoc += strlen(indexBufLoc) + 1;
+   }
+ }
+@@ -192,17 +192,17 @@ AdjustIndexContentOffsets(char *indexBuf
+  *         -3 on signature update error
+ */
+ int
+ ReadWriteAndUpdateSignatures(FILE *fpSrc, FILE *fpDest, void *buffer,
+                              uint32_t size, SGNContext **ctxs,
+                              uint32_t ctxCount,
+                              const char *err)
+ {
+-  if (!size) { 
++  if (!size) {
+     return 0;
+   }
+ 
+   if (fread(buffer, size, 1, fpSrc) != 1) {
+     fprintf(stderr, "ERROR: Could not read %s\n", err);
+     return -1;
+   }
+ 
+@@ -218,20 +218,20 @@ ReadWriteAndUpdateSignatures(FILE *fpSrc
+  * @param  buffer The buffer to write.
+  * @param  size   The size of the buffer to write.
+  * @param  err    The name of what is being written to in case of error.
+  * @return  0 on success
+  *         -1 on read error
+  *         -2 on write error
+ */
+ int
+-ReadAndWrite(FILE *fpSrc, FILE *fpDest, void *buffer, 
+-             uint32_t size, const char *err) 
++ReadAndWrite(FILE *fpSrc, FILE *fpDest, void *buffer,
++             uint32_t size, const char *err)
+ {
+-  if (!size) { 
++  if (!size) {
+     return 0;
+   }
+ 
+   if (fread(buffer, size, 1, fpSrc) != 1) {
+     fprintf(stderr, "ERROR: Could not read %s\n", err);
+     return -1;
+   }
+ 
+@@ -242,25 +242,25 @@ ReadAndWrite(FILE *fpSrc, FILE *fpDest, 
+ 
+   return 0;
+ }
+ 
+ /**
+  * Writes out a copy of the MAR at src but with the signature block stripped.
+  *
+  * @param  src  The path of the source MAR file
+- * @param  dest The path of the MAR file to write out that 
++ * @param  dest The path of the MAR file to write out that
+                 has no signature block
+  * @return 0 on success
+  *         -1 on error
+ */
+ int
+ strip_signature_block(const char *src, const char * dest)
+ {
+-  uint32_t offsetToIndex, dstOffsetToIndex, indexLength, 
++  uint32_t offsetToIndex, dstOffsetToIndex, indexLength,
+     numSignatures = 0, leftOver;
+   int32_t stripAmount = 0;
+   int64_t oldPos, sizeOfEntireMAR = 0, realSizeOfSrcMAR, numBytesToCopy,
+     numChunks, i;
+   FILE *fpSrc = NULL, *fpDest = NULL;
+   int rv = -1, hasSignatureBlock;
+   char buf[BLOCKSIZE];
+   char *indexBuf = NULL;
+@@ -309,27 +309,27 @@ strip_signature_block(const char *src, c
+   realSizeOfSrcMAR = ftello(fpSrc);
+   if (fseeko(fpSrc, oldPos, SEEK_SET)) {
+     fprintf(stderr, "ERROR: Could not seek back to current location.\n");
+     goto failure;
+   }
+ 
+   if (hasSignatureBlock) {
+     /* Get the MAR length and adjust its size */
+-    if (fread(&sizeOfEntireMAR, 
++    if (fread(&sizeOfEntireMAR,
+               sizeof(sizeOfEntireMAR), 1, fpSrc) != 1) {
+       fprintf(stderr, "ERROR: Could read mar size\n");
+       goto failure;
+     }
+     sizeOfEntireMAR = NETWORK_TO_HOST64(sizeOfEntireMAR);
+     if (sizeOfEntireMAR != realSizeOfSrcMAR) {
+       fprintf(stderr, "ERROR: Source MAR is not of the right size\n");
+       goto failure;
+     }
+-  
++
+     /* Get the num signatures in the source file so we know what to strip */
+     if (fread(&numSignatures, sizeof(numSignatures), 1, fpSrc) != 1) {
+       fprintf(stderr, "ERROR: Could read num signatures\n");
+       goto failure;
+     }
+     numSignatures = ntohl(numSignatures);
+ 
+     for (i = 0; i < numSignatures; i++) {
+@@ -347,17 +347,17 @@ strip_signature_block(const char *src, c
+       }
+       signatureLen = ntohl(signatureLen);
+ 
+       /* Skip past the signature */
+       if (fseeko(fpSrc, signatureLen, SEEK_CUR)) {
+         fprintf(stderr, "ERROR: Could not skip past signature algorithm ID\n");
+       }
+ 
+-      stripAmount += sizeof(uint32_t) + sizeof(uint32_t) + signatureLen; 
++      stripAmount += sizeof(uint32_t) + sizeof(uint32_t) + signatureLen;
+     }
+ 
+   } else {
+     sizeOfEntireMAR = realSizeOfSrcMAR;
+     numSignatures = 0;
+   }
+ 
+   if (((int64_t)offsetToIndex) > sizeOfEntireMAR) {
+@@ -414,65 +414,65 @@ strip_signature_block(const char *src, c
+   /* Read each file and write it to the MAR file */
+   for (i = 0; i < numChunks; ++i) {
+     if (ReadAndWrite(fpSrc, fpDest, buf, BLOCKSIZE, "content block")) {
+       goto failure;
+     }
+   }
+ 
+   /* Write out the left over */
+-  if (ReadAndWrite(fpSrc, fpDest, buf, 
++  if (ReadAndWrite(fpSrc, fpDest, buf,
+                    leftOver, "left over content block")) {
+     goto failure;
+   }
+ 
+   /* Length of the index */
+-  if (ReadAndWrite(fpSrc, fpDest, &indexLength, 
++  if (ReadAndWrite(fpSrc, fpDest, &indexLength,
+                    sizeof(indexLength), "index length")) {
+     goto failure;
+   }
+   indexLength = ntohl(indexLength);
+ 
+   /* Consume the index and adjust each index by the difference */
+   indexBuf = malloc(indexLength);
+   if (fread(indexBuf, indexLength, 1, fpSrc) != 1) {
+     fprintf(stderr, "ERROR: Could not read index\n");
+     goto failure;
+   }
+ 
+   /* Adjust each entry in the index */
+   if (hasSignatureBlock) {
+     AdjustIndexContentOffsets(indexBuf, indexLength, -stripAmount);
+   } else {
+-    AdjustIndexContentOffsets(indexBuf, indexLength, 
+-                              sizeof(sizeOfEntireMAR) + 
+-                              sizeof(numSignatures) - 
++    AdjustIndexContentOffsets(indexBuf, indexLength,
++                              sizeof(sizeOfEntireMAR) +
++                              sizeof(numSignatures) -
+                               stripAmount);
+   }
+ 
+   if (fwrite(indexBuf, indexLength, 1, fpDest) != 1) {
+     fprintf(stderr, "ERROR: Could not write index\n");
+     goto failure;
+   }
+ 
+   rv = 0;
+-failure: 
++failure:
+   if (fpSrc) {
+     fclose(fpSrc);
+   }
+ 
+   if (fpDest) {
+     fclose(fpDest);
+   }
+ 
+   if (rv) {
+     remove(dest);
+   }
+ 
+-  if (indexBuf) { 
++  if (indexBuf) {
+     free(indexBuf);
+   }
+ 
+   if (rv) {
+     remove(dest);
+   }
+   return rv;
+ }
+@@ -798,41 +798,41 @@ failure:
+     PORT_Free(passedInSignatureRaw);
+   }
+ 
+   return rv;
+ }
+ 
+ /**
+  * Writes out a copy of the MAR at src but with embedded signatures.
+- * The passed in MAR file must not already be signed or an error will 
++ * The passed in MAR file must not already be signed or an error will
+  * be returned.
+  *
+  * @param  NSSConfigDir  The NSS directory containing the private key for signing
+  * @param  certNames     The nicknames of the certificate to use for signing
+  * @param  certCount     The number of certificate names contained in certNames.
+  *                       One signature will be produced for each certificate.
+  * @param  src           The path of the source MAR file to sign
+  * @param  dest          The path of the MAR file to write out that is signed
+  * @return 0 on success
+  *         -1 on error
+ */
+ int
+-mar_repackage_and_sign(const char *NSSConfigDir, 
++mar_repackage_and_sign(const char *NSSConfigDir,
+                        const char * const *certNames,
+                        uint32_t certCount,
+-                       const char *src, 
+-                       const char *dest) 
++                       const char *src,
++                       const char *dest)
+ {
+-  uint32_t offsetToIndex, dstOffsetToIndex, indexLength, 
++  uint32_t offsetToIndex, dstOffsetToIndex, indexLength,
+     numSignatures = 0, leftOver,
+     signatureAlgorithmID, signatureSectionLength = 0;
+   uint32_t signatureLengths[MAX_SIGNATURES];
+-  int64_t oldPos, sizeOfEntireMAR = 0, realSizeOfSrcMAR, 
+-    signaturePlaceholderOffset, numBytesToCopy, 
++  int64_t oldPos, sizeOfEntireMAR = 0, realSizeOfSrcMAR,
++    signaturePlaceholderOffset, numBytesToCopy,
+     numChunks, i;
+   FILE *fpSrc = NULL, *fpDest = NULL;
+   int rv = -1, hasSignatureBlock;
+   SGNContext *ctxs[MAX_SIGNATURES];
+   SECItem secItems[MAX_SIGNATURES];
+   char buf[BLOCKSIZE];
+   SECKEYPrivateKey *privKeys[MAX_SIGNATURES];
+   CERTCertificate *certs[MAX_SIGNATURES];
+@@ -906,27 +906,27 @@ mar_repackage_and_sign(const char *NSSCo
+   realSizeOfSrcMAR = ftello(fpSrc);
+   if (fseeko(fpSrc, oldPos, SEEK_SET)) {
+     fprintf(stderr, "ERROR: Could not seek back to current location.\n");
+     goto failure;
+   }
+ 
+   if (hasSignatureBlock) {
+     /* Get the MAR length and adjust its size */
+-    if (fread(&sizeOfEntireMAR, 
++    if (fread(&sizeOfEntireMAR,
+               sizeof(sizeOfEntireMAR), 1, fpSrc) != 1) {
+       fprintf(stderr, "ERROR: Could read mar size\n");
+       goto failure;
+     }
+     sizeOfEntireMAR = NETWORK_TO_HOST64(sizeOfEntireMAR);
+     if (sizeOfEntireMAR != realSizeOfSrcMAR) {
+       fprintf(stderr, "ERROR: Source MAR is not of the right size\n");
+       goto failure;
+     }
+-  
++
+     /* Get the num signatures in the source file */
+     if (fread(&numSignatures, sizeof(numSignatures), 1, fpSrc) != 1) {
+       fprintf(stderr, "ERROR: Could read num signatures\n");
+       goto failure;
+     }
+     numSignatures = ntohl(numSignatures);
+ 
+     /* We do not support resigning, if you have multiple signatures,
+@@ -1062,19 +1062,19 @@ mar_repackage_and_sign(const char *NSSCo
+     fprintf(stderr, "ERROR: Could not read index\n");
+     goto failure;
+   }
+ 
+   /* Adjust each entry in the index */
+   if (hasSignatureBlock) {
+     AdjustIndexContentOffsets(indexBuf, indexLength, signatureSectionLength);
+   } else {
+-    AdjustIndexContentOffsets(indexBuf, indexLength, 
+-                              sizeof(sizeOfEntireMAR) + 
+-                              sizeof(numSignatures) + 
++    AdjustIndexContentOffsets(indexBuf, indexLength,
++                              sizeof(sizeOfEntireMAR) +
++                              sizeof(numSignatures) +
+                               signatureSectionLength);
+   }
+ 
+   if (WriteAndUpdateSignatures(fpDest, indexBuf,
+                                indexLength, ctxs, certCount, "index")) {
+     goto failure;
+   }
+ 
+@@ -1114,30 +1114,30 @@ mar_repackage_and_sign(const char *NSSCo
+       *** THIS IS NOT SIGNED because it is the signature itself. *** */
+     if (fwrite(secItems[k].data, secItems[k].len, 1, fpDest) != 1) {
+       fprintf(stderr, "ERROR: Could not write signature\n");
+       goto failure;
+     }
+   }
+ 
+   rv = 0;
+-failure: 
++failure:
+   if (fpSrc) {
+     fclose(fpSrc);
+   }
+ 
+   if (fpDest) {
+     fclose(fpDest);
+   }
+ 
+   if (rv) {
+     remove(dest);
+   }
+ 
+-  if (indexBuf) { 
++  if (indexBuf) {
+     free(indexBuf);
+   }
+ 
+   /* Cleanup */
+   for (k = 0; k < certCount; k++) {
+     if (ctxs[k]) {
+       SGN_DestroyContext(ctxs[k], PR_TRUE);
+     }
+diff --git a/modules/libmar/sign/nss_secutil.c b/modules/libmar/sign/nss_secutil.c
+--- a/modules/libmar/sign/nss_secutil.c
++++ b/modules/libmar/sign/nss_secutil.c
+@@ -69,17 +69,17 @@ GetPasswordString(void *arg, char *promp
+     };
+ 
+     input = fopen(consoleName, "r");
+     if (input == NULL) {
+       fprintf(stderr, "Error opening input terminal for read\n");
+       return NULL;
+     }
+   }
+-#endif 
++#endif
+ 
+   if (isInputTerminal) {
+     fprintf(stdout, "Please enter your password:\n");
+     fflush(stdout);
+   }
+ 
+   if (!QUIET_FGETS(phrase, sizeof(phrase), input)) {
+     fprintf(stderr, "QUIET_FGETS failed\n");
+@@ -92,17 +92,17 @@ GetPasswordString(void *arg, char *promp
+ 
+ #ifndef _WINDOWS
+   if (isInputTerminal) {
+     fclose(input);
+   }
+ #endif
+ 
+   /* Strip off the newlines if present */
+-  if (phrase[PORT_Strlen(phrase)-1] == '\n' || 
++  if (phrase[PORT_Strlen(phrase)-1] == '\n' ||
+       phrase[PORT_Strlen(phrase)-1] == '\r') {
+     phrase[PORT_Strlen(phrase)-1] = 0;
+   }
+   return (char*) PORT_Strdup(phrase);
+ }
+ 
+ char *
+ SECU_FilePasswd(PK11SlotInfo *slot, PRBool retry, void *arg)
+@@ -180,17 +180,17 @@ SECU_FilePasswd(PK11SlotInfo *slot, PRBo
+   } while (i<nb);
+ 
+   phrase = PORT_Strdup((char*)phrase);
+   PORT_Free(phrases);
+   return phrase;
+ }
+ 
+ char *
+-SECU_GetModulePassword(PK11SlotInfo *slot, PRBool retry, void *arg) 
++SECU_GetModulePassword(PK11SlotInfo *slot, PRBool retry, void *arg)
+ {
+     char prompt[255];
+     secuPWData *pwdata = (secuPWData *)arg;
+     secuPWData pwnull = { PW_NONE, 0 };
+     secuPWData pwxtrn = { PW_EXTERNAL, "external" };
+     char *pw;
+ 
+     if (pwdata == NULL)
+@@ -214,17 +214,17 @@ SECU_GetModulePassword(PK11SlotInfo *slo
+    * once, then keep it in memory (duh).
+    */
+   pw = SECU_FilePasswd(slot, retry, pwdata->data);
+   pwdata->source = PW_PLAINTEXT;
+   pwdata->data = PL_strdup(pw);
+   /* it's already been dup'ed */
+   return pw;
+     case PW_EXTERNAL:
+-  sprintf(prompt, 
++  sprintf(prompt,
+           "Press Enter, then enter PIN for \"%s\" on external device.\n",
+     PK11_GetTokenName(slot));
+   pw = GetPasswordString(NULL, prompt);
+   if (pw) {
+     memset(pw, 0, PORT_Strlen(pw));
+     PORT_Free(pw);
+   }
+       /* Fall Through */
+diff --git a/modules/libmar/src/mar.h b/modules/libmar/src/mar.h
+--- a/modules/libmar/src/mar.h
++++ b/modules/libmar/src/mar.h
+@@ -117,19 +117,19 @@ int mar_read(MarFile *mar, const MarItem
+  * @param dest      The path to the file to create.  This path must be
+  *                  compatible with fopen.
+  * @param numfiles  The number of files to store in the archive.
+  * @param files     The list of null-terminated file paths.  Each file
+  *                  path must be compatible with fopen.
+  * @param infoBlock The information to store in the product information block.
+  * @return          A non-zero value if an error occurs.
+  */
+-int mar_create(const char *dest, 
+-               int numfiles, 
+-               char **files, 
++int mar_create(const char *dest,
++               int numfiles,
++               char **files,
+                struct ProductInformationBlock *infoBlock);
+ 
+ /**
+  * Extract a MAR file to the current working directory.
+  * @param path      The path to the MAR file to extract.  This path must be
+  *                  compatible with fopen.
+  * @return          A non-zero value if an error occurs.
+  */
+@@ -142,58 +142,58 @@ int mar_extract(const char *path);
+  * write whatever error messages it sees fit. The caller must free the returned
+  * buffer using free().
+  *
+  * @param filePath The path to the file that should be read.
+  * @param maxSize  The maximum valid file size.
+  * @param data     On success, *data will point to a newly-allocated buffer
+  *                 with the file's contents in it.
+  * @param size     On success, *size will be the size of the created buffer.
+- * 
++ *
+  * @return 0 on success, -1 on error
+  */
+ int mar_read_entire_file(const char * filePath,
+                          uint32_t maxSize,
+                          /*out*/ const uint8_t * *data,
+                          /*out*/ uint32_t *size);
+ 
+ /**
+  * Verifies a MAR file by verifying each signature with the corresponding
+  * certificate. That is, the first signature will be verified using the first
+  * certificate given, the second signature will be verified using the second
+  * certificate given, etc. The signature count must exactly match the number of
+  * certificates given, and all signature verifications must succeed.
+- * We do not check that the certificate was issued by any trusted authority. 
+- * We assume it to be self-signed.  We do not check whether the certificate 
++ * We do not check that the certificate was issued by any trusted authority.
++ * We assume it to be self-signed.  We do not check whether the certificate
+  * is valid for this usage.
+- * 
++ *
+  * @param mar            The already opened MAR file.
+  * @param certData       Pointer to the first element in an array of certificate
+  *                       file data.
+  * @param certDataSizes  Pointer to the first element in an array for size of
+  *                       the cert data.
+  * @param certCount      The number of elements in certData and certDataSizes
+  * @return 0 on success
+  *         a negative number if there was an error
+  *         a positive number if the signature does not verify
+  */
+ int mar_verify_signatures(MarFile *mar,
+                           const uint8_t * const *certData,
+                           const uint32_t *certDataSizes,
+                           uint32_t certCount);
+ 
+-/** 
++/**
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+ */
+ int
+-mar_read_product_info_block(MarFile *mar, 
++mar_read_product_info_block(MarFile *mar,
+                             struct ProductInformationBlock *infoBlock);
+ 
+ #ifdef __cplusplus
+ }
+ #endif
+ 
+ #endif  /* MAR_H__ */
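
Since this hunk touches the public mar.h API surface, a short caller sketch may help orient readers. It is hypothetical code written only against the signatures documented above (the file names, channel and version strings are invented), not anything from the patch:

    #include <stdio.h>
    #include "mar.h"

    int main(void) {
      /* Any fopen-compatible paths work, per the mar_create() docs. */
      char *files[] = { (char *)"update.manifest", (char *)"payload.bin" };

      struct ProductInformationBlock info;
      info.MARChannelID = "example-channel";  /* < 64 bytes per the spec */
      info.productVersion = "1.0";            /* < 32 bytes per the spec */

      /* Package the two files into an archive... */
      if (mar_create("example.mar", 2, files, &info) != 0) {
        fprintf(stderr, "mar_create failed\n");
        return 1;
      }

      /* ...and extract it again into the current working directory. */
      if (mar_extract("example.mar") != 0) {
        fprintf(stderr, "mar_extract failed\n");
        return 1;
      }
      return 0;
    }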
+diff --git a/modules/libmar/src/mar_cmdline.h b/modules/libmar/src/mar_cmdline.h
+--- a/modules/libmar/src/mar_cmdline.h
++++ b/modules/libmar/src/mar_cmdline.h
+@@ -18,61 +18,61 @@ struct ProductInformationBlock;
+  *
+  * @param path                   The path of the MAR file to check.
+  * @param hasSignatureBlock      Optional out parameter specifying if the MAR
+  *                               file has a signature block or not.
+  * @param numSignatures          Optional out parameter for storing the number
+  *                               of signatures in the MAR file.
+  * @param hasAdditionalBlocks    Optional out parameter specifying if the MAR
+  *                               file has additional blocks or not.
+- * @param offsetAdditionalBlocks Optional out parameter for the offset to the 
++ * @param offsetAdditionalBlocks Optional out parameter for the offset to the
+  *                               first additional block. Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @param numAdditionalBlocks    Optional out parameter for the number of
+  *                               additional blocks.  Value is only valid if
+  *                               has_additional_blocks is not equal to 0.
+  * @return 0 on success and non-zero on failure.
+  */
+-int get_mar_file_info(const char *path, 
++int get_mar_file_info(const char *path,
+                       int *hasSignatureBlock,
+                       uint32_t *numSignatures,
+                       int *hasAdditionalBlocks,
+                       uint32_t *offsetAdditionalBlocks,
+                       uint32_t *numAdditionalBlocks);
+ 
+-/** 
++/**
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+ */
+ int
+-read_product_info_block(char *path, 
++read_product_info_block(char *path,
+                         struct ProductInformationBlock *infoBlock);
+ 
+-/** 
++/**
+  * Refreshes the product information block with the new information.
+  * The input MAR must not be signed or the function call will fail.
+- * 
++ *
+  * @param path             The path to the MAR file whose product info block
+  *                         should be refreshed.
+  * @param infoBlock        Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+ */
+ int
+ refresh_product_info_block(const char *path,
+                            struct ProductInformationBlock *infoBlock);
+ 
+ /**
+  * Writes out a copy of the MAR at src but with the signature block stripped.
+  *
+  * @param  src  The path of the source MAR file
+- * @param  dest The path of the MAR file to write out that 
++ * @param  dest The path of the MAR file to write out that
+                 has no signature block
+  * @return 0 on success
+  *         -1 on error
+ */
+ int
+ strip_signature_block(const char *src, const char * dest);
+ 
+ /**
+diff --git a/modules/libmar/src/mar_create.c b/modules/libmar/src/mar_create.c
+--- a/modules/libmar/src/mar_create.c
++++ b/modules/libmar/src/mar_create.c
+@@ -32,17 +32,17 @@ struct MarItemStack {
+  * of memory.
+  */
+ static int mar_push(struct MarItemStack *stack, uint32_t length, uint32_t flags,
+                     const char *name) {
+   int namelen;
+   uint32_t n_offset, n_length, n_flags;
+   uint32_t size;
+   char *data;
+-  
++
+   namelen = strlen(name);
+   size = MAR_ITEM_SIZE(namelen);
+ 
+   if (stack->size_allocated - stack->size_used < size) {
+     /* increase size of stack */
+     size_t size_needed = ROUND_UP(stack->size_used + size, BLOCKSIZE);
+     stack->head = realloc(stack->head, size_needed);
+     if (!stack->head)
+@@ -61,17 +61,17 @@ static int mar_push(struct MarItemStack 
+ 
+   memcpy(data, &n_length, sizeof(n_length));
+   data += sizeof(n_length);
+ 
+   memcpy(data, &n_flags, sizeof(n_flags));
+   data += sizeof(n_flags);
+ 
+   memcpy(data, name, namelen + 1);
+-  
++
+   stack->size_used += size;
+   stack->last_offset += length;
+   return 0;
+ }
+ 
+ static int mar_concat_file(FILE *fp, const char *path) {
+   FILE *in;
+   char buf[BLOCKSIZE];
+@@ -95,34 +95,34 @@ static int mar_concat_file(FILE *fp, con
+   fclose(in);
+   return rv;
+ }
+ 
+ /**
+  * Writes out the product information block to the specified file.
+  *
+  * @param fp           The opened MAR file being created.
+- * @param stack        A pointer to the MAR item stack being used to create 
++ * @param stack        A pointer to the MAR item stack being used to create
+  *                     the MAR
+  * @param infoBlock    The product info block to store in the file.
+  * @return 0 on success.
+ */
+ static int
+-mar_concat_product_info_block(FILE *fp, 
++mar_concat_product_info_block(FILE *fp,
+                               struct MarItemStack *stack,
+                               struct ProductInformationBlock *infoBlock)
+ {
+   char buf[PIB_MAX_MAR_CHANNEL_ID_SIZE + PIB_MAX_PRODUCT_VERSION_SIZE];
+   uint32_t additionalBlockID = 1, infoBlockSize, unused;
+-  if (!fp || !infoBlock || 
++  if (!fp || !infoBlock ||
+       !infoBlock->MARChannelID ||
+       !infoBlock->productVersion) {
+     return -1;
+   }
+- 
++
+   /* The MAR channel name must be < 64 bytes per the spec */
+   if (strlen(infoBlock->MARChannelID) > PIB_MAX_MAR_CHANNEL_ID_SIZE) {
+     return -1;
+   }
+ 
+   /* The product version must be < 32 bytes per the spec */
+   if (strlen(infoBlock->productVersion) > PIB_MAX_PRODUCT_VERSION_SIZE) {
+     return -1;
+@@ -137,75 +137,75 @@ mar_concat_product_info_block(FILE *fp,
+                   PIB_MAX_MAR_CHANNEL_ID_SIZE +
+                   PIB_MAX_PRODUCT_VERSION_SIZE + 2;
+   if (stack) {
+     stack->last_offset += infoBlockSize;
+   }
+ 
+   /* Write out the product info block size */
+   infoBlockSize = htonl(infoBlockSize);
+-  if (fwrite(&infoBlockSize, 
++  if (fwrite(&infoBlockSize,
+       sizeof(infoBlockSize), 1, fp) != 1) {
+     return -1;
+   }
+   infoBlockSize = ntohl(infoBlockSize);
+ 
+   /* Write out the product info block ID */
+   additionalBlockID = htonl(additionalBlockID);
+-  if (fwrite(&additionalBlockID, 
++  if (fwrite(&additionalBlockID,
+       sizeof(additionalBlockID), 1, fp) != 1) {
+     return -1;
+   }
+   additionalBlockID = ntohl(additionalBlockID);
+ 
+   /* Write out the channel name and NULL terminator */
+-  if (fwrite(infoBlock->MARChannelID, 
++  if (fwrite(infoBlock->MARChannelID,
+       strlen(infoBlock->MARChannelID) + 1, 1, fp) != 1) {
+     return -1;
+   }
+ 
+   /* Write out the product version string and NULL terminator */
+-  if (fwrite(infoBlock->productVersion, 
++  if (fwrite(infoBlock->productVersion,
+       strlen(infoBlock->productVersion) + 1, 1, fp) != 1) {
+     return -1;
+   }
+ 
+   /* Write out the rest of the block that is unused */
+   unused = infoBlockSize - (sizeof(infoBlockSize) +
+                             sizeof(additionalBlockID) +
+-                            strlen(infoBlock->MARChannelID) + 
++                            strlen(infoBlock->MARChannelID) +
+                             strlen(infoBlock->productVersion) + 2);
+   memset(buf, 0, sizeof(buf));
+   if (fwrite(buf, unused, 1, fp) != 1) {
+     return -1;
+   }
+   return 0;
+ }
+ 
+-/** 
++/**
+  * Refreshes the product information block with the new information.
+  * The input MAR must not be signed or the function call will fail.
+- * 
++ *
+  * @param path             The path to the MAR file whose product info block
+  *                         should be refreshed.
+  * @param infoBlock        Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+ */
+ int
+ refresh_product_info_block(const char *path,
+                            struct ProductInformationBlock *infoBlock)
+ {
+   FILE *fp ;
+   int rv;
+   uint32_t numSignatures, additionalBlockSize, additionalBlockID,
+     offsetAdditionalBlocks, numAdditionalBlocks, i;
+   int additionalBlocks, hasSignatureBlock;
+   int64_t oldPos;
+ 
+-  rv = get_mar_file_info(path, 
++  rv = get_mar_file_info(path,
+                          &hasSignatureBlock,
+                          &numSignatures,
+                          &additionalBlocks,
+                          &offsetAdditionalBlocks,
+                          &numAdditionalBlocks);
+   if (rv) {
+     fprintf(stderr, "ERROR: Could not obtain MAR information.\n");
+     return -1;
+@@ -228,27 +228,27 @@ refresh_product_info_block(const char *p
+     return -1;
+   }
+ 
+   for (i = 0; i < numAdditionalBlocks; ++i) {
+     /* Get the position of the start of this block */
+     oldPos = ftello(fp);
+ 
+     /* Read the additional block size */
+-    if (fread(&additionalBlockSize, 
+-              sizeof(additionalBlockSize), 
++    if (fread(&additionalBlockSize,
++              sizeof(additionalBlockSize),
+               1, fp) != 1) {
+       fclose(fp);
+       return -1;
+     }
+     additionalBlockSize = ntohl(additionalBlockSize);
+ 
+     /* Read the additional block ID */
+-    if (fread(&additionalBlockID, 
+-              sizeof(additionalBlockID), 
++    if (fread(&additionalBlockID,
++              sizeof(additionalBlockID),
+               1, fp) != 1) {
+       fclose(fp);
+       return -1;
+     }
+     additionalBlockID = ntohl(additionalBlockID);
+ 
+     if (PRODUCT_INFO_BLOCK_ID == additionalBlockID) {
+       if (fseeko(fp, oldPos, SEEK_SET)) {
+@@ -286,21 +286,21 @@ refresh_product_info_block(const char *p
+  * @param dest      The path to the file to create.  This path must be
+  *                  compatible with fopen.
+  * @param numfiles  The number of files to store in the archive.
+  * @param files     The list of null-terminated file paths.  Each file
+  *                  path must be compatible with fopen.
+  * @param infoBlock The information to store in the product information block.
+  * @return A non-zero value if an error occurs.
+  */
+-int mar_create(const char *dest, int 
+-               num_files, char **files, 
++int mar_create(const char *dest, int
++               num_files, char **files,
+                struct ProductInformationBlock *infoBlock) {
+   struct MarItemStack stack;
+-  uint32_t offset_to_index = 0, size_of_index, 
++  uint32_t offset_to_index = 0, size_of_index,
+     numSignatures, numAdditionalSections;
+   uint64_t sizeOfEntireMAR = 0;
+   struct stat st;
+   FILE *fp;
+   int i, rv = -1;
+ 
+   memset(&stack, 0, sizeof(stack));
+ 
+@@ -310,37 +310,37 @@ int mar_create(const char *dest, int
+     return -1;
+   }
+ 
+   if (fwrite(MAR_ID, MAR_ID_SIZE, 1, fp) != 1)
+     goto failure;
+   if (fwrite(&offset_to_index, sizeof(uint32_t), 1, fp) != 1)
+     goto failure;
+ 
+-  stack.last_offset = MAR_ID_SIZE + 
++  stack.last_offset = MAR_ID_SIZE +
+                       sizeof(offset_to_index) +
+-                      sizeof(numSignatures) + 
++                      sizeof(numSignatures) +
+                       sizeof(numAdditionalSections) +
+                       sizeof(sizeOfEntireMAR);
+ 
+   /* We will circle back on this at the end of the MAR creation to fill it */
+   if (fwrite(&sizeOfEntireMAR, sizeof(sizeOfEntireMAR), 1, fp) != 1) {
+     goto failure;
+   }
+ 
+   /* Write out the number of signatures, for now only at most 1 is supported */
+   numSignatures = 0;
+   if (fwrite(&numSignatures, sizeof(numSignatures), 1, fp) != 1) {
+     goto failure;
+   }
+ 
+-  /* Write out the number of additional sections, for now just 1 
++  /* Write out the number of additional sections, for now just 1
+      for the product info block */
+   numAdditionalSections = htonl(1);
+-  if (fwrite(&numAdditionalSections, 
++  if (fwrite(&numAdditionalSections,
+              sizeof(numAdditionalSections), 1, fp) != 1) {
+     goto failure;
+   }
+   numAdditionalSections = ntohl(numAdditionalSections);
+ 
+   if (mar_concat_product_info_block(fp, &stack, infoBlock)) {
+     goto failure;
+   }
+@@ -361,39 +361,39 @@ int mar_create(const char *dest, int
+ 
+   /* write out the index (prefixed with length of index) */
+   size_of_index = htonl(stack.size_used);
+   if (fwrite(&size_of_index, sizeof(size_of_index), 1, fp) != 1)
+     goto failure;
+   if (fwrite(stack.head, stack.size_used, 1, fp) != 1)
+     goto failure;
+ 
+-  /* To protect against invalid MAR files, we assumes that the MAR file 
++  /* To protect against invalid MAR files, we assumes that the MAR file
+      size is less than or equal to MAX_SIZE_OF_MAR_FILE. */
+   if (ftell(fp) > MAX_SIZE_OF_MAR_FILE) {
+     goto failure;
+   }
+ 
+   /* write out offset to index file in network byte order */
+   offset_to_index = htonl(stack.last_offset);
+   if (fseek(fp, MAR_ID_SIZE, SEEK_SET))
+     goto failure;
+   if (fwrite(&offset_to_index, sizeof(offset_to_index), 1, fp) != 1)
+     goto failure;
+   offset_to_index = ntohl(stack.last_offset);
+-  
++
+   sizeOfEntireMAR = ((uint64_t)stack.last_offset) +
+                     stack.size_used +
+                     sizeof(size_of_index);
+   sizeOfEntireMAR = HOST_TO_NETWORK64(sizeOfEntireMAR);
+   if (fwrite(&sizeOfEntireMAR, sizeof(sizeOfEntireMAR), 1, fp) != 1)
+     goto failure;
+   sizeOfEntireMAR = NETWORK_TO_HOST64(sizeOfEntireMAR);
+ 
+   rv = 0;
+-failure: 
++failure:
+   if (stack.head)
+     free(stack.head);
+   fclose(fp);
+   if (rv)
+     remove(dest);
+   return rv;
+ }
+diff --git a/modules/libmar/src/mar_private.h b/modules/libmar/src/mar_private.h
+--- a/modules/libmar/src/mar_private.h
++++ b/modules/libmar/src/mar_private.h
+@@ -12,51 +12,51 @@
+ #include <stdint.h>
+ 
+ #define BLOCKSIZE 4096
+ #define ROUND_UP(n, incr) (((n) / (incr) + 1) * (incr))
+ 
+ #define MAR_ID "MAR1"
+ #define MAR_ID_SIZE 4
+ 
+-/* The signature block comes directly after the header block 
++/* The signature block comes directly after the header block
+    which is 16 bytes */
+ #define SIGNATURE_BLOCK_OFFSET 16
+ 
+ /* Make sure the file is less than 500MB.  We do this to protect against
+    invalid MAR files. */
+ #define MAX_SIZE_OF_MAR_FILE ((int64_t)524288000)
+ 
+ /* Existing code makes assumptions that the file size is
+    smaller than LONG_MAX. */
+ MOZ_STATIC_ASSERT(MAX_SIZE_OF_MAR_FILE < ((int64_t)LONG_MAX),
+                   "max mar file size is too big");
+ 
+-/* We store at most the size up to the signature block + 4 
++/* We store at most the size up to the signature block + 4
+    bytes per BLOCKSIZE bytes */
+ MOZ_STATIC_ASSERT(sizeof(BLOCKSIZE) < \
+                   (SIGNATURE_BLOCK_OFFSET + sizeof(uint32_t)),
+                   "BLOCKSIZE is too big");
+ 
+ /* The maximum size of any signature supported by current and future
+    implementations of the signmar program. */
+ #define MAX_SIGNATURE_LENGTH 2048
+ 
+-/* Each additional block has a unique ID.  
++/* Each additional block has a unique ID.
+    The product information block has an ID of 1. */
+ #define PRODUCT_INFO_BLOCK_ID 1
+ 
+ #define MAR_ITEM_SIZE(namelen) (3*sizeof(uint32_t) + (namelen) + 1)
+ 
+ /* Product Information Block (PIB) constants */
+ #define PIB_MAX_MAR_CHANNEL_ID_SIZE 63
+ #define PIB_MAX_PRODUCT_VERSION_SIZE 31
+ 
+ /* The mar program is compiled as a host bin so we don't have access to NSPR at
+-   runtime.  For that reason we use ntohl, htonl, and define HOST_TO_NETWORK64 
++   runtime.  For that reason we use ntohl, htonl, and define HOST_TO_NETWORK64
+    instead of the NSPR equivalents. */
+ #ifdef XP_WIN
+ #include <winsock2.h>
+ #define ftello _ftelli64
+ #define fseeko _fseeki64
+ #else
+ #define _FILE_OFFSET_BITS 64
+ #include <netinet/in.h>
+diff --git a/modules/libmar/src/mar_read.c b/modules/libmar/src/mar_read.c
+--- a/modules/libmar/src/mar_read.c
++++ b/modules/libmar/src/mar_read.c
+@@ -28,17 +28,17 @@ static uint32_t mar_hash_name(const char
+ 
+   return val % TABLESIZE;
+ }
+ 
+ static int mar_insert_item(MarFile *mar, const char *name, int namelen,
+                            uint32_t offset, uint32_t length, uint32_t flags) {
+   MarItem *item, *root;
+   uint32_t hash;
+-  
++
+   item = (MarItem *) malloc(sizeof(MarItem) + namelen);
+   if (!item)
+     return -1;
+   item->next = NULL;
+   item->offset = offset;
+   item->length = length;
+   item->flags = flags;
+   memcpy(item->name, name, namelen + 1);
+@@ -218,33 +218,33 @@ void mar_close(MarFile *mar) {
+  *
+  * @param fp                     An opened MAR file in read mode.
+  * @param hasSignatureBlock      Optional out parameter specifying if the MAR
+  *                               file has a signature block or not.
+  * @param numSignatures          Optional out parameter for storing the number
+  *                               of signatures in the MAR file.
+  * @param hasAdditionalBlocks    Optional out parameter specifying if the MAR
+  *                               file has additional blocks or not.
+- * @param offsetAdditionalBlocks Optional out parameter for the offset to the 
++ * @param offsetAdditionalBlocks Optional out parameter for the offset to the
+  *                               first additional block. Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @param numAdditionalBlocks    Optional out parameter for the number of
+  *                               additional blocks.  Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @return 0 on success and non-zero on failure.
+  */
+-int get_mar_file_info_fp(FILE *fp, 
++int get_mar_file_info_fp(FILE *fp,
+                          int *hasSignatureBlock,
+                          uint32_t *numSignatures,
+                          int *hasAdditionalBlocks,
+                          uint32_t *offsetAdditionalBlocks,
+                          uint32_t *numAdditionalBlocks)
+ {
+   uint32_t offsetToIndex, offsetToContent, signatureCount, signatureLen, i;
+-  
++
+   /* One of hasSignatureBlock or hasAdditionalBlocks must be non NULL */
+   if (!hasSignatureBlock && !hasAdditionalBlocks) {
+     return -1;
+   }
+ 
+ 
+   /* Skip to the start of the offset index */
+   if (fseek(fp, MAR_ID_SIZE, SEEK_SET)) {
+@@ -265,18 +265,18 @@ int get_mar_file_info_fp(FILE *fp,
+ 
+     /* Read the number of signatures field */
+     if (fread(numSignatures, sizeof(*numSignatures), 1, fp) != 1) {
+       return -1;
+     }
+     *numSignatures = ntohl(*numSignatures);
+   }
+ 
+-  /* Skip to the first index entry past the index size field 
+-     We do it in 2 calls because offsetToIndex + sizeof(uint32_t) 
++  /* Skip to the first index entry past the index size field
++     We do it in 2 calls because offsetToIndex + sizeof(uint32_t)
+      could oerflow in theory. */
+   if (fseek(fp, offsetToIndex, SEEK_SET)) {
+     return -1;
+   }
+ 
+   if (fseek(fp, sizeof(uint32_t), SEEK_CUR)) {
+     return -1;
+   }
+@@ -291,17 +291,17 @@ int get_mar_file_info_fp(FILE *fp,
+   if (hasSignatureBlock) {
+     if (offsetToContent == MAR_ID_SIZE + sizeof(uint32_t)) {
+       *hasSignatureBlock = 0;
+     } else {
+       *hasSignatureBlock = 1;
+     }
+   }
+ 
+-  /* If the caller doesn't care about the product info block 
++  /* If the caller doesn't care about the product info block
+      value, then just return */
+   if (!hasAdditionalBlocks) {
+     return 0;
+   }
+ 
+    /* Skip to the start of the signature block */
+   if (fseeko(fp, SIGNATURE_BLOCK_OFFSET, SEEK_SET)) {
+     return -1;
+@@ -346,102 +346,102 @@ int get_mar_file_info_fp(FILE *fp,
+       if (fread(numAdditionalBlocks, sizeof(uint32_t), 1, fp) != 1) {
+         return -1;
+       }
+       *numAdditionalBlocks = ntohl(*numAdditionalBlocks);
+       if (offsetAdditionalBlocks) {
+         *offsetAdditionalBlocks = ftell(fp);
+       }
+     } else if (offsetAdditionalBlocks) {
+-      /* numAdditionalBlocks is not specified but offsetAdditionalBlocks 
++      /* numAdditionalBlocks is not specified but offsetAdditionalBlocks
+          is, so fill it! */
+       *offsetAdditionalBlocks = ftell(fp) + sizeof(uint32_t);
+     }
+   }
+ 
+   return 0;
+ }
+ 
+-/** 
++/**
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+ */
+ int
+-read_product_info_block(char *path, 
++read_product_info_block(char *path,
+                         struct ProductInformationBlock *infoBlock)
+ {
+   int rv;
+   MarFile mar;
+   mar.fp = fopen(path, "rb");
+   if (!mar.fp) {
+     fprintf(stderr, "ERROR: could not open file in read_product_info_block()\n");
+     perror(path);
+     return -1;
+   }
+   rv = mar_read_product_info_block(&mar, infoBlock);
+   fclose(mar.fp);
+   return rv;
+ }
+ 
+-/** 
++/**
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+ */
+ int
+-mar_read_product_info_block(MarFile *mar, 
++mar_read_product_info_block(MarFile *mar,
+                             struct ProductInformationBlock *infoBlock)
+ {
+   uint32_t i, offsetAdditionalBlocks, numAdditionalBlocks,
+     additionalBlockSize, additionalBlockID;
+   int hasAdditionalBlocks;
+ 
+-  /* The buffer size is 97 bytes because the MAR channel name < 64 bytes, and 
++  /* The buffer size is 97 bytes because the MAR channel name < 64 bytes, and
+      product version < 32 bytes + 3 NULL terminator bytes. */
+   char buf[97] = { '\0' };
+   if (get_mar_file_info_fp(mar->fp, NULL, NULL,
+                            &hasAdditionalBlocks,
+                            &offsetAdditionalBlocks,
+                            &numAdditionalBlocks) != 0) {
+     return -1;
+   }
+   for (i = 0; i < numAdditionalBlocks; ++i) {
+     /* Read the additional block size */
+-    if (fread(&additionalBlockSize, 
+-              sizeof(additionalBlockSize), 
++    if (fread(&additionalBlockSize,
++              sizeof(additionalBlockSize),
+               1, mar->fp) != 1) {
+       return -1;
+     }
+-    additionalBlockSize = ntohl(additionalBlockSize) - 
+-                          sizeof(additionalBlockSize) - 
++    additionalBlockSize = ntohl(additionalBlockSize) -
++                          sizeof(additionalBlockSize) -
+                           sizeof(additionalBlockID);
+ 
+     /* Read the additional block ID */
+-    if (fread(&additionalBlockID, 
+-              sizeof(additionalBlockID), 
++    if (fread(&additionalBlockID,
++              sizeof(additionalBlockID),
+               1, mar->fp) != 1) {
+       return -1;
+     }
+     additionalBlockID = ntohl(additionalBlockID);
+ 
+     if (PRODUCT_INFO_BLOCK_ID == additionalBlockID) {
+       const char *location;
+       int len;
+ 
+       /* This block must be at most 104 bytes.
+-         MAR channel name < 64 bytes, and product version < 32 bytes + 3 NULL 
+-         terminator bytes. We only check for 96 though because we remove 8 
+-         bytes above from the additionalBlockSize: We subtract 
++         MAR channel name < 64 bytes, and product version < 32 bytes + 3 NULL
++         terminator bytes. We only check for 96 though because we remove 8
++         bytes above from the additionalBlockSize: We subtract
+          sizeof(additionalBlockSize) and sizeof(additionalBlockID) */
+       if (additionalBlockSize > 96) {
+         return -1;
+       }
+ 
+     if (fread(buf, additionalBlockSize, 1, mar->fp) != 1) {
+         return -1;
+       }
+@@ -461,19 +461,19 @@ mar_read_product_info_block(MarFile *mar
+       /* Extract the version from the buffer */
+       len = strlen(location);
+       infoBlock->productVersion = location;
+       if (len >= 32) {
+         infoBlock->MARChannelID = NULL;
+         infoBlock->productVersion = NULL;
+         return -1;
+       }
+-      infoBlock->MARChannelID = 
++      infoBlock->MARChannelID =
+         strdup(infoBlock->MARChannelID);
+-      infoBlock->productVersion = 
++      infoBlock->productVersion =
+         strdup(infoBlock->productVersion);
+       return 0;
+     } else {
+       /* This is not the additional block you're looking for. Move along. */
+       if (fseek(mar->fp, additionalBlockSize, SEEK_CUR)) {
+         return -1;
+       }
+     }
+@@ -553,38 +553,38 @@ int mar_read(MarFile *mar, const MarItem
+  *
+  * @param path                   The path of the MAR file to check.
+  * @param hasSignatureBlock      Optional out parameter specifying if the MAR
+  *                               file has a signature block or not.
+  * @param numSignatures          Optional out parameter for storing the number
+  *                               of signatures in the MAR file.
+  * @param hasAdditionalBlocks    Optional out parameter specifying if the MAR
+  *                               file has additional blocks or not.
+- * @param offsetAdditionalBlocks Optional out parameter for the offset to the 
++ * @param offsetAdditionalBlocks Optional out parameter for the offset to the
+  *                               first additional block. Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @param numAdditionalBlocks    Optional out parameter for the number of
+  *                               additional blocks.  Value is only valid if
+  *                               has_additional_blocks is not equal to 0.
+  * @return 0 on success and non-zero on failure.
+  */
+-int get_mar_file_info(const char *path, 
++int get_mar_file_info(const char *path,
+                       int *hasSignatureBlock,
+                       uint32_t *numSignatures,
+                       int *hasAdditionalBlocks,
+                       uint32_t *offsetAdditionalBlocks,
+                       uint32_t *numAdditionalBlocks)
+ {
+   int rv;
+   FILE *fp = fopen(path, "rb");
+   if (!fp) {
+     fprintf(stderr, "ERROR: could not open file in get_mar_file_info()\n");
+     perror(path);
+     return -1;
+   }
+ 
+-  rv = get_mar_file_info_fp(fp, hasSignatureBlock, 
++  rv = get_mar_file_info_fp(fp, hasSignatureBlock,
+                             numSignatures, hasAdditionalBlocks,
+                             offsetAdditionalBlocks, numAdditionalBlocks);
+ 
+   fclose(fp);
+   return rv;
+ }
+diff --git a/modules/libmar/tool/mar.c b/modules/libmar/tool/mar.c
+--- a/modules/libmar/tool/mar.c
++++ b/modules/libmar/tool/mar.c
+@@ -23,17 +23,17 @@
+ #include "nss.h"
+ #include "pk11pub.h"
+ int NSSInitCryptoContext(const char *NSSConfigDir);
+ #endif
+ 
+ int mar_repackage_and_sign(const char *NSSConfigDir,
+                            const char * const *certNames,
+                            uint32_t certCount,
+-                           const char *src, 
++                           const char *src,
+                            const char * dest);
+ 
+ static void print_version() {
+   printf("Version: %s\n", MOZ_APP_VERSION);
+   printf("Default Channel ID: %s\n", MAR_CHANNEL_ID);
+ }
+ 
+ static void print_usage() {
+@@ -87,18 +87,18 @@ static void print_usage() {
+   printf("  mar [-H MARChannelID] [-V ProductVersion] [-C workingDir] "
+          "-i unsigned_archive_to_refresh.mar\n");
+ 
+   printf("Print executable version:\n");
+   printf("  mar --version\n");
+   printf("This program does not handle unicode file paths properly\n");
+ }
+ 
+-static int mar_test_callback(MarFile *mar, 
+-                             const MarItem *item, 
++static int mar_test_callback(MarFile *mar,
++                             const MarItem *item,
+                              void *unused) {
+   printf("%u\t0%o\t%s\n", item->length, item->flags, item->name);
+   return 0;
+ }
+ 
+ static int mar_test(const char *path) {
+   MarFile *mar;
+ 
+@@ -252,28 +252,28 @@ int main(int argc, char **argv) {
+     infoBlock.MARChannelID = MARChannelID;
+     infoBlock.productVersion = productVersion;
+     return refresh_product_info_block(argv[2], &infoBlock);
+   }
+   case 'T': {
+     struct ProductInformationBlock infoBlock;
+     uint32_t numSignatures, numAdditionalBlocks;
+     int hasSignatureBlock, hasAdditionalBlock;
+-    if (!get_mar_file_info(argv[2], 
++    if (!get_mar_file_info(argv[2],
+                            &hasSignatureBlock,
+                            &numSignatures,
+-                           &hasAdditionalBlock, 
++                           &hasAdditionalBlock,
+                            NULL, &numAdditionalBlocks)) {
+       if (hasSignatureBlock) {
+-        printf("Signature block found with %d signature%s\n", 
+-               numSignatures, 
++        printf("Signature block found with %d signature%s\n",
++               numSignatures,
+                numSignatures != 1 ? "s" : "");
+       }
+       if (hasAdditionalBlock) {
+-        printf("%d additional block%s found:\n", 
++        printf("%d additional block%s found:\n",
+                numAdditionalBlocks,
+                numAdditionalBlocks != 1 ? "s" : "");
+       }
+ 
+       rv = read_product_info_block(argv[2], &infoBlock);
+       if (!rv) {
+         printf("  - Product Information Block:\n");
+         printf("    - MAR channel name: %s\n"
+@@ -390,17 +390,17 @@ int main(int argc, char **argv) {
+ #else
+       /* certBuffers[k] is owned by certs[k] so don't free it */
+       CERT_DestroyCertificate(certs[k]);
+ #endif
+     }
+     if (rv) {
+       /* Determine if the source MAR file has the new fields for signing */
+       int hasSignatureBlock;
+-      if (get_mar_file_info(argv[2], &hasSignatureBlock, 
++      if (get_mar_file_info(argv[2], &hasSignatureBlock,
+                             NULL, NULL, NULL, NULL)) {
+         fprintf(stderr, "ERROR: could not determine if MAR is old or new.\n");
+       } else if (!hasSignatureBlock) {
+         fprintf(stderr, "ERROR: The MAR file is in the old format so has"
+                         " no signature to verify.\n");
+       }
+     }
+ #if (!defined(XP_WIN) && !defined(XP_MACOSX)) || defined(MAR_NSS)
+diff --git a/modules/libmar/verify/cryptox.c b/modules/libmar/verify/cryptox.c
+--- a/modules/libmar/verify/cryptox.c
++++ b/modules/libmar/verify/cryptox.c
+@@ -9,19 +9,19 @@
+ #endif
+ 
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include "cryptox.h"
+ 
+ #if defined(MAR_NSS)
+ 
+-/** 
++/**
+  * Loads the public key for the specified cert name from the NSS store.
+- * 
++ *
+  * @param certData  The DER-encoded X509 certificate to extract the key from.
+  * @param certDataSize The size of certData.
+  * @param publicKey Out parameter for the public key to use.
+  * @return CryptoX_Success on success, CryptoX_Error on error.
+ */
+ CryptoX_Result
+ NSS_LoadPublicKey(const unsigned char *certData, unsigned int certDataSize,
+                   SECKEYPublicKey **publicKey)
+@@ -44,33 +44,33 @@ NSS_LoadPublicKey(const unsigned char *c
+ 
+   if (!*publicKey) {
+     return CryptoX_Error;
+   }
+   return CryptoX_Success;
+ }
+ 
+ CryptoX_Result
+-NSS_VerifyBegin(VFYContext **ctx, 
++NSS_VerifyBegin(VFYContext **ctx,
+                 SECKEYPublicKey * const *publicKey)
+ {
+   SECStatus status;
+   if (!ctx || !publicKey || !*publicKey) {
+     return CryptoX_Error;
+   }
+ 
+   /* Check that the key length is large enough for our requirements */
+-  if ((SECKEY_PublicKeyStrength(*publicKey) * 8) < 
++  if ((SECKEY_PublicKeyStrength(*publicKey) * 8) <
+       XP_MIN_SIGNATURE_LEN_IN_BYTES) {
+-    fprintf(stderr, "ERROR: Key length must be >= %d bytes\n", 
++    fprintf(stderr, "ERROR: Key length must be >= %d bytes\n",
+             XP_MIN_SIGNATURE_LEN_IN_BYTES);
+     return CryptoX_Error;
+   }
+ 
+-  *ctx = VFY_CreateContext(*publicKey, NULL, 
++  *ctx = VFY_CreateContext(*publicKey, NULL,
+                            SEC_OID_PKCS1_SHA384_WITH_RSA_ENCRYPTION, NULL);
+   if (*ctx == NULL) {
+     return CryptoX_Error;
+   }
+ 
+   status = VFY_Begin(*ctx);
+   return SECSuccess == status ? CryptoX_Success : CryptoX_Error;
+ }
+@@ -79,18 +79,18 @@ NSS_VerifyBegin(VFYContext **ctx,
+  * Verifies if a verify context matches the passed in signature.
+  *
+  * @param ctx          The verify context that the signature should match.
+  * @param signature    The signature to match.
+  * @param signatureLen The length of the signature.
+  * @return CryptoX_Success on success, CryptoX_Error on error.
+ */
+ CryptoX_Result
+-NSS_VerifySignature(VFYContext * const *ctx, 
+-                    const unsigned char *signature, 
++NSS_VerifySignature(VFYContext * const *ctx,
++                    const unsigned char *signature,
+                     unsigned int signatureLen)
+ {
+   SECItem signedItem;
+   SECStatus status;
+   if (!ctx || !signature || !*ctx) {
+     return CryptoX_Error;
+   }
+ 
+@@ -108,131 +108,131 @@ NSS_VerifySignature(VFYContext * const *
+  * @param pubKey    The public key to use on the signature.
+  * @param signature The signature to check.
+  * @param signatureLen The length of the signature.
+  * @return CryptoX_Success on success, CryptoX_Error on error.
+ */
+ CryptoX_Result
+ CryptoAPI_VerifySignature(HCRYPTHASH *hash,
+                           HCRYPTKEY *pubKey,
+-                          const BYTE *signature, 
++                          const BYTE *signature,
+                           DWORD signatureLen)
+ {
+   DWORD i;
+   BOOL result;
+-/* Windows APIs expect the bytes in the signature to be in little-endian 
+- * order, but we write the signature in big-endian order.  Other APIs like 
++/* Windows APIs expect the bytes in the signature to be in little-endian
++ * order, but we write the signature in big-endian order.  Other APIs like
+  * NSS and OpenSSL expect big-endian order.
+  */
+   BYTE *signatureReversed;
+   if (!hash || !pubKey || !signature || signatureLen < 1) {
+     return CryptoX_Error;
+   }
+ 
+   signatureReversed = malloc(signatureLen);
+   if (!signatureReversed) {
+     return CryptoX_Error;
+   }
+ 
+   for (i = 0; i < signatureLen; i++) {
+-    signatureReversed[i] = signature[signatureLen - 1 - i]; 
++    signatureReversed[i] = signature[signatureLen - 1 - i];
+   }
+   result = CryptVerifySignature(*hash, signatureReversed,
+                                 signatureLen, *pubKey, NULL, 0);
+   free(signatureReversed);
+   return result ? CryptoX_Success : CryptoX_Error;
+ }
+ 
+-/** 
++/**
+  * Obtains the public key for the passed in cert data
+- * 
++ *
+  * @param provider       The crypto provider
+  * @param certData       Data of the certificate to extract the public key from
+  * @param sizeOfCertData The size of the certData buffer
+  * @param certStore      Pointer to the handle of the certificate store to use
+  * @param CryptoX_Success on success
+ */
+ CryptoX_Result
+-CryptoAPI_LoadPublicKey(HCRYPTPROV provider, 
++CryptoAPI_LoadPublicKey(HCRYPTPROV provider,
+                         BYTE *certData,
+                         DWORD sizeOfCertData,
+                         HCRYPTKEY *publicKey)
+ {
+   CRYPT_DATA_BLOB blob;
+   CERT_CONTEXT *context;
+   if (!provider || !certData || !publicKey) {
+     return CryptoX_Error;
+   }
+ 
+   blob.cbData = sizeOfCertData;
+   blob.pbData = certData;
+-  if (!CryptQueryObject(CERT_QUERY_OBJECT_BLOB, &blob, 
+-                        CERT_QUERY_CONTENT_FLAG_CERT, 
+-                        CERT_QUERY_FORMAT_FLAG_BINARY, 
+-                        0, NULL, NULL, NULL, 
++  if (!CryptQueryObject(CERT_QUERY_OBJECT_BLOB, &blob,
++                        CERT_QUERY_CONTENT_FLAG_CERT,
++                        CERT_QUERY_FORMAT_FLAG_BINARY,
++                        0, NULL, NULL, NULL,
+                         NULL, NULL, (const void **)&context)) {
+     return CryptoX_Error;
+   }
+ 
+-  if (!CryptImportPublicKeyInfo(provider, 
++  if (!CryptImportPublicKeyInfo(provider,
+                                 PKCS_7_ASN_ENCODING | X509_ASN_ENCODING,
+                                 &context->pCertInfo->SubjectPublicKeyInfo,
+                                 publicKey)) {
+     CertFreeCertificateContext(context);
+     return CryptoX_Error;
+   }
+ 
+   CertFreeCertificateContext(context);
+   return CryptoX_Success;
+ }
+ 
+ /* Try to acquire context in this way:
+   * 1. Enhanced provider without creating a new key set
+   * 2. Enhanced provider with creating a new key set
+   * 3. Default provider without creating a new key set
+   * 4. Default provider with creating a new key set
+-  * #2 and #4 should not be needed because of the CRYPT_VERIFYCONTEXT, 
++  * #2 and #4 should not be needed because of the CRYPT_VERIFYCONTEXT,
+   * but we add it just in case.
+   *
+   * @param provider Out parameter containing the provider handle.
+   * @return CryptoX_Success on success, CryptoX_Error on error.
+  */
+ CryptoX_Result
+ CryptoAPI_InitCryptoContext(HCRYPTPROV *provider)
+ {
+-  if (!CryptAcquireContext(provider, 
+-                           NULL, 
+-                           MS_ENH_RSA_AES_PROV, 
+-                           PROV_RSA_AES, 
++  if (!CryptAcquireContext(provider,
++                           NULL,
++                           MS_ENH_RSA_AES_PROV,
++                           PROV_RSA_AES,
+                            CRYPT_VERIFYCONTEXT)) {
+-    if (!CryptAcquireContext(provider, 
+-                             NULL, 
+-                             MS_ENH_RSA_AES_PROV, 
+-                             PROV_RSA_AES, 
++    if (!CryptAcquireContext(provider,
++                             NULL,
++                             MS_ENH_RSA_AES_PROV,
++                             PROV_RSA_AES,
+                              CRYPT_NEWKEYSET | CRYPT_VERIFYCONTEXT)) {
+-      if (!CryptAcquireContext(provider, 
+-                               NULL, 
+-                               NULL, 
+-                               PROV_RSA_AES, 
++      if (!CryptAcquireContext(provider,
++                               NULL,
++                               NULL,
++                               PROV_RSA_AES,
+                                CRYPT_VERIFYCONTEXT)) {
+-        if (!CryptAcquireContext(provider, 
+-                                 NULL, 
+-                                 NULL, 
+-                                 PROV_RSA_AES, 
++        if (!CryptAcquireContext(provider,
++                                 NULL,
++                                 NULL,
++                                 PROV_RSA_AES,
+                                  CRYPT_NEWKEYSET | CRYPT_VERIFYCONTEXT)) {
+           *provider = CryptoX_InvalidHandleValue;
+           return CryptoX_Error;
+         }
+       }
+     }
+   }
+   return CryptoX_Success;
+ }
+ 
+-/** 
++/**
+   * Begins a signature verification hash context
+   *
+   * @param provider The crypt provider to use
+   * @param hash     Out parameter for a handle to the hash context
+   * @return CryptoX_Success on success, CryptoX_Error on error.
+ */
+ CryptoX_Result
+ CryptoAPI_VerifyBegin(HCRYPTPROV provider, HCRYPTHASH* hash)
+@@ -243,17 +243,17 @@ CryptoAPI_VerifyBegin(HCRYPTPROV provide
+   }
+ 
+   *hash = (HCRYPTHASH)NULL;
+   result = CryptCreateHash(provider, CALG_SHA_384,
+                            0, 0, hash);
+   return result ? CryptoX_Success : CryptoX_Error;
+ }
+ 
+-/** 
++/**
+   * Updates a signature verification hash context
+   *
+   * @param hash The hash context to update
+   * @param buf  The buffer to update the hash context with
+   * @param len The size of the passed in buffer
+   * @return CryptoX_Success on success, CryptoX_Error on error.
+ */
+ CryptoX_Result
+diff --git a/modules/libmar/verify/cryptox.h b/modules/libmar/verify/cryptox.h
+--- a/modules/libmar/verify/cryptox.h
++++ b/modules/libmar/verify/cryptox.h
+@@ -99,32 +99,32 @@ void CryptoMac_FreePublicKey(CryptoX_Pub
+                                 aSignatureLen) \
+   CryptoMac_VerifySignature(aInputData, aPublicKey, aSignature, aSignatureLen)
+ #define CryptoX_FreeSignatureHandle(aInputData) \
+   CryptoMac_FreeSignatureHandle(aInputData)
+ #define CryptoX_FreePublicKey(aPublicKey) \
+   CryptoMac_FreePublicKey(aPublicKey)
+ #define CryptoX_FreeCertificate(aCertificate)
+ 
+-#elif defined(XP_WIN) 
++#elif defined(XP_WIN)
+ 
+ #include <windows.h>
+ #include <wincrypt.h>
+ 
+ CryptoX_Result CryptoAPI_InitCryptoContext(HCRYPTPROV *provider);
+-CryptoX_Result CryptoAPI_LoadPublicKey(HCRYPTPROV hProv, 
++CryptoX_Result CryptoAPI_LoadPublicKey(HCRYPTPROV hProv,
+                                        BYTE *certData,
+                                        DWORD sizeOfCertData,
+                                        HCRYPTKEY *publicKey);
+ CryptoX_Result CryptoAPI_VerifyBegin(HCRYPTPROV provider, HCRYPTHASH* hash);
+-CryptoX_Result CryptoAPI_VerifyUpdate(HCRYPTHASH* hash, 
++CryptoX_Result CryptoAPI_VerifyUpdate(HCRYPTHASH* hash,
+                                       BYTE *buf, DWORD len);
+ CryptoX_Result CryptoAPI_VerifySignature(HCRYPTHASH *hash,
+                                          HCRYPTKEY *pubKey,
+-                                         const BYTE *signature, 
++                                         const BYTE *signature,
+                                          DWORD signatureLen);
+ 
+ #define CryptoX_InvalidHandleValue ((ULONG_PTR)NULL)
+ #define CryptoX_ProviderHandle HCRYPTPROV
+ #define CryptoX_SignatureHandle HCRYPTHASH
+ #define CryptoX_PublicKey HCRYPTKEY
+ #define CryptoX_Certificate HCERTSTORE
+ #define CryptoX_InitCryptoProvider(CryptoHandle) \
+diff --git a/modules/libmar/verify/mar_verify.c b/modules/libmar/verify/mar_verify.c
+--- a/modules/libmar/verify/mar_verify.c
++++ b/modules/libmar/verify/mar_verify.c
+@@ -70,40 +70,40 @@ int mar_verify_signatures_for_fp(FILE *f
+                                  uint32_t *numVerified);
+ 
+ /**
+  * Reads the specified number of bytes from the file pointer and
+  * stores them in the passed buffer.
+  *
+  * @param  fp     The file pointer to read from.
+  * @param  buffer The buffer to store the read results.
+- * @param  size   The number of bytes to read, buffer must be 
++ * @param  size   The number of bytes to read, buffer must be
+  *                at least of this size.
+  * @param  ctxs   Pointer to the first element in an array of verify context.
+  * @param  count  The number of elements in ctxs
+  * @param  err    The name of what is being written to in case of error.
+  * @return  0 on success
+  *         -1 on read error
+  *         -2 on verify update error
+ */
+ int
+-ReadAndUpdateVerifyContext(FILE *fp, 
++ReadAndUpdateVerifyContext(FILE *fp,
+                            void *buffer,
+-                           uint32_t size, 
++                           uint32_t size,
+                            CryptoX_SignatureHandle *ctxs,
+                            uint32_t count,
+-                           const char *err) 
++                           const char *err)
+ {
+   uint32_t k;
+   if (!fp || !buffer || !ctxs || count == 0 || !err) {
+     fprintf(stderr, "ERROR: Invalid parameter specified.\n");
+     return CryptoX_Error;
+   }
+ 
+-  if (!size) { 
++  if (!size) {
+     return CryptoX_Success;
+   }
+ 
+   if (fread(buffer, size, 1, fp) != 1) {
+     fprintf(stderr, "ERROR: Could not read %s\n", err);
+     return CryptoX_Error;
+   }
+ 
+@@ -117,17 +117,17 @@ ReadAndUpdateVerifyContext(FILE *fp,
+ }
+ 
+ /**
+  * Verifies a MAR file by verifying each signature with the corresponding
+  * certificate. That is, the first signature will be verified using the first
+  * certificate given, the second signature will be verified using the second
+  * certificate given, etc. The signature count must exactly match the number of
+  * certificates given, and all signature verifications must succeed.
+- * 
++ *
+  * @param  mar            The file whose signature should be calculated
+  * @param  certData       Pointer to the first element in an array of
+  *                        certificate data
+  * @param  certDataSizes  Pointer to the first element in an array for size of
+  *                        the data stored
+  * @param  certCount      The number of elements in certData and certDataSizes
+  * @return 0 on success
+ */
+@@ -135,30 +135,30 @@ int
+ mar_verify_signatures(MarFile *mar,
+                       const uint8_t * const *certData,
+                       const uint32_t *certDataSizes,
+                       uint32_t certCount) {
+   int rv = -1;
+   CryptoX_ProviderHandle provider = CryptoX_InvalidHandleValue;
+   CryptoX_PublicKey keys[MAX_SIGNATURES];
+   uint32_t k;
+-  
++
+   memset(keys, 0, sizeof(keys));
+ 
+   if (!mar || !certData || !certDataSizes || certCount == 0) {
+     fprintf(stderr, "ERROR: Invalid parameter specified.\n");
+     goto failure;
+   }
+ 
+   if (!mar->fp) {
+     fprintf(stderr, "ERROR: MAR file is not open.\n");
+     goto failure;
+   }
+ 
+-  if (CryptoX_Failed(CryptoX_InitCryptoProvider(&provider))) { 
++  if (CryptoX_Failed(CryptoX_InitCryptoProvider(&provider))) {
+     fprintf(stderr, "ERROR: Could not init crytpo library.\n");
+     goto failure;
+   }
+ 
+   for (k = 0; k < certCount; ++k) {
+     if (CryptoX_Failed(CryptoX_LoadPublicKey(provider, certData[k], certDataSizes[k],
+                                              &keys[k]))) {
+       fprintf(stderr, "ERROR: Could not load public key.\n");
+@@ -201,18 +201,18 @@ mar_extract_and_verify_signatures_fp(FIL
+ 
+   memset(signatureAlgorithmIDs, 0, sizeof(signatureAlgorithmIDs));
+   memset(extractedSignatures, 0, sizeof(extractedSignatures));
+ 
+   if (!fp) {
+     fprintf(stderr, "ERROR: Invalid file pointer passed.\n");
+     return CryptoX_Error;
+   }
+-  
+-  /* To protect against invalid MAR files, we assumes that the MAR file 
++
++  /* To protect against invalid MAR files, we assumes that the MAR file
+      size is less than or equal to MAX_SIZE_OF_MAR_FILE. */
+   if (fseeko(fp, 0, SEEK_END)) {
+     fprintf(stderr, "ERROR: Could not seek to the end of the MAR file.\n");
+     return CryptoX_Error;
+   }
+   if (ftello(fp) > MAX_SIZE_OF_MAR_FILE) {
+     fprintf(stderr, "ERROR: MAR file is too large to be verified.\n");
+     return CryptoX_Error;
+@@ -241,17 +241,17 @@ mar_extract_and_verify_signatures_fp(FIL
+ 
+   for (i = 0; i < signatureCount; i++) {
+     /* Get the signature algorithm ID */
+     if (fread(&signatureAlgorithmIDs[i], sizeof(uint32_t), 1, fp) != 1) {
+       fprintf(stderr, "ERROR: Could not read signatures algorithm ID.\n");
+       return CryptoX_Error;
+     }
+     signatureAlgorithmIDs[i] = ntohl(signatureAlgorithmIDs[i]);
+-  
++
+     if (fread(&signatureLen, sizeof(uint32_t), 1, fp) != 1) {
+       fprintf(stderr, "ERROR: Could not read signatures length.\n");
+       return CryptoX_Error;
+     }
+     signatureLen = ntohl(signatureLen);
+ 
+     /* To protect against invalid input make sure the signature length
+        isn't too big. */
+@@ -314,17 +314,17 @@ mar_extract_and_verify_signatures_fp(FIL
+ }
+ 
+ /**
+  * Verifies a MAR file by verifying each signature with the corresponding
+  * certificate. That is, the first signature will be verified using the first
+  * certificate given, the second signature will be verified using the second
+  * certificate given, etc. The signature count must exactly match the number of
+  * certificates given, and all signature verifications must succeed.
+- * 
++ *
+  * @param  fp                   An opened MAR file handle
+  * @param  provider             A library provider
+  * @param  keys                 A pointer to the first element in an
+  *                              array of keys.
+  * @param  extractedSignatures  Pointer to the first element in an array
+  *                              of extracted signatures.
+  * @param  signatureCount       The number of signatures in the MAR file
+  * @param numVerified           Out parameter which will be filled with
+@@ -354,17 +354,17 @@ mar_verify_signatures_for_fp(FILE *fp,
+     fprintf(stderr, "ERROR: Invalid parameter specified.\n");
+     goto failure;
+   }
+ 
+   *numVerified = 0;
+ 
+   /* This function is only called when we have at least one signature,
+      but to protect against future people who call this function we
+-     make sure a non zero value is passed in. 
++     make sure a non zero value is passed in.
+    */
+   if (!signatureCount) {
+     fprintf(stderr, "ERROR: There must be at least one signature.\n");
+     goto failure;
+   }
+ 
+   for (i = 0; i < signatureCount; i++) {
+     if (CryptoX_Failed(CryptoX_VerifyBegin(provider,
+@@ -376,43 +376,43 @@ mar_verify_signatures_for_fp(FILE *fp,
+ 
+   /* Skip to the start of the file */
+   if (fseeko(fp, 0, SEEK_SET)) {
+     fprintf(stderr, "ERROR: Could not seek to start of the file\n");
+     goto failure;
+   }
+ 
+   /* Bytes 0-3: MAR1
+-     Bytes 4-7: index offset 
++     Bytes 4-7: index offset
+      Bytes 8-15: size of entire MAR
+    */
+-  if (CryptoX_Failed(ReadAndUpdateVerifyContext(fp, buf, 
++  if (CryptoX_Failed(ReadAndUpdateVerifyContext(fp, buf,
+                                                 SIGNATURE_BLOCK_OFFSET +
+                                                 sizeof(uint32_t),
+                                                 signatureHandles,
+                                                 signatureCount,
+                                                 "signature block"))) {
+     goto failure;
+   }
+ 
+   /* Read the signature block */
+   for (i = 0; i < signatureCount; i++) {
+     /* Get the signature algorithm ID */
+     if (CryptoX_Failed(ReadAndUpdateVerifyContext(fp,
+-                                                  &buf, 
++                                                  &buf,
+                                                   sizeof(uint32_t),
+                                                   signatureHandles,
+                                                   signatureCount,
+                                                   "signature algorithm ID"))) {
+       goto failure;
+     }
+ 
+-    if (CryptoX_Failed(ReadAndUpdateVerifyContext(fp, 
++    if (CryptoX_Failed(ReadAndUpdateVerifyContext(fp,
+                                                   &signatureLengths[i],
+-                                                  sizeof(uint32_t), 
++                                                  sizeof(uint32_t),
+                                                   signatureHandles,
+                                                   signatureCount,
+                                                   "signature length"))) {
+       goto failure;
+     }
+     signatureLengths[i] = ntohl(signatureLengths[i]);
+     if (signatureLengths[i] > MAX_SIGNATURE_LENGTH) {
+       fprintf(stderr, "ERROR: Embedded signature length is too large.\n");

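For reference, the libmar hunks above document the on-disk MAR layout (bytes 0-3 are the literal "MAR1" ID, bytes 4-7 the big-endian offset to the index, bytes 8-15 the size of the whole archive) and show how mar.c queries get_mar_file_info(). A minimal stand-alone sketch of reading those leading fields is given below; the check_mar_header() helper and its error handling are illustrative only and are not part of the patched sources.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>  /* ntohl(); a Windows build would use winsock2.h instead */

    /* Hypothetical helper: verify the 4-byte "MAR1" ID and read the big-endian
       offset to the index, mirroring the header layout described above. */
    static int check_mar_header(const char *path, uint32_t *offsetToIndex) {
      char id[4];
      FILE *fp = fopen(path, "rb");
      if (!fp) {
        return -1;
      }
      if (fread(id, sizeof(id), 1, fp) != 1 || memcmp(id, "MAR1", 4) != 0) {
        fclose(fp);
        return -1;
      }
      if (fread(offsetToIndex, sizeof(*offsetToIndex), 1, fp) != 1) {
        fclose(fp);
        return -1;
      }
      *offsetToIndex = ntohl(*offsetToIndex);  /* index offset is stored big-endian */
      fclose(fp);
      return 0;
    }

    int main(int argc, char **argv) {
      uint32_t offsetToIndex;
      if (argc < 2 || check_mar_header(argv[1], &offsetToIndex) != 0) {
        fprintf(stderr, "not a readable MAR file\n");
        return 1;
      }
      printf("offset to index: %u\n", offsetToIndex);
      return 0;
    }
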
+ 32 - 0
mozilla-release/patches/1497937-65a1.patch

@@ -0,0 +1,32 @@
+# HG changeset patch
+# User Alex Gaynor <agaynor@mozilla.com>
+# Date 1540313250 0
+# Node ID 02361691547a0220d9c08951e9d7a578507bd039
+# Parent  ebd90074177907cdae2a2ce6c97f3b80074e1087
+Bug 1497937 - fixed a deprecation warning due to including an older header; r=rstrong
+
+Differential Revision: https://phabricator.services.mozilla.com/D8561
+
+diff --git a/modules/libmar/sign/nss_secutil.h b/modules/libmar/sign/nss_secutil.h
+--- a/modules/libmar/sign/nss_secutil.h
++++ b/modules/libmar/sign/nss_secutil.h
+@@ -8,17 +8,17 @@
+ #ifndef NSS_SECUTIL_H_
+ #define NSS_SECUTIL_H_
+ 
+ #include "nss.h"
+ #include "pk11pub.h"
+ #include "cryptohi.h"
+ #include "hasht.h"
+ #include "cert.h"
+-#include "key.h"
++#include "keyhi.h"
+ #include <stdint.h>
+ 
+ typedef struct {
+   enum {
+     PW_NONE = 0,
+     PW_FROMFILE = 1,
+     PW_PLAINTEXT = 2,
+     PW_EXTERNAL = 3
+

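The include swap above works because key.h in the NSS versions of that era is just a deprecated forwarding header (it emits the warning and then pulls in keyhi.h), so callers can include keyhi.h directly and keep using the same high-level key routines. A compile-only sketch, assuming NSS is initialised and a public key has been obtained elsewhere; the helper name and the minimum-size policy are made up for illustration:

    #include "keyhi.h"  /* replaces the deprecated key.h shim */

    /* Hypothetical check modelled on the key-strength test in
       modules/libmar/verify/cryptox.c: reject keys smaller than minBytes.
       Caller keeps ownership; release with SECKEY_DestroyPublicKey() when done. */
    static int key_is_strong_enough(SECKEYPublicKey *publicKey, unsigned int minBytes) {
      if (!publicKey) {
        return 0;
      }
      return SECKEY_PublicKeyStrength(publicKey) >= minBytes;  /* strength is reported in bytes */
    }
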
+ 579 - 0
mozilla-release/patches/1508782-66a1.patch

@@ -0,0 +1,579 @@
+# HG changeset patch
+# User June Wilde <jewilde@mozilla.com>
+# Date 1544468862 0
+# Node ID 772261da44428f4c4fa0697bed7b953c9654ae90
+# Parent  4b7d29632072810e37a18c348058b40b971edf5f
+Bug 1508782 - Add moz.yaml for bspatch in toolkit/mozapps/update; r=mhowell
+
+Moves bspatch.h and bspatch.cpp into new folder
+Adds LICENSE, moz.yaml, and moz.build for bspatch
+Alters bsdiff and updater build files to account for the new location of bspatch
+Renames toolkit/mozapps/update/common/errors.h to toolkit/mozapps/update/common/updatererrors.h because it was
+breaking Windows builds. It collided with MSVCRT's exported errors.h after being added to the export list for
+the 'updatercommon' library
+
+Differential Revision: https://phabricator.services.mozilla.com/D13735
+
+diff --git a/toolkit/components/maintenanceservice/serviceinstall.cpp b/toolkit/components/maintenanceservice/serviceinstall.cpp
+--- a/toolkit/components/maintenanceservice/serviceinstall.cpp
++++ b/toolkit/components/maintenanceservice/serviceinstall.cpp
+@@ -13,17 +13,17 @@
+ #include <nsWindowsHelpers.h>
+ #include "mozilla/UniquePtr.h"
+ 
+ #include "serviceinstall.h"
+ #include "servicebase.h"
+ #include "updatehelper.h"
+ #include "shellapi.h"
+ #include "readstrings.h"
+-#include "errors.h"
++#include "updatererrors.h"
+ 
+ #pragma comment(lib, "version.lib")
+ 
+ // This uninstall key is defined originally in maintenanceservice_installer.nsi
+ #define MAINT_UNINSTALL_KEY L"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\MozillaMaintenanceService"
+ 
+ static BOOL
+ UpdateUninstallerVersionString(LPWSTR versionString)
+diff --git a/toolkit/components/maintenanceservice/workmonitor.cpp b/toolkit/components/maintenanceservice/workmonitor.cpp
+--- a/toolkit/components/maintenanceservice/workmonitor.cpp
++++ b/toolkit/components/maintenanceservice/workmonitor.cpp
+@@ -21,17 +21,17 @@
+ 
+ #include "workmonitor.h"
+ #include "serviceinstall.h"
+ #include "servicebase.h"
+ #include "registrycertificates.h"
+ #include "uachelper.h"
+ #include "updatehelper.h"
+ #include "pathhash.h"
+-#include "errors.h"
++#include "updatererrors.h"
+ 
+ #define PATCH_DIR_PATH L"\\updates\\0"
+ 
+ // Wait 15 minutes for an update operation to run at most.
+ // Updates usually take less than a minute so this seems like a
+ // significantly large and safe amount of time to wait.
+ static const int TIME_TO_WAIT_ON_UPDATER = 15 * 60 * 1000;
+ BOOL WriteStatusFailure(LPCWSTR updateDirPath, int errorCode);
+diff --git a/toolkit/content/license.html b/toolkit/content/license.html
+--- a/toolkit/content/license.html
++++ b/toolkit/content/license.html
+@@ -2952,18 +2952,18 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ </pre>
+ 
+     <hr>
+ 
+     <h1><a id="bspatch"></a>bspatch License</h1>
+ 
+     <p>This license applies to the files
+-    <code>toolkit/mozapps/update/updater/bspatch.cpp</code> and
+-    <code>toolkit/mozapps/update/updater/bspatch.h</code>.
++    <code>toolkit/mozapps/update/updater/bspatch/bspatch.cpp</code> and
++    <code>toolkit/mozapps/update/updater/bspatch/bspatch.h</code>.
+     </p>
+ 
+ <pre>
+ Copyright 2003,2004 Colin Percival
+ All rights reserved
+ 
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted providing that the following conditions
+diff --git a/toolkit/mozapps/update/common/moz.build b/toolkit/mozapps/update/common/moz.build
+--- a/toolkit/mozapps/update/common/moz.build
++++ b/toolkit/mozapps/update/common/moz.build
+@@ -3,16 +3,17 @@
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ EXPORTS += [
+     'readstrings.h',
+     'updatecommon.h',
+     'updatedefines.h',
++    'updatererrors.h',
+ ]
+ 
+ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+     EXPORTS += [
+         'pathhash.h',
+         'uachelper.h',
+         'updatehelper.cpp',
+         'updatehelper.h',
+diff --git a/toolkit/mozapps/update/common/readstrings.cpp b/toolkit/mozapps/update/common/readstrings.cpp
+--- a/toolkit/mozapps/update/common/readstrings.cpp
++++ b/toolkit/mozapps/update/common/readstrings.cpp
+@@ -3,17 +3,17 @@
+ /* This Source Code Form is subject to the terms of the Mozilla Public
+  * License, v. 2.0. If a copy of the MPL was not distributed with this
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+ #include <limits.h>
+ #include <string.h>
+ #include <stdio.h>
+ #include "readstrings.h"
+-#include "errors.h"
++#include "updatererrors.h"
+ 
+ #ifdef XP_WIN
+ # define NS_tfopen _wfopen
+ # define OPEN_MODE L"rb"
+ #else
+ # define NS_tfopen fopen
+ # define OPEN_MODE "r"
+ #endif
+diff --git a/toolkit/mozapps/update/common/errors.h b/toolkit/mozapps/update/common/updatererrors.h
+rename from toolkit/mozapps/update/common/errors.h
+rename to toolkit/mozapps/update/common/updatererrors.h
+--- a/toolkit/mozapps/update/common/errors.h
++++ b/toolkit/mozapps/update/common/updatererrors.h
+@@ -1,16 +1,16 @@
+ /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+ /* vim:set ts=2 sw=2 sts=2 et cindent: */
+ /* This Source Code Form is subject to the terms of the Mozilla Public
+  * License, v. 2.0. If a copy of the MPL was not distributed with this
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+-#ifndef Errors_h__
+-#define Errors_h__
++#ifndef UPDATEERRORS_H
++#define UPDATEERRORS_H
+ 
+ #define OK 0
+ 
+ // Error codes that are no longer used should not be used again unless they
+ // aren't used in client code (e.g. nsUpdateService.js, updates.js, etc.).
+ 
+ #define MAR_ERROR_EMPTY_ACTION_LIST 1
+ #define LOADSOURCE_ERROR_WRONG_SIZE 2
+@@ -101,9 +101,9 @@
+ // The following error codes are only used by updater.exe
+ // when a fallback key exists for tests.
+ #define FALLBACKKEY_UNKNOWN_ERROR 100
+ #define FALLBACKKEY_REGPATH_ERROR 101
+ #define FALLBACKKEY_NOKEY_ERROR 102
+ #define FALLBACKKEY_SERVICE_NO_STOP_ERROR 103
+ #define FALLBACKKEY_LAUNCH_ERROR 104
+ 
+-#endif  // Errors_h__
++#endif  // UPDATEERRORS_H
+diff --git a/toolkit/mozapps/update/nsIUpdateService.idl b/toolkit/mozapps/update/nsIUpdateService.idl
+--- a/toolkit/mozapps/update/nsIUpdateService.idl
++++ b/toolkit/mozapps/update/nsIUpdateService.idl
+@@ -47,17 +47,17 @@ interface nsIUpdatePatch : nsISupports
+   /**
+    * The state of this patch
+    */
+   attribute AString state;
+ 
+   /**
+    * A numeric error code that conveys additional information about the state of
+    * a failed update. If the update is not in the "failed" state the value is
+-   * zero. The possible values are located in common/errors.h and values between
++   * zero. The possible values are located in common/updatererrors.h and values between
+    * 80 and 99 are in nsUpdateService.js.
+    */
+   attribute long errorCode;
+ 
+   /**
+    * true if this patch is currently selected as the patch to be downloaded and
+    * installed for this update transaction, false if another patch from this
+    * update has been selected.
+@@ -188,17 +188,17 @@ interface nsIUpdate : nsISupports
+    *   "download-failed"    The update failed to be downloaded.
+    *   "failed"             The update failed to be applied.
+    */
+   attribute AString state;
+ 
+   /**
+    * A numeric error code that conveys additional information about the state of
+    * a failed update. If the update is not in the "failed" state the value is
+-   * zero. The possible values are located in common/errors.h and values between
++   * zero. The possible values are located in common/updatererrors.h and values between
+    * 80 and 99 are in nsUpdateService.js.
+    */
+   attribute long errorCode;
+ 
+   /**
+    * Whether an elevation failure has been encountered for this update.
+    */
+   attribute boolean elevationFailure;
+diff --git a/toolkit/mozapps/update/nsUpdateService.js b/toolkit/mozapps/update/nsUpdateService.js
+--- a/toolkit/mozapps/update/nsUpdateService.js
++++ b/toolkit/mozapps/update/nsUpdateService.js
+@@ -75,17 +75,17 @@ const STATE_PENDING_SERVICE = "pending-s
+ const STATE_PENDING_ELEVATE = "pending-elevate";
+ const STATE_APPLYING        = "applying";
+ const STATE_APPLIED         = "applied";
+ const STATE_APPLIED_SERVICE = "applied-service";
+ const STATE_SUCCEEDED       = "succeeded";
+ const STATE_DOWNLOAD_FAILED = "download-failed";
+ const STATE_FAILED          = "failed";
+ 
+-// The values below used by this code are from common/errors.h
++// The values below used by this code are from common/updatererrors.h
+ const WRITE_ERROR                          = 7;
+ const ELEVATION_CANCELED                   = 9;
+ const SERVICE_UPDATER_COULD_NOT_BE_STARTED = 24;
+ const SERVICE_NOT_ENOUGH_COMMAND_LINE_ARGS = 25;
+ const SERVICE_UPDATER_SIGN_ERROR           = 26;
+ const SERVICE_UPDATER_COMPARE_ERROR        = 27;
+ const SERVICE_UPDATER_IDENTITY_ERROR       = 28;
+ const SERVICE_STILL_APPLYING_ON_SUCCESS    = 29;
+@@ -135,17 +135,17 @@ const SERVICE_ERRORS = [SERVICE_UPDATER_
+                         SERVICE_UPDATER_NOT_FIXED_DRIVE,
+                         SERVICE_COULD_NOT_LOCK_UPDATER,
+                         SERVICE_INSTALLDIR_ERROR,
+                         SERVICE_COULD_NOT_COPY_UPDATER,
+                         SERVICE_STILL_APPLYING_TERMINATED,
+                         SERVICE_STILL_APPLYING_NO_EXIT_CODE];
+ 
+ // Error codes 80 through 99 are reserved for nsUpdateService.js and are not
+-// defined in common/errors.h
++// defined in common/updatererrors.h
+ const ERR_OLDER_VERSION_OR_SAME_BUILD      = 90;
+ const ERR_UPDATE_STATE_NONE                = 91;
+ const ERR_CHANNEL_CHANGE                   = 92;
+ const INVALID_UPDATER_STATE_CODE           = 98;
+ const INVALID_UPDATER_STATUS_CODE          = 99;
+ 
+ // Custom update error codes
+ const BACKGROUNDCHECK_MULTIPLE_FAILURES = 110;
+diff --git a/toolkit/mozapps/update/tests/TestAUSReadStrings.cpp b/toolkit/mozapps/update/tests/TestAUSReadStrings.cpp
+--- a/toolkit/mozapps/update/tests/TestAUSReadStrings.cpp
++++ b/toolkit/mozapps/update/tests/TestAUSReadStrings.cpp
+@@ -25,17 +25,17 @@
+ 
+ #include <stdio.h>
+ #include <stdarg.h>
+ #include <string.h>
+ 
+ #include "updater/resource.h"
+ #include "updater/progressui.h"
+ #include "common/readstrings.h"
+-#include "common/errors.h"
++#include "common/updatererrors.h"
+ #include "mozilla/ArrayUtils.h"
+ 
+ #ifndef MAXPATHLEN
+ # ifdef PATH_MAX
+ #  define MAXPATHLEN PATH_MAX
+ # elif defined(MAX_PATH)
+ #  define MAXPATHLEN MAX_PATH
+ # elif defined(_MAX_PATH)
+diff --git a/toolkit/mozapps/update/tests/data/sharedUpdateXML.js b/toolkit/mozapps/update/tests/data/sharedUpdateXML.js
+--- a/toolkit/mozapps/update/tests/data/sharedUpdateXML.js
++++ b/toolkit/mozapps/update/tests/data/sharedUpdateXML.js
+@@ -41,17 +41,17 @@ const SERVICE_INVALID_WORKING_DIR_PATH_E
+ const INVALID_APPLYTO_DIR_STAGED_ERROR         = 72;
+ const INVALID_APPLYTO_DIR_ERROR                = 74;
+ const INVALID_INSTALL_DIR_PATH_ERROR           = 75;
+ const INVALID_WORKING_DIR_PATH_ERROR           = 76;
+ const INVALID_CALLBACK_PATH_ERROR              = 77;
+ const INVALID_CALLBACK_DIR_ERROR               = 78;
+ 
+ // Error codes 80 through 99 are reserved for nsUpdateService.js and are not
+-// defined in common/errors.h
++// defined in common/updatererrors.h
+ const ERR_OLDER_VERSION_OR_SAME_BUILD      = 90;
+ const ERR_UPDATE_STATE_NONE                = 91;
+ const ERR_CHANNEL_CHANGE                   = 92;
+ 
+ const STATE_FAILED_DELIMETER = ": ";
+ 
+ const STATE_FAILED_LOADSOURCE_ERROR_WRONG_SIZE =
+   STATE_FAILED + STATE_FAILED_DELIMETER + LOADSOURCE_ERROR_WRONG_SIZE;
+diff --git a/toolkit/mozapps/update/updater/archivereader.cpp b/toolkit/mozapps/update/updater/archivereader.cpp
+--- a/toolkit/mozapps/update/updater/archivereader.cpp
++++ b/toolkit/mozapps/update/updater/archivereader.cpp
+@@ -7,17 +7,17 @@
+ 
+ #include <string.h>
+ #include <stdlib.h>
+ #include <fcntl.h>
+ #ifdef XP_WIN
+ #include <windows.h>
+ #endif
+ #include "archivereader.h"
+-#include "errors.h"
++#include "updatererrors.h"
+ #ifdef XP_WIN
+ #include "nsAlgorithm.h" // Needed by nsVersionComparator.cpp
+ #include "updatehelper.h"
+ #endif
+ #define XZ_USE_CRC64
+ #include "xz.h"
+ 
+ // These are generated at compile time based on the DER file for the channel
+diff --git a/toolkit/mozapps/update/updater/bspatch/LICENSE b/toolkit/mozapps/update/updater/bspatch/LICENSE
+new file mode 100644
+--- /dev/null
++++ b/toolkit/mozapps/update/updater/bspatch/LICENSE
+@@ -0,0 +1,23 @@
++Copyright 2003,2004 Colin Percival
++All rights reserved
++
++Redistribution and use in source and binary forms, with or without
++modification, are permitted providing that the following conditions
++are met:
++1. Redistributions of source code must retain the above copyright
++   notice, this list of conditions and the following disclaimer.
++2. Redistributions in binary form must reproduce the above copyright
++   notice, this list of conditions and the following disclaimer in the
++   documentation and/or other materials provided with the distribution.
++
++THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
++IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGE.
+diff --git a/toolkit/mozapps/update/updater/bspatch.cpp b/toolkit/mozapps/update/updater/bspatch/bspatch.cpp
+rename from toolkit/mozapps/update/updater/bspatch.cpp
+rename to toolkit/mozapps/update/updater/bspatch/bspatch.cpp
+--- a/toolkit/mozapps/update/updater/bspatch.cpp
++++ b/toolkit/mozapps/update/updater/bspatch/bspatch.cpp
+@@ -25,17 +25,17 @@
+  *
+  * Changelog:
+  * 2005-04-26 - Define the header as a C structure, add a CRC32 checksum to
+  *              the header, and make all the types 32-bit.
+  *                --Benjamin Smedberg <benjamin@smedbergs.us>
+  */
+ 
+ #include "bspatch.h"
+-#include "errors.h"
++#include "updatererrors.h"
+ 
+ #include <sys/stat.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <fcntl.h>
+ #include <string.h>
+ #include <limits.h>
+ 
+diff --git a/toolkit/mozapps/update/updater/bspatch.h b/toolkit/mozapps/update/updater/bspatch/bspatch.h
+rename from toolkit/mozapps/update/updater/bspatch.h
+rename to toolkit/mozapps/update/updater/bspatch/bspatch.h
+diff --git a/toolkit/mozapps/update/updater/bspatch/moz.build b/toolkit/mozapps/update/updater/bspatch/moz.build
+new file mode 100644
+--- /dev/null
++++ b/toolkit/mozapps/update/updater/bspatch/moz.build
+@@ -0,0 +1,22 @@
++# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
++# vim: set filetype=python:
++# This Source Code Form is subject to the terms of the Mozilla Public
++# License, v. 2.0. If a copy of the MPL was not distributed with this
++# file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++if CONFIG['OS_ARCH'] == 'WINNT':
++    USE_STATIC_LIBS = True
++
++EXPORTS += [
++    'bspatch.h',
++]
++
++SOURCES += [
++    'bspatch.cpp',
++]
++
++USE_LIBS += [
++    'updatecommon',
++]
++
++Library('bspatch')
+diff --git a/toolkit/mozapps/update/updater/bspatch/moz.yaml b/toolkit/mozapps/update/updater/bspatch/moz.yaml
+new file mode 100644
+--- /dev/null
++++ b/toolkit/mozapps/update/updater/bspatch/moz.yaml
+@@ -0,0 +1,30 @@
++# Version of this schema
++schema: 1
++
++bugzilla:
++  # Bugzilla product and component for this directory and subdirectories
++  product: "Toolkit"
++  component: "Application Update"
++
++# The source from this directory was adapted from Colin Percival's bspatch
++# tool in mid 2005 and was obtained from bsdiff version 4.2. Edits were
++# later added by the Chromium dev team and were copied to here as well
++
++# Document the source of externally hosted code
++origin:
++  name: "bsdiff/bspatch"
++  description: "Builds and applies patches to binary files"
++
++  # Full URL for the package's homepage/etc
++  # Usually different from repository url
++  url: "https://www.daemonology.net/bsdiff/bsdiff-4.2.tar.gz"
++
++  # Human-readable identifier for this version/release
++  # Generally "version NNN", "tag SSS", "bookmark SSS"
++  release: "version 4.2"
++
++  # The package's license, where possible using the mnemonic from
++  # https://spdx.org/licenses/
++  # Multiple licenses can be specified (as a YAML list)
++  # A "LICENSE" file must exist containing the full license text
++  license: "BSD-2-Clause"
+diff --git a/toolkit/mozapps/update/updater/progressui_gtk.cpp b/toolkit/mozapps/update/updater/progressui_gtk.cpp
+--- a/toolkit/mozapps/update/updater/progressui_gtk.cpp
++++ b/toolkit/mozapps/update/updater/progressui_gtk.cpp
+@@ -5,17 +5,17 @@
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+ #include <stdio.h>
+ #include <gtk/gtk.h>
+ #include <unistd.h>
+ #include "mozilla/Sprintf.h"
+ #include "progressui.h"
+ #include "readstrings.h"
+-#include "errors.h"
++#include "updatererrors.h"
+ 
+ #define TIMER_INTERVAL 100
+ 
+ static float    sProgressVal;  // between 0 and 100
+ static gboolean sQuit = FALSE;
+ static gboolean sEnableUI;
+ static guint    sTimerID;
+ 
+diff --git a/toolkit/mozapps/update/updater/progressui_osx.mm b/toolkit/mozapps/update/updater/progressui_osx.mm
+--- a/toolkit/mozapps/update/updater/progressui_osx.mm
++++ b/toolkit/mozapps/update/updater/progressui_osx.mm
+@@ -5,17 +5,17 @@
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+ #import <Cocoa/Cocoa.h>
+ #include <stdio.h>
+ #include <unistd.h>
+ #include "mozilla/Sprintf.h"
+ #include "progressui.h"
+ #include "readstrings.h"
+-#include "errors.h"
++#include "updatererrors.h"
+ 
+ #define TIMER_INTERVAL 0.2
+ 
+ static float sProgressVal;  // between 0 and 100
+ static BOOL sQuit = NO;
+ static BOOL sIndeterminate = NO;
+ static StringTable sLabels;
+ static const char *sUpdatePath;
+diff --git a/toolkit/mozapps/update/updater/progressui_win.cpp b/toolkit/mozapps/update/updater/progressui_win.cpp
+--- a/toolkit/mozapps/update/updater/progressui_win.cpp
++++ b/toolkit/mozapps/update/updater/progressui_win.cpp
+@@ -8,17 +8,17 @@
+ #include <windows.h>
+ #include <commctrl.h>
+ #include <process.h>
+ #include <io.h>
+ 
+ #include "resource.h"
+ #include "progressui.h"
+ #include "readstrings.h"
+-#include "errors.h"
++#include "updatererrors.h"
+ 
+ #define TIMER_ID 1
+ #define TIMER_INTERVAL 100
+ 
+ #define RESIZE_WINDOW(hwnd, extrax, extray) \
+   { \
+     RECT windowSize; \
+     GetWindowRect(hwnd, &windowSize); \
+diff --git a/toolkit/mozapps/update/updater/updater-common.build b/toolkit/mozapps/update/updater/updater-common.build
+--- a/toolkit/mozapps/update/updater/updater-common.build
++++ b/toolkit/mozapps/update/updater/updater-common.build
+@@ -1,17 +1,16 @@
+ # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+ # vim: set filetype=python:
+ # This Source Code Form is subject to the terms of the Mozilla Public
+ # License, v. 2.0. If a copy of the MPL was not distributed with this
+ # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ srcs = [
+     'archivereader.cpp',
+-    'bspatch.cpp',
+     'updater.cpp',
+ ]
+ 
+ have_progressui = 0
+ 
+ if CONFIG['MOZ_VERIFY_MAR_SIGNATURE']:
+     USE_LIBS += [
+         'verifymar',
+@@ -44,16 +43,17 @@ if CONFIG['OS_ARCH'] == 'WINNT':
+ elif CONFIG['OS_ARCH'] == 'Linux' and CONFIG['MOZ_VERIFY_MAR_SIGNATURE']:
+     USE_LIBS += [
+         'nss',
+         'signmar',
+     ]
+     OS_LIBS += CONFIG['NSPR_LIBS']
+ 
+ USE_LIBS += [
++    'bspatch',
+     'mar',
+     'updatecommon',
+     'xz-embedded',
+ ]
+ 
+ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk':
+     have_progressui = 1
+     srcs += [
+diff --git a/toolkit/mozapps/update/updater/updater.cpp b/toolkit/mozapps/update/updater/updater.cpp
+--- a/toolkit/mozapps/update/updater/updater.cpp
++++ b/toolkit/mozapps/update/updater/updater.cpp
+@@ -32,17 +32,17 @@
+  *  precomplete
+  *  -----------
+  *  method   = "remove" | "rmdir"
+  */
+ #include "bspatch.h"
+ #include "progressui.h"
+ #include "archivereader.h"
+ #include "readstrings.h"
+-#include "errors.h"
++#include "updatererrors.h"
+ 
+ #include <stdio.h>
+ #include <string.h>
+ #include <stdlib.h>
+ #include <stdarg.h>
+ 
+ #include <sys/types.h>
+ #include <sys/stat.h>
+diff --git a/toolkit/toolkit.mozbuild b/toolkit/toolkit.mozbuild
+--- a/toolkit/toolkit.mozbuild
++++ b/toolkit/toolkit.mozbuild
+@@ -58,17 +58,20 @@ DIRS += [
+     '/intl',
+     '/netwerk',
+ ]
+ 
+ if CONFIG['MOZ_AUTH_EXTENSION']:
+     DIRS += ['/extensions/auth']
+ 
+ if CONFIG['MOZ_UPDATER']:
+-    DIRS += ['/other-licenses/bsdiff']
++    DIRS += [
++        '/toolkit/mozapps/update/updater/bspatch',
++        '/other-licenses/bsdiff',
++    ]
+ 
+ # Gecko/Core components.
+ 
+ DIRS += [
+     '/ipc',
+     '/js/ipc',
+     '/hal',
+     '/js/xpconnect',

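The hunks above turn bspatch into a small static library linked via USE_LIBS and rename common/errors.h to common/updatererrors.h. For orientation, a rough sketch of how a consumer drives the two bspatch entry points (their signatures are visible in the reformatting patch below) might look like this; the apply_patch() wrapper is hypothetical, and the CRC check against header.scrc32 plus all of the archive plumbing done by updater.cpp are omitted:

    #include <stdio.h>
    #include <stdlib.h>
    #include "bspatch.h"        /* now under toolkit/mozapps/update/updater/bspatch/ */
    #include "updatererrors.h"  /* formerly common/errors.h */

    /* Hypothetical wrapper: read the patch header, load the old file into a
       buffer of header.slen bytes, then let MBS_ApplyPatch() write the result. */
    static int apply_patch(FILE *patchFile, FILE *oldFile, FILE *destFile) {
      MBSPatchHeader header;
      unsigned char *oldData;
      int rv = MBS_ReadHeader(patchFile, &header);
      if (rv != OK) {
        return rv;  /* e.g. READ_ERROR or UNEXPECTED_BSPATCH_ERROR */
      }
      oldData = (unsigned char *)malloc(header.slen);
      if (!oldData) {
        return BSPATCH_MEM_ERROR;
      }
      if (fread(oldData, 1, header.slen, oldFile) != header.slen) {
        free(oldData);
        return READ_ERROR;
      }
      rv = MBS_ApplyPatch(&header, patchFile, oldData, destFile);
      free(oldData);
      return rv;
    }
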
+ 256 - 0
mozilla-release/patches/1511181-bspatch-65a1.patch

@@ -0,0 +1,256 @@
+# HG changeset patch
+# User Sylvestre Ledru <sledru@mozilla.com>
+# Date 1543574808 -3600
+# Node ID 6f3709b3878117466168c40affa7bca0b60cf75b
+# Parent  2ac59ec6f8de97fa704341cec1bf57c489b19810
+Bug 1511181 - Reformat everything to the Google coding style r=ehsan a=clang-format
+# ignore-this-changeset
+
+diff --git a/toolkit/mozapps/update/updater/bspatch.cpp b/toolkit/mozapps/update/updater/bspatch.cpp
+--- a/toolkit/mozapps/update/updater/bspatch.cpp
++++ b/toolkit/mozapps/update/updater/bspatch.cpp
+@@ -35,91 +35,76 @@
+ #include <sys/stat.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <fcntl.h>
+ #include <string.h>
+ #include <limits.h>
+ 
+ #if defined(XP_WIN)
+-# include <io.h>
++#include <io.h>
+ #else
+-# include <unistd.h>
++#include <unistd.h>
+ #endif
+ 
+ #ifdef XP_WIN
+-# include <winsock2.h>
++#include <winsock2.h>
+ #else
+-# include <arpa/inet.h>
++#include <arpa/inet.h>
+ #endif
+ 
+ #ifndef SSIZE_MAX
+-# define SSIZE_MAX LONG_MAX
++#define SSIZE_MAX LONG_MAX
+ #endif
+ 
+-int
+-MBS_ReadHeader(FILE* file, MBSPatchHeader *header)
+-{
++int MBS_ReadHeader(FILE *file, MBSPatchHeader *header) {
+   size_t s = fread(header, 1, sizeof(MBSPatchHeader), file);
+-  if (s != sizeof(MBSPatchHeader))
+-    return READ_ERROR;
++  if (s != sizeof(MBSPatchHeader)) return READ_ERROR;
+ 
+-  header->slen      = ntohl(header->slen);
+-  header->scrc32    = ntohl(header->scrc32);
+-  header->dlen      = ntohl(header->dlen);
+-  header->cblen     = ntohl(header->cblen);
+-  header->difflen   = ntohl(header->difflen);
+-  header->extralen  = ntohl(header->extralen);
++  header->slen = ntohl(header->slen);
++  header->scrc32 = ntohl(header->scrc32);
++  header->dlen = ntohl(header->dlen);
++  header->cblen = ntohl(header->cblen);
++  header->difflen = ntohl(header->difflen);
++  header->extralen = ntohl(header->extralen);
+ 
+   struct stat hs;
+   s = fstat(fileno(file), &hs);
+-  if (s != 0)
+-    return READ_ERROR;
++  if (s != 0) return READ_ERROR;
+ 
+-  if (memcmp(header->tag, "MBDIFF10", 8) != 0)
+-    return UNEXPECTED_BSPATCH_ERROR;
++  if (memcmp(header->tag, "MBDIFF10", 8) != 0) return UNEXPECTED_BSPATCH_ERROR;
+ 
+-  if (hs.st_size > INT_MAX)
+-    return UNEXPECTED_BSPATCH_ERROR;
++  if (hs.st_size > INT_MAX) return UNEXPECTED_BSPATCH_ERROR;
+ 
+   size_t size = static_cast<size_t>(hs.st_size);
+-  if (size < sizeof(MBSPatchHeader))
+-    return UNEXPECTED_BSPATCH_ERROR;
++  if (size < sizeof(MBSPatchHeader)) return UNEXPECTED_BSPATCH_ERROR;
+   size -= sizeof(MBSPatchHeader);
+ 
+-  if (size < header->cblen)
+-    return UNEXPECTED_BSPATCH_ERROR;
++  if (size < header->cblen) return UNEXPECTED_BSPATCH_ERROR;
+   size -= header->cblen;
+ 
+-  if (size < header->difflen)
+-    return UNEXPECTED_BSPATCH_ERROR;
++  if (size < header->difflen) return UNEXPECTED_BSPATCH_ERROR;
+   size -= header->difflen;
+ 
+-  if (size < header->extralen)
+-    return UNEXPECTED_BSPATCH_ERROR;
++  if (size < header->extralen) return UNEXPECTED_BSPATCH_ERROR;
+   size -= header->extralen;
+ 
+-  if (size != 0)
+-    return UNEXPECTED_BSPATCH_ERROR;
++  if (size != 0) return UNEXPECTED_BSPATCH_ERROR;
+ 
+   return OK;
+ }
+ 
+-int
+-MBS_ApplyPatch(const MBSPatchHeader *header, FILE* patchFile,
+-               unsigned char *fbuffer, FILE* file)
+-{
++int MBS_ApplyPatch(const MBSPatchHeader *header, FILE *patchFile,
++                   unsigned char *fbuffer, FILE *file) {
+   unsigned char *fbufstart = fbuffer;
+   unsigned char *fbufend = fbuffer + header->slen;
+ 
+-  unsigned char *buf = (unsigned char*) malloc(header->cblen +
+-                                               header->difflen +
++  unsigned char *buf = (unsigned char *)malloc(header->cblen + header->difflen +
+                                                header->extralen);
+-  if (!buf)
+-    return BSPATCH_MEM_ERROR;
++  if (!buf) return BSPATCH_MEM_ERROR;
+ 
+   int rv = OK;
+ 
+   size_t r = header->cblen + header->difflen + header->extralen;
+   unsigned char *wb = buf;
+   while (r) {
+     const size_t count = (r > SSIZE_MAX) ? SSIZE_MAX : r;
+     size_t c = fread(wb, 1, count, patchFile);
+@@ -133,77 +118,75 @@ MBS_ApplyPatch(const MBSPatchHeader *hea
+ 
+     if (c == 0 && r) {
+       rv = UNEXPECTED_BSPATCH_ERROR;
+       goto end;
+     }
+   }
+ 
+   {
+-    MBSPatchTriple *ctrlsrc = (MBSPatchTriple*) buf;
++    MBSPatchTriple *ctrlsrc = (MBSPatchTriple *)buf;
+     if (header->cblen % sizeof(MBSPatchTriple) != 0) {
+       rv = UNEXPECTED_BSPATCH_ERROR;
+       goto end;
+     }
+ 
+     unsigned char *diffsrc = buf + header->cblen;
+     unsigned char *extrasrc = diffsrc + header->difflen;
+ 
+-    MBSPatchTriple *ctrlend = (MBSPatchTriple*) diffsrc;
++    MBSPatchTriple *ctrlend = (MBSPatchTriple *)diffsrc;
+     unsigned char *diffend = extrasrc;
+     unsigned char *extraend = extrasrc + header->extralen;
+ 
+     while (ctrlsrc < ctrlend) {
+       ctrlsrc->x = ntohl(ctrlsrc->x);
+       ctrlsrc->y = ntohl(ctrlsrc->y);
+       ctrlsrc->z = ntohl(ctrlsrc->z);
+ 
+ #ifdef DEBUG_bsmedberg
+-      printf("Applying block:\n"
+-             " x: %u\n"
+-             " y: %u\n"
+-             " z: %i\n",
+-             ctrlsrc->x,
+-             ctrlsrc->y,
+-             ctrlsrc->z);
++      printf(
++          "Applying block:\n"
++          " x: %u\n"
++          " y: %u\n"
++          " z: %i\n",
++          ctrlsrc->x, ctrlsrc->y, ctrlsrc->z);
+ #endif
+ 
+       /* Add x bytes from oldfile to x bytes from the diff block */
+ 
+       if (ctrlsrc->x > static_cast<size_t>(fbufend - fbuffer) ||
+           ctrlsrc->x > static_cast<size_t>(diffend - diffsrc)) {
+         rv = UNEXPECTED_BSPATCH_ERROR;
+         goto end;
+       }
+       for (uint32_t i = 0; i < ctrlsrc->x; ++i) {
+         diffsrc[i] += fbuffer[i];
+       }
+-      if ((uint32_t) fwrite(diffsrc, 1, ctrlsrc->x, file) != ctrlsrc->x) {
++      if ((uint32_t)fwrite(diffsrc, 1, ctrlsrc->x, file) != ctrlsrc->x) {
+         rv = WRITE_ERROR_PATCH_FILE;
+         goto end;
+       }
+       fbuffer += ctrlsrc->x;
+       diffsrc += ctrlsrc->x;
+ 
+       /* Copy y bytes from the extra block */
+ 
+       if (ctrlsrc->y > static_cast<size_t>(extraend - extrasrc)) {
+         rv = UNEXPECTED_BSPATCH_ERROR;
+         goto end;
+       }
+-      if ((uint32_t) fwrite(extrasrc, 1, ctrlsrc->y, file) != ctrlsrc->y) {
++      if ((uint32_t)fwrite(extrasrc, 1, ctrlsrc->y, file) != ctrlsrc->y) {
+         rv = WRITE_ERROR_PATCH_FILE;
+         goto end;
+       }
+       extrasrc += ctrlsrc->y;
+ 
+       /* "seek" forwards in oldfile by z bytes */
+ 
+-      if (ctrlsrc->z < fbufstart - fbuffer ||
+-          ctrlsrc->z > fbufend - fbuffer) {
++      if (ctrlsrc->z < fbufstart - fbuffer || ctrlsrc->z > fbufend - fbuffer) {
+         rv = UNEXPECTED_BSPATCH_ERROR;
+         goto end;
+       }
+       fbuffer += ctrlsrc->z;
+ 
+       /* and on to the next control block */
+ 
+       ++ctrlsrc;
+diff --git a/toolkit/mozapps/update/updater/bspatch.h b/toolkit/mozapps/update/updater/bspatch.h
+--- a/toolkit/mozapps/update/updater/bspatch.h
++++ b/toolkit/mozapps/update/updater/bspatch.h
+@@ -63,31 +63,31 @@ typedef struct MBSPatchHeader_ {
+ } MBSPatchHeader;
+ 
+ /**
+  * Read the header of a patch file into the MBSPatchHeader structure.
+  *
+  * @param fd Must have been opened for reading, and be at the beginning
+  *           of the file.
+  */
+-int MBS_ReadHeader(FILE* file, MBSPatchHeader *header);
++int MBS_ReadHeader(FILE* file, MBSPatchHeader* header);
+ 
+ /**
+  * Apply a patch. This method does not validate the checksum of the original
+  * file: client code should validate the checksum before calling this method.
+  *
+  * @param patchfd Must have been processed by MBS_ReadHeader
+  * @param fbuffer The original file read into a memory buffer of length
+  *                header->slen.
+  * @param filefd  Must have been opened for writing. Should be truncated
+  *                to header->dlen if it is an existing file. The offset
+  *                should be at the beginning of the file.
+  */
+-int MBS_ApplyPatch(const MBSPatchHeader *header, FILE* patchFile,
+-                   unsigned char *fbuffer, FILE* file);
++int MBS_ApplyPatch(const MBSPatchHeader* header, FILE* patchFile,
++                   unsigned char* fbuffer, FILE* file);
+ 
+ typedef struct MBSPatchTriple_ {
+   uint32_t x; /* add x bytes from oldfile to x bytes from the diff block */
+   uint32_t y; /* copy y bytes from the extra block */
+-  int32_t  z; /* seek forwards in oldfile by z bytes */
++  int32_t z;  /* seek forwards in oldfile by z bytes */
+ } MBSPatchTriple;
+ 
+ #endif  // bspatch_h__
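
The bspatch patch above is a pure clang-format change; the API contract is unchanged and is spelled out in the bspatch.h comments: MBS_ReadHeader validates the MBDIFF10 tag and checks that cblen, difflen and extralen exactly account for the rest of the patch file, and MBS_ApplyPatch then consumes the control/diff/extra blocks with the original file preloaded into a buffer of header->slen bytes. The following is a minimal, hypothetical driver sketch of that call sequence, not code from the tree; the wrapper name is invented, the OK/READ_ERROR/BSPATCH_MEM_ERROR codes are assumed to come from the updater's shared error definitions, and the CRC32 check of the source file (header->scrc32) that real callers must perform is omitted.

    /* Hypothetical usage sketch, not part of the patch: applies an MBDIFF10
     * patch following the contract documented in bspatch.h. */
    #include <stdio.h>
    #include <stdlib.h>
    #include "bspatch.h"

    int ApplyMBSPatch(const char* oldPath, const char* patchPath,
                      const char* newPath) {
      FILE* patchFile = fopen(patchPath, "rb");
      FILE* oldFile = fopen(oldPath, "rb");
      FILE* newFile = fopen(newPath, "wb");
      int rv = READ_ERROR;
      if (patchFile && oldFile && newFile) {
        MBSPatchHeader header;
        /* Validates the MBDIFF10 tag and that cblen + difflen + extralen
         * exactly account for the remainder of the patch file. */
        rv = MBS_ReadHeader(patchFile, &header);
        if (rv == OK) {
          /* MBS_ApplyPatch expects the whole original file (header.slen
           * bytes) in memory; callers should verify its CRC32 against
           * header.scrc32 first, which this sketch omits. */
          unsigned char* fbuffer = (unsigned char*)malloc(header.slen);
          if (!fbuffer) {
            rv = BSPATCH_MEM_ERROR;
          } else if (fread(fbuffer, 1, header.slen, oldFile) != header.slen) {
            rv = READ_ERROR;
          } else {
            rv = MBS_ApplyPatch(&header, patchFile, fbuffer, newFile);
          }
          free(fbuffer);
        }
      }
      if (patchFile) fclose(patchFile);
      if (oldFile) fclose(oldFile);
      if (newFile) fclose(newFile);
      return rv;
    }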

+ 4068 - 0
mozilla-release/patches/1514532-1-66a1.patch

@@ -0,0 +1,4068 @@
+
+# HG changeset patch
+# User Robert Strong <robert.bugzilla@gmail.com>
+# Date 1545074241 28800
+# Node ID 98b5a78c6cc9d157dba65933e5bfdf319ddb7777
+# Parent  71cdcd3eb846695d25f889176b63cf50420e4662
+Bug 1514532 - the modules/libmar directory isn't clang formatted and there are a few deprecation warnings in app update code. r=mhowell
+Added defines to mar_extract.c, mar_read.c, bsdiff.c so they use the ISO C and C++ conformant name.
+Ran clang format on bspatch.cpp and the files under modules/libmar except for nss_secutil.c and nss_secutil.h since they are copies of nss code.
+
+diff --git a/modules/libmar/sign/mar_sign.c b/modules/libmar/sign/mar_sign.c
+--- a/modules/libmar/sign/mar_sign.c
++++ b/modules/libmar/sign/mar_sign.c
+@@ -25,45 +25,39 @@
+ #include "base64.h"
+ 
+ /**
+  * Initializes the NSS context.
+  *
+  * @param NSSConfigDir The config dir containing the private key to use
+  * @return 0 on success
+  *         -1 on error
+-*/
+-int
+-NSSInitCryptoContext(const char *NSSConfigDir)
+-{
+-  SECStatus status = NSS_Initialize(NSSConfigDir,
+-                                    "", "", SECMOD_DB, NSS_INIT_READONLY);
++ */
++int NSSInitCryptoContext(const char *NSSConfigDir) {
++  SECStatus status =
++      NSS_Initialize(NSSConfigDir, "", "", SECMOD_DB, NSS_INIT_READONLY);
+   if (SECSuccess != status) {
+     fprintf(stderr, "ERROR: Could not initialize NSS\n");
+     return -1;
+   }
+ 
+   return 0;
+ }
+ 
+ /**
+  * Obtains a signing context.
+  *
+  * @param  ctx A pointer to the signing context to fill
+  * @return 0 on success
+  *         -1 on error
+-*/
+-int
+-NSSSignBegin(const char *certName,
+-             SGNContext **ctx,
+-             SECKEYPrivateKey **privKey,
+-             CERTCertificate **cert,
+-             uint32_t *signatureLength)
+-{
+-  secuPWData pwdata = { PW_NONE, 0 };
++ */
++int NSSSignBegin(const char *certName, SGNContext **ctx,
++                 SECKEYPrivateKey **privKey, CERTCertificate **cert,
++                 uint32_t *signatureLength) {
++  secuPWData pwdata = {PW_NONE, 0};
+   if (!certName || !ctx || !privKey || !cert || !signatureLength) {
+     fprintf(stderr, "ERROR: Invalid parameter passed to NSSSignBegin\n");
+     return -1;
+   }
+ 
+   /* Get the cert and embedded public key out of the database */
+   *cert = PK11_FindCertFromNickname(certName, &pwdata);
+   if (!*cert) {
+@@ -117,53 +111,50 @@ NSSSignBegin(const char *certName,
+  * @param  size     The size of the buffer to write.
+  * @param  ctxs     Pointer to the first element in an array of signature
+  *                  contexts to update.
+  * @param  ctxCount The number of signature contexts pointed to by ctxs
+  * @param  err    The name of what is being written to in case of error.
+  * @return  0 on success
+  *         -2 on write error
+  *         -3 on signature update error
+-*/
+-int
+-WriteAndUpdateSignatures(FILE *fpDest, void *buffer,
+-                         uint32_t size, SGNContext **ctxs,
+-                         uint32_t ctxCount,
+-                         const char *err)
+-{
++ */
++int WriteAndUpdateSignatures(FILE *fpDest, void *buffer, uint32_t size,
++                             SGNContext **ctxs, uint32_t ctxCount,
++                             const char *err) {
+   uint32_t k;
+   if (!size) {
+     return 0;
+   }
+ 
+   if (fwrite(buffer, size, 1, fpDest) != 1) {
+     fprintf(stderr, "ERROR: Could not write %s\n", err);
+     return -2;
+   }
+ 
+   for (k = 0; k < ctxCount; ++k) {
+     if (SGN_Update(ctxs[k], buffer, size) != SECSuccess) {
+-      fprintf(stderr, "ERROR: Could not update signature context for %s\n", err);
++      fprintf(stderr, "ERROR: Could not update signature context for %s\n",
++              err);
+       return -3;
+     }
+   }
+   return 0;
+ }
+ 
+ /**
+  * Adjusts each entry's content offset in the the passed in index by the
+  * specified amount.
+  *
+  * @param indexBuf     A buffer containing the MAR index
+  * @param indexLength  The length of the MAR index
+  * @param offsetAmount The amount to adjust each index entry by
+-*/
+-void
+-AdjustIndexContentOffsets(char *indexBuf, uint32_t indexLength, uint32_t offsetAmount)
+-{
++ */
++void AdjustIndexContentOffsets(char *indexBuf, uint32_t indexLength,
++                               uint32_t offsetAmount) {
+   uint32_t *offsetToContent;
+   char *indexBufLoc = indexBuf;
+ 
+   /* Consume the index and adjust each index by the specified amount */
+   while (indexBufLoc != (indexBuf + indexLength)) {
+     /* Adjust the offset */
+     offsetToContent = (uint32_t *)indexBufLoc;
+     *offsetToContent = ntohl(*offsetToContent);
+@@ -185,52 +176,46 @@ AdjustIndexContentOffsets(char *indexBuf
+  * @param  ctxs     Pointer to the first element in an array of signature
+  *                  contexts to update.
+  * @param  ctxCount The number of signature contexts pointed to by ctxs
+  * @param  err    The name of what is being written to in case of error.
+  * @return  0 on success
+  *         -1 on read error
+  *         -2 on write error
+  *         -3 on signature update error
+-*/
+-int
+-ReadWriteAndUpdateSignatures(FILE *fpSrc, FILE *fpDest, void *buffer,
+-                             uint32_t size, SGNContext **ctxs,
+-                             uint32_t ctxCount,
+-                             const char *err)
+-{
++ */
++int ReadWriteAndUpdateSignatures(FILE *fpSrc, FILE *fpDest, void *buffer,
++                                 uint32_t size, SGNContext **ctxs,
++                                 uint32_t ctxCount, const char *err) {
+   if (!size) {
+     return 0;
+   }
+ 
+   if (fread(buffer, size, 1, fpSrc) != 1) {
+     fprintf(stderr, "ERROR: Could not read %s\n", err);
+     return -1;
+   }
+ 
+   return WriteAndUpdateSignatures(fpDest, buffer, size, ctxs, ctxCount, err);
+ }
+ 
+-
+ /**
+  * Reads from fpSrc, writes it to fpDest.
+  *
+  * @param  fpSrc  The file pointer to read from.
+  * @param  fpDest The file pointer to write to.
+  * @param  buffer The buffer to write.
+  * @param  size   The size of the buffer to write.
+  * @param  err    The name of what is being written to in case of error.
+  * @return  0 on success
+  *         -1 on read error
+  *         -2 on write error
+-*/
+-int
+-ReadAndWrite(FILE *fpSrc, FILE *fpDest, void *buffer,
+-             uint32_t size, const char *err)
+-{
++ */
++int ReadAndWrite(FILE *fpSrc, FILE *fpDest, void *buffer, uint32_t size,
++                 const char *err) {
+   if (!size) {
+     return 0;
+   }
+ 
+   if (fread(buffer, size, 1, fpSrc) != 1) {
+     fprintf(stderr, "ERROR: Could not read %s\n", err);
+     return -1;
+   }
+@@ -247,24 +232,22 @@ ReadAndWrite(FILE *fpSrc, FILE *fpDest, 
+  * Writes out a copy of the MAR at src but with the signature block stripped.
+  *
+  * @param  src  The path of the source MAR file
+  * @param  dest The path of the MAR file to write out that
+                 has no signature block
+  * @return 0 on success
+  *         -1 on error
+ */
+-int
+-strip_signature_block(const char *src, const char * dest)
+-{
+-  uint32_t offsetToIndex, dstOffsetToIndex, indexLength,
+-    numSignatures = 0, leftOver;
++int strip_signature_block(const char *src, const char *dest) {
++  uint32_t offsetToIndex, dstOffsetToIndex, indexLength, numSignatures = 0,
++                                                         leftOver;
+   int32_t stripAmount = 0;
+-  int64_t oldPos, sizeOfEntireMAR = 0, realSizeOfSrcMAR, numBytesToCopy,
+-    numChunks, i;
++  int64_t oldPos, numChunks, i, realSizeOfSrcMAR, numBytesToCopy,
++      sizeOfEntireMAR = 0;
+   FILE *fpSrc = NULL, *fpDest = NULL;
+   int rv = -1, hasSignatureBlock;
+   char buf[BLOCKSIZE];
+   char *indexBuf = NULL;
+ 
+   if (!src || !dest) {
+     fprintf(stderr, "ERROR: Invalid parameter passed in.\n");
+     return -1;
+@@ -309,18 +292,17 @@ strip_signature_block(const char *src, c
+   realSizeOfSrcMAR = ftello(fpSrc);
+   if (fseeko(fpSrc, oldPos, SEEK_SET)) {
+     fprintf(stderr, "ERROR: Could not seek back to current location.\n");
+     goto failure;
+   }
+ 
+   if (hasSignatureBlock) {
+     /* Get the MAR length and adjust its size */
+-    if (fread(&sizeOfEntireMAR,
+-              sizeof(sizeOfEntireMAR), 1, fpSrc) != 1) {
++    if (fread(&sizeOfEntireMAR, sizeof(sizeOfEntireMAR), 1, fpSrc) != 1) {
+       fprintf(stderr, "ERROR: Could read mar size\n");
+       goto failure;
+     }
+     sizeOfEntireMAR = NETWORK_TO_HOST64(sizeOfEntireMAR);
+     if (sizeOfEntireMAR != realSizeOfSrcMAR) {
+       fprintf(stderr, "ERROR: Source MAR is not of the right size\n");
+       goto failure;
+     }
+@@ -414,43 +396,41 @@ strip_signature_block(const char *src, c
+   /* Read each file and write it to the MAR file */
+   for (i = 0; i < numChunks; ++i) {
+     if (ReadAndWrite(fpSrc, fpDest, buf, BLOCKSIZE, "content block")) {
+       goto failure;
+     }
+   }
+ 
+   /* Write out the left over */
+-  if (ReadAndWrite(fpSrc, fpDest, buf,
+-                   leftOver, "left over content block")) {
++  if (ReadAndWrite(fpSrc, fpDest, buf, leftOver, "left over content block")) {
+     goto failure;
+   }
+ 
+   /* Length of the index */
+-  if (ReadAndWrite(fpSrc, fpDest, &indexLength,
+-                   sizeof(indexLength), "index length")) {
++  if (ReadAndWrite(fpSrc, fpDest, &indexLength, sizeof(indexLength),
++                   "index length")) {
+     goto failure;
+   }
+   indexLength = ntohl(indexLength);
+ 
+   /* Consume the index and adjust each index by the difference */
+   indexBuf = malloc(indexLength);
+   if (fread(indexBuf, indexLength, 1, fpSrc) != 1) {
+     fprintf(stderr, "ERROR: Could not read index\n");
+     goto failure;
+   }
+ 
+   /* Adjust each entry in the index */
+   if (hasSignatureBlock) {
+     AdjustIndexContentOffsets(indexBuf, indexLength, -stripAmount);
+   } else {
+-    AdjustIndexContentOffsets(indexBuf, indexLength,
+-                              sizeof(sizeOfEntireMAR) +
+-                              sizeof(numSignatures) -
+-                              stripAmount);
++    AdjustIndexContentOffsets(
++        indexBuf, indexLength,
++        sizeof(sizeOfEntireMAR) + sizeof(numSignatures) - stripAmount);
+   }
+ 
+   if (fwrite(indexBuf, indexLength, 1, fpDest) != 1) {
+     fprintf(stderr, "ERROR: Could not write index\n");
+     goto failure;
+   }
+ 
+   rv = 0;
+@@ -480,20 +460,18 @@ failure:
+ /**
+  * Extracts a signature from a MAR file, base64 encodes it, and writes it out
+  *
+  * @param  src       The path of the source MAR file
+  * @param  sigIndex  The index of the signature to extract
+  * @param  dest      The path of file to write the signature to
+  * @return 0 on success
+  *         -1 on error
+-*/
+-int
+-extract_signature(const char *src, uint32_t sigIndex, const char * dest)
+-{
++ */
++int extract_signature(const char *src, uint32_t sigIndex, const char *dest) {
+   FILE *fpSrc = NULL, *fpDest = NULL;
+   uint32_t i;
+   uint32_t signatureCount;
+   uint32_t signatureLen;
+   uint8_t *extractedSignature = NULL;
+   char *base64Encoded = NULL;
+   int rv = -1;
+   if (!src || !dest) {
+@@ -594,31 +572,30 @@ failure:
+ }
+ 
+ /**
+  * Imports a base64 encoded signature into a MAR file
+  *
+  * @param  src           The path of the source MAR file
+  * @param  sigIndex      The index of the signature to import
+  * @param  base64SigFile A file which contains the signature to import
+- * @param  dest          The path of the destination MAR file with replaced signature
++ * @param  dest          The path of the destination MAR file with replaced
++ *         signature
+  * @return 0 on success
+  *         -1 on error
+-*/
+-int
+-import_signature(const char *src, uint32_t sigIndex,
+-                 const char *base64SigFile, const char *dest)
+-{
++ */
++int import_signature(const char *src, uint32_t sigIndex,
++                     const char *base64SigFile, const char *dest) {
+   int rv = -1;
+   FILE *fpSrc = NULL;
+   FILE *fpDest = NULL;
+   FILE *fpSigFile = NULL;
+   uint32_t i;
+-  uint32_t signatureCount, signatureLen, signatureAlgorithmID,
+-           numChunks, leftOver;
++  uint32_t signatureCount, signatureLen, signatureAlgorithmID, numChunks,
++      leftOver;
+   char buf[BLOCKSIZE];
+   uint64_t sizeOfSrcMAR, sizeOfBase64EncodedFile;
+   char *passedInSignatureB64 = NULL;
+   uint8_t *passedInSignatureRaw = NULL;
+   uint8_t *extractedMARSignature = NULL;
+   unsigned int passedInSignatureLenRaw;
+ 
+   if (!src || !dest) {
+@@ -633,17 +610,17 @@ import_signature(const char *src, uint32
+   }
+ 
+   fpDest = fopen(dest, "wb");
+   if (!fpDest) {
+     fprintf(stderr, "ERROR: could not open dest file: %s\n", dest);
+     goto failure;
+   }
+ 
+-  fpSigFile = fopen(base64SigFile , "rb");
++  fpSigFile = fopen(base64SigFile, "rb");
+   if (!fpSigFile) {
+     fprintf(stderr, "ERROR: could not open sig file: %s\n", base64SigFile);
+     goto failure;
+   }
+ 
+   /* Get the src file size */
+   if (fseeko(fpSrc, 0, SEEK_END)) {
+     fprintf(stderr, "ERROR: Could not seek to end of src file.\n");
+@@ -655,46 +632,47 @@ import_signature(const char *src, uint32
+     goto failure;
+   }
+ 
+   /* Get the sig file size */
+   if (fseeko(fpSigFile, 0, SEEK_END)) {
+     fprintf(stderr, "ERROR: Could not seek to end of sig file.\n");
+     goto failure;
+   }
+-  sizeOfBase64EncodedFile= ftello(fpSigFile);
++  sizeOfBase64EncodedFile = ftello(fpSigFile);
+   if (fseeko(fpSigFile, 0, SEEK_SET)) {
+     fprintf(stderr, "ERROR: Could not seek to start of sig file.\n");
+     goto failure;
+   }
+ 
+   /* Read in the base64 encoded signature to import */
+   passedInSignatureB64 = malloc(sizeOfBase64EncodedFile + 1);
+   passedInSignatureB64[sizeOfBase64EncodedFile] = '\0';
+   if (fread(passedInSignatureB64, sizeOfBase64EncodedFile, 1, fpSigFile) != 1) {
+     fprintf(stderr, "ERROR: Could read b64 sig file.\n");
+     goto failure;
+   }
+ 
+   /* Decode the base64 encoded data */
+-  passedInSignatureRaw = ATOB_AsciiToData(passedInSignatureB64, &passedInSignatureLenRaw);
++  passedInSignatureRaw =
++      ATOB_AsciiToData(passedInSignatureB64, &passedInSignatureLenRaw);
+   if (!passedInSignatureRaw) {
+     fprintf(stderr, "ERROR: could not obtain base64 decoded data\n");
+     goto failure;
+   }
+ 
+   /* Read everything up until the signature block offset and write it out */
+-  if (ReadAndWrite(fpSrc, fpDest, buf,
+-                   SIGNATURE_BLOCK_OFFSET, "signature block offset")) {
++  if (ReadAndWrite(fpSrc, fpDest, buf, SIGNATURE_BLOCK_OFFSET,
++                   "signature block offset")) {
+     goto failure;
+   }
+ 
+   /* Get the number of signatures */
+-  if (ReadAndWrite(fpSrc, fpDest, &signatureCount,
+-                   sizeof(signatureCount), "signature count")) {
++  if (ReadAndWrite(fpSrc, fpDest, &signatureCount, sizeof(signatureCount),
++                   "signature count")) {
+     goto failure;
+   }
+   signatureCount = ntohl(signatureCount);
+   if (signatureCount > MAX_SIGNATURES) {
+     fprintf(stderr, "ERROR: Signature count was out of range\n");
+     goto failure;
+   }
+ 
+@@ -703,25 +681,24 @@ import_signature(const char *src, uint32
+     goto failure;
+   }
+ 
+   /* Read and write the whole signature block, but if we reach the
+      signature offset, then we should replace it with the specified
+      base64 decoded signature */
+   for (i = 0; i < signatureCount; i++) {
+     /* Read/Write the signature algorithm ID */
+-    if (ReadAndWrite(fpSrc, fpDest,
+-                     &signatureAlgorithmID,
++    if (ReadAndWrite(fpSrc, fpDest, &signatureAlgorithmID,
+                      sizeof(signatureAlgorithmID), "sig algorithm ID")) {
+       goto failure;
+     }
+ 
+     /* Read/Write the signature length */
+-    if (ReadAndWrite(fpSrc, fpDest,
+-                     &signatureLen, sizeof(signatureLen), "sig length")) {
++    if (ReadAndWrite(fpSrc, fpDest, &signatureLen, sizeof(signatureLen),
++                     "sig length")) {
+       goto failure;
+     }
+     signatureLen = ntohl(signatureLen);
+ 
+     /* Get the signature */
+     if (extractedMARSignature) {
+       free(extractedMARSignature);
+     }
+@@ -733,24 +710,24 @@ import_signature(const char *src, uint32
+         goto failure;
+       }
+ 
+       if (fread(extractedMARSignature, signatureLen, 1, fpSrc) != 1) {
+         fprintf(stderr, "ERROR: Could not read signature\n");
+         goto failure;
+       }
+ 
+-      if (fwrite(passedInSignatureRaw, passedInSignatureLenRaw,
+-                 1, fpDest) != 1) {
++      if (fwrite(passedInSignatureRaw, passedInSignatureLenRaw, 1, fpDest) !=
++          1) {
+         fprintf(stderr, "ERROR: Could not write signature\n");
+         goto failure;
+       }
+     } else {
+-      if (ReadAndWrite(fpSrc, fpDest,
+-                       extractedMARSignature, signatureLen, "signature")) {
++      if (ReadAndWrite(fpSrc, fpDest, extractedMARSignature, signatureLen,
++                       "signature")) {
+         goto failure;
+       }
+     }
+   }
+ 
+   /* We replaced the signature so let's just skip past the rest o the
+      file. */
+   numChunks = (sizeOfSrcMAR - ftello(fpSrc)) / BLOCKSIZE;
+@@ -802,39 +779,34 @@ failure:
+   return rv;
+ }
+ 
+ /**
+  * Writes out a copy of the MAR at src but with embedded signatures.
+  * The passed in MAR file must not already be signed or an error will
+  * be returned.
+  *
+- * @param  NSSConfigDir  The NSS directory containing the private key for signing
++ * @param  NSSConfigDir  The NSS directory containing the private key for
++ *                       signing
+  * @param  certNames     The nicknames of the certificate to use for signing
+  * @param  certCount     The number of certificate names contained in certNames.
+  *                       One signature will be produced for each certificate.
+  * @param  src           The path of the source MAR file to sign
+  * @param  dest          The path of the MAR file to write out that is signed
+  * @return 0 on success
+  *         -1 on error
+-*/
+-int
+-mar_repackage_and_sign(const char *NSSConfigDir,
+-                       const char * const *certNames,
+-                       uint32_t certCount,
+-                       const char *src,
+-                       const char *dest)
+-{
+-  uint32_t offsetToIndex, dstOffsetToIndex, indexLength,
+-    numSignatures = 0, leftOver,
+-    signatureAlgorithmID, signatureSectionLength = 0;
++ */
++int mar_repackage_and_sign(const char *NSSConfigDir,
++                           const char *const *certNames, uint32_t certCount,
++                           const char *src, const char *dest) {
++  uint32_t offsetToIndex, dstOffsetToIndex, indexLength, leftOver,
++      signatureAlgorithmID, numSignatures = 0, signatureSectionLength = 0;
+   uint32_t signatureLengths[MAX_SIGNATURES];
+-  int64_t oldPos, sizeOfEntireMAR = 0, realSizeOfSrcMAR,
+-    signaturePlaceholderOffset, numBytesToCopy,
+-    numChunks, i;
++  int64_t oldPos, numChunks, i, realSizeOfSrcMAR, signaturePlaceholderOffset,
++      numBytesToCopy, sizeOfEntireMAR = 0;
+   FILE *fpSrc = NULL, *fpDest = NULL;
+   int rv = -1, hasSignatureBlock;
+   SGNContext *ctxs[MAX_SIGNATURES];
+   SECItem secItems[MAX_SIGNATURES];
+   char buf[BLOCKSIZE];
+   SECKEYPrivateKey *privKeys[MAX_SIGNATURES];
+   CERTCertificate *certs[MAX_SIGNATURES];
+   char *indexBuf = NULL;
+@@ -872,27 +844,26 @@ mar_repackage_and_sign(const char *NSSCo
+ 
+   /* Determine if the source MAR file has the new fields for signing or not */
+   if (get_mar_file_info(src, &hasSignatureBlock, NULL, NULL, NULL, NULL)) {
+     fprintf(stderr, "ERROR: could not determine if MAR is old or new.\n");
+     goto failure;
+   }
+ 
+   for (k = 0; k < certCount; k++) {
+-    if (NSSSignBegin(certNames[k], &ctxs[k], &privKeys[k],
+-                     &certs[k], &signatureLengths[k])) {
++    if (NSSSignBegin(certNames[k], &ctxs[k], &privKeys[k], &certs[k],
++                     &signatureLengths[k])) {
+       fprintf(stderr, "ERROR: NSSSignBegin failed\n");
+       goto failure;
+     }
+   }
+ 
+   /* MAR ID */
+-  if (ReadWriteAndUpdateSignatures(fpSrc, fpDest,
+-                                   buf, MAR_ID_SIZE,
+-                                   ctxs, certCount, "MAR ID")) {
++  if (ReadWriteAndUpdateSignatures(fpSrc, fpDest, buf, MAR_ID_SIZE, ctxs,
++                                   certCount, "MAR ID")) {
+     goto failure;
+   }
+ 
+   /* Offset to index */
+   if (fread(&offsetToIndex, sizeof(offsetToIndex), 1, fpSrc) != 1) {
+     fprintf(stderr, "ERROR: Could not read offset\n");
+     goto failure;
+   }
+@@ -907,18 +878,17 @@ mar_repackage_and_sign(const char *NSSCo
+   realSizeOfSrcMAR = ftello(fpSrc);
+   if (fseeko(fpSrc, oldPos, SEEK_SET)) {
+     fprintf(stderr, "ERROR: Could not seek back to current location.\n");
+     goto failure;
+   }
+ 
+   if (hasSignatureBlock) {
+     /* Get the MAR length and adjust its size */
+-    if (fread(&sizeOfEntireMAR,
+-              sizeof(sizeOfEntireMAR), 1, fpSrc) != 1) {
++    if (fread(&sizeOfEntireMAR, sizeof(sizeOfEntireMAR), 1, fpSrc) != 1) {
+       fprintf(stderr, "ERROR: Could read mar size\n");
+       goto failure;
+     }
+     sizeOfEntireMAR = NETWORK_TO_HOST64(sizeOfEntireMAR);
+     if (sizeOfEntireMAR != realSizeOfSrcMAR) {
+       fprintf(stderr, "ERROR: Source MAR is not of the right size\n");
+       goto failure;
+     }
+@@ -943,18 +913,17 @@ mar_repackage_and_sign(const char *NSSCo
+   if (((int64_t)offsetToIndex) > sizeOfEntireMAR) {
+     fprintf(stderr, "ERROR: Offset to index is larger than the file size.\n");
+     goto failure;
+   }
+ 
+   /* Calculate the total signature block length */
+   for (k = 0; k < certCount; k++) {
+     signatureSectionLength += sizeof(signatureAlgorithmID) +
+-                              sizeof(signatureLengths[k]) +
+-                              signatureLengths[k];
++                              sizeof(signatureLengths[k]) + signatureLengths[k];
+   }
+   dstOffsetToIndex = offsetToIndex;
+   if (!hasSignatureBlock) {
+     dstOffsetToIndex += sizeof(sizeOfEntireMAR) + sizeof(numSignatures);
+   }
+   dstOffsetToIndex += signatureSectionLength;
+ 
+   /* Write out the index offset */
+@@ -979,40 +948,39 @@ mar_repackage_and_sign(const char *NSSCo
+                                "size of MAR")) {
+     goto failure;
+   }
+   sizeOfEntireMAR = NETWORK_TO_HOST64(sizeOfEntireMAR);
+ 
+   /* Write out the number of signatures */
+   numSignatures = certCount;
+   numSignatures = htonl(numSignatures);
+-  if (WriteAndUpdateSignatures(fpDest, &numSignatures,
+-                               sizeof(numSignatures), ctxs, certCount,
+-                               "num signatures")) {
++  if (WriteAndUpdateSignatures(fpDest, &numSignatures, sizeof(numSignatures),
++                               ctxs, certCount, "num signatures")) {
+     goto failure;
+   }
+   numSignatures = ntohl(numSignatures);
+ 
+   signaturePlaceholderOffset = ftello(fpDest);
+ 
+   for (k = 0; k < certCount; k++) {
+     /* Write out the signature algorithm ID, Only an ID of 2 is supported */
+     signatureAlgorithmID = htonl(2);
+     if (WriteAndUpdateSignatures(fpDest, &signatureAlgorithmID,
+-                                 sizeof(signatureAlgorithmID),
+-                                 ctxs, certCount, "num signatures")) {
++                                 sizeof(signatureAlgorithmID), ctxs, certCount,
++                                 "num signatures")) {
+       goto failure;
+     }
+     signatureAlgorithmID = ntohl(signatureAlgorithmID);
+ 
+     /* Write out the signature length */
+     signatureLengths[k] = htonl(signatureLengths[k]);
+     if (WriteAndUpdateSignatures(fpDest, &signatureLengths[k],
+-                                 sizeof(signatureLengths[k]),
+-                                 ctxs, certCount, "signature length")) {
++                                 sizeof(signatureLengths[k]), ctxs, certCount,
++                                 "signature length")) {
+       goto failure;
+     }
+     signatureLengths[k] = ntohl(signatureLengths[k]);
+ 
+     /* Write out a placeholder for the signature, we'll come back to this later
+       *** THIS IS NOT SIGNED because it is a placeholder that will be replaced
+           below, plus it is going to be the signature itself. *** */
+     memset(buf, 0, sizeof(buf));
+@@ -1030,27 +998,25 @@ mar_repackage_and_sign(const char *NSSCo
+     goto failure;
+   }
+   numBytesToCopy = ((int64_t)offsetToIndex) - ftello(fpSrc);
+   numChunks = numBytesToCopy / BLOCKSIZE;
+   leftOver = numBytesToCopy % BLOCKSIZE;
+ 
+   /* Read each file and write it to the MAR file */
+   for (i = 0; i < numChunks; ++i) {
+-    if (ReadWriteAndUpdateSignatures(fpSrc, fpDest, buf,
+-                                     BLOCKSIZE, ctxs, certCount,
+-                                     "content block")) {
++    if (ReadWriteAndUpdateSignatures(fpSrc, fpDest, buf, BLOCKSIZE, ctxs,
++                                     certCount, "content block")) {
+       goto failure;
+     }
+   }
+ 
+   /* Write out the left over */
+-  if (ReadWriteAndUpdateSignatures(fpSrc, fpDest, buf,
+-                                   leftOver, ctxs, certCount,
+-                                   "left over content block")) {
++  if (ReadWriteAndUpdateSignatures(fpSrc, fpDest, buf, leftOver, ctxs,
++                                   certCount, "left over content block")) {
+     goto failure;
+   }
+ 
+   /* Length of the index */
+   if (ReadWriteAndUpdateSignatures(fpSrc, fpDest, &indexLength,
+                                    sizeof(indexLength), ctxs, certCount,
+                                    "index length")) {
+     goto failure;
+@@ -1064,23 +1030,22 @@ mar_repackage_and_sign(const char *NSSCo
+     goto failure;
+   }
+ 
+   /* Adjust each entry in the index */
+   if (hasSignatureBlock) {
+     AdjustIndexContentOffsets(indexBuf, indexLength, signatureSectionLength);
+   } else {
+     AdjustIndexContentOffsets(indexBuf, indexLength,
+-                              sizeof(sizeOfEntireMAR) +
+-                              sizeof(numSignatures) +
+-                              signatureSectionLength);
++                              sizeof(sizeOfEntireMAR) + sizeof(numSignatures) +
++                                  signatureSectionLength);
+   }
+ 
+-  if (WriteAndUpdateSignatures(fpDest, indexBuf,
+-                               indexLength, ctxs, certCount, "index")) {
++  if (WriteAndUpdateSignatures(fpDest, indexBuf, indexLength, ctxs, certCount,
++                               "index")) {
+     goto failure;
+   }
+ 
+   /* Ensure that we don't sign a file that is too large to be accepted by
+      the verification function. */
+   if (ftello(fpDest) > MAX_SIZE_OF_MAR_FILE) {
+     goto failure;
+   }
+@@ -1100,24 +1065,25 @@ mar_repackage_and_sign(const char *NSSCo
+   /* Get back to the location of the signature placeholder */
+   if (fseeko(fpDest, signaturePlaceholderOffset, SEEK_SET)) {
+     fprintf(stderr, "ERROR: Could not seek to signature offset\n");
+     goto failure;
+   }
+ 
+   for (k = 0; k < certCount; k++) {
+     /* Skip to the position of the next signature */
+-    if (fseeko(fpDest, sizeof(signatureAlgorithmID) +
+-               sizeof(signatureLengths[k]), SEEK_CUR)) {
++    if (fseeko(fpDest,
++               sizeof(signatureAlgorithmID) + sizeof(signatureLengths[k]),
++               SEEK_CUR)) {
+       fprintf(stderr, "ERROR: Could not seek to signature offset\n");
+       goto failure;
+     }
+ 
+     /* Write out the calculated signature.
+-      *** THIS IS NOT SIGNED because it is the signature itself. *** */
++     *** THIS IS NOT SIGNED because it is the signature itself. *** */
+     if (fwrite(secItems[k].data, secItems[k].len, 1, fpDest) != 1) {
+       fprintf(stderr, "ERROR: Could not write signature\n");
+       goto failure;
+     }
+   }
+ 
+   rv = 0;
+ failure:
+diff --git a/modules/libmar/src/mar.h b/modules/libmar/src/mar.h
+--- a/modules/libmar/src/mar.h
++++ b/modules/libmar/src/mar.h
+@@ -14,59 +14,55 @@
+ extern "C" {
+ #endif
+ 
+ /* We have a MAX_SIGNATURES limit so that an invalid MAR will never
+  * waste too much of either updater's or signmar's time.
+  * It is also used at various places internally and will affect memory usage.
+  * If you want to increase this value above 9 then you need to adjust parsing
+  * code in tool/mar.c.
+-*/
++ */
+ #define MAX_SIGNATURES 8
+ #ifdef __cplusplus
+ static_assert(MAX_SIGNATURES <= 9, "too many signatures");
+ #else
+ MOZ_STATIC_ASSERT(MAX_SIGNATURES <= 9, "too many signatures");
+ #endif
+ 
+-struct ProductInformationBlock
+-{
++struct ProductInformationBlock {
+   const char* MARChannelID;
+   const char* productVersion;
+ };
+ 
+ /**
+  * The MAR item data structure.
+  */
+-typedef struct MarItem_
+-{
+-  struct MarItem_* next;  /* private field */
+-  uint32_t offset;        /* offset into archive */
+-  uint32_t length;        /* length of data in bytes */
+-  uint32_t flags;         /* contains file mode bits */
+-  char name[1];           /* file path */
++typedef struct MarItem_ {
++  struct MarItem_* next; /* private field */
++  uint32_t offset;       /* offset into archive */
++  uint32_t length;       /* length of data in bytes */
++  uint32_t flags;        /* contains file mode bits */
++  char name[1];          /* file path */
+ } MarItem;
+ 
+ /**
+  * File offset and length for tracking access of byte indexes
+  */
+-typedef struct SeenIndex_
+-{
++typedef struct SeenIndex_ {
+   struct SeenIndex_* next; /* private field */
+   uint32_t offset;         /* offset into archive */
+   uint32_t length;         /* length of the data in bytes */
+ } SeenIndex;
+ 
+ #define TABLESIZE 256
+ 
+ /**
+  * Mozilla ARchive (MAR) file data structure
+  */
+-struct MarFile_
+-{
++struct MarFile_ {
+   FILE* fp;                       /* file pointer to the archive */
+   MarItem* item_table[TABLESIZE]; /* hash table of files in the archive */
+   SeenIndex* index_list;          /* file indexes processed */
+   int item_table_is_valid;        /* header and index validation flag */
+ };
+ 
+ typedef struct MarFile_ MarFile;
+ 
+@@ -80,113 +76,99 @@ typedef struct MarFile_ MarFile;
+ typedef int (*MarItemCallback)(MarFile* mar, const MarItem* item, void* data);
+ 
+ /**
+  * Open a MAR file for reading.
+  * @param path      Specifies the path to the MAR file to open.  This path must
+  *                  be compatible with fopen.
+  * @return          NULL if an error occurs.
+  */
+-MarFile*
+-mar_open(const char* path);
++MarFile* mar_open(const char* path);
+ 
+ #ifdef XP_WIN
+-MarFile *mar_wopen(const wchar_t *path);
++MarFile* mar_wopen(const wchar_t* path);
+ #endif
+ 
+ /**
+  * Close a MAR file that was opened using mar_open.
+  * @param mar       The MarFile object to close.
+  */
+-void
+-mar_close(MarFile* mar);
++void mar_close(MarFile* mar);
+ 
+ /**
+  * Find an item in the MAR file by name.
+  * @param mar       The MarFile object to query.
+  * @param item      The name of the item to query.
+  * @return          A const reference to a MAR item or NULL if not found.
+  */
+-const MarItem*
+-mar_find_item(MarFile* mar, const char* item);
++const MarItem* mar_find_item(MarFile* mar, const char* item);
+ 
+ /**
+  * Enumerate all MAR items via callback function.
+  * @param mar       The MAR file to enumerate.
+  * @param callback  The function to call for each MAR item.
+  * @param data      A caller specified value that is passed along to the
+  *                  callback function.
+  * @return          0 if the enumeration ran to completion.  Otherwise, any
+  *                  non-zero return value from the callback is returned.
+  */
+-int
+-mar_enum_items(MarFile* mar, MarItemCallback callback, void* data);
++int mar_enum_items(MarFile* mar, MarItemCallback callback, void* data);
+ 
+ /**
+  * Read from MAR item at given offset up to bufsize bytes.
+  * @param mar       The MAR file to read.
+  * @param item      The MAR item to read.
+  * @param offset    The byte offset relative to the start of the item.
+  * @param buf       A pointer to a buffer to copy the data into.
+  * @param bufsize   The length of the buffer to copy the data into.
+  * @return          The number of bytes written or a negative value if an
+  *                  error occurs.
+  */
+-int
+-mar_read(MarFile* mar,
+-         const MarItem* item,
+-         int offset,
+-         uint8_t* buf,
+-         int bufsize);
++int mar_read(MarFile* mar, const MarItem* item, int offset, uint8_t* buf,
++             int bufsize);
+ 
+ /**
+  * Create a MAR file from a set of files.
+  * @param dest      The path to the file to create.  This path must be
+  *                  compatible with fopen.
+  * @param numfiles  The number of files to store in the archive.
+  * @param files     The list of null-terminated file paths.  Each file
+  *                  path must be compatible with fopen.
+  * @param infoBlock The information to store in the product information block.
+  * @return          A non-zero value if an error occurs.
+  */
+-int
+-mar_create(const char* dest,
+-           int numfiles,
+-           char** files,
+-           struct ProductInformationBlock* infoBlock);
++int mar_create(const char* dest, int numfiles, char** files,
++               struct ProductInformationBlock* infoBlock);
+ 
+ /**
+  * Extract a MAR file to the current working directory.
+  * @param path      The path to the MAR file to extract.  This path must be
+  *                  compatible with fopen.
+  * @return          A non-zero value if an error occurs.
+  */
+-int
+-mar_extract(const char* path);
++int mar_extract(const char* path);
+ 
+-#define MAR_MAX_CERT_SIZE (16*1024) // Way larger than necessary
++#define MAR_MAX_CERT_SIZE (16 * 1024)  // Way larger than necessary
+ 
+ /* Read the entire file (not a MAR file) into a newly-allocated buffer.
+  * This function does not write to stderr. Instead, the caller should
+  * write whatever error messages it sees fit. The caller must free the returned
+  * buffer using free().
+  *
+  * @param filePath The path to the file that should be read.
+  * @param maxSize  The maximum valid file size.
+  * @param data     On success, *data will point to a newly-allocated buffer
+  *                 with the file's contents in it.
+  * @param size     On success, *size will be the size of the created buffer.
+  *
+  * @return 0 on success, -1 on error
+  */
+-int
+-mar_read_entire_file(const char* filePath,
+-                     uint32_t maxSize,
+-                     /*out*/ const uint8_t** data,
+-                     /*out*/ uint32_t* size);
++int mar_read_entire_file(const char* filePath, uint32_t maxSize,
++                         /*out*/ const uint8_t** data,
++                         /*out*/ uint32_t* size);
+ 
+ /**
+  * Verifies a MAR file by verifying each signature with the corresponding
+  * certificate. That is, the first signature will be verified using the first
+  * certificate given, the second signature will be verified using the second
+  * certificate given, etc. The signature count must exactly match the number of
+  * certificates given, and all signature verifications must succeed.
+  * We do not check that the certificate was issued by any trusted authority.
+@@ -198,31 +180,27 @@ mar_read_entire_file(const char* filePat
+  *                       file data.
+  * @param certDataSizes  Pointer to the first element in an array for size of
+  *                       the cert data.
+  * @param certCount      The number of elements in certData and certDataSizes
+  * @return 0 on success
+  *         a negative number if there was an error
+  *         a positive number if the signature does not verify
+  */
+-int
+-mar_verify_signatures(MarFile* mar,
+-                      const uint8_t* const* certData,
+-                      const uint32_t* certDataSizes,
+-                      uint32_t certCount);
++int mar_verify_signatures(MarFile* mar, const uint8_t* const* certData,
++                          const uint32_t* certDataSizes, uint32_t certCount);
+ 
+ /**
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+-*/
+-int
+-mar_read_product_info_block(MarFile* mar,
+-                            struct ProductInformationBlock* infoBlock);
++ */
++int mar_read_product_info_block(MarFile* mar,
++                                struct ProductInformationBlock* infoBlock);
+ 
+ #ifdef __cplusplus
+ }
+ #endif
+ 
+-#endif  /* MAR_H__ */
++#endif /* MAR_H__ */
+diff --git a/modules/libmar/src/mar_cmdline.h b/modules/libmar/src/mar_cmdline.h
+--- a/modules/libmar/src/mar_cmdline.h
++++ b/modules/libmar/src/mar_cmdline.h
+@@ -26,85 +26,77 @@ struct ProductInformationBlock;
+  * @param offsetAdditionalBlocks Optional out parameter for the offset to the
+  *                               first additional block. Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @param numAdditionalBlocks    Optional out parameter for the number of
+  *                               additional blocks.  Value is only valid if
+  *                               has_additional_blocks is not equal to 0.
+  * @return 0 on success and non-zero on failure.
+  */
+-int get_mar_file_info(const char *path,
+-                      int *hasSignatureBlock,
+-                      uint32_t *numSignatures,
+-                      int *hasAdditionalBlocks,
++int get_mar_file_info(const char *path, int *hasSignatureBlock,
++                      uint32_t *numSignatures, int *hasAdditionalBlocks,
+                       uint32_t *offsetAdditionalBlocks,
+                       uint32_t *numAdditionalBlocks);
+ 
+ /**
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+-*/
+-int
+-read_product_info_block(char *path,
+-                        struct ProductInformationBlock *infoBlock);
++ */
++int read_product_info_block(char *path,
++                            struct ProductInformationBlock *infoBlock);
+ 
+ /**
+  * Refreshes the product information block with the new information.
+  * The input MAR must not be signed or the function call will fail.
+  *
+  * @param path             The path to the MAR file whose product info block
+  *                         should be refreshed.
+  * @param infoBlock        Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+-*/
+-int
+-refresh_product_info_block(const char *path,
+-                           struct ProductInformationBlock *infoBlock);
++ */
++int refresh_product_info_block(const char *path,
++                               struct ProductInformationBlock *infoBlock);
+ 
+ /**
+  * Writes out a copy of the MAR at src but with the signature block stripped.
+  *
+  * @param  src  The path of the source MAR file
+  * @param  dest The path of the MAR file to write out that
+                 has no signature block
+  * @return 0 on success
+  *         -1 on error
+ */
+-int
+-strip_signature_block(const char *src, const char * dest);
++int strip_signature_block(const char *src, const char *dest);
+ 
+ /**
+  * Extracts a signature from a MAR file, base64 encodes it, and writes it out
+  *
+  * @param  src       The path of the source MAR file
+  * @param  sigIndex  The index of the signature to extract
+  * @param  dest      The path of file to write the signature to
+  * @return 0 on success
+  *         -1 on error
+-*/
+-int
+-extract_signature(const char *src, uint32_t sigIndex, const char * dest);
++ */
++int extract_signature(const char *src, uint32_t sigIndex, const char *dest);
+ 
+ /**
+  * Imports a base64 encoded signature into a MAR file
+  *
+  * @param  src           The path of the source MAR file
+  * @param  sigIndex      The index of the signature to import
+  * @param  base64SigFile A file which contains the signature to import
+- * @param  dest          The path of the destination MAR file with replaced signature
++ * @param  dest          The path of the destination MAR file with replaced
++ *                       signature
+  * @return 0 on success
+  *         -1 on error
+-*/
+-int
+-import_signature(const char *src,
+-                 uint32_t sigIndex,
+-                 const char * base64SigFile,
+-                 const char *dest);
++ */
++int import_signature(const char *src, uint32_t sigIndex,
++                     const char *base64SigFile, const char *dest);
+ 
+ #ifdef __cplusplus
+ }
+ #endif
+ 
+-#endif  /* MAR_CMDLINE_H__ */
++#endif /* MAR_CMDLINE_H__ */
+diff --git a/modules/libmar/src/mar_create.c b/modules/libmar/src/mar_create.c
+--- a/modules/libmar/src/mar_create.c
++++ b/modules/libmar/src/mar_create.c
+@@ -40,22 +40,23 @@ static int mar_push(struct MarItemStack 
+ 
+   namelen = strlen(name);
+   size = MAR_ITEM_SIZE(namelen);
+ 
+   if (stack->size_allocated - stack->size_used < size) {
+     /* increase size of stack */
+     size_t size_needed = ROUND_UP(stack->size_used + size, BLOCKSIZE);
+     stack->head = realloc(stack->head, size_needed);
+-    if (!stack->head)
++    if (!stack->head) {
+       return -1;
++    }
+     stack->size_allocated = size_needed;
+   }
+ 
+-  data = (((char *) stack->head) + stack->size_used);
++  data = (((char *)stack->head) + stack->size_used);
+ 
+   n_offset = htonl(stack->last_offset);
+   n_length = htonl(length);
+   n_flags = htonl(flags);
+ 
+   memcpy(data, &n_offset, sizeof(n_offset));
+   data += sizeof(n_offset);
+ 
+@@ -99,26 +100,23 @@ static int mar_concat_file(FILE *fp, con
+ /**
+  * Writes out the product information block to the specified file.
+  *
+  * @param fp           The opened MAR file being created.
+  * @param stack        A pointer to the MAR item stack being used to create
+  *                     the MAR
+  * @param infoBlock    The product info block to store in the file.
+  * @return 0 on success.
+-*/
+-static int
+-mar_concat_product_info_block(FILE *fp,
+-                              struct MarItemStack *stack,
+-                              struct ProductInformationBlock *infoBlock)
+-{
++ */
++static int mar_concat_product_info_block(
++    FILE *fp, struct MarItemStack *stack,
++    struct ProductInformationBlock *infoBlock) {
+   char buf[PIB_MAX_MAR_CHANNEL_ID_SIZE + PIB_MAX_PRODUCT_VERSION_SIZE];
+   uint32_t additionalBlockID = 1, infoBlockSize, unused;
+-  if (!fp || !infoBlock ||
+-      !infoBlock->MARChannelID ||
++  if (!fp || !infoBlock || !infoBlock->MARChannelID ||
+       !infoBlock->productVersion) {
+     return -1;
+   }
+ 
+   /* The MAR channel name must be < 64 bytes per the spec */
+   if (strlen(infoBlock->MARChannelID) > PIB_MAX_MAR_CHANNEL_ID_SIZE) {
+     return -1;
+   }
+@@ -127,55 +125,51 @@ mar_concat_product_info_block(FILE *fp,
+   if (strlen(infoBlock->productVersion) > PIB_MAX_PRODUCT_VERSION_SIZE) {
+     return -1;
+   }
+ 
+   /* Although we don't need the product information block size to include the
+      maximum MAR channel name and product version, we allocate the maximum
+      amount to make it easier to modify the MAR file for repurposing MAR files
+      to different MAR channels. + 2 is for the NULL terminators. */
+-  infoBlockSize = sizeof(infoBlockSize) +
+-                  sizeof(additionalBlockID) +
+-                  PIB_MAX_MAR_CHANNEL_ID_SIZE +
+-                  PIB_MAX_PRODUCT_VERSION_SIZE + 2;
++  infoBlockSize = sizeof(infoBlockSize) + sizeof(additionalBlockID) +
++                  PIB_MAX_MAR_CHANNEL_ID_SIZE + PIB_MAX_PRODUCT_VERSION_SIZE +
++                  2;
+   if (stack) {
+     stack->last_offset += infoBlockSize;
+   }
+ 
+   /* Write out the product info block size */
+   infoBlockSize = htonl(infoBlockSize);
+-  if (fwrite(&infoBlockSize,
+-      sizeof(infoBlockSize), 1, fp) != 1) {
++  if (fwrite(&infoBlockSize, sizeof(infoBlockSize), 1, fp) != 1) {
+     return -1;
+   }
+   infoBlockSize = ntohl(infoBlockSize);
+ 
+   /* Write out the product info block ID */
+   additionalBlockID = htonl(additionalBlockID);
+-  if (fwrite(&additionalBlockID,
+-      sizeof(additionalBlockID), 1, fp) != 1) {
++  if (fwrite(&additionalBlockID, sizeof(additionalBlockID), 1, fp) != 1) {
+     return -1;
+   }
+   additionalBlockID = ntohl(additionalBlockID);
+ 
+   /* Write out the channel name and NULL terminator */
+-  if (fwrite(infoBlock->MARChannelID,
+-      strlen(infoBlock->MARChannelID) + 1, 1, fp) != 1) {
++  if (fwrite(infoBlock->MARChannelID, strlen(infoBlock->MARChannelID) + 1, 1,
++             fp) != 1) {
+     return -1;
+   }
+ 
+   /* Write out the product version string and NULL terminator */
+-  if (fwrite(infoBlock->productVersion,
+-      strlen(infoBlock->productVersion) + 1, 1, fp) != 1) {
++  if (fwrite(infoBlock->productVersion, strlen(infoBlock->productVersion) + 1,
++             1, fp) != 1) {
+     return -1;
+   }
+ 
+   /* Write out the rest of the block that is unused */
+-  unused = infoBlockSize - (sizeof(infoBlockSize) +
+-                            sizeof(additionalBlockID) +
++  unused = infoBlockSize - (sizeof(infoBlockSize) + sizeof(additionalBlockID) +
+                             strlen(infoBlock->MARChannelID) +
+                             strlen(infoBlock->productVersion) + 2);
+   memset(buf, 0, sizeof(buf));
+   if (fwrite(buf, unused, 1, fp) != 1) {
+     return -1;
+   }
+   return 0;
+ }
+@@ -183,33 +177,28 @@ mar_concat_product_info_block(FILE *fp,
+ /**
+  * Refreshes the product information block with the new information.
+  * The input MAR must not be signed or the function call will fail.
+  *
+  * @param path             The path to the MAR file whose product info block
+  *                         should be refreshed.
+  * @param infoBlock        Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+-*/
+-int
+-refresh_product_info_block(const char *path,
+-                           struct ProductInformationBlock *infoBlock)
+-{
+-  FILE *fp ;
++ */
++int refresh_product_info_block(const char *path,
++                               struct ProductInformationBlock *infoBlock) {
++  FILE *fp;
+   int rv;
+   uint32_t numSignatures, additionalBlockSize, additionalBlockID,
+-    offsetAdditionalBlocks, numAdditionalBlocks, i;
++      offsetAdditionalBlocks, numAdditionalBlocks, i;
+   int additionalBlocks, hasSignatureBlock;
+   int64_t oldPos;
+ 
+-  rv = get_mar_file_info(path,
+-                         &hasSignatureBlock,
+-                         &numSignatures,
+-                         &additionalBlocks,
+-                         &offsetAdditionalBlocks,
++  rv = get_mar_file_info(path, &hasSignatureBlock, &numSignatures,
++                         &additionalBlocks, &offsetAdditionalBlocks,
+                          &numAdditionalBlocks);
+   if (rv) {
+     fprintf(stderr, "ERROR: Could not obtain MAR information.\n");
+     return -1;
+   }
+ 
+   if (hasSignatureBlock && numSignatures) {
+     fprintf(stderr, "ERROR: Cannot refresh a signed MAR\n");
+@@ -228,28 +217,24 @@ refresh_product_info_block(const char *p
+     return -1;
+   }
+ 
+   for (i = 0; i < numAdditionalBlocks; ++i) {
+     /* Get the position of the start of this block */
+     oldPos = ftello(fp);
+ 
+     /* Read the additional block size */
+-    if (fread(&additionalBlockSize,
+-              sizeof(additionalBlockSize),
+-              1, fp) != 1) {
++    if (fread(&additionalBlockSize, sizeof(additionalBlockSize), 1, fp) != 1) {
+       fclose(fp);
+       return -1;
+     }
+     additionalBlockSize = ntohl(additionalBlockSize);
+ 
+     /* Read the additional block ID */
+-    if (fread(&additionalBlockID,
+-              sizeof(additionalBlockID),
+-              1, fp) != 1) {
++    if (fread(&additionalBlockID, sizeof(additionalBlockID), 1, fp) != 1) {
+       fclose(fp);
+       return -1;
+     }
+     additionalBlockID = ntohl(additionalBlockID);
+ 
+     if (PRODUCT_INFO_BLOCK_ID == additionalBlockID) {
+       if (fseeko(fp, oldPos, SEEK_SET)) {
+         fprintf(stderr, "Could not seek back to Product Information Block\n");
+@@ -286,114 +271,121 @@ refresh_product_info_block(const char *p
+  * @param dest      The path to the file to create.  This path must be
+  *                  compatible with fopen.
+  * @param numfiles  The number of files to store in the archive.
+  * @param files     The list of null-terminated file paths.  Each file
+  *                  path must be compatible with fopen.
+  * @param infoBlock The information to store in the product information block.
+  * @return A non-zero value if an error occurs.
+  */
+-int mar_create(const char *dest, int
+-               num_files, char **files,
++int mar_create(const char *dest, int num_files, char **files,
+                struct ProductInformationBlock *infoBlock) {
+   struct MarItemStack stack;
+-  uint32_t offset_to_index = 0, size_of_index,
+-    numSignatures, numAdditionalSections;
++  uint32_t offset_to_index = 0, size_of_index, numSignatures,
++           numAdditionalSections;
+   uint64_t sizeOfEntireMAR = 0;
+   struct stat st;
+   FILE *fp;
+   int i, rv = -1;
+ 
+   memset(&stack, 0, sizeof(stack));
+ 
+   fp = fopen(dest, "wb");
+   if (!fp) {
+     fprintf(stderr, "ERROR: could not create target file: %s\n", dest);
+     return -1;
+   }
+ 
+-  if (fwrite(MAR_ID, MAR_ID_SIZE, 1, fp) != 1)
++  if (fwrite(MAR_ID, MAR_ID_SIZE, 1, fp) != 1) {
+     goto failure;
+-  if (fwrite(&offset_to_index, sizeof(uint32_t), 1, fp) != 1)
++  }
++  if (fwrite(&offset_to_index, sizeof(uint32_t), 1, fp) != 1) {
+     goto failure;
++  }
+ 
+-  stack.last_offset = MAR_ID_SIZE +
+-                      sizeof(offset_to_index) +
+-                      sizeof(numSignatures) +
+-                      sizeof(numAdditionalSections) +
++  stack.last_offset = MAR_ID_SIZE + sizeof(offset_to_index) +
++                      sizeof(numSignatures) + sizeof(numAdditionalSections) +
+                       sizeof(sizeOfEntireMAR);
+ 
+   /* We will circle back on this at the end of the MAR creation to fill it */
+   if (fwrite(&sizeOfEntireMAR, sizeof(sizeOfEntireMAR), 1, fp) != 1) {
+     goto failure;
+   }
+ 
+   /* Write out the number of signatures, for now only at most 1 is supported */
+   numSignatures = 0;
+   if (fwrite(&numSignatures, sizeof(numSignatures), 1, fp) != 1) {
+     goto failure;
+   }
+ 
+   /* Write out the number of additional sections, for now just 1
+      for the product info block */
+   numAdditionalSections = htonl(1);
+-  if (fwrite(&numAdditionalSections,
+-             sizeof(numAdditionalSections), 1, fp) != 1) {
++  if (fwrite(&numAdditionalSections, sizeof(numAdditionalSections), 1, fp) !=
++      1) {
+     goto failure;
+   }
+   numAdditionalSections = ntohl(numAdditionalSections);
+ 
+   if (mar_concat_product_info_block(fp, &stack, infoBlock)) {
+     goto failure;
+   }
+ 
+   for (i = 0; i < num_files; ++i) {
+     if (stat(files[i], &st)) {
+       fprintf(stderr, "ERROR: file not found: %s\n", files[i]);
+       goto failure;
+     }
+ 
+-    if (mar_push(&stack, st.st_size, st.st_mode & 0777, files[i]))
++    if (mar_push(&stack, st.st_size, st.st_mode & 0777, files[i])) {
+       goto failure;
++    }
+ 
+     /* concatenate input file to archive */
+-    if (mar_concat_file(fp, files[i]))
++    if (mar_concat_file(fp, files[i])) {
+       goto failure;
++    }
+   }
+ 
+   /* write out the index (prefixed with length of index) */
+   size_of_index = htonl(stack.size_used);
+-  if (fwrite(&size_of_index, sizeof(size_of_index), 1, fp) != 1)
++  if (fwrite(&size_of_index, sizeof(size_of_index), 1, fp) != 1) {
+     goto failure;
+-  if (fwrite(stack.head, stack.size_used, 1, fp) != 1)
++  }
++  if (fwrite(stack.head, stack.size_used, 1, fp) != 1) {
+     goto failure;
++  }
+ 
+   /* To protect against invalid MAR files, we assumes that the MAR file
+      size is less than or equal to MAX_SIZE_OF_MAR_FILE. */
+   if (ftell(fp) > MAX_SIZE_OF_MAR_FILE) {
+     goto failure;
+   }
+ 
+   /* write out offset to index file in network byte order */
+   offset_to_index = htonl(stack.last_offset);
+-  if (fseek(fp, MAR_ID_SIZE, SEEK_SET))
++  if (fseek(fp, MAR_ID_SIZE, SEEK_SET)) {
+     goto failure;
+-  if (fwrite(&offset_to_index, sizeof(offset_to_index), 1, fp) != 1)
++  }
++  if (fwrite(&offset_to_index, sizeof(offset_to_index), 1, fp) != 1) {
+     goto failure;
++  }
+   offset_to_index = ntohl(stack.last_offset);
+ 
+-  sizeOfEntireMAR = ((uint64_t)stack.last_offset) +
+-                    stack.size_used +
+-                    sizeof(size_of_index);
++  sizeOfEntireMAR =
++      ((uint64_t)stack.last_offset) + stack.size_used + sizeof(size_of_index);
+   sizeOfEntireMAR = HOST_TO_NETWORK64(sizeOfEntireMAR);
+-  if (fwrite(&sizeOfEntireMAR, sizeof(sizeOfEntireMAR), 1, fp) != 1)
++  if (fwrite(&sizeOfEntireMAR, sizeof(sizeOfEntireMAR), 1, fp) != 1) {
+     goto failure;
++  }
+   sizeOfEntireMAR = NETWORK_TO_HOST64(sizeOfEntireMAR);
+ 
+   rv = 0;
+ failure:
+-  if (stack.head)
++  if (stack.head) {
+     free(stack.head);
++  }
+   fclose(fp);
+-  if (rv)
++  if (rv) {
+     remove(dest);
++  }
+   return rv;
+ }
+diff --git a/modules/libmar/src/mar_extract.c b/modules/libmar/src/mar_extract.c
+--- a/modules/libmar/src/mar_extract.c
++++ b/modules/libmar/src/mar_extract.c
+@@ -10,24 +10,23 @@
+ #include <string.h>
+ #include <stdlib.h>
+ #include "mar_private.h"
+ #include "mar.h"
+ 
+ #ifdef XP_WIN
+ #include <io.h>
+ #include <direct.h>
++#define fdopen _fdopen
+ #endif
+ 
+ /* Ensure that the directory containing this file exists */
+-static int mar_ensure_parent_dir(const char *path)
+-{
++static int mar_ensure_parent_dir(const char *path) {
+   char *slash = strrchr(path, '/');
+-  if (slash)
+-  {
++  if (slash) {
+     *slash = '\0';
+     mar_ensure_parent_dir(path);
+ #ifdef XP_WIN
+     _mkdir(path);
+ #else
+     mkdir(path, 0755);
+ #endif
+     *slash = '/';
+@@ -35,49 +34,54 @@ static int mar_ensure_parent_dir(const c
+   return 0;
+ }
+ 
+ static int mar_test_callback(MarFile *mar, const MarItem *item, void *unused) {
+   FILE *fp;
+   uint8_t buf[BLOCKSIZE];
+   int fd, len, offset = 0;
+ 
+-  if (mar_ensure_parent_dir(item->name))
++  if (mar_ensure_parent_dir(item->name)) {
+     return -1;
++  }
+ 
+ #ifdef XP_WIN
+-  fd = _open(item->name, _O_BINARY|_O_CREAT|_O_TRUNC|_O_WRONLY, item->flags);
++  fd = _open(item->name, _O_BINARY | _O_CREAT | _O_TRUNC | _O_WRONLY,
++             item->flags);
+ #else
+   fd = creat(item->name, item->flags);
+ #endif
+   if (fd == -1) {
+     fprintf(stderr, "ERROR: could not create file in mar_test_callback()\n");
+     perror(item->name);
+     return -1;
+   }
+ 
+   fp = fdopen(fd, "wb");
+-  if (!fp)
++  if (!fp) {
+     return -1;
++  }
+ 
+   while ((len = mar_read(mar, item, offset, buf, sizeof(buf))) > 0) {
+-    if (fwrite(buf, len, 1, fp) != 1)
++    if (fwrite(buf, len, 1, fp) != 1) {
+       break;
++    }
+     offset += len;
+   }
+ 
+   fclose(fp);
+   return len == 0 ? 0 : -1;
+ }
+ 
+ int mar_extract(const char *path) {
+   MarFile *mar;
+   int rv;
+ 
+   mar = mar_open(path);
+-  if (!mar)
++  if (!mar) {
+     return -1;
++  }
+ 
+   rv = mar_enum_items(mar, mar_test_callback, NULL);
+ 
+   mar_close(mar);
+   return rv;
+ }
+diff --git a/modules/libmar/src/mar_private.h b/modules/libmar/src/mar_private.h
+--- a/modules/libmar/src/mar_private.h
++++ b/modules/libmar/src/mar_private.h
+@@ -27,29 +27,29 @@
+ 
+ /* Existing code makes assumptions that the file size is
+    smaller than LONG_MAX. */
+ MOZ_STATIC_ASSERT(MAX_SIZE_OF_MAR_FILE < ((int64_t)LONG_MAX),
+                   "max mar file size is too big");
+ 
+ /* We store at most the size up to the signature block + 4
+    bytes per BLOCKSIZE bytes */
+-MOZ_STATIC_ASSERT(sizeof(BLOCKSIZE) < \
+-                  (SIGNATURE_BLOCK_OFFSET + sizeof(uint32_t)),
++MOZ_STATIC_ASSERT(sizeof(BLOCKSIZE) <
++                      (SIGNATURE_BLOCK_OFFSET + sizeof(uint32_t)),
+                   "BLOCKSIZE is too big");
+ 
+ /* The maximum size of any signature supported by current and future
+    implementations of the signmar program. */
+ #define MAX_SIGNATURE_LENGTH 2048
+ 
+ /* Each additional block has a unique ID.
+    The product information block has an ID of 1. */
+ #define PRODUCT_INFO_BLOCK_ID 1
+ 
+-#define MAR_ITEM_SIZE(namelen) (3*sizeof(uint32_t) + (namelen) + 1)
++#define MAR_ITEM_SIZE(namelen) (3 * sizeof(uint32_t) + (namelen) + 1)
+ 
+ /* Product Information Block (PIB) constants */
+ #define PIB_MAX_MAR_CHANNEL_ID_SIZE 63
+ #define PIB_MAX_PRODUCT_VERSION_SIZE 31
+ 
+ /* The mar program is compiled as a host bin so we don't have access to NSPR at
+    runtime.  For that reason we use ntohl, htonl, and define HOST_TO_NETWORK64
+    instead of the NSPR equivalents. */
+@@ -60,20 +60,18 @@ MOZ_STATIC_ASSERT(sizeof(BLOCKSIZE) < \
+ #else
+ #define _FILE_OFFSET_BITS 64
+ #include <netinet/in.h>
+ #include <unistd.h>
+ #endif
+ 
+ #include <stdio.h>
+ 
+-#define HOST_TO_NETWORK64(x) ( \
+-  ((((uint64_t) x) & 0xFF) << 56) | \
+-  ((((uint64_t) x) >> 8) & 0xFF) << 48) | \
+-  (((((uint64_t) x) >> 16) & 0xFF) << 40) | \
+-  (((((uint64_t) x) >> 24) & 0xFF) << 32) | \
+-  (((((uint64_t) x) >> 32) & 0xFF) << 24) | \
+-  (((((uint64_t) x) >> 40) & 0xFF) << 16) | \
+-  (((((uint64_t) x) >> 48) & 0xFF) << 8) | \
+-  (((uint64_t) x) >> 56)
++#define HOST_TO_NETWORK64(x)                                               \
++  (((((uint64_t)x) & 0xFF) << 56) | ((((uint64_t)x) >> 8) & 0xFF) << 48) | \
++      (((((uint64_t)x) >> 16) & 0xFF) << 40) |                             \
++      (((((uint64_t)x) >> 24) & 0xFF) << 32) |                             \
++      (((((uint64_t)x) >> 32) & 0xFF) << 24) |                             \
++      (((((uint64_t)x) >> 40) & 0xFF) << 16) |                             \
++      (((((uint64_t)x) >> 48) & 0xFF) << 8) | (((uint64_t)x) >> 56)
+ #define NETWORK_TO_HOST64 HOST_TO_NETWORK64
+ 
+-#endif  /* MAR_PRIVATE_H__ */
++#endif /* MAR_PRIVATE_H__ */
+diff --git a/modules/libmar/src/mar_read.c b/modules/libmar/src/mar_read.c
+--- a/modules/libmar/src/mar_read.c
++++ b/modules/libmar/src/mar_read.c
+@@ -6,83 +6,77 @@
+ 
+ #include <sys/types.h>
+ #include <fcntl.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include "city.h"
+ #include "mar_private.h"
+ #include "mar.h"
++#ifdef XP_WIN
++#define strdup _strdup
++#endif
+ 
+ /* This block must be at most 104 bytes.
+    MAR channel name < 64 bytes, and product version < 32 bytes + 3 NULL
+    terminator bytes. We only check for 96 though because we remove 8
+    bytes above from the additionalBlockSize: We subtract
+    sizeof(additionalBlockSize) and sizeof(additionalBlockID) */
+ #define MAXADDITIONALBLOCKSIZE 96
+ 
+-static uint32_t
+-mar_hash_name(const char* name)
+-{
++static uint32_t mar_hash_name(const char* name) {
+   return CityHash64(name, strlen(name)) % TABLESIZE;
+ }
+ 
+-static int
+-mar_insert_item(MarFile* mar,
+-                const char* name,
+-                int namelen,
+-                uint32_t offset,
+-                uint32_t length,
+-                uint32_t flags)
+-{
++static int mar_insert_item(MarFile* mar, const char* name, int namelen,
++                           uint32_t offset, uint32_t length, uint32_t flags) {
+   MarItem *item, *root;
+   uint32_t hash;
+ 
+-  item = (MarItem *) malloc(sizeof(MarItem) + namelen);
+-  if (!item)
++  item = (MarItem*)malloc(sizeof(MarItem) + namelen);
++  if (!item) {
+     return -1;
++  }
+   item->next = NULL;
+   item->offset = offset;
+   item->length = length;
+   item->flags = flags;
+   memcpy(item->name, name, namelen + 1);
+ 
+   hash = mar_hash_name(name);
+ 
+   root = mar->item_table[hash];
+   if (!root) {
+     mar->item_table[hash] = item;
+   } else {
+     /* append item */
+-    while (root->next)
+-      root = root->next;
++    while (root->next) root = root->next;
+     root->next = item;
+   }
+   return 0;
+ }
+ 
+-static int
+-mar_consume_index(MarFile* mar, char** buf, const char* buf_end)
+-{
++static int mar_consume_index(MarFile* mar, char** buf, const char* buf_end) {
+   /*
+    * Each item has the following structure:
+    *   uint32_t offset      (network byte order)
+    *   uint32_t length      (network byte order)
+    *   uint32_t flags       (network byte order)
+    *   char     name[N]     (where N >= 1)
+    *   char     null_byte;
+    */
+   uint32_t offset;
+   uint32_t length;
+   uint32_t flags;
+-  const char *name;
++  const char* name;
+   int namelen;
+ 
+-  if ((buf_end - *buf) < (int)(3*sizeof(uint32_t) + 2))
++  if ((buf_end - *buf) < (int)(3 * sizeof(uint32_t) + 2)) {
+     return -1;
++  }
+ 
+   memcpy(&offset, *buf, sizeof(offset));
+   *buf += sizeof(offset);
+ 
+   memcpy(&length, *buf, sizeof(length));
+   *buf += sizeof(length);
+ 
+   memcpy(&flags, *buf, sizeof(flags));
+@@ -91,83 +85,88 @@ mar_consume_index(MarFile* mar, char** b
+   offset = ntohl(offset);
+   length = ntohl(length);
+   flags = ntohl(flags);
+ 
+   name = *buf;
+   /* find namelen; must take care not to read beyond buf_end */
+   while (**buf) {
+     /* buf_end points one byte past the end of buf's allocation */
+-    if (*buf == (buf_end - 1))
++    if (*buf == (buf_end - 1)) {
+       return -1;
++    }
+     ++(*buf);
+   }
+   namelen = (*buf - name);
+   /* must ensure that namelen is valid */
+   if (namelen < 0) {
+     return -1;
+   }
+   /* consume null byte */
+-  if (*buf == buf_end)
++  if (*buf == buf_end) {
+     return -1;
++  }
+   ++(*buf);
+ 
+   return mar_insert_item(mar, name, namelen, offset, length, flags);
+ }
+ 
+-static int
+-mar_read_index(MarFile* mar)
+-{
++static int mar_read_index(MarFile* mar) {
+   char id[MAR_ID_SIZE], *buf, *bufptr, *bufend;
+   uint32_t offset_to_index, size_of_index;
+ 
+   /* verify MAR ID */
+   fseek(mar->fp, 0, SEEK_SET);
+-  if (fread(id, MAR_ID_SIZE, 1, mar->fp) != 1)
++  if (fread(id, MAR_ID_SIZE, 1, mar->fp) != 1) {
+     return -1;
+-  if (memcmp(id, MAR_ID, MAR_ID_SIZE) != 0)
++  }
++  if (memcmp(id, MAR_ID, MAR_ID_SIZE) != 0) {
+     return -1;
++  }
+ 
+-  if (fread(&offset_to_index, sizeof(uint32_t), 1, mar->fp) != 1)
++  if (fread(&offset_to_index, sizeof(uint32_t), 1, mar->fp) != 1) {
+     return -1;
++  }
+   offset_to_index = ntohl(offset_to_index);
+ 
+-  if (fseek(mar->fp, offset_to_index, SEEK_SET))
++  if (fseek(mar->fp, offset_to_index, SEEK_SET)) {
+     return -1;
+-  if (fread(&size_of_index, sizeof(uint32_t), 1, mar->fp) != 1)
++  }
++  if (fread(&size_of_index, sizeof(uint32_t), 1, mar->fp) != 1) {
+     return -1;
++  }
+   size_of_index = ntohl(size_of_index);
+ 
+-  buf = (char *) malloc(size_of_index);
+-  if (!buf)
++  buf = (char*)malloc(size_of_index);
++  if (!buf) {
+     return -1;
++  }
+   if (fread(buf, size_of_index, 1, mar->fp) != 1) {
+     free(buf);
+     return -1;
+   }
+ 
+   bufptr = buf;
+   bufend = buf + size_of_index;
+-  while (bufptr < bufend && mar_consume_index(mar, &bufptr, bufend) == 0);
++  while (bufptr < bufend && mar_consume_index(mar, &bufptr, bufend) == 0)
++    ;
+ 
+   free(buf);
+   return (bufptr == bufend) ? 0 : -1;
+ }
+ 
+ /**
+  * Adds an offset and length to the MarFile's index_list
+  * @param mar     The MarFile that owns this offset length pair
+  * @param offset  The byte offset in the archive to be marked as processed
+  * @param length  The length corresponding to this byte offset
+  * @return int    1 on success, 0 if offset has been previously processed
+  *                -1 if unable to allocate space for the SeenIndexes
+  */
+-static int
+-mar_insert_offset(MarFile* mar, uint32_t offset, uint32_t length)
+-{
++static int mar_insert_offset(MarFile* mar, uint32_t offset, uint32_t length) {
+   /* Ignore files with no length */
+   if (length == 0) {
+     return 1;
+   }
+ 
+   SeenIndex* index = (SeenIndex*)malloc(sizeof(SeenIndex));
+   if (!index) {
+     return -1;
+@@ -208,70 +207,62 @@ mar_insert_offset(MarFile* mar, uint32_t
+   previous->next = index;
+   return 1;
+ }
+ 
+ /**
+  * Internal shared code for mar_open and mar_wopen.
+  * On failure, will fclose(fp).
+  */
+-static MarFile*
+-mar_fpopen(FILE* fp)
+-{
++static MarFile* mar_fpopen(FILE* fp) {
+   MarFile* mar;
+ 
+   mar = (MarFile*)malloc(sizeof(*mar));
+   if (!mar) {
+     fclose(fp);
+     return NULL;
+   }
+ 
+   mar->fp = fp;
+   mar->item_table_is_valid = 0;
+   memset(mar->item_table, 0, sizeof(mar->item_table));
+   mar->index_list = NULL;
+ 
+   return mar;
+ }
+ 
+-MarFile*
+-mar_open(const char* path)
+-{
+-  FILE *fp;
++MarFile* mar_open(const char* path) {
++  FILE* fp;
+ 
+   fp = fopen(path, "rb");
+   if (!fp) {
+     fprintf(stderr, "ERROR: could not open file in mar_open()\n");
+     perror(path);
+     return NULL;
+   }
+ 
+   return mar_fpopen(fp);
+ }
+ 
+ #ifdef XP_WIN
+-MarFile*
+-mar_wopen(const wchar_t* path)
+-{
+-  FILE *fp;
++MarFile* mar_wopen(const wchar_t* path) {
++  FILE* fp;
+ 
+   _wfopen_s(&fp, path, L"rb");
+   if (!fp) {
+     fprintf(stderr, "ERROR: could not open file in mar_wopen()\n");
+     _wperror(path);
+     return NULL;
+   }
+ 
+   return mar_fpopen(fp);
+ }
+ #endif
+ 
+-void
+-mar_close(MarFile* mar)
+-{
++void mar_close(MarFile* mar) {
+   MarItem* item;
+   SeenIndex* index;
+   int i;
+ 
+   fclose(mar->fp);
+ 
+   for (i = 0; i < TABLESIZE; ++i) {
+     item = mar->item_table[i];
+@@ -304,45 +295,40 @@ mar_close(MarFile* mar)
+  * @param offsetAdditionalBlocks Optional out parameter for the offset to the
+  *                               first additional block. Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @param numAdditionalBlocks    Optional out parameter for the number of
+  *                               additional blocks.  Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @return 0 on success and non-zero on failure.
+  */
+-int
+-get_mar_file_info_fp(FILE* fp,
+-                     int* hasSignatureBlock,
+-                     uint32_t* numSignatures,
+-                     int* hasAdditionalBlocks,
+-                     uint32_t* offsetAdditionalBlocks,
+-                     uint32_t* numAdditionalBlocks)
+-{
++int get_mar_file_info_fp(FILE* fp, int* hasSignatureBlock,
++                         uint32_t* numSignatures, int* hasAdditionalBlocks,
++                         uint32_t* offsetAdditionalBlocks,
++                         uint32_t* numAdditionalBlocks) {
+   uint32_t offsetToIndex, offsetToContent, signatureCount, signatureLen, i;
+ 
+   /* One of hasSignatureBlock or hasAdditionalBlocks must be non NULL */
+   if (!hasSignatureBlock && !hasAdditionalBlocks) {
+     return -1;
+   }
+ 
+-
+   /* Skip to the start of the offset index */
+   if (fseek(fp, MAR_ID_SIZE, SEEK_SET)) {
+     return -1;
+   }
+ 
+   /* Read the offset to the index. */
+   if (fread(&offsetToIndex, sizeof(offsetToIndex), 1, fp) != 1) {
+     return -1;
+   }
+   offsetToIndex = ntohl(offsetToIndex);
+ 
+   if (numSignatures) {
+-     /* Skip past the MAR file size field */
++    /* Skip past the MAR file size field */
+     if (fseek(fp, sizeof(uint64_t), SEEK_CUR)) {
+       return -1;
+     }
+ 
+     /* Read the number of signatures field */
+     if (fread(numSignatures, sizeof(*numSignatures), 1, fp) != 1) {
+       return -1;
+     }
+@@ -376,17 +362,17 @@ get_mar_file_info_fp(FILE* fp,
+   }
+ 
+   /* If the caller doesn't care about the product info block
+      value, then just return */
+   if (!hasAdditionalBlocks) {
+     return 0;
+   }
+ 
+-   /* Skip to the start of the signature block */
++  /* Skip to the start of the signature block */
+   if (fseeko(fp, SIGNATURE_BLOCK_OFFSET, SEEK_SET)) {
+     return -1;
+   }
+ 
+   /* Get the number of signatures */
+   if (fread(&signatureCount, sizeof(signatureCount), 1, fp) != 1) {
+     return -1;
+   }
+@@ -441,87 +427,81 @@ get_mar_file_info_fp(FILE* fp,
+ 
+ /**
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+-*/
+-int
+-read_product_info_block(char* path, struct ProductInformationBlock* infoBlock)
+-{
++ */
++int read_product_info_block(char* path,
++                            struct ProductInformationBlock* infoBlock) {
+   int rv;
+   MarFile mar;
+   mar.fp = fopen(path, "rb");
+   if (!mar.fp) {
+-    fprintf(stderr, "ERROR: could not open file in read_product_info_block()\n");
++    fprintf(stderr,
++            "ERROR: could not open file in read_product_info_block()\n");
+     perror(path);
+     return -1;
+   }
+   rv = mar_read_product_info_block(&mar, infoBlock);
+   fclose(mar.fp);
+   return rv;
+ }
+ 
+ /**
+  * Reads the product info block from the MAR file's additional block section.
+  * The caller is responsible for freeing the fields in infoBlock
+  * if the return is successful.
+  *
+  * @param infoBlock Out parameter for where to store the result to
+  * @return 0 on success, -1 on failure
+-*/
+-int
+-mar_read_product_info_block(MarFile* mar,
+-                            struct ProductInformationBlock* infoBlock)
+-{
+-  uint32_t offsetAdditionalBlocks, numAdditionalBlocks,
+-    additionalBlockSize, additionalBlockID;
++ */
++int mar_read_product_info_block(MarFile* mar,
++                                struct ProductInformationBlock* infoBlock) {
++  uint32_t offsetAdditionalBlocks, numAdditionalBlocks, additionalBlockSize,
++      additionalBlockID;
+   int hasAdditionalBlocks;
+ 
+   /* The buffer size is 97 bytes because the MAR channel name < 64 bytes, and
+      product version < 32 bytes + 3 NULL terminator bytes. */
+-  char buf[MAXADDITIONALBLOCKSIZE + 1] = { '\0' };
+-  if (get_mar_file_info_fp(mar->fp, NULL, NULL,
+-                           &hasAdditionalBlocks,
++  char buf[MAXADDITIONALBLOCKSIZE + 1] = {'\0'};
++  if (get_mar_file_info_fp(mar->fp, NULL, NULL, &hasAdditionalBlocks,
+                            &offsetAdditionalBlocks,
+                            &numAdditionalBlocks) != 0) {
+     return -1;
+   }
+ 
+   /* We only have the one additional block type and only one is expected to be
+      in a MAR file so check if any exist and process the first found */
+   if (numAdditionalBlocks > 0) {
+     /* Read the additional block size */
+-    if (fread(&additionalBlockSize,
+-              sizeof(additionalBlockSize),
+-              1, mar->fp) != 1) {
++    if (fread(&additionalBlockSize, sizeof(additionalBlockSize), 1, mar->fp) !=
++        1) {
+       return -1;
+     }
+     additionalBlockSize = ntohl(additionalBlockSize) -
+                           sizeof(additionalBlockSize) -
+                           sizeof(additionalBlockID);
+ 
+     /* Additional Block sizes should only be 96 bytes long */
+     if (additionalBlockSize > MAXADDITIONALBLOCKSIZE) {
+       return -1;
+     }
+ 
+     /* Read the additional block ID */
+-    if (fread(&additionalBlockID,
+-              sizeof(additionalBlockID),
+-              1, mar->fp) != 1) {
++    if (fread(&additionalBlockID, sizeof(additionalBlockID), 1, mar->fp) != 1) {
+       return -1;
+     }
+     additionalBlockID = ntohl(additionalBlockID);
+ 
+     if (PRODUCT_INFO_BLOCK_ID == additionalBlockID) {
+-      const char *location;
++      const char* location;
+       int len;
+ 
+       if (fread(buf, additionalBlockSize, 1, mar->fp) != 1) {
+         return -1;
+       }
+ 
+       /* Extract the MAR channel name from the buffer.  For now we
+          point to the stack allocated buffer but we strdup this
+@@ -538,36 +518,32 @@ mar_read_product_info_block(MarFile* mar
+       /* Extract the version from the buffer */
+       len = strlen(location);
+       infoBlock->productVersion = location;
+       if (len >= 32) {
+         infoBlock->MARChannelID = NULL;
+         infoBlock->productVersion = NULL;
+         return -1;
+       }
+-      infoBlock->MARChannelID =
+-        strdup(infoBlock->MARChannelID);
+-      infoBlock->productVersion =
+-        strdup(infoBlock->productVersion);
++      infoBlock->MARChannelID = strdup(infoBlock->MARChannelID);
++      infoBlock->productVersion = strdup(infoBlock->productVersion);
+       return 0;
+     } else {
+       /* This is not the additional block you're looking for. Move along. */
+       if (fseek(mar->fp, additionalBlockSize, SEEK_CUR)) {
+         return -1;
+       }
+     }
+   }
+ 
+   /* If we had a product info block we would have already returned */
+   return -1;
+ }
+ 
+-const MarItem*
+-mar_find_item(MarFile* mar, const char* name)
+-{
++const MarItem* mar_find_item(MarFile* mar, const char* name) {
+   uint32_t hash;
+   const MarItem* item;
+ 
+   if (!mar->item_table_is_valid) {
+     if (mar_read_index(mar)) {
+       return NULL;
+     } else {
+       mar->item_table_is_valid = 1;
+@@ -585,19 +561,17 @@ mar_find_item(MarFile* mar, const char* 
+   if (mar_insert_offset(mar, item->offset, item->length) == 1) {
+     return item;
+   } else {
+     fprintf(stderr, "ERROR: file content collision in mar_find_item()\n");
+     return NULL;
+   }
+ }
+ 
+-int
+-mar_enum_items(MarFile* mar, MarItemCallback callback, void* closure)
+-{
++int mar_enum_items(MarFile* mar, MarItemCallback callback, void* closure) {
+   MarItem* item;
+   int i, rv;
+ 
+   if (!mar->item_table_is_valid) {
+     if (mar_read_index(mar)) {
+       return -1;
+     } else {
+       mar->item_table_is_valid = 1;
+@@ -619,36 +593,35 @@ mar_enum_items(MarFile* mar, MarItemCall
+       }
+       item = item->next;
+     }
+   }
+ 
+   return 0;
+ }
+ 
+-int
+-mar_read(MarFile* mar,
+-         const MarItem* item,
+-         int offset,
+-         uint8_t* buf,
+-         int bufsize)
+-{
++int mar_read(MarFile* mar, const MarItem* item, int offset, uint8_t* buf,
++             int bufsize) {
+   int nr;
+ 
+-  if (offset == (int) item->length)
++  if (offset == (int)item->length) {
+     return 0;
+-  if (offset > (int) item->length)
++  }
++  if (offset > (int)item->length) {
+     return -1;
++  }
+ 
+   nr = item->length - offset;
+-  if (nr > bufsize)
++  if (nr > bufsize) {
+     nr = bufsize;
++  }
+ 
+-  if (fseek(mar->fp, item->offset + offset, SEEK_SET))
++  if (fseek(mar->fp, item->offset + offset, SEEK_SET)) {
+     return -1;
++  }
+ 
+   return fread(buf, 1, nr, mar->fp);
+ }
+ 
+ /**
+  * Determines the MAR file information.
+  *
+  * @param path                   The path of the MAR file to check.
+@@ -661,31 +634,27 @@ mar_read(MarFile* mar,
+  * @param offsetAdditionalBlocks Optional out parameter for the offset to the
+  *                               first additional block. Value is only valid if
+  *                               hasAdditionalBlocks is not equal to 0.
+  * @param numAdditionalBlocks    Optional out parameter for the number of
+  *                               additional blocks.  Value is only valid if
+  *                               has_additional_blocks is not equal to 0.
+  * @return 0 on success and non-zero on failure.
+  */
+-int
+-get_mar_file_info(const char* path,
+-                  int* hasSignatureBlock,
+-                  uint32_t* numSignatures,
+-                  int* hasAdditionalBlocks,
+-                  uint32_t* offsetAdditionalBlocks,
+-                  uint32_t* numAdditionalBlocks)
+-{
++int get_mar_file_info(const char* path, int* hasSignatureBlock,
++                      uint32_t* numSignatures, int* hasAdditionalBlocks,
++                      uint32_t* offsetAdditionalBlocks,
++                      uint32_t* numAdditionalBlocks) {
+   int rv;
+-  FILE *fp = fopen(path, "rb");
++  FILE* fp = fopen(path, "rb");
+   if (!fp) {
+     fprintf(stderr, "ERROR: could not open file in get_mar_file_info()\n");
+     perror(path);
+     return -1;
+   }
+ 
+-  rv = get_mar_file_info_fp(fp, hasSignatureBlock,
+-                            numSignatures, hasAdditionalBlocks,
+-                            offsetAdditionalBlocks, numAdditionalBlocks);
++  rv = get_mar_file_info_fp(fp, hasSignatureBlock, numSignatures,
++                            hasAdditionalBlocks, offsetAdditionalBlocks,
++                            numAdditionalBlocks);
+ 
+   fclose(fp);
+   return rv;
+ }
+diff --git a/modules/libmar/tool/mar.c b/modules/libmar/tool/mar.c
+--- a/modules/libmar/tool/mar.c
++++ b/modules/libmar/tool/mar.c
+@@ -21,95 +21,105 @@
+ #if !defined(NO_SIGN_VERIFY) && (!defined(XP_WIN) || defined(MAR_NSS))
+ #include "cert.h"
+ #include "nss.h"
+ #include "pk11pub.h"
+ int NSSInitCryptoContext(const char *NSSConfigDir);
+ #endif
+ 
+ int mar_repackage_and_sign(const char *NSSConfigDir,
+-                           const char * const *certNames,
+-                           uint32_t certCount,
+-                           const char *src,
+-                           const char * dest);
++                           const char *const *certNames, uint32_t certCount,
++                           const char *src, const char *dest);
+ 
+ static void print_version() {
+   printf("Version: %s\n", MOZ_APP_VERSION);
+   printf("Default Channel ID: %s\n", MAR_CHANNEL_ID);
+ }
+ 
+ static void print_usage() {
+   printf("usage:\n");
+   printf("Create a MAR file:\n");
+-  printf("  mar [-H MARChannelID] [-V ProductVersion] [-C workingDir] "
+-         "-c archive.mar [files...]\n");
++  printf(
++      "  mar [-H MARChannelID] [-V ProductVersion] [-C workingDir] "
++      "-c archive.mar [files...]\n");
+ 
+   printf("Extract a MAR file:\n");
+   printf("  mar [-C workingDir] -x archive.mar\n");
+ #ifndef NO_SIGN_VERIFY
+   printf("Sign a MAR file:\n");
+-  printf("  mar [-C workingDir] -d NSSConfigDir -n certname -s "
+-         "archive.mar out_signed_archive.mar\n");
++  printf(
++      "  mar [-C workingDir] -d NSSConfigDir -n certname -s "
++      "archive.mar out_signed_archive.mar\n");
+ 
+   printf("Strip a MAR signature:\n");
+-  printf("  mar [-C workingDir] -r "
+-         "signed_input_archive.mar output_archive.mar\n");
++  printf(
++      "  mar [-C workingDir] -r "
++      "signed_input_archive.mar output_archive.mar\n");
+ 
+   printf("Extract a MAR signature:\n");
+-  printf("  mar [-C workingDir] -n(i) -X "
+-         "signed_input_archive.mar base_64_encoded_signature_file\n");
++  printf(
++      "  mar [-C workingDir] -n(i) -X "
++      "signed_input_archive.mar base_64_encoded_signature_file\n");
+ 
+   printf("Import a MAR signature:\n");
+-  printf("  mar [-C workingDir] -n(i) -I "
+-         "signed_input_archive.mar base_64_encoded_signature_file "
+-         "changed_signed_output.mar\n");
++  printf(
++      "  mar [-C workingDir] -n(i) -I "
++      "signed_input_archive.mar base_64_encoded_signature_file "
++      "changed_signed_output.mar\n");
+   printf("(i) is the index of the certificate to extract\n");
+ #if defined(XP_MACOSX) || (defined(XP_WIN) && !defined(MAR_NSS))
+   printf("Verify a MAR file:\n");
+   printf("  mar [-C workingDir] -D DERFilePath -v signed_archive.mar\n");
+-  printf("At most %d signature certificate DER files are specified by "
+-         "-D0 DERFilePath1 -D1 DERFilePath2, ...\n", MAX_SIGNATURES);
++  printf(
++      "At most %d signature certificate DER files are specified by "
++      "-D0 DERFilePath1 -D1 DERFilePath2, ...\n",
++      MAX_SIGNATURES);
+ #else
+   printf("Verify a MAR file:\n");
+-  printf("  mar [-C workingDir] -d NSSConfigDir -n certname "
+-         "-v signed_archive.mar\n");
+-  printf("At most %d signature certificate names are specified by "
+-         "-n0 certName -n1 certName2, ...\n", MAX_SIGNATURES);
++  printf(
++      "  mar [-C workingDir] -d NSSConfigDir -n certname "
++      "-v signed_archive.mar\n");
++  printf(
++      "At most %d signature certificate names are specified by "
++      "-n0 certName -n1 certName2, ...\n",
++      MAX_SIGNATURES);
+ #endif
+-  printf("At most %d verification certificate names are specified by "
+-         "-n0 certName -n1 certName2, ...\n", MAX_SIGNATURES);
++  printf(
++      "At most %d verification certificate names are specified by "
++      "-n0 certName -n1 certName2, ...\n",
++      MAX_SIGNATURES);
+ #endif
+   printf("Print information on a MAR file:\n");
+   printf("  mar -t archive.mar\n");
+ 
+   printf("Print detailed information on a MAR file including signatures:\n");
+   printf("  mar -T archive.mar\n");
+ 
+   printf("Refresh the product information block of a MAR file:\n");
+-  printf("  mar [-H MARChannelID] [-V ProductVersion] [-C workingDir] "
+-         "-i unsigned_archive_to_refresh.mar\n");
++  printf(
++      "  mar [-H MARChannelID] [-V ProductVersion] [-C workingDir] "
++      "-i unsigned_archive_to_refresh.mar\n");
+ 
+   printf("Print executable version:\n");
+   printf("  mar --version\n");
+   printf("This program does not handle unicode file paths properly\n");
+ }
+ 
+-static int mar_test_callback(MarFile *mar,
+-                             const MarItem *item,
+-                             void *unused) {
++static int mar_test_callback(MarFile *mar, const MarItem *item, void *unused) {
+   printf("%u\t0%o\t%s\n", item->length, item->flags, item->name);
+   return 0;
+ }
+ 
+ static int mar_test(const char *path) {
+   MarFile *mar;
+ 
+   mar = mar_open(path);
+-  if (!mar)
++  if (!mar) {
+     return -1;
++  }
+ 
+   printf("SIZE\tMODE\tNAME\n");
+   mar_enum_items(mar, mar_test_callback, NULL);
+ 
+   mar_close(mar);
+   return 0;
+ }
+ 
+@@ -120,308 +130,294 @@ int main(int argc, char **argv) {
+   char *productVersion = MOZ_APP_VERSION;
+   uint32_t k;
+   int rv = -1;
+   uint32_t certCount = 0;
+   int32_t sigIndex = -1;
+ 
+ #if !defined(NO_SIGN_VERIFY)
+   uint32_t fileSizes[MAX_SIGNATURES];
+-  const uint8_t* certBuffers[MAX_SIGNATURES];
++  const uint8_t *certBuffers[MAX_SIGNATURES];
+ #if ((!defined(MAR_NSS) && defined(XP_WIN)) || defined(XP_MACOSX)) || \
+     ((defined(XP_WIN) || defined(XP_MACOSX)) && !defined(MAR_NSS))
+-  char* DERFilePaths[MAX_SIGNATURES];
++  char *DERFilePaths[MAX_SIGNATURES];
+ #endif
+ #if (!defined(XP_WIN) && !defined(XP_MACOSX)) || defined(MAR_NSS)
+-  CERTCertificate* certs[MAX_SIGNATURES];
++  CERTCertificate *certs[MAX_SIGNATURES];
+ #endif
+ #endif
+ 
+-  memset((void*)certNames, 0, sizeof(certNames));
++  memset((void *)certNames, 0, sizeof(certNames));
+ #if defined(XP_WIN) && !defined(MAR_NSS) && !defined(NO_SIGN_VERIFY)
+-  memset((void*)certBuffers, 0, sizeof(certBuffers));
++  memset((void *)certBuffers, 0, sizeof(certBuffers));
+ #endif
+-#if !defined(NO_SIGN_VERIFY) && ((!defined(MAR_NSS) && defined(XP_WIN)) || \
+-                                 defined(XP_MACOSX))
++#if !defined(NO_SIGN_VERIFY) && \
++    ((!defined(MAR_NSS) && defined(XP_WIN)) || defined(XP_MACOSX))
+   memset(DERFilePaths, 0, sizeof(DERFilePaths));
+   memset(fileSizes, 0, sizeof(fileSizes));
+ #endif
+ 
+   if (argc > 1 && 0 == strcmp(argv[1], "--version")) {
+     print_version();
+     return 0;
+   }
+ 
+   if (argc < 3) {
+     print_usage();
+     return -1;
+   }
+ 
+   while (argc > 0) {
+-    if (argv[1][0] == '-' && (argv[1][1] == 'c' ||
+-        argv[1][1] == 't' || argv[1][1] == 'x' ||
+-        argv[1][1] == 'v' || argv[1][1] == 's' ||
+-        argv[1][1] == 'i' || argv[1][1] == 'T' ||
+-        argv[1][1] == 'r' || argv[1][1] == 'X' ||
+-        argv[1][1] == 'I')) {
++    if (argv[1][0] == '-' &&
++        (argv[1][1] == 'c' || argv[1][1] == 't' || argv[1][1] == 'x' ||
++         argv[1][1] == 'v' || argv[1][1] == 's' || argv[1][1] == 'i' ||
++         argv[1][1] == 'T' || argv[1][1] == 'r' || argv[1][1] == 'X' ||
++         argv[1][1] == 'I')) {
+       break;
+-    /* -C workingdirectory */
++      /* -C workingdirectory */
+     }
+     if (argv[1][0] == '-' && argv[1][1] == 'C') {
+       if (chdir(argv[2]) != 0) {
+         return -1;
+       }
+       argv += 2;
+       argc -= 2;
+     }
+-#if !defined(NO_SIGN_VERIFY) && ((!defined(MAR_NSS) && defined(XP_WIN)) || \
+-                                 defined(XP_MACOSX))
++#if !defined(NO_SIGN_VERIFY) && \
++    ((!defined(MAR_NSS) && defined(XP_WIN)) || defined(XP_MACOSX))
+     /* -D DERFilePath, also matches -D[index] DERFilePath
+        We allow an index for verifying to be symmetric
+        with the import and export command line arguments. */
+-    else if (argv[1][0] == '-' &&
+-             argv[1][1] == 'D' &&
++    else if (argv[1][0] == '-' && argv[1][1] == 'D' &&
+              (argv[1][2] == (char)('0' + certCount) || argv[1][2] == '\0')) {
+       if (certCount >= MAX_SIGNATURES) {
+         print_usage();
+         return -1;
+       }
+       DERFilePaths[certCount++] = argv[2];
+       argv += 2;
+       argc -= 2;
+     }
+ #endif
+     /* -d NSSConfigdir */
+     else if (argv[1][0] == '-' && argv[1][1] == 'd') {
+       NSSConfigDir = argv[2];
+       argv += 2;
+       argc -= 2;
+-     /* -n certName, also matches -n[index] certName
+-        We allow an index for verifying to be symmetric
+-        with the import and export command line arguments. */
+-    } else if (argv[1][0] == '-' &&
+-               argv[1][1] == 'n' &&
+-               (argv[1][2] == (char)('0' + certCount) ||
+-                argv[1][2] == '\0' ||
+-                !strcmp(argv[2], "-X") ||
+-                !strcmp(argv[2], "-I"))) {
++      /* -n certName, also matches -n[index] certName
++         We allow an index for verifying to be symmetric
++         with the import and export command line arguments. */
++    } else if (argv[1][0] == '-' && argv[1][1] == 'n' &&
++               (argv[1][2] == (char)('0' + certCount) || argv[1][2] == '\0' ||
++                !strcmp(argv[2], "-X") || !strcmp(argv[2], "-I"))) {
+       if (certCount >= MAX_SIGNATURES) {
+         print_usage();
+         return -1;
+       }
+       certNames[certCount++] = argv[2];
+       if (strlen(argv[1]) > 2 &&
+           (!strcmp(argv[2], "-X") || !strcmp(argv[2], "-I")) &&
+           argv[1][2] >= '0' && argv[1][2] <= '9') {
+         sigIndex = argv[1][2] - '0';
+         argv++;
+         argc--;
+       } else {
+         argv += 2;
+         argc -= 2;
+       }
+-    /* MAR channel ID */
+-    } else if (argv[1][0] == '-' && argv[1][1] == 'H') {
++    } else if (argv[1][0] == '-' && argv[1][1] == 'H') {  // MAR channel ID
+       MARChannelID = argv[2];
+       argv += 2;
+       argc -= 2;
+-    /* Product Version */
+-    } else if (argv[1][0] == '-' && argv[1][1] == 'V') {
++    } else if (argv[1][0] == '-' && argv[1][1] == 'V') {  // Product Version
+       productVersion = argv[2];
+       argv += 2;
+       argc -= 2;
+-    }
+-    else {
++    } else {
+       print_usage();
+       return -1;
+     }
+   }
+ 
+   if (argv[1][0] != '-') {
+     print_usage();
+     return -1;
+   }
+ 
+   switch (argv[1][1]) {
+-  case 'c': {
+-    struct ProductInformationBlock infoBlock;
+-    infoBlock.MARChannelID = MARChannelID;
+-    infoBlock.productVersion = productVersion;
+-    return mar_create(argv[2], argc - 3, argv + 3, &infoBlock);
+-  }
+-  case 'i': {
+-    struct ProductInformationBlock infoBlock;
+-    infoBlock.MARChannelID = MARChannelID;
+-    infoBlock.productVersion = productVersion;
+-    return refresh_product_info_block(argv[2], &infoBlock);
+-  }
+-  case 'T': {
+-    struct ProductInformationBlock infoBlock;
+-    uint32_t numSignatures, numAdditionalBlocks;
+-    int hasSignatureBlock, hasAdditionalBlock;
+-    if (!get_mar_file_info(argv[2],
+-                           &hasSignatureBlock,
+-                           &numSignatures,
+-                           &hasAdditionalBlock,
+-                           NULL, &numAdditionalBlocks)) {
+-      if (hasSignatureBlock) {
+-        printf("Signature block found with %d signature%s\n",
+-               numSignatures,
+-               numSignatures != 1 ? "s" : "");
++    case 'c': {
++      struct ProductInformationBlock infoBlock;
++      infoBlock.MARChannelID = MARChannelID;
++      infoBlock.productVersion = productVersion;
++      return mar_create(argv[2], argc - 3, argv + 3, &infoBlock);
++    }
++    case 'i': {
++      struct ProductInformationBlock infoBlock;
++      infoBlock.MARChannelID = MARChannelID;
++      infoBlock.productVersion = productVersion;
++      return refresh_product_info_block(argv[2], &infoBlock);
++    }
++    case 'T': {
++      struct ProductInformationBlock infoBlock;
++      uint32_t numSignatures, numAdditionalBlocks;
++      int hasSignatureBlock, hasAdditionalBlock;
++      if (!get_mar_file_info(argv[2], &hasSignatureBlock, &numSignatures,
++                             &hasAdditionalBlock, NULL, &numAdditionalBlocks)) {
++        if (hasSignatureBlock) {
++          printf("Signature block found with %d signature%s\n", numSignatures,
++                 numSignatures != 1 ? "s" : "");
++        }
++        if (hasAdditionalBlock) {
++          printf("%d additional block%s found:\n", numAdditionalBlocks,
++                 numAdditionalBlocks != 1 ? "s" : "");
++        }
++
++        rv = read_product_info_block(argv[2], &infoBlock);
++        if (!rv) {
++          printf("  - Product Information Block:\n");
++          printf(
++              "    - MAR channel name: %s\n"
++              "    - Product version: %s\n",
++              infoBlock.MARChannelID, infoBlock.productVersion);
++          free((void *)infoBlock.MARChannelID);
++          free((void *)infoBlock.productVersion);
++        }
+       }
+-      if (hasAdditionalBlock) {
+-        printf("%d additional block%s found:\n",
+-               numAdditionalBlocks,
+-               numAdditionalBlocks != 1 ? "s" : "");
+-      }
++      printf("\n");
++      /* The fall through from 'T' to 't' is intentional */
++    }
++    case 't':
++      return mar_test(argv[2]);
+ 
+-      rv = read_product_info_block(argv[2], &infoBlock);
+-      if (!rv) {
+-        printf("  - Product Information Block:\n");
+-        printf("    - MAR channel name: %s\n"
+-               "    - Product version: %s\n",
+-               infoBlock.MARChannelID,
+-               infoBlock.productVersion);
+-        free((void *)infoBlock.MARChannelID);
+-        free((void *)infoBlock.productVersion);
+-      }
+-     }
+-    printf("\n");
+-    /* The fall through from 'T' to 't' is intentional */
+-  }
+-  case 't':
+-    return mar_test(argv[2]);
+-
+-  /* Extract a MAR file */
+-  case 'x':
+-    return mar_extract(argv[2]);
++    case 'x':  // Extract a MAR file
++      return mar_extract(argv[2]);
+ 
+ #ifndef NO_SIGN_VERIFY
+-  /* Extract a MAR signature */
+-  case 'X':
+-    if (sigIndex == -1) {
+-      fprintf(stderr, "ERROR: Signature index was not passed.\n");
+-      return -1;
+-    }
+-    if (sigIndex >= MAX_SIGNATURES || sigIndex < -1) {
+-      fprintf(stderr, "ERROR: Signature index is out of range: %d.\n",
+-              sigIndex);
+-      return -1;
+-    }
+-    return extract_signature(argv[2], sigIndex, argv[3]);
++    case 'X':  // Extract a MAR signature
++      if (sigIndex == -1) {
++        fprintf(stderr, "ERROR: Signature index was not passed.\n");
++        return -1;
++      }
++      if (sigIndex >= MAX_SIGNATURES || sigIndex < -1) {
++        fprintf(stderr, "ERROR: Signature index is out of range: %d.\n",
++                sigIndex);
++        return -1;
++      }
++      return extract_signature(argv[2], sigIndex, argv[3]);
+ 
+-  /* Import a MAR signature */
+-  case 'I':
+-    if (sigIndex == -1) {
+-      fprintf(stderr, "ERROR: signature index was not passed.\n");
+-      return -1;
+-    }
+-    if (sigIndex >= MAX_SIGNATURES || sigIndex < -1) {
+-      fprintf(stderr, "ERROR: Signature index is out of range: %d.\n",
+-              sigIndex);
+-      return -1;
+-    }
+-    if (argc < 5) {
+-      print_usage();
+-      return -1;
+-    }
+-    return import_signature(argv[2], sigIndex, argv[3], argv[4]);
++    case 'I':  // Import a MAR signature
++      if (sigIndex == -1) {
++        fprintf(stderr, "ERROR: signature index was not passed.\n");
++        return -1;
++      }
++      if (sigIndex >= MAX_SIGNATURES || sigIndex < -1) {
++        fprintf(stderr, "ERROR: Signature index is out of range: %d.\n",
++                sigIndex);
++        return -1;
++      }
++      if (argc < 5) {
++        print_usage();
++        return -1;
++      }
++      return import_signature(argv[2], sigIndex, argv[3], argv[4]);
+ 
+-  case 'v':
+-    if (certCount == 0) {
+-      print_usage();
+-      return -1;
+-    }
++    case 'v':
++      if (certCount == 0) {
++        print_usage();
++        return -1;
++      }
+ 
+ #if (!defined(XP_WIN) && !defined(XP_MACOSX)) || defined(MAR_NSS)
+-    if (!NSSConfigDir || certCount == 0) {
+-      print_usage();
+-      return -1;
+-    }
+-
+-    if (NSSInitCryptoContext(NSSConfigDir)) {
+-      fprintf(stderr, "ERROR: Could not initialize crypto library.\n");
+-      return -1;
+-    }
+-#endif
+-
+-    rv = 0;
+-    for (k = 0; k < certCount; ++k) {
+-#if (defined(XP_WIN) || defined(XP_MACOSX)) && !defined(MAR_NSS)
+-      rv = mar_read_entire_file(DERFilePaths[k], MAR_MAX_CERT_SIZE,
+-                                &certBuffers[k], &fileSizes[k]);
++      if (!NSSConfigDir || certCount == 0) {
++        print_usage();
++        return -1;
++      }
+ 
+-      if (rv) {
+-        fprintf(stderr, "ERROR: could not read file %s", DERFilePaths[k]);
+-        break;
+-      }
+-#else
+-      /* It is somewhat circuitous to look up a CERTCertificate and then pass
+-       * in its DER encoding just so we can later re-create that
+-       * CERTCertificate to extract the public key out of it. However, by doing
+-       * things this way, we maximize the reuse of the mar_verify_signatures
+-       * function and also we keep the control flow as similar as possible
+-       * between programs and operating systems, at least for the functions
+-       * that are critically important to security.
+-       */
+-      certs[k] = PK11_FindCertFromNickname(certNames[k], NULL);
+-      if (certs[k]) {
+-        certBuffers[k] = certs[k]->derCert.data;
+-        fileSizes[k] = certs[k]->derCert.len;
+-      } else {
+-        rv = -1;
+-        fprintf(stderr, "ERROR: could not find cert from nickname %s", certNames[k]);
+-        break;
++      if (NSSInitCryptoContext(NSSConfigDir)) {
++        fprintf(stderr, "ERROR: Could not initialize crypto library.\n");
++        return -1;
+       }
+ #endif
+-    }
++
++      rv = 0;
++      for (k = 0; k < certCount; ++k) {
++#if (defined(XP_WIN) || defined(XP_MACOSX)) && !defined(MAR_NSS)
++        rv = mar_read_entire_file(DERFilePaths[k], MAR_MAX_CERT_SIZE,
++                                  &certBuffers[k], &fileSizes[k]);
+ 
+-    if (!rv) {
+-      MarFile *mar = mar_open(argv[2]);
+-      if (mar) {
+-        rv = mar_verify_signatures(mar, certBuffers, fileSizes, certCount);
+-        mar_close(mar);
+-      } else {
+-        fprintf(stderr, "ERROR: Could not open MAR file.\n");
+-        rv = -1;
+-      }
+-    }
+-    for (k = 0; k < certCount; ++k) {
+-#if (defined(XP_WIN) || defined(XP_MACOSX)) && !defined(MAR_NSS)
+-      free((void*)certBuffers[k]);
++        if (rv) {
++          fprintf(stderr, "ERROR: could not read file %s", DERFilePaths[k]);
++          break;
++        }
+ #else
+-      /* certBuffers[k] is owned by certs[k] so don't free it */
+-      CERT_DestroyCertificate(certs[k]);
++        /* It is somewhat circuitous to look up a CERTCertificate and then pass
++         * in its DER encoding just so we can later re-create that
++         * CERTCertificate to extract the public key out of it. However, by
++         * doing things this way, we maximize the reuse of the
++         * mar_verify_signatures function and also we keep the control flow as
++         * similar as possible between programs and operating systems, at least
++         * for the functions that are critically important to security.
++         */
++        certs[k] = PK11_FindCertFromNickname(certNames[k], NULL);
++        if (certs[k]) {
++          certBuffers[k] = certs[k]->derCert.data;
++          fileSizes[k] = certs[k]->derCert.len;
++        } else {
++          rv = -1;
++          fprintf(stderr, "ERROR: could not find cert from nickname %s",
++                  certNames[k]);
++          break;
++        }
+ #endif
+-    }
+-    if (rv) {
+-      /* Determine if the source MAR file has the new fields for signing */
+-      int hasSignatureBlock;
+-      if (get_mar_file_info(argv[2], &hasSignatureBlock,
+-                            NULL, NULL, NULL, NULL)) {
+-        fprintf(stderr, "ERROR: could not determine if MAR is old or new.\n");
+-      } else if (!hasSignatureBlock) {
+-        fprintf(stderr, "ERROR: The MAR file is in the old format so has"
+-                        " no signature to verify.\n");
++      }
++
++      if (!rv) {
++        MarFile *mar = mar_open(argv[2]);
++        if (mar) {
++          rv = mar_verify_signatures(mar, certBuffers, fileSizes, certCount);
++          mar_close(mar);
++        } else {
++          fprintf(stderr, "ERROR: Could not open MAR file.\n");
++          rv = -1;
++        }
++      }
++      for (k = 0; k < certCount; ++k) {
++#if (defined(XP_WIN) || defined(XP_MACOSX)) && !defined(MAR_NSS)
++        free((void *)certBuffers[k]);
++#else
++        /* certBuffers[k] is owned by certs[k] so don't free it */
++        CERT_DestroyCertificate(certs[k]);
++#endif
+       }
+-    }
++      if (rv) {
++        /* Determine if the source MAR file has the new fields for signing */
++        int hasSignatureBlock;
++        if (get_mar_file_info(argv[2], &hasSignatureBlock, NULL, NULL, NULL,
++                              NULL)) {
++          fprintf(stderr, "ERROR: could not determine if MAR is old or new.\n");
++        } else if (!hasSignatureBlock) {
++          fprintf(stderr,
++                  "ERROR: The MAR file is in the old format so has"
++                  " no signature to verify.\n");
++        }
++      }
+ #if (!defined(XP_WIN) && !defined(XP_MACOSX)) || defined(MAR_NSS)
+-    (void) NSS_Shutdown();
++      (void)NSS_Shutdown();
+ #endif
+-    return rv ? -1 : 0;
++      return rv ? -1 : 0;
+ 
+-  case 's':
+-    if (!NSSConfigDir || certCount == 0 || argc < 4) {
++    case 's':
++      if (!NSSConfigDir || certCount == 0 || argc < 4) {
++        print_usage();
++        return -1;
++      }
++      return mar_repackage_and_sign(NSSConfigDir, certNames, certCount, argv[2],
++                                    argv[3]);
++
++    case 'r':
++      return strip_signature_block(argv[2], argv[3]);
++#endif /* endif NO_SIGN_VERIFY disabled */
++
++    default:
+       print_usage();
+       return -1;
+-    }
+-    return mar_repackage_and_sign(NSSConfigDir, certNames, certCount,
+-                                  argv[2], argv[3]);
+-
+-  case 'r':
+-    return strip_signature_block(argv[2], argv[3]);
+-#endif /* endif NO_SIGN_VERIFY disabled */
+-
+-  default:
+-    print_usage();
+-    return -1;
+   }
+ }
+diff --git a/modules/libmar/verify/MacVerifyCrypto.cpp b/modules/libmar/verify/MacVerifyCrypto.cpp
+--- a/modules/libmar/verify/MacVerifyCrypto.cpp
++++ b/modules/libmar/verify/MacVerifyCrypto.cpp
+@@ -6,197 +6,176 @@
+ #include <Security/Security.h>
+ #include <dlfcn.h>
+ 
+ #include "cryptox.h"
+ 
+ // We declare the necessary parts of the Security Transforms API here since
+ // we're building with the 10.6 SDK, which doesn't know about Security
+ // Transforms.
+-#ifdef __cplusplus
++#if defined(__cplusplus)
+ extern "C" {
+ #endif
+-  const CFStringRef kSecTransformInputAttributeName = CFSTR("INPUT");
+-  typedef CFTypeRef SecTransformRef;
+-  typedef struct OpaqueSecKeyRef* SecKeyRef;
++const CFStringRef kSecTransformInputAttributeName = CFSTR("INPUT");
++typedef CFTypeRef SecTransformRef;
++typedef struct OpaqueSecKeyRef* SecKeyRef;
+ 
+-  typedef SecTransformRef (*SecTransformCreateReadTransformWithReadStreamFunc)
+-                            (CFReadStreamRef inputStream);
+-  SecTransformCreateReadTransformWithReadStreamFunc
++typedef SecTransformRef (*SecTransformCreateReadTransformWithReadStreamFunc)(
++    CFReadStreamRef inputStream);
++SecTransformCreateReadTransformWithReadStreamFunc
+     SecTransformCreateReadTransformWithReadStreamPtr = NULL;
+-  typedef CFTypeRef (*SecTransformExecuteFunc)(SecTransformRef transform,
+-                                               CFErrorRef* error);
+-  SecTransformExecuteFunc SecTransformExecutePtr = NULL;
+-  typedef SecTransformRef (*SecVerifyTransformCreateFunc)(SecKeyRef key,
+-                                                          CFDataRef signature,
+-                                                          CFErrorRef* error);
+-  SecVerifyTransformCreateFunc SecVerifyTransformCreatePtr = NULL;
+-  typedef Boolean (*SecTransformSetAttributeFunc)(SecTransformRef transform,
+-                                                  CFStringRef key,
+-                                                  CFTypeRef value,
+-                                                  CFErrorRef* error);
+-  SecTransformSetAttributeFunc SecTransformSetAttributePtr = NULL;
+-#ifdef __cplusplus
++typedef CFTypeRef (*SecTransformExecuteFunc)(SecTransformRef transform,
++                                             CFErrorRef* error);
++SecTransformExecuteFunc SecTransformExecutePtr = NULL;
++typedef SecTransformRef (*SecVerifyTransformCreateFunc)(SecKeyRef key,
++                                                        CFDataRef signature,
++                                                        CFErrorRef* error);
++SecVerifyTransformCreateFunc SecVerifyTransformCreatePtr = NULL;
++typedef Boolean (*SecTransformSetAttributeFunc)(SecTransformRef transform,
++                                                CFStringRef key,
++                                                CFTypeRef value,
++                                                CFErrorRef* error);
++SecTransformSetAttributeFunc SecTransformSetAttributePtr = NULL;
++#if defined(__cplusplus)
+ }
+ #endif
+ 
+-CryptoX_Result
+-CryptoMac_InitCryptoProvider()
+-{
++CryptoX_Result CryptoMac_InitCryptoProvider() {
+   if (!SecTransformCreateReadTransformWithReadStreamPtr) {
+     SecTransformCreateReadTransformWithReadStreamPtr =
+-      (SecTransformCreateReadTransformWithReadStreamFunc)
+-        dlsym(RTLD_DEFAULT, "SecTransformCreateReadTransformWithReadStream");
++        (SecTransformCreateReadTransformWithReadStreamFunc)dlsym(
++            RTLD_DEFAULT, "SecTransformCreateReadTransformWithReadStream");
+   }
+   if (!SecTransformExecutePtr) {
+-    SecTransformExecutePtr = (SecTransformExecuteFunc)
+-      dlsym(RTLD_DEFAULT, "SecTransformExecute");
++    SecTransformExecutePtr =
++        (SecTransformExecuteFunc)dlsym(RTLD_DEFAULT, "SecTransformExecute");
+   }
+   if (!SecVerifyTransformCreatePtr) {
+-    SecVerifyTransformCreatePtr = (SecVerifyTransformCreateFunc)
+-      dlsym(RTLD_DEFAULT, "SecVerifyTransformCreate");
++    SecVerifyTransformCreatePtr = (SecVerifyTransformCreateFunc)dlsym(
++        RTLD_DEFAULT, "SecVerifyTransformCreate");
+   }
+   if (!SecTransformSetAttributePtr) {
+-    SecTransformSetAttributePtr = (SecTransformSetAttributeFunc)
+-      dlsym(RTLD_DEFAULT, "SecTransformSetAttribute");
++    SecTransformSetAttributePtr = (SecTransformSetAttributeFunc)dlsym(
++        RTLD_DEFAULT, "SecTransformSetAttribute");
+   }
+   if (!SecTransformCreateReadTransformWithReadStreamPtr ||
+-      !SecTransformExecutePtr ||
+-      !SecVerifyTransformCreatePtr ||
++      !SecTransformExecutePtr || !SecVerifyTransformCreatePtr ||
+       !SecTransformSetAttributePtr) {
+     return CryptoX_Error;
+   }
+   return CryptoX_Success;
+ }
+ 
+-CryptoX_Result
+-CryptoMac_VerifyBegin(CryptoX_SignatureHandle* aInputData)
+-{
++CryptoX_Result CryptoMac_VerifyBegin(CryptoX_SignatureHandle* aInputData) {
+   if (!aInputData) {
+     return CryptoX_Error;
+   }
+ 
+   void* inputData = CFDataCreateMutable(kCFAllocatorDefault, 0);
+   if (!inputData) {
+     return CryptoX_Error;
+   }
+ 
+   *aInputData = inputData;
+   return CryptoX_Success;
+ }
+ 
+-CryptoX_Result
+-CryptoMac_VerifyUpdate(CryptoX_SignatureHandle* aInputData, void* aBuf,
+-                       unsigned int aLen)
+-{
++CryptoX_Result CryptoMac_VerifyUpdate(CryptoX_SignatureHandle* aInputData,
++                                      void* aBuf, unsigned int aLen) {
+   if (aLen == 0) {
+     return CryptoX_Success;
+   }
+   if (!aInputData || !*aInputData) {
+     return CryptoX_Error;
+   }
+ 
+   CFMutableDataRef inputData = (CFMutableDataRef)*aInputData;
+ 
+   CFDataAppendBytes(inputData, (const uint8*)aBuf, aLen);
+   return CryptoX_Success;
+ }
+ 
+-CryptoX_Result
+-CryptoMac_LoadPublicKey(const unsigned char* aCertData,
+-                        unsigned int aDataSize,
+-                        CryptoX_PublicKey* aPublicKey)
+-{
++CryptoX_Result CryptoMac_LoadPublicKey(const unsigned char* aCertData,
++                                       unsigned int aDataSize,
++                                       CryptoX_PublicKey* aPublicKey) {
+   if (!aCertData || aDataSize == 0 || !aPublicKey) {
+     return CryptoX_Error;
+   }
+   *aPublicKey = NULL;
+-  CFDataRef certData = CFDataCreate(kCFAllocatorDefault,
+-                                    aCertData,
+-                                    aDataSize);
++  CFDataRef certData = CFDataCreate(kCFAllocatorDefault, aCertData, aDataSize);
+   if (!certData) {
+     return CryptoX_Error;
+   }
+ 
+-  SecCertificateRef cert = SecCertificateCreateWithData(kCFAllocatorDefault,
+-                                                        certData);
++  SecCertificateRef cert =
++      SecCertificateCreateWithData(kCFAllocatorDefault, certData);
+   CFRelease(certData);
+   if (!cert) {
+     return CryptoX_Error;
+   }
+ 
+-  OSStatus status = SecCertificateCopyPublicKey(cert,
+-                                                (SecKeyRef*)aPublicKey);
++  OSStatus status = SecCertificateCopyPublicKey(cert, (SecKeyRef*)aPublicKey);
+   CFRelease(cert);
+   if (status != 0) {
+     return CryptoX_Error;
+   }
+ 
+   return CryptoX_Success;
+ }
+ 
+-CryptoX_Result
+-CryptoMac_VerifySignature(CryptoX_SignatureHandle* aInputData,
+-                          CryptoX_PublicKey* aPublicKey,
+-                          const unsigned char* aSignature,
+-                          unsigned int aSignatureLen)
+-{
++CryptoX_Result CryptoMac_VerifySignature(CryptoX_SignatureHandle* aInputData,
++                                         CryptoX_PublicKey* aPublicKey,
++                                         const unsigned char* aSignature,
++                                         unsigned int aSignatureLen) {
+   if (!aInputData || !*aInputData || !aPublicKey || !*aPublicKey ||
+       !aSignature || aSignatureLen == 0) {
+     return CryptoX_Error;
+   }
+ 
+-  CFDataRef signatureData = CFDataCreate(kCFAllocatorDefault,
+-                                         aSignature, aSignatureLen);
++  CFDataRef signatureData =
++      CFDataCreate(kCFAllocatorDefault, aSignature, aSignatureLen);
+   if (!signatureData) {
+     return CryptoX_Error;
+   }
+ 
+   CFErrorRef error;
+-  SecTransformRef verifier =
+-    SecVerifyTransformCreatePtr((SecKeyRef)*aPublicKey,
+-                                signatureData,
+-                                &error);
++  SecTransformRef verifier = SecVerifyTransformCreatePtr((SecKeyRef)*aPublicKey,
++                                                         signatureData, &error);
+   if (!verifier || error) {
+     if (error) {
+       CFRelease(error);
+     }
+     CFRelease(signatureData);
+     return CryptoX_Error;
+   }
+ 
+-  SecTransformSetAttributePtr(verifier,
+-                              kSecDigestTypeAttribute,
+-                              kSecDigestSHA2,
++  SecTransformSetAttributePtr(verifier, kSecDigestTypeAttribute, kSecDigestSHA2,
+                               &error);
+   if (error) {
+     CFRelease(error);
+     CFRelease(signatureData);
+     CFRelease(verifier);
+     return CryptoX_Error;
+   }
+ 
+   int digestLength = 384;
+-  CFNumberRef dLen = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &digestLength);
+-  SecTransformSetAttributePtr(verifier,
+-                              kSecDigestLengthAttribute,
+-                              dLen,
++  CFNumberRef dLen =
++      CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &digestLength);
++  SecTransformSetAttributePtr(verifier, kSecDigestLengthAttribute, dLen,
+                               &error);
+   CFRelease(dLen);
+   if (error) {
+     CFRelease(error);
+     CFRelease(signatureData);
+     CFRelease(verifier);
+     return CryptoX_Error;
+   }
+ 
+-  SecTransformSetAttributePtr(verifier,
+-                              kSecTransformInputAttributeName,
+-                              (CFDataRef)*aInputData,
+-                              &error);
++  SecTransformSetAttributePtr(verifier, kSecTransformInputAttributeName,
++                              (CFDataRef)*aInputData, &error);
+   if (error) {
+     CFRelease(error);
+     CFRelease(signatureData);
+     CFRelease(verifier);
+     return CryptoX_Error;
+   }
+ 
+   CryptoX_Result result = CryptoX_Error;
+@@ -214,30 +193,26 @@ CryptoMac_VerifySignature(CryptoX_Signat
+   }
+ 
+   CFRelease(signatureData);
+   CFRelease(verifier);
+ 
+   return result;
+ }
+ 
+-void
+-CryptoMac_FreeSignatureHandle(CryptoX_SignatureHandle* aInputData)
+-{
++void CryptoMac_FreeSignatureHandle(CryptoX_SignatureHandle* aInputData) {
+   if (!aInputData || !*aInputData) {
+     return;
+   }
+ 
+   CFMutableDataRef inputData = NULL;
+   inputData = (CFMutableDataRef)*aInputData;
+ 
+   CFRelease(inputData);
+ }
+ 
+-void
+-CryptoMac_FreePublicKey(CryptoX_PublicKey* aPublicKey)
+-{
++void CryptoMac_FreePublicKey(CryptoX_PublicKey* aPublicKey) {
+   if (!aPublicKey || !*aPublicKey) {
+     return;
+   }
+ 
+   CFRelease((SecKeyRef)*aPublicKey);
+ }
+diff --git a/modules/libmar/verify/cryptox.c b/modules/libmar/verify/cryptox.c
+--- a/modules/libmar/verify/cryptox.c
++++ b/modules/libmar/verify/cryptox.c
+@@ -16,23 +16,22 @@
+ 
+ /**
+  * Loads the public key for the specified cert name from the NSS store.
+  *
+  * @param certData  The DER-encoded X509 certificate to extract the key from.
+  * @param certDataSize The size of certData.
+  * @param publicKey Out parameter for the public key to use.
+  * @return CryptoX_Success on success, CryptoX_Error on error.
+-*/
+-CryptoX_Result
+-NSS_LoadPublicKey(const unsigned char *certData, unsigned int certDataSize,
+-                  SECKEYPublicKey **publicKey)
+-{
+-  CERTCertificate * cert;
+-  SECItem certDataItem = { siBuffer, (unsigned char*) certData, certDataSize };
++ */
++CryptoX_Result NSS_LoadPublicKey(const unsigned char *certData,
++                                 unsigned int certDataSize,
++                                 SECKEYPublicKey **publicKey) {
++  CERTCertificate *cert;
++  SECItem certDataItem = {siBuffer, (unsigned char *)certData, certDataSize};
+ 
+   if (!certData || !publicKey) {
+     return CryptoX_Error;
+   }
+ 
+   cert = CERT_NewTempCertificate(CERT_GetDefaultCertDB(), &certDataItem, NULL,
+                                  PR_FALSE, PR_TRUE);
+   /* Get the cert and embedded public key out of the database */
+@@ -43,20 +42,18 @@ NSS_LoadPublicKey(const unsigned char *c
+   CERT_DestroyCertificate(cert);
+ 
+   if (!*publicKey) {
+     return CryptoX_Error;
+   }
+   return CryptoX_Success;
+ }
+ 
+-CryptoX_Result
+-NSS_VerifyBegin(VFYContext **ctx,
+-                SECKEYPublicKey * const *publicKey)
+-{
++CryptoX_Result NSS_VerifyBegin(VFYContext **ctx,
++                               SECKEYPublicKey *const *publicKey) {
+   SECStatus status;
+   if (!ctx || !publicKey || !*publicKey) {
+     return CryptoX_Error;
+   }
+ 
+   /* Check that the key length is large enough for our requirements */
+   if ((SECKEY_PublicKeyStrength(*publicKey) * 8) <
+       XP_MIN_SIGNATURE_LEN_IN_BYTES) {
+@@ -77,193 +74,164 @@ NSS_VerifyBegin(VFYContext **ctx,
+ 
+ /**
+  * Verifies if a verify context matches the passed in signature.
+  *
+  * @param ctx          The verify context that the signature should match.
+  * @param signature    The signature to match.
+  * @param signatureLen The length of the signature.
+  * @return CryptoX_Success on success, CryptoX_Error on error.
+-*/
+-CryptoX_Result
+-NSS_VerifySignature(VFYContext * const *ctx,
+-                    const unsigned char *signature,
+-                    unsigned int signatureLen)
+-{
++ */
++CryptoX_Result NSS_VerifySignature(VFYContext *const *ctx,
++                                   const unsigned char *signature,
++                                   unsigned int signatureLen) {
+   SECItem signedItem;
+   SECStatus status;
+   if (!ctx || !signature || !*ctx) {
+     return CryptoX_Error;
+   }
+ 
+   signedItem.len = signatureLen;
+-  signedItem.data = (unsigned char*)signature;
++  signedItem.data = (unsigned char *)signature;
+   status = VFY_EndWithSignature(*ctx, &signedItem);
+   return SECSuccess == status ? CryptoX_Success : CryptoX_Error;
+ }
+ 
+ #elif defined(XP_WIN)
+ /**
+  * Verifies if a signature + public key matches a hash context.
+  *
+  * @param hash      The hash context that the signature should match.
+  * @param pubKey    The public key to use on the signature.
+  * @param signature The signature to check.
+  * @param signatureLen The length of the signature.
+  * @return CryptoX_Success on success, CryptoX_Error on error.
+-*/
+-CryptoX_Result
+-CryptoAPI_VerifySignature(HCRYPTHASH *hash,
+-                          HCRYPTKEY *pubKey,
+-                          const BYTE *signature,
+-                          DWORD signatureLen)
+-{
++ */
++CryptoX_Result CryptoAPI_VerifySignature(HCRYPTHASH *hash, HCRYPTKEY *pubKey,
++                                         const BYTE *signature,
++                                         DWORD signatureLen) {
+   DWORD i;
+   BOOL result;
+-/* Windows APIs expect the bytes in the signature to be in little-endian
+- * order, but we write the signature in big-endian order.  Other APIs like
+- * NSS and OpenSSL expect big-endian order.
+- */
++  /* Windows APIs expect the bytes in the signature to be in little-endian
++   * order, but we write the signature in big-endian order.  Other APIs like
++   * NSS and OpenSSL expect big-endian order.
++   */
+   BYTE *signatureReversed;
+   if (!hash || !pubKey || !signature || signatureLen < 1) {
+     return CryptoX_Error;
+   }
+ 
+   signatureReversed = malloc(signatureLen);
+   if (!signatureReversed) {
+     return CryptoX_Error;
+   }
+ 
+   for (i = 0; i < signatureLen; i++) {
+     signatureReversed[i] = signature[signatureLen - 1 - i];
+   }
+-  result = CryptVerifySignature(*hash, signatureReversed,
+-                                signatureLen, *pubKey, NULL, 0);
++  result = CryptVerifySignature(*hash, signatureReversed, signatureLen, *pubKey,
++                                NULL, 0);
+   free(signatureReversed);
+   return result ? CryptoX_Success : CryptoX_Error;
+ }
+ 
+ /**
+  * Obtains the public key for the passed in cert data
+  *
+  * @param provider       The cyrto provider
+  * @param certData       Data of the certificate to extract the public key from
+  * @param sizeOfCertData The size of the certData buffer
+  * @param certStore      Pointer to the handle of the certificate store to use
+  * @param CryptoX_Success on success
+-*/
+-CryptoX_Result
+-CryptoAPI_LoadPublicKey(HCRYPTPROV provider,
+-                        BYTE *certData,
+-                        DWORD sizeOfCertData,
+-                        HCRYPTKEY *publicKey)
+-{
++ */
++CryptoX_Result CryptoAPI_LoadPublicKey(HCRYPTPROV provider, BYTE *certData,
++                                       DWORD sizeOfCertData,
++                                       HCRYPTKEY *publicKey) {
+   CRYPT_DATA_BLOB blob;
+   CERT_CONTEXT *context;
+   if (!provider || !certData || !publicKey) {
+     return CryptoX_Error;
+   }
+ 
+   blob.cbData = sizeOfCertData;
+   blob.pbData = certData;
+   if (!CryptQueryObject(CERT_QUERY_OBJECT_BLOB, &blob,
+                         CERT_QUERY_CONTENT_FLAG_CERT,
+-                        CERT_QUERY_FORMAT_FLAG_BINARY,
+-                        0, NULL, NULL, NULL,
++                        CERT_QUERY_FORMAT_FLAG_BINARY, 0, NULL, NULL, NULL,
+                         NULL, NULL, (const void **)&context)) {
+     return CryptoX_Error;
+   }
+ 
+-  if (!CryptImportPublicKeyInfo(provider,
+-                                PKCS_7_ASN_ENCODING | X509_ASN_ENCODING,
+-                                &context->pCertInfo->SubjectPublicKeyInfo,
+-                                publicKey)) {
++  if (!CryptImportPublicKeyInfo(
++          provider, PKCS_7_ASN_ENCODING | X509_ASN_ENCODING,
++          &context->pCertInfo->SubjectPublicKeyInfo, publicKey)) {
+     CertFreeCertificateContext(context);
+     return CryptoX_Error;
+   }
+ 
+   CertFreeCertificateContext(context);
+   return CryptoX_Success;
+ }
+ 
+ /* Try to acquire context in this way:
+-  * 1. Enhanced provider without creating a new key set
+-  * 2. Enhanced provider with creating a new key set
+-  * 3. Default provider without creating a new key set
+-  * 4. Default provider without creating a new key set
+-  * #2 and #4 should not be needed because of the CRYPT_VERIFYCONTEXT,
+-  * but we add it just in case.
+-  *
+-  * @param provider Out parameter containing the provider handle.
+-  * @return CryptoX_Success on success, CryptoX_Error on error.
++ * 1. Enhanced provider without creating a new key set
++ * 2. Enhanced provider with creating a new key set
++ * 3. Default provider without creating a new key set
++ * 4. Default provider without creating a new key set
++ * #2 and #4 should not be needed because of the CRYPT_VERIFYCONTEXT,
++ * but we add it just in case.
++ *
++ * @param provider Out parameter containing the provider handle.
++ * @return CryptoX_Success on success, CryptoX_Error on error.
+  */
+-CryptoX_Result
+-CryptoAPI_InitCryptoContext(HCRYPTPROV *provider)
+-{
+-  if (!CryptAcquireContext(provider,
+-                           NULL,
+-                           MS_ENH_RSA_AES_PROV,
+-                           PROV_RSA_AES,
++CryptoX_Result CryptoAPI_InitCryptoContext(HCRYPTPROV *provider) {
++  if (!CryptAcquireContext(provider, NULL, MS_ENH_RSA_AES_PROV, PROV_RSA_AES,
+                            CRYPT_VERIFYCONTEXT)) {
+-    if (!CryptAcquireContext(provider,
+-                             NULL,
+-                             MS_ENH_RSA_AES_PROV,
+-                             PROV_RSA_AES,
++    if (!CryptAcquireContext(provider, NULL, MS_ENH_RSA_AES_PROV, PROV_RSA_AES,
+                              CRYPT_NEWKEYSET | CRYPT_VERIFYCONTEXT)) {
+-      if (!CryptAcquireContext(provider,
+-                               NULL,
+-                               NULL,
+-                               PROV_RSA_AES,
++      if (!CryptAcquireContext(provider, NULL, NULL, PROV_RSA_AES,
+                                CRYPT_VERIFYCONTEXT)) {
+-        if (!CryptAcquireContext(provider,
+-                                 NULL,
+-                                 NULL,
+-                                 PROV_RSA_AES,
++        if (!CryptAcquireContext(provider, NULL, NULL, PROV_RSA_AES,
+                                  CRYPT_NEWKEYSET | CRYPT_VERIFYCONTEXT)) {
+           *provider = CryptoX_InvalidHandleValue;
+           return CryptoX_Error;
+         }
+       }
+     }
+   }
+   return CryptoX_Success;
+ }
+ 
+ /**
+-  * Begins a signature verification hash context
+-  *
+-  * @param provider The crypt provider to use
+-  * @param hash     Out parameter for a handle to the hash context
+-  * @return CryptoX_Success on success, CryptoX_Error on error.
+-*/
+-CryptoX_Result
+-CryptoAPI_VerifyBegin(HCRYPTPROV provider, HCRYPTHASH* hash)
+-{
++ * Begins a signature verification hash context
++ *
++ * @param provider The crypt provider to use
++ * @param hash     Out parameter for a handle to the hash context
++ * @return CryptoX_Success on success, CryptoX_Error on error.
++ */
++CryptoX_Result CryptoAPI_VerifyBegin(HCRYPTPROV provider, HCRYPTHASH *hash) {
+   BOOL result;
+   if (!provider || !hash) {
+     return CryptoX_Error;
+   }
+ 
+   *hash = (HCRYPTHASH)NULL;
+-  result = CryptCreateHash(provider, CALG_SHA_384,
+-                           0, 0, hash);
++  result = CryptCreateHash(provider, CALG_SHA_384, 0, 0, hash);
+   return result ? CryptoX_Success : CryptoX_Error;
+ }
+ 
+ /**
+-  * Updates a signature verification hash context
+-  *
+-  * @param hash The hash context to udpate
+-  * @param buf  The buffer to update the hash context with
+-  * @param len The size of the passed in buffer
+-  * @return CryptoX_Success on success, CryptoX_Error on error.
+-*/
+-CryptoX_Result
+-CryptoAPI_VerifyUpdate(HCRYPTHASH* hash, BYTE *buf, DWORD len)
+-{
++ * Updates a signature verification hash context
++ *
++ * @param hash The hash context to udpate
++ * @param buf  The buffer to update the hash context with
++ * @param len The size of the passed in buffer
++ * @return CryptoX_Success on success, CryptoX_Error on error.
++ */
++CryptoX_Result CryptoAPI_VerifyUpdate(HCRYPTHASH *hash, BYTE *buf, DWORD len) {
+   BOOL result;
+   if (!hash || !buf) {
+     return CryptoX_Error;
+   }
+ 
+   result = CryptHashData(*hash, buf, len, 0);
+   return result ? CryptoX_Success : CryptoX_Error;
+ }
+diff --git a/modules/libmar/verify/cryptox.h b/modules/libmar/verify/cryptox.h
+--- a/modules/libmar/verify/cryptox.h
++++ b/modules/libmar/verify/cryptox.h
+@@ -15,60 +15,57 @@
+ 
+ #if defined(MAR_NSS)
+ 
+ #include "cert.h"
+ #include "keyhi.h"
+ #include "cryptohi.h"
+ 
+ #define CryptoX_InvalidHandleValue NULL
+-#define CryptoX_ProviderHandle void*
++#define CryptoX_ProviderHandle void *
+ #define CryptoX_SignatureHandle VFYContext *
+ #define CryptoX_PublicKey SECKEYPublicKey *
+ #define CryptoX_Certificate CERTCertificate *
+ 
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+-CryptoX_Result NSS_LoadPublicKey(const unsigned char* certData,
++CryptoX_Result NSS_LoadPublicKey(const unsigned char *certData,
+                                  unsigned int certDataSize,
+-                                 SECKEYPublicKey** publicKey);
++                                 SECKEYPublicKey **publicKey);
+ CryptoX_Result NSS_VerifyBegin(VFYContext **ctx,
+-                               SECKEYPublicKey * const *publicKey);
+-CryptoX_Result NSS_VerifySignature(VFYContext * const *ctx ,
++                               SECKEYPublicKey *const *publicKey);
++CryptoX_Result NSS_VerifySignature(VFYContext *const *ctx,
+                                    const unsigned char *signature,
+                                    unsigned int signatureLen);
+ #ifdef __cplusplus
+-} // extern "C"
++}  // extern "C"
+ #endif
+ 
+-#define CryptoX_InitCryptoProvider(CryptoHandle) \
+-  CryptoX_Success
++#define CryptoX_InitCryptoProvider(CryptoHandle) CryptoX_Success
+ #define CryptoX_VerifyBegin(CryptoHandle, SignatureHandle, PublicKey) \
+   NSS_VerifyBegin(SignatureHandle, PublicKey)
+ #define CryptoX_FreeSignatureHandle(SignatureHandle) \
+   VFY_DestroyContext(*SignatureHandle, PR_TRUE)
+ #define CryptoX_VerifyUpdate(SignatureHandle, buf, len) \
+-  VFY_Update(*SignatureHandle, (const unsigned char*)(buf), len)
++  VFY_Update(*SignatureHandle, (const unsigned char *)(buf), len)
+ #define CryptoX_LoadPublicKey(CryptoHandle, certData, dataSize, publicKey) \
+   NSS_LoadPublicKey(certData, dataSize, publicKey)
+ #define CryptoX_VerifySignature(hash, publicKey, signedData, len) \
+   NSS_VerifySignature(hash, (const unsigned char *)(signedData), len)
+-#define CryptoX_FreePublicKey(key) \
+-  SECKEY_DestroyPublicKey(*key)
+-#define CryptoX_FreeCertificate(cert) \
+-  CERT_DestroyCertificate(*cert)
++#define CryptoX_FreePublicKey(key) SECKEY_DestroyPublicKey(*key)
++#define CryptoX_FreeCertificate(cert) CERT_DestroyCertificate(*cert)
+ 
+ #elif XP_MACOSX
+ 
+ #define CryptoX_InvalidHandleValue NULL
+-#define CryptoX_ProviderHandle void*
+-#define CryptoX_SignatureHandle void*
+-#define CryptoX_PublicKey void*
+-#define CryptoX_Certificate void*
++#define CryptoX_ProviderHandle void *
++#define CryptoX_SignatureHandle void *
++#define CryptoX_PublicKey void *
++#define CryptoX_Certificate void *
+ 
+ // Forward-declare Objective-C functions implemented in MacVerifyCrypto.mm.
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+ CryptoX_Result CryptoMac_InitCryptoProvider();
+ CryptoX_Result CryptoMac_VerifyBegin(CryptoX_SignatureHandle* aInputData);
+ CryptoX_Result CryptoMac_VerifyUpdate(CryptoX_SignatureHandle* aInputData,
+@@ -78,91 +75,85 @@ CryptoX_Result CryptoMac_LoadPublicKey(c
+                                        CryptoX_PublicKey* aPublicKey);
+ CryptoX_Result CryptoMac_VerifySignature(CryptoX_SignatureHandle* aInputData,
+                                          CryptoX_PublicKey* aPublicKey,
+                                          const unsigned char* aSignature,
+                                          unsigned int aSignatureLen);
+ void CryptoMac_FreeSignatureHandle(CryptoX_SignatureHandle* aInputData);
+ void CryptoMac_FreePublicKey(CryptoX_PublicKey* aPublicKey);
+ #ifdef __cplusplus
+-} // extern "C"
++}  // extern "C"
+ #endif
+ 
+ #define CryptoX_InitCryptoProvider(aProviderHandle) \
+   CryptoMac_InitCryptoProvider()
+ #define CryptoX_VerifyBegin(aCryptoHandle, aInputData, aPublicKey) \
+   CryptoMac_VerifyBegin(aInputData)
+ #define CryptoX_VerifyUpdate(aInputData, aBuf, aLen) \
+   CryptoMac_VerifyUpdate(aInputData, aBuf, aLen)
+ #define CryptoX_LoadPublicKey(aProviderHandle, aCertData, aDataSize, \
+-                              aPublicKey) \
++                              aPublicKey)                            \
+   CryptoMac_LoadPublicKey(aCertData, aDataSize, aPublicKey)
+ #define CryptoX_VerifySignature(aInputData, aPublicKey, aSignature, \
+-                                aSignatureLen) \
++                                aSignatureLen)                      \
+   CryptoMac_VerifySignature(aInputData, aPublicKey, aSignature, aSignatureLen)
+ #define CryptoX_FreeSignatureHandle(aInputData) \
+   CryptoMac_FreeSignatureHandle(aInputData)
+-#define CryptoX_FreePublicKey(aPublicKey) \
+-  CryptoMac_FreePublicKey(aPublicKey)
++#define CryptoX_FreePublicKey(aPublicKey) CryptoMac_FreePublicKey(aPublicKey)
+ #define CryptoX_FreeCertificate(aCertificate)
+ 
+ #elif defined(XP_WIN)
+ 
+ #include <windows.h>
+ #include <wincrypt.h>
+ 
+ CryptoX_Result CryptoAPI_InitCryptoContext(HCRYPTPROV *provider);
+-CryptoX_Result CryptoAPI_LoadPublicKey(HCRYPTPROV hProv,
+-                                       BYTE *certData,
++CryptoX_Result CryptoAPI_LoadPublicKey(HCRYPTPROV hProv, BYTE *certData,
+                                        DWORD sizeOfCertData,
+                                        HCRYPTKEY *publicKey);
+-CryptoX_Result CryptoAPI_VerifyBegin(HCRYPTPROV provider, HCRYPTHASH* hash);
+-CryptoX_Result CryptoAPI_VerifyUpdate(HCRYPTHASH* hash,
+-                                      BYTE *buf, DWORD len);
+-CryptoX_Result CryptoAPI_VerifySignature(HCRYPTHASH *hash,
+-                                         HCRYPTKEY *pubKey,
++CryptoX_Result CryptoAPI_VerifyBegin(HCRYPTPROV provider, HCRYPTHASH *hash);
++CryptoX_Result CryptoAPI_VerifyUpdate(HCRYPTHASH *hash, BYTE *buf, DWORD len);
++CryptoX_Result CryptoAPI_VerifySignature(HCRYPTHASH *hash, HCRYPTKEY *pubKey,
+                                          const BYTE *signature,
+                                          DWORD signatureLen);
+ 
+ #define CryptoX_InvalidHandleValue ((ULONG_PTR)NULL)
+ #define CryptoX_ProviderHandle HCRYPTPROV
+ #define CryptoX_SignatureHandle HCRYPTHASH
+ #define CryptoX_PublicKey HCRYPTKEY
+ #define CryptoX_Certificate HCERTSTORE
+ #define CryptoX_InitCryptoProvider(CryptoHandle) \
+   CryptoAPI_InitCryptoContext(CryptoHandle)
+ #define CryptoX_VerifyBegin(CryptoHandle, SignatureHandle, PublicKey) \
+   CryptoAPI_VerifyBegin(CryptoHandle, SignatureHandle)
+ #define CryptoX_FreeSignatureHandle(SignatureHandle)
+ #define CryptoX_VerifyUpdate(SignatureHandle, buf, len) \
+   CryptoAPI_VerifyUpdate(SignatureHandle, (BYTE *)(buf), len)
+ #define CryptoX_LoadPublicKey(CryptoHandle, certData, dataSize, publicKey) \
+-  CryptoAPI_LoadPublicKey(CryptoHandle, (BYTE*)(certData), dataSize, publicKey)
++  CryptoAPI_LoadPublicKey(CryptoHandle, (BYTE *)(certData), dataSize, publicKey)
+ #define CryptoX_VerifySignature(hash, publicKey, signedData, len) \
+   CryptoAPI_VerifySignature(hash, publicKey, signedData, len)
+-#define CryptoX_FreePublicKey(key) \
+-  CryptDestroyKey(*(key))
++#define CryptoX_FreePublicKey(key) CryptDestroyKey(*(key))
+ #define CryptoX_FreeCertificate(cert) \
+   CertCloseStore(*(cert), CERT_CLOSE_STORE_FORCE_FLAG);
+ 
+ #else
+ 
+ /* This default implementation is necessary because we don't want to
+  * link to NSS from updater code on non Windows platforms.  On Windows
+  * we use CyrptoAPI instead of NSS.  We don't call any function as they
+  * would just fail, but this simplifies linking.
+  */
+ 
+ #define CryptoX_InvalidHandleValue NULL
+-#define CryptoX_ProviderHandle void*
+-#define CryptoX_SignatureHandle void*
+-#define CryptoX_PublicKey void*
+-#define CryptoX_Certificate void*
+-#define CryptoX_InitCryptoProvider(CryptoHandle) \
+-  CryptoX_Error
++#define CryptoX_ProviderHandle void *
++#define CryptoX_SignatureHandle void *
++#define CryptoX_PublicKey void *
++#define CryptoX_Certificate void *
++#define CryptoX_InitCryptoProvider(CryptoHandle) CryptoX_Error
+ #define CryptoX_VerifyBegin(CryptoHandle, SignatureHandle, PublicKey) \
+   CryptoX_Error
+ #define CryptoX_FreeSignatureHandle(SignatureHandle)
+ #define CryptoX_VerifyUpdate(SignatureHandle, buf, len) CryptoX_Error
+ #define CryptoX_LoadPublicKey(CryptoHandle, certData, dataSize, publicKey) \
+   CryptoX_Error
+ #define CryptoX_VerifySignature(hash, publicKey, signedData, len) CryptoX_Error
+ #define CryptoX_FreePublicKey(key) CryptoX_Error
+diff --git a/modules/libmar/verify/mar_verify.c b/modules/libmar/verify/mar_verify.c
+--- a/modules/libmar/verify/mar_verify.c
++++ b/modules/libmar/verify/mar_verify.c
+@@ -12,40 +12,38 @@
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include "mar_private.h"
+ #include "mar.h"
+ #include "cryptox.h"
+ 
+-int
+-mar_read_entire_file(const char * filePath, uint32_t maxSize,
+-                     /*out*/ const uint8_t * *data,
+-                     /*out*/ uint32_t *size)
+-{
++int mar_read_entire_file(const char *filePath, uint32_t maxSize,
++                         /*out*/ const uint8_t **data,
++                         /*out*/ uint32_t *size) {
+   int result;
+-  FILE * f;
++  FILE *f;
+ 
+   if (!filePath || !data || !size) {
+     return -1;
+   }
+ 
+   f = fopen(filePath, "rb");
+   if (!f) {
+     return -1;
+   }
+ 
+   result = -1;
+   if (!fseeko(f, 0, SEEK_END)) {
+     int64_t fileSize = ftello(f);
+     if (fileSize > 0 && fileSize <= maxSize && !fseeko(f, 0, SEEK_SET)) {
+-      unsigned char * fileData;
++      unsigned char *fileData;
+ 
+-      *size = (unsigned int) fileSize;
++      *size = (unsigned int)fileSize;
+       fileData = malloc(*size);
+       if (fileData) {
+         if (fread(fileData, *size, 1, f) == 1) {
+           *data = fileData;
+           result = 0;
+         } else {
+           free(fileData);
+         }
+@@ -57,46 +55,39 @@ mar_read_entire_file(const char * filePa
+ 
+   return result;
+ }
+ 
+ int mar_extract_and_verify_signatures_fp(FILE *fp,
+                                          CryptoX_ProviderHandle provider,
+                                          CryptoX_PublicKey *keys,
+                                          uint32_t keyCount);
+-int mar_verify_signatures_for_fp(FILE *fp,
+-                                 CryptoX_ProviderHandle provider,
++int mar_verify_signatures_for_fp(FILE *fp, CryptoX_ProviderHandle provider,
+                                  CryptoX_PublicKey *keys,
+-                                 const uint8_t * const *extractedSignatures,
+-                                 uint32_t keyCount,
+-                                 uint32_t *numVerified);
++                                 const uint8_t *const *extractedSignatures,
++                                 uint32_t keyCount, uint32_t *numVerified);
+ 
+ /**
+  * Reads the specified number of bytes from the file pointer and
+  * stores them in the passed buffer.
+  *
+  * @param  fp     The file pointer to read from.
+  * @param  buffer The buffer to store the read results.
+  * @param  size   The number of bytes to read, buffer must be
+  *                at least of this size.
+  * @param  ctxs   Pointer to the first element in an array of verify context.
+  * @param  count  The number of elements in ctxs
+  * @param  err    The name of what is being written to in case of error.
+  * @return  0 on success
+  *         -1 on read error
+  *         -2 on verify update error
+-*/
+-int
+-ReadAndUpdateVerifyContext(FILE *fp,
+-                           void *buffer,
+-                           uint32_t size,
+-                           CryptoX_SignatureHandle *ctxs,
+-                           uint32_t count,
+-                           const char *err)
+-{
++ */
++int ReadAndUpdateVerifyContext(FILE *fp, void *buffer, uint32_t size,
++                               CryptoX_SignatureHandle *ctxs, uint32_t count,
++                               const char *err) {
+   uint32_t k;
+   if (!fp || !buffer || !ctxs || count == 0 || !err) {
+     fprintf(stderr, "ERROR: Invalid parameter specified.\n");
+     return CryptoX_Error;
+   }
+ 
+   if (!size) {
+     return CryptoX_Success;
+@@ -125,22 +116,19 @@ ReadAndUpdateVerifyContext(FILE *fp,
+  *
+  * @param  mar            The file who's signature should be calculated
+  * @param  certData       Pointer to the first element in an array of
+  *                        certificate data
+  * @param  certDataSizes  Pointer to the first element in an array for size of
+  *                        the data stored
+  * @param  certCount      The number of elements in certData and certDataSizes
+  * @return 0 on success
+-*/
+-int
+-mar_verify_signatures(MarFile *mar,
+-                      const uint8_t * const *certData,
+-                      const uint32_t *certDataSizes,
+-                      uint32_t certCount) {
++ */
++int mar_verify_signatures(MarFile *mar, const uint8_t *const *certData,
++                          const uint32_t *certDataSizes, uint32_t certCount) {
+   int rv = -1;
+   CryptoX_ProviderHandle provider = CryptoX_InvalidHandleValue;
+   CryptoX_PublicKey keys[MAX_SIGNATURES];
+   uint32_t k;
+ 
+   memset(keys, 0, sizeof(keys));
+ 
+   if (!mar || !certData || !certDataSizes || certCount == 0) {
+@@ -154,18 +142,18 @@ mar_verify_signatures(MarFile *mar,
+   }
+ 
+   if (CryptoX_Failed(CryptoX_InitCryptoProvider(&provider))) {
+     fprintf(stderr, "ERROR: Could not init crytpo library.\n");
+     goto failure;
+   }
+ 
+   for (k = 0; k < certCount; ++k) {
+-    if (CryptoX_Failed(CryptoX_LoadPublicKey(provider, certData[k], certDataSizes[k],
+-                                             &keys[k]))) {
++    if (CryptoX_Failed(CryptoX_LoadPublicKey(provider, certData[k],
++                                             certDataSizes[k], &keys[k]))) {
+       fprintf(stderr, "ERROR: Could not load public key.\n");
+       goto failure;
+     }
+   }
+ 
+   rv = mar_extract_and_verify_signatures_fp(mar->fp, provider, keys, certCount);
+ 
+ failure:
+@@ -183,22 +171,21 @@ failure:
+  * Extracts each signature from the specified MAR file,
+  * then calls mar_verify_signatures_for_fp to verify each signature.
+  *
+  * @param  fp       An opened MAR file handle
+  * @param  provider A library provider
+  * @param  keys     The public keys to use to verify the MAR
+  * @param  keyCount The number of keys pointed to by keys
+  * @return 0 on success
+-*/
+-int
+-mar_extract_and_verify_signatures_fp(FILE *fp,
+-                                     CryptoX_ProviderHandle provider,
+-                                     CryptoX_PublicKey *keys,
+-                                     uint32_t keyCount) {
++ */
++int mar_extract_and_verify_signatures_fp(FILE *fp,
++                                         CryptoX_ProviderHandle provider,
++                                         CryptoX_PublicKey *keys,
++                                         uint32_t keyCount) {
+   uint32_t signatureCount, signatureLen, numVerified = 0;
+   uint32_t signatureAlgorithmIDs[MAX_SIGNATURES];
+   uint8_t *extractedSignatures[MAX_SIGNATURES];
+   uint32_t i;
+ 
+   memset(signatureAlgorithmIDs, 0, sizeof(signatureAlgorithmIDs));
+   memset(extractedSignatures, 0, sizeof(extractedSignatures));
+ 
+@@ -281,22 +268,19 @@ mar_extract_and_verify_signatures_fp(FIL
+       }
+       return CryptoX_Error;
+     }
+   }
+ 
+   if (ftello(fp) == -1) {
+     return CryptoX_Error;
+   }
+-  if (mar_verify_signatures_for_fp(fp,
+-                                   provider,
+-                                   keys,
+-                                   (const uint8_t * const *)extractedSignatures,
+-                                   signatureCount,
+-                                   &numVerified) == CryptoX_Error) {
++  if (mar_verify_signatures_for_fp(
++          fp, provider, keys, (const uint8_t *const *)extractedSignatures,
++          signatureCount, &numVerified) == CryptoX_Error) {
+     return CryptoX_Error;
+   }
+   for (i = 0; i < signatureCount; ++i) {
+     free(extractedSignatures[i]);
+   }
+ 
+   /* If we reached here and we verified every
+      signature, return success. */
+@@ -327,25 +311,22 @@ mar_extract_and_verify_signatures_fp(FIL
+  * @param  extractedSignatures  Pointer to the first element in an array
+  *                              of extracted signatures.
+  * @param  signatureCount       The number of signatures in the MAR file
+  * @param numVerified           Out parameter which will be filled with
+  *                              the number of verified signatures.
+  *                              This information can be useful for printing
+  *                              error messages.
+  * @return 0 on success, *numVerified == signatureCount.
+-*/
+-int
+-mar_verify_signatures_for_fp(FILE *fp,
+-                             CryptoX_ProviderHandle provider,
+-                             CryptoX_PublicKey *keys,
+-                             const uint8_t * const *extractedSignatures,
+-                             uint32_t signatureCount,
+-                             uint32_t *numVerified)
+-{
++ */
++int mar_verify_signatures_for_fp(FILE *fp, CryptoX_ProviderHandle provider,
++                                 CryptoX_PublicKey *keys,
++                                 const uint8_t *const *extractedSignatures,
++                                 uint32_t signatureCount,
++                                 uint32_t *numVerified) {
+   CryptoX_SignatureHandle signatureHandles[MAX_SIGNATURES];
+   char buf[BLOCKSIZE];
+   uint32_t signatureLengths[MAX_SIGNATURES];
+   uint32_t i;
+   int rv = CryptoX_Error;
+ 
+   memset(signatureHandles, 0, sizeof(signatureHandles));
+   memset(signatureLengths, 0, sizeof(signatureLengths));
+@@ -362,60 +343,51 @@ mar_verify_signatures_for_fp(FILE *fp,
+      make sure a non zero value is passed in.
+    */
+   if (!signatureCount) {
+     fprintf(stderr, "ERROR: There must be at least one signature.\n");
+     goto failure;
+   }
+ 
+   for (i = 0; i < signatureCount; i++) {
+-    if (CryptoX_Failed(CryptoX_VerifyBegin(provider,
+-                                           &signatureHandles[i], &keys[i]))) {
++    if (CryptoX_Failed(
++            CryptoX_VerifyBegin(provider, &signatureHandles[i], &keys[i]))) {
+       fprintf(stderr, "ERROR: Could not initialize signature handle.\n");
+       goto failure;
+     }
+   }
+ 
+   /* Skip to the start of the file */
+   if (fseeko(fp, 0, SEEK_SET)) {
+     fprintf(stderr, "ERROR: Could not seek to start of the file\n");
+     goto failure;
+   }
+ 
+   /* Bytes 0-3: MAR1
+      Bytes 4-7: index offset
+      Bytes 8-15: size of entire MAR
+    */
+-  if (CryptoX_Failed(ReadAndUpdateVerifyContext(fp, buf,
+-                                                SIGNATURE_BLOCK_OFFSET +
+-                                                sizeof(uint32_t),
+-                                                signatureHandles,
+-                                                signatureCount,
+-                                                "signature block"))) {
++  if (CryptoX_Failed(ReadAndUpdateVerifyContext(
++          fp, buf, SIGNATURE_BLOCK_OFFSET + sizeof(uint32_t), signatureHandles,
++          signatureCount, "signature block"))) {
+     goto failure;
+   }
+ 
+   /* Read the signature block */
+   for (i = 0; i < signatureCount; i++) {
+     /* Get the signature algorithm ID */
+-    if (CryptoX_Failed(ReadAndUpdateVerifyContext(fp,
+-                                                  &buf,
+-                                                  sizeof(uint32_t),
+-                                                  signatureHandles,
+-                                                  signatureCount,
+-                                                  "signature algorithm ID"))) {
++    if (CryptoX_Failed(ReadAndUpdateVerifyContext(
++            fp, &buf, sizeof(uint32_t), signatureHandles, signatureCount,
++            "signature algorithm ID"))) {
+       goto failure;
+     }
+ 
+-    if (CryptoX_Failed(ReadAndUpdateVerifyContext(fp,
+-                                                  &signatureLengths[i],
+-                                                  sizeof(uint32_t),
+-                                                  signatureHandles,
+-                                                  signatureCount,
+-                                                  "signature length"))) {
++    if (CryptoX_Failed(ReadAndUpdateVerifyContext(
++            fp, &signatureLengths[i], sizeof(uint32_t), signatureHandles,
++            signatureCount, "signature length"))) {
+       goto failure;
+     }
+     signatureLengths[i] = ntohl(signatureLengths[i]);
+     if (signatureLengths[i] > MAX_SIGNATURE_LENGTH) {
+       fprintf(stderr, "ERROR: Embedded signature length is too large.\n");
+       goto failure;
+     }
+ 
+@@ -423,36 +395,36 @@ mar_verify_signatures_for_fp(FILE *fp,
+     if (fseeko(fp, signatureLengths[i], SEEK_CUR)) {
+       fprintf(stderr, "ERROR: Could not seek past signature.\n");
+       goto failure;
+     }
+   }
+ 
+   /* Read the rest of the file after the signature block */
+   while (!feof(fp)) {
+-    int numRead = fread(buf, 1, BLOCKSIZE , fp);
++    int numRead = fread(buf, 1, BLOCKSIZE, fp);
+     if (ferror(fp)) {
+       fprintf(stderr, "ERROR: Error reading data block.\n");
+       goto failure;
+     }
+ 
+     for (i = 0; i < signatureCount; i++) {
+-      if (CryptoX_Failed(CryptoX_VerifyUpdate(&signatureHandles[i],
+-                                              buf, numRead))) {
+-        fprintf(stderr, "ERROR: Error updating verify context with"
+-                        " data block.\n");
++      if (CryptoX_Failed(
++              CryptoX_VerifyUpdate(&signatureHandles[i], buf, numRead))) {
++        fprintf(stderr,
++                "ERROR: Error updating verify context with"
++                " data block.\n");
+         goto failure;
+       }
+     }
+   }
+ 
+   /* Verify the signatures */
+   for (i = 0; i < signatureCount; i++) {
+-    if (CryptoX_Failed(CryptoX_VerifySignature(&signatureHandles[i],
+-                                               &keys[i],
++    if (CryptoX_Failed(CryptoX_VerifySignature(&signatureHandles[i], &keys[i],
+                                                extractedSignatures[i],
+                                                signatureLengths[i]))) {
+       fprintf(stderr, "ERROR: Error verifying signature.\n");
+       goto failure;
+     }
+     ++*numVerified;
+   }
+ 
+diff --git a/other-licenses/bsdiff/bsdiff.c b/other-licenses/bsdiff/bsdiff.c
+--- a/other-licenses/bsdiff/bsdiff.c
++++ b/other-licenses/bsdiff/bsdiff.c
+@@ -21,16 +21,21 @@
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <fcntl.h>
+ #include <stdarg.h>
+ #ifdef XP_WIN
+ #include <io.h>
+ #include <winsock2.h>
++#define open _open
++#define close _close
++#define read _read
++#define lseek _lseek
++#define write _write
+ #else
+ #include <unistd.h>
+ #include <arpa/inet.h>
+ #define _O_BINARY 0
+ #endif
+ 
+ #include "crctable.h"
+ 
+diff --git a/toolkit/mozapps/update/updater/bspatch/bspatch.cpp b/toolkit/mozapps/update/updater/bspatch/bspatch.cpp
+--- a/toolkit/mozapps/update/updater/bspatch/bspatch.cpp
++++ b/toolkit/mozapps/update/updater/bspatch/bspatch.cpp
+@@ -52,59 +52,79 @@
+ #endif
+ 
+ #ifndef SSIZE_MAX
+ #define SSIZE_MAX LONG_MAX
+ #endif
+ 
+ int MBS_ReadHeader(FILE *file, MBSPatchHeader *header) {
+   size_t s = fread(header, 1, sizeof(MBSPatchHeader), file);
+-  if (s != sizeof(MBSPatchHeader)) return READ_ERROR;
++  if (s != sizeof(MBSPatchHeader)) {
++    return READ_ERROR;
++  }
+ 
+   header->slen = ntohl(header->slen);
+   header->scrc32 = ntohl(header->scrc32);
+   header->dlen = ntohl(header->dlen);
+   header->cblen = ntohl(header->cblen);
+   header->difflen = ntohl(header->difflen);
+   header->extralen = ntohl(header->extralen);
+ 
+   struct stat hs;
+   s = fstat(fileno(file), &hs);
+-  if (s != 0) return READ_ERROR;
++  if (s != 0) {
++    return READ_ERROR;
++  }
+ 
+-  if (memcmp(header->tag, "MBDIFF10", 8) != 0) return UNEXPECTED_BSPATCH_ERROR;
++  if (memcmp(header->tag, "MBDIFF10", 8) != 0) {
++    return UNEXPECTED_BSPATCH_ERROR;
++  }
+ 
+-  if (hs.st_size > INT_MAX) return UNEXPECTED_BSPATCH_ERROR;
++  if (hs.st_size > INT_MAX) {
++    return UNEXPECTED_BSPATCH_ERROR;
++  }
+ 
+   size_t size = static_cast<size_t>(hs.st_size);
+-  if (size < sizeof(MBSPatchHeader)) return UNEXPECTED_BSPATCH_ERROR;
++  if (size < sizeof(MBSPatchHeader)) {
++    return UNEXPECTED_BSPATCH_ERROR;
++  }
+   size -= sizeof(MBSPatchHeader);
+ 
+-  if (size < header->cblen) return UNEXPECTED_BSPATCH_ERROR;
++  if (size < header->cblen) {
++    return UNEXPECTED_BSPATCH_ERROR;
++  }
+   size -= header->cblen;
+ 
+-  if (size < header->difflen) return UNEXPECTED_BSPATCH_ERROR;
++  if (size < header->difflen) {
++    return UNEXPECTED_BSPATCH_ERROR;
++  }
+   size -= header->difflen;
+ 
+-  if (size < header->extralen) return UNEXPECTED_BSPATCH_ERROR;
++  if (size < header->extralen) {
++    return UNEXPECTED_BSPATCH_ERROR;
++  }
+   size -= header->extralen;
+ 
+-  if (size != 0) return UNEXPECTED_BSPATCH_ERROR;
++  if (size != 0) {
++    return UNEXPECTED_BSPATCH_ERROR;
++  }
+ 
+   return OK;
+ }
+ 
+ int MBS_ApplyPatch(const MBSPatchHeader *header, FILE *patchFile,
+                    unsigned char *fbuffer, FILE *file) {
+   unsigned char *fbufstart = fbuffer;
+   unsigned char *fbufend = fbuffer + header->slen;
+ 
+   unsigned char *buf = (unsigned char *)malloc(header->cblen + header->difflen +
+                                                header->extralen);
+-  if (!buf) return BSPATCH_MEM_ERROR;
++  if (!buf) {
++    return BSPATCH_MEM_ERROR;
++  }
+ 
+   int rv = OK;
+ 
+   size_t r = header->cblen + header->difflen + header->extralen;
+   unsigned char *wb = buf;
+   while (r) {
+     const size_t count = (r > SSIZE_MAX) ? SSIZE_MAX : r;
+     size_t c = fread(wb, 1, count, patchFile);

+ 31 - 0
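For readers skimming the reformatted libmar verify sources above, here is a minimal sketch (not part of any patch) of how the CryptoX_* macros from cryptox.h are meant to be driven, in the same init/load/begin/update/verify order that mar_verify.c follows. The function and parameter names below are hypothetical; error-path cleanup of the provider handle is omitted for brevity.

    #include "cryptox.h" /* CryptoX_* abstraction from modules/libmar/verify */

    /* Hypothetical driver, not updater code: verify one in-memory buffer
     * against one detached signature using the macros shown above. */
    static CryptoX_Result VerifyBufferSketch(const unsigned char *certData,
                                             unsigned int certLen,
                                             unsigned char *data,
                                             unsigned int dataLen,
                                             const unsigned char *sig,
                                             unsigned int sigLen) {
      CryptoX_ProviderHandle provider = CryptoX_InvalidHandleValue;
      CryptoX_PublicKey key;
      CryptoX_SignatureHandle handle;
      CryptoX_Result rv = CryptoX_Error;

      /* 1. Acquire a crypto context (CryptoAPI, Security Transforms or NSS). */
      if (CryptoX_Failed(CryptoX_InitCryptoProvider(&provider))) {
        return CryptoX_Error;
      }
      /* 2. Extract the public key from the DER-encoded certificate. */
      if (CryptoX_Failed(CryptoX_LoadPublicKey(provider, certData, certLen,
                                               &key))) {
        return CryptoX_Error;
      }
      /* 3. Hash the signed data, then check the signature against it. */
      if (CryptoX_Failed(CryptoX_VerifyBegin(provider, &handle, &key))) {
        CryptoX_FreePublicKey(&key);
        return CryptoX_Error;
      }
      if (!CryptoX_Failed(CryptoX_VerifyUpdate(&handle, data, dataLen)) &&
          !CryptoX_Failed(CryptoX_VerifySignature(&handle, &key, sig,
                                                  sigLen))) {
        rv = CryptoX_Success;
      }
      CryptoX_FreeSignatureHandle(&handle);
      CryptoX_FreePublicKey(&key);
      return rv;
    }

As the three branches of cryptox.h above show, this one sequence resolves to CryptoAPI on Windows, Security Transforms on macOS, and NSS elsewhere, with a stub that always fails when none of those are available.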
mozilla-release/patches/1514532-2-66a1.patch

@@ -0,0 +1,31 @@
+
+# HG changeset patch
+# User Robert Strong <robert.bugzilla@gmail.com>
+# Date 1545075791 28800
+# Node ID a46e55bbb59169f36aea1035818682aa203628b9
+# Parent  b0d5f304e740f0dcd1516d78270f5d731cf4a054
+Bug 1514532 - followup comment format fix. r=me
+
+diff --git a/modules/libmar/sign/mar_sign.c b/modules/libmar/sign/mar_sign.c
+--- a/modules/libmar/sign/mar_sign.c
++++ b/modules/libmar/sign/mar_sign.c
+@@ -573,17 +573,17 @@ failure:
+ 
+ /**
+  * Imports a base64 encoded signature into a MAR file
+  *
+  * @param  src           The path of the source MAR file
+  * @param  sigIndex      The index of the signature to import
+  * @param  base64SigFile A file which contains the signature to import
+  * @param  dest          The path of the destination MAR file with replaced
+- *         signature
++ *                       signature
+  * @return 0 on success
+  *         -1 on error
+  */
+ int import_signature(const char *src, uint32_t sigIndex,
+                      const char *base64SigFile, const char *dest) {
+   int rv = -1;
+   FILE *fpSrc = NULL;
+   FILE *fpDest = NULL;
+

+ 47 - 0
mozilla-release/patches/1540142-68a1.patch

@@ -0,0 +1,47 @@
+# HG changeset patch
+# User Petr Sumbera <petr.sumbera@oracle.com>
+# Date 1554226076 0
+# Node ID 48f0ab684d3facb118a545dd2ca27dfb1394c4cd
+# Parent  b211dc9046496a0fcf13d4fc0c887df441800e43
+Bug 1540142 - Avoid SIGBUS on SPARC systems due to CityHash64 r=froydnj

+
+Differential Revision: https://phabricator.services.mozilla.com/D25442
+
+diff --git a/other-licenses/nsis/Contrib/CityHash/cityhash/city.cpp b/other-licenses/nsis/Contrib/CityHash/cityhash/city.cpp
+--- a/other-licenses/nsis/Contrib/CityHash/cityhash/city.cpp
++++ b/other-licenses/nsis/Contrib/CityHash/cityhash/city.cpp
+@@ -28,18 +28,33 @@
+ // compromising on hash quality.
+ 
+ #include "city.h"
+ 
+ #include <algorithm>
+ 
+ using namespace std;
+ 
++#if __sparc__
++#include <string.h>
++static inline uint64 UNALIGNED_LOAD64(const char *p) {
++  uint64 val;
++  memcpy(&val, p, sizeof(uint64));
++  return val;
++}
++
++static inline uint32 UNALIGNED_LOAD32(const char *p) {
++  uint32 val;
++  memcpy(&val, p, sizeof(uint32));
++  return val;
++}
++#else
+ #define UNALIGNED_LOAD64(p) (*(const uint64*)(p))
+ #define UNALIGNED_LOAD32(p) (*(const uint32*)(p))
++#endif
+ 
+ #if !defined(LIKELY)
+ #if defined(__GNUC__)
+ #define LIKELY(x) (__builtin_expect(!!(x), 1))
+ #else
+ #define LIKELY(x) (x)
+ #endif
+ #endif
+

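The patch above swaps the pointer-cast UNALIGNED_LOAD64/UNALIGNED_LOAD32 macros for memcpy-based loads on SPARC, where dereferencing a misaligned pointer raises SIGBUS. A self-contained sketch of the same pattern follows; the names are hypothetical and not taken from the patch.

    #include <stdint.h>
    #include <string.h>

    /* Read a 64-bit value from a possibly unaligned address. The memcpy
     * forces byte-wise access on alignment-strict targets while compilers
     * can still fold it into a single load where unaligned access is legal. */
    static inline uint64_t load_u64_unaligned(const void *p) {
      uint64_t v;
      memcpy(&v, p, sizeof(v));
      return v;
    }

    /* Example: consume a byte stream 8 bytes at a time without assuming
     * the buffer is 8-byte aligned. */
    static uint64_t sum64(const unsigned char *buf, size_t len) {
      uint64_t acc = 0;
      while (len >= sizeof(uint64_t)) {
        acc += load_u64_unaligned(buf);
        buf += sizeof(uint64_t);
        len -= sizeof(uint64_t);
      }
      return acc;
    }

Mainstream compilers usually recognize the small fixed-size memcpy and emit the same single load on architectures that permit unaligned access, so the portable form generally costs nothing.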
+ 51 - 5
mozilla-release/patches/1567642-1-71a1.patch

@@ -2,7 +2,7 @@
 # User Andrew Halberstadt <ahalberstadt@mozilla.com>
 # Date 1568073332 0
 # Node ID 216d19f500f2bc4f65a5e0202eb3bdcba5f20120
-# Parent  f281d84b39fb42a6437d2e96b965883251ba800b
+# Parent  43a2d8c2d2c433804ea7825b5d6a6a7b0eba021f
 Bug 1567642 - [mozbase] Fix flake8 under py3 lint errors r=gbrown
 
 Differential Revision: https://phabricator.services.mozilla.com/D45243
@@ -70,10 +70,17 @@ diff --git a/testing/mozbase/manifestparser/manifestparser/filters.py b/testing/
 diff --git a/testing/mozbase/manifestparser/manifestparser/ini.py b/testing/mozbase/manifestparser/manifestparser/ini.py
 --- a/testing/mozbase/manifestparser/manifestparser/ini.py
 +++ b/testing/mozbase/manifestparser/manifestparser/ini.py
-@@ -7,17 +7,17 @@ from __future__ import absolute_import
+@@ -2,22 +2,24 @@
+ # License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ # You can obtain one at http://mozilla.org/MPL/2.0/.
+ 
+ from __future__ import absolute_import
+ 
  import os
  import sys
  
++from six import string_types
++
  __all__ = ['read_ini', 'combine_fields']
  
  
@@ -89,6 +96,27 @@ diff --git a/testing/mozbase/manifestparser/manifestparser/ini.py b/testing/mozb
          msg = "Error parsing manifest file '{}', line {}: {}".format(path, linenum, msg)
          super(IniParseError, self).__init__(msg)
  
+@@ -38,18 +40,18 @@ def read_ini(fp, variables=None, default
+ 
+     # variables
+     variables = variables or {}
+     comments = comments or ('#',)
+     separators = separators or ('=', ':')
+     sections = []
+     key = value = None
+     section_names = set()
+-    if isinstance(fp, basestring):
+-        fp = file(fp)
++    if isinstance(fp, string_types):
++        fp = open(fp)
+ 
+     # read the lines
+     for (linenum, line) in enumerate(fp.read().splitlines(), start=1):
+ 
+         stripped = line.strip()
+ 
+         # ignore blank lines
+         if not stripped:
 diff --git a/testing/mozbase/manifestparser/manifestparser/manifestparser.py b/testing/mozbase/manifestparser/manifestparser/manifestparser.py
 --- a/testing/mozbase/manifestparser/manifestparser/manifestparser.py
 +++ b/testing/mozbase/manifestparser/manifestparser/manifestparser.py
@@ -380,6 +408,25 @@ diff --git a/testing/mozbase/mozcrash/mozcrash/mozcrash.py b/testing/mozbase/moz
 diff --git a/testing/mozbase/mozdevice/mozdevice/adb.py b/testing/mozbase/mozdevice/mozdevice/adb.py
 --- a/testing/mozbase/mozdevice/mozdevice/adb.py
 +++ b/testing/mozbase/mozdevice/mozdevice/adb.py
+@@ -290,17 +290,17 @@ class ADBCommand(object):
+                                    (' '.join(adb_process.args),
+                                     timeout,
+                                     adb_process.timedout,
+                                     adb_process.exitcode,
+                                     output))
+ 
+             return output
+         finally:
+-            if adb_process and isinstance(adb_process.stdout_file, file):
++            if adb_process and isinstance(adb_process.stdout_file, io.IOBase):
+                 adb_process.stdout_file.close()
+ 
+ 
+ class ADBHost(ADBCommand):
+     """ADBHost provides a basic interface to adb host commands
+     which do not target a specific device.
+ 
+     ::
 @@ -1912,17 +1912,17 @@ class ADBDevice(ADBCommand):
                      self._logger.error('get_process_list: %s %s\n%s' % (
                          header, line, traceback.format_exc()))
@@ -528,7 +575,7 @@ diff --git a/testing/mozbase/mozrunner/mozrunner/base/runner.py b/testing/mozbas
 diff --git a/testing/mozbase/mozrunner/mozrunner/devices/android_device.py b/testing/mozbase/mozrunner/mozrunner/devices/android_device.py
 --- a/testing/mozbase/mozrunner/mozrunner/devices/android_device.py
 +++ b/testing/mozbase/mozrunner/mozrunner/devices/android_device.py
-@@ -11,19 +11,19 @@ import platform
+@@ -11,18 +11,18 @@ import platform
  import shutil
  import signal
  import sys
@@ -539,8 +586,8 @@ diff --git a/testing/mozbase/mozrunner/mozrunner/devices/android_device.py b/tes
  import psutil
 -import six.moves.urllib as urllib
  from mozdevice import DeviceManagerADB, DMError
- from mozprocess import ProcessHandler
 +from six.moves import input, urllib
+ from mozprocess import ProcessHandler
  from six.moves.urllib.parse import urlparse
  
  EMULATOR_HOME_DIR = os.path.join(os.path.expanduser('~'), '.mozbuild', 'android-device')
@@ -548,7 +595,6 @@ diff --git a/testing/mozbase/mozrunner/mozrunner/devices/android_device.py b/tes
  EMULATOR_AUTH_FILE = os.path.join(os.path.expanduser('~'), '.emulator_console_auth_token')
  
  TOOLTOOL_URL = 'https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py'
- 
 @@ -109,17 +109,17 @@ def verify_android_device(build_obj, ins
         already connected.
      """

+ 12 - 12
mozilla-release/patches/1583854-75a1.patch

@@ -2,7 +2,7 @@
 # User Stephen A Pohl <spohl.mozilla.bugs@gmail.com>
 # Date 1582831578 0
 # Node ID 04787960947b8292c8daf3fe492f6f8c0a204402
-# Parent  00b73504cc306c1b3386eb5c5206fc9c0e7f9318
+# Parent  112c4a1f69006fe21cb0b3f1e13a36d50db5caaf
 Bug 1583854: Fix a build issue that prevented builds with the macOS 10.15 SDK from succeeding. Based on a patch by Pete Collins. r=mstange
 
 Differential Revision: https://phabricator.services.mozilla.com/D64447
@@ -14,18 +14,18 @@ diff --git a/modules/libmar/verify/MacVerifyCrypto.cpp b/modules/libmar/verify/M
  // We declare the necessary parts of the Security Transforms API here since
  // we're building with the 10.6 SDK, which doesn't know about Security
  // Transforms.
- #ifdef __cplusplus
+ #if defined(__cplusplus)
  extern "C" {
  #endif
-   const CFStringRef kSecTransformInputAttributeName = CFSTR("INPUT");
-   typedef CFTypeRef SecTransformRef;
--  typedef struct OpaqueSecKeyRef* SecKeyRef;
-+  typedef OpaqueSecKeyRef* SecKeyRef;
+ const CFStringRef kSecTransformInputAttributeName = CFSTR("INPUT");
+ typedef CFTypeRef SecTransformRef;
+-typedef struct OpaqueSecKeyRef* SecKeyRef;
++typedef OpaqueSecKeyRef* SecKeyRef;
  
-   typedef SecTransformRef (*SecTransformCreateReadTransformWithReadStreamFunc)
-                             (CFReadStreamRef inputStream);
-   SecTransformCreateReadTransformWithReadStreamFunc
+ typedef SecTransformRef (*SecTransformCreateReadTransformWithReadStreamFunc)(
+     CFReadStreamRef inputStream);
+ SecTransformCreateReadTransformWithReadStreamFunc
      SecTransformCreateReadTransformWithReadStreamPtr = NULL;
-   typedef CFTypeRef (*SecTransformExecuteFunc)(SecTransformRef transform,
-                                                CFErrorRef* error);
-   SecTransformExecuteFunc SecTransformExecutePtr = NULL;
+ typedef CFTypeRef (*SecTransformExecuteFunc)(SecTransformRef transform,
+                                              CFErrorRef* error);
+ SecTransformExecuteFunc SecTransformExecutePtr = NULL;

+ 1950 - 0
mozilla-release/patches/1596660-PARTIAL-removeonly-73a1.patch

@@ -0,0 +1,1950 @@
+# HG changeset patch
+# User Matthew Noorenberghe <mozilla@noorenberghe.ca>
+# Date 1576041763 0
+# Node ID 2d7d6f9a04a47be448eba7f68e71067346ba5297
+# Parent  d59143031488daf4a17b9b9f28b880c0e43854a8
+Bug 1596660 - Replace pwmgr storage-mozStorage with a stub storage-geckoview. r=sfoster
+
+* storage-mozStorage was used by Fennec, but that is no longer built from m-c.
+* Existing Android tests need to be disabled since they rely on being able to manipulate storage but this commit only stubs searching for logins.
+* A later commit will actually filter the logins returned by a GV delegate.
+
+Differential Revision: https://phabricator.services.mozilla.com/D54139
+
+diff --git a/toolkit/components/passwordmgr/moz.build b/toolkit/components/passwordmgr/moz.build
+--- a/toolkit/components/passwordmgr/moz.build
++++ b/toolkit/components/passwordmgr/moz.build
+@@ -29,44 +29,31 @@ XPIDL_SOURCES += [
+ 
+ XPIDL_MODULE = 'loginmgr'
+ 
+ EXTRA_COMPONENTS += [
+     'crypto-SDR.js',
+     'nsLoginInfo.js',
+     'nsLoginManager.js',
+     'nsLoginManagerPrompter.js',
+-]
+-
+-EXTRA_PP_COMPONENTS += [
+     'passwordmgr.manifest',
++    'storage-json.js',
+ ]
+ 
+ EXTRA_JS_MODULES += [
+     'InsecurePasswordUtils.jsm',
+     'LoginHelper.jsm',
++    'LoginImport.jsm',
+     'LoginManagerContent.jsm',
+     'LoginManagerParent.jsm',
+     'LoginRecipes.jsm',
++    'LoginStore.jsm',
+     'OSCrypto.jsm',
+ ]
+ 
+-if CONFIG['OS_TARGET'] == 'Android':
+-    EXTRA_COMPONENTS += [
+-        'storage-mozStorage.js',
+-    ]
+-else:
+-    EXTRA_COMPONENTS += [
+-        'storage-json.js',
+-    ]
+-    EXTRA_JS_MODULES += [
+-        'LoginImport.jsm',
+-        'LoginStore.jsm',
+-    ]
+-
+ if CONFIG['OS_TARGET'] == 'WINNT':
+     EXTRA_JS_MODULES += [
+         'OSCrypto_win.js',
+     ]
+ 
+ if CONFIG['MOZ_BUILD_APP'] == 'browser' or CONFIG['MOZ_SUITE']:
+     EXTRA_JS_MODULES += [
+         'LoginManagerContextMenu.jsm',
+diff --git a/toolkit/components/passwordmgr/nsLoginManager.js b/toolkit/components/passwordmgr/nsLoginManager.js
+--- a/toolkit/components/passwordmgr/nsLoginManager.js
++++ b/toolkit/components/passwordmgr/nsLoginManager.js
+@@ -101,34 +101,29 @@ LoginManager.prototype = {
+       this._initStorage();
+     }
+ 
+     Services.obs.addObserver(this._observer, "gather-telemetry");
+   },
+ 
+ 
+   _initStorage() {
+-    let contractID;
+-    if (AppConstants.platform == "android") {
+-      contractID = "@mozilla.org/login-manager/storage/mozStorage;1";
+-    } else {
+-      contractID = "@mozilla.org/login-manager/storage/json;1";
+-    }
++    let contractID = "@mozilla.org/login-manager/storage/json;1";
+     try {
+-      let catMan = Cc["@mozilla.org/categorymanager;1"].
+-                   getService(Ci.nsICategoryManager);
++      let catMan = Cc["@mozilla.org/categorymanager;1"]
++                     .getService(Ci.nsICategoryManager);
+       contractID = catMan.getCategoryEntry("login-manager-storage",
+                                            "nsILoginManagerStorage");
+       log.debug("Found alternate nsILoginManagerStorage with contract ID:", contractID);
+     } catch (e) {
+       log.debug("No alternate nsILoginManagerStorage registered");
+     }
+ 
+-    this._storage = Cc[contractID].
+-                    createInstance(Ci.nsILoginManagerStorage);
++    this._storage = Cc[contractID]
++                      .createInstance(Ci.nsILoginManagerStorage);
+     this.initializationPromise = this._storage.initialize();
+   },
+ 
+ 
+   /* ---------- Utility objects ---------- */
+ 
+ 
+   /**
+diff --git a/toolkit/components/passwordmgr/passwordmgr.manifest b/toolkit/components/passwordmgr/passwordmgr.manifest
+--- a/toolkit/components/passwordmgr/passwordmgr.manifest
++++ b/toolkit/components/passwordmgr/passwordmgr.manifest
+@@ -1,17 +1,12 @@
+ component {cb9e0de8-3598-4ed7-857b-827f011ad5d8} nsLoginManager.js
+ contract @mozilla.org/login-manager;1 {cb9e0de8-3598-4ed7-857b-827f011ad5d8}
+ component {749e62f4-60ae-4569-a8a2-de78b649660e} nsLoginManagerPrompter.js
+ contract @mozilla.org/passwordmanager/authpromptfactory;1 {749e62f4-60ae-4569-a8a2-de78b649660e}
+ component {8aa66d77-1bbb-45a6-991e-b8f47751c291} nsLoginManagerPrompter.js
+ contract @mozilla.org/login-manager/prompter;1 {8aa66d77-1bbb-45a6-991e-b8f47751c291}
+ component {0f2f347c-1e4f-40cc-8efd-792dea70a85e} nsLoginInfo.js
+ contract @mozilla.org/login-manager/loginInfo;1 {0f2f347c-1e4f-40cc-8efd-792dea70a85e}
+-#ifdef ANDROID
+-component {8c2023b9-175c-477e-9761-44ae7b549756} storage-mozStorage.js
+-contract @mozilla.org/login-manager/storage/mozStorage;1 {8c2023b9-175c-477e-9761-44ae7b549756}
+-#else
+ component {c00c432d-a0c9-46d7-bef6-9c45b4d07341} storage-json.js
+ contract @mozilla.org/login-manager/storage/json;1 {c00c432d-a0c9-46d7-bef6-9c45b4d07341}
+-#endif
+ component {dc6c2976-0f73-4f1f-b9ff-3d72b4e28309} crypto-SDR.js
+ contract @mozilla.org/login-manager/crypto/SDR;1 {dc6c2976-0f73-4f1f-b9ff-3d72b4e28309}
+\ No newline at end of file
+diff --git a/toolkit/components/passwordmgr/storage-mozStorage.js b/toolkit/components/passwordmgr/storage-mozStorage.js
+deleted file mode 100644
+--- a/toolkit/components/passwordmgr/storage-mozStorage.js
++++ /dev/null
+@@ -1,1252 +0,0 @@
+-/* This Source Code Form is subject to the terms of the Mozilla Public
+- * License, v. 2.0. If a copy of the MPL was not distributed with this
+- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+-
+-const DB_VERSION = 6; // The database schema version
+-const PERMISSION_SAVE_LOGINS = "login-saving";
+-
+-ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm");
+-ChromeUtils.import("resource://gre/modules/Services.jsm");
+-
+-ChromeUtils.defineModuleGetter(this, "LoginHelper",
+-                               "resource://gre/modules/LoginHelper.jsm");
+-
+-/**
+- * Object that manages a database transaction properly so consumers don't have
+- * to worry about it throwing.
+- *
+- * @param aDatabase
+- *        The mozIStorageConnection to start a transaction on.
+- */
+-function Transaction(aDatabase) {
+-  this._db = aDatabase;
+-
+-  this._hasTransaction = false;
+-  try {
+-    this._db.beginTransaction();
+-    this._hasTransaction = true;
+-  } catch (e) { /* om nom nom exceptions */ }
+-}
+-
+-Transaction.prototype = {
+-  commit() {
+-    if (this._hasTransaction)
+-      this._db.commitTransaction();
+-  },
+-
+-  rollback() {
+-    if (this._hasTransaction)
+-      this._db.rollbackTransaction();
+-  },
+-};
+-
+-
+-function LoginManagerStorage_mozStorage() { }
+-
+-LoginManagerStorage_mozStorage.prototype = {
+-
+-  classID: Components.ID("{8c2023b9-175c-477e-9761-44ae7b549756}"),
+-  QueryInterface: XPCOMUtils.generateQI([Ci.nsILoginManagerStorage,
+-                                          Ci.nsIInterfaceRequestor]),
+-  getInterface(aIID) {
+-    if (aIID.equals(Ci.nsIVariant)) {
+-      // Allows unwrapping the JavaScript object for regression tests.
+-      return this;
+-    }
+-
+-    if (aIID.equals(Ci.mozIStorageConnection)) {
+-      return this._dbConnection;
+-    }
+-
+-    throw new Components.Exception("Interface not available", Cr.NS_ERROR_NO_INTERFACE);
+-  },
+-
+-  __crypto: null,  // nsILoginManagerCrypto service
+-  get _crypto() {
+-    if (!this.__crypto)
+-      this.__crypto = Cc["@mozilla.org/login-manager/crypto/SDR;1"].
+-                      getService(Ci.nsILoginManagerCrypto);
+-    return this.__crypto;
+-  },
+-
+-  __profileDir: null,  // nsIFile for the user's profile dir
+-  get _profileDir() {
+-    if (!this.__profileDir)
+-      this.__profileDir = Services.dirsvc.get("ProfD", Ci.nsIFile);
+-    return this.__profileDir;
+-  },
+-
+-  __uuidService: null,
+-  get _uuidService() {
+-    if (!this.__uuidService)
+-      this.__uuidService = Cc["@mozilla.org/uuid-generator;1"].
+-                           getService(Ci.nsIUUIDGenerator);
+-    return this.__uuidService;
+-  },
+-
+-
+-  // The current database schema.
+-  _dbSchema: {
+-    tables: {
+-      moz_logins:         "id                  INTEGER PRIMARY KEY," +
+-                          "hostname            TEXT NOT NULL," +
+-                          "httpRealm           TEXT," +
+-                          "formSubmitURL       TEXT," +
+-                          "usernameField       TEXT NOT NULL," +
+-                          "passwordField       TEXT NOT NULL," +
+-                          "encryptedUsername   TEXT NOT NULL," +
+-                          "encryptedPassword   TEXT NOT NULL," +
+-                          "guid                TEXT," +
+-                          "encType             INTEGER," +
+-                          "timeCreated         INTEGER," +
+-                          "timeLastUsed        INTEGER," +
+-                          "timePasswordChanged INTEGER," +
+-                          "timesUsed           INTEGER",
+-      // Changes must be reflected in this._dbAreExpectedColumnsPresent(),
+-      // this._searchLogins(), and this.modifyLogin().
+-
+-      moz_disabledHosts:  "id                 INTEGER PRIMARY KEY," +
+-                          "hostname           TEXT UNIQUE ON CONFLICT REPLACE",
+-
+-      moz_deleted_logins: "id                  INTEGER PRIMARY KEY," +
+-                          "guid                TEXT," +
+-                          "timeDeleted         INTEGER",
+-    },
+-    indices: {
+-      moz_logins_hostname_index: {
+-        table: "moz_logins",
+-        columns: ["hostname"]
+-      },
+-      moz_logins_hostname_formSubmitURL_index: {
+-        table: "moz_logins",
+-        columns: ["hostname", "formSubmitURL"]
+-      },
+-      moz_logins_hostname_httpRealm_index: {
+-          table: "moz_logins",
+-          columns: ["hostname", "httpRealm"]
+-      },
+-      moz_logins_guid_index: {
+-          table: "moz_logins",
+-          columns: ["guid"]
+-      },
+-      moz_logins_encType_index: {
+-          table: "moz_logins",
+-          columns: ["encType"]
+-      }
+-    }
+-  },
+-  _dbConnection: null,  // The database connection
+-  _dbStmts: null,  // Database statements for memoization
+-
+-  _signonsFile: null,  // nsIFile for "signons.sqlite"
+-
+-
+-  /*
+-   * Internal method used by regression tests only.  It overrides the default
+-   * database location.
+-   */
+-  initWithFile(aDBFile) {
+-    if (aDBFile)
+-      this._signonsFile = aDBFile;
+-
+-    this.initialize();
+-  },
+-
+-
+-  initialize() {
+-    this._dbStmts = {};
+-
+-    let isFirstRun;
+-    try {
+-      // Force initialization of the crypto module.
+-      // See bug 717490 comment 17.
+-      this._crypto;
+-
+-      // If initWithFile is calling us, _signonsFile may already be set.
+-      if (!this._signonsFile) {
+-        // Initialize signons.sqlite
+-        this._signonsFile = this._profileDir.clone();
+-        this._signonsFile.append("signons.sqlite");
+-      }
+-      this.log("Opening database at " + this._signonsFile.path);
+-
+-      // Initialize the database (create, migrate as necessary)
+-      isFirstRun = this._dbInit();
+-
+-      this._initialized = true;
+-
+-      return Promise.resolve();
+-    } catch (e) {
+-      this.log("Initialization failed: " + e);
+-      // If the import fails on first run, we want to delete the db
+-      if (isFirstRun && e == "Import failed")
+-        this._dbCleanup(false);
+-      throw new Error("Initialization failed");
+-    }
+-  },
+-
+-
+-  /**
+-   * Internal method used by regression tests only.  It is called before
+-   * replacing this storage module with a new instance.
+-   */
+-  terminate() {
+-    return Promise.resolve();
+-  },
+-
+-
+-  addLogin(login) {
+-    // Throws if there are bogus values.
+-    LoginHelper.checkLoginValues(login);
+-
+-    let [encUsername, encPassword, encType] = this._encryptLogin(login);
+-
+-    // Clone the login, so we don't modify the caller's object.
+-    let loginClone = login.clone();
+-
+-    // Initialize the nsILoginMetaInfo fields, unless the caller gave us values
+-    loginClone.QueryInterface(Ci.nsILoginMetaInfo);
+-    if (loginClone.guid) {
+-      if (!this._isGuidUnique(loginClone.guid))
+-        throw new Error("specified GUID already exists");
+-    } else {
+-      loginClone.guid = this._uuidService.generateUUID().toString();
+-    }
+-
+-    // Set timestamps
+-    let currentTime = Date.now();
+-    if (!loginClone.timeCreated)
+-      loginClone.timeCreated = currentTime;
+-    if (!loginClone.timeLastUsed)
+-      loginClone.timeLastUsed = currentTime;
+-    if (!loginClone.timePasswordChanged)
+-      loginClone.timePasswordChanged = currentTime;
+-    if (!loginClone.timesUsed)
+-      loginClone.timesUsed = 1;
+-
+-    let query =
+-        "INSERT INTO moz_logins " +
+-        "(hostname, httpRealm, formSubmitURL, usernameField, " +
+-         "passwordField, encryptedUsername, encryptedPassword, " +
+-         "guid, encType, timeCreated, timeLastUsed, timePasswordChanged, " +
+-         "timesUsed) " +
+-        "VALUES (:hostname, :httpRealm, :formSubmitURL, :usernameField, " +
+-                ":passwordField, :encryptedUsername, :encryptedPassword, " +
+-                ":guid, :encType, :timeCreated, :timeLastUsed, " +
+-                ":timePasswordChanged, :timesUsed)";
+-
+-    let params = {
+-      hostname:            loginClone.hostname,
+-      httpRealm:           loginClone.httpRealm,
+-      formSubmitURL:       loginClone.formSubmitURL,
+-      usernameField:       loginClone.usernameField,
+-      passwordField:       loginClone.passwordField,
+-      encryptedUsername:   encUsername,
+-      encryptedPassword:   encPassword,
+-      guid:                loginClone.guid,
+-      encType,
+-      timeCreated:         loginClone.timeCreated,
+-      timeLastUsed:        loginClone.timeLastUsed,
+-      timePasswordChanged: loginClone.timePasswordChanged,
+-      timesUsed:           loginClone.timesUsed
+-    };
+-
+-    let stmt;
+-    try {
+-      stmt = this._dbCreateStatement(query, params);
+-      stmt.execute();
+-    } catch (e) {
+-      this.log("addLogin failed: " + e.name + " : " + e.message);
+-      throw new Error("Couldn't write to database, login not added.");
+-    } finally {
+-      if (stmt) {
+-        stmt.reset();
+-      }
+-    }
+-
+-    // Send a notification that a login was added.
+-    LoginHelper.notifyStorageChanged("addLogin", loginClone);
+-    return loginClone;
+-  },
+-
+-
+-  removeLogin(login) {
+-    let [idToDelete, storedLogin] = this._getIdForLogin(login);
+-    if (!idToDelete)
+-      throw new Error("No matching logins");
+-
+-    // Execute the statement & remove from DB
+-    let query  = "DELETE FROM moz_logins WHERE id = :id";
+-    let params = { id: idToDelete };
+-    let stmt;
+-    let transaction = new Transaction(this._dbConnection);
+-    try {
+-      stmt = this._dbCreateStatement(query, params);
+-      stmt.execute();
+-      this.storeDeletedLogin(storedLogin);
+-      transaction.commit();
+-    } catch (e) {
+-      this.log("_removeLogin failed: " + e.name + " : " + e.message);
+-      transaction.rollback();
+-      throw new Error("Couldn't write to database, login not removed.");
+-    } finally {
+-      if (stmt) {
+-        stmt.reset();
+-      }
+-    }
+-    LoginHelper.notifyStorageChanged("removeLogin", storedLogin);
+-  },
+-
+-  modifyLogin(oldLogin, newLoginData) {
+-    let [idToModify, oldStoredLogin] = this._getIdForLogin(oldLogin);
+-    if (!idToModify)
+-      throw new Error("No matching logins");
+-
+-    let newLogin = LoginHelper.buildModifiedLogin(oldStoredLogin, newLoginData);
+-
+-    // Check if the new GUID is duplicate.
+-    if (newLogin.guid != oldStoredLogin.guid &&
+-        !this._isGuidUnique(newLogin.guid)) {
+-      throw new Error("specified GUID already exists");
+-    }
+-
+-    // Look for an existing entry in case key properties changed.
+-    if (!newLogin.matches(oldLogin, true)) {
+-      let logins = this.findLogins({}, newLogin.hostname,
+-                                   newLogin.formSubmitURL,
+-                                   newLogin.httpRealm);
+-
+-      if (logins.some(login => newLogin.matches(login, true)))
+-        throw new Error("This login already exists.");
+-    }
+-
+-    // Get the encrypted value of the username and password.
+-    let [encUsername, encPassword, encType] = this._encryptLogin(newLogin);
+-
+-    let query =
+-        "UPDATE moz_logins " +
+-        "SET hostname = :hostname, " +
+-            "httpRealm = :httpRealm, " +
+-            "formSubmitURL = :formSubmitURL, " +
+-            "usernameField = :usernameField, " +
+-            "passwordField = :passwordField, " +
+-            "encryptedUsername = :encryptedUsername, " +
+-            "encryptedPassword = :encryptedPassword, " +
+-            "guid = :guid, " +
+-            "encType = :encType, " +
+-            "timeCreated = :timeCreated, " +
+-            "timeLastUsed = :timeLastUsed, " +
+-            "timePasswordChanged = :timePasswordChanged, " +
+-            "timesUsed = :timesUsed " +
+-        "WHERE id = :id";
+-
+-    let params = {
+-      id:                  idToModify,
+-      hostname:            newLogin.hostname,
+-      httpRealm:           newLogin.httpRealm,
+-      formSubmitURL:       newLogin.formSubmitURL,
+-      usernameField:       newLogin.usernameField,
+-      passwordField:       newLogin.passwordField,
+-      encryptedUsername:   encUsername,
+-      encryptedPassword:   encPassword,
+-      guid:                newLogin.guid,
+-      encType,
+-      timeCreated:         newLogin.timeCreated,
+-      timeLastUsed:        newLogin.timeLastUsed,
+-      timePasswordChanged: newLogin.timePasswordChanged,
+-      timesUsed:           newLogin.timesUsed
+-    };
+-
+-    let stmt;
+-    try {
+-      stmt = this._dbCreateStatement(query, params);
+-      stmt.execute();
+-    } catch (e) {
+-      this.log("modifyLogin failed: " + e.name + " : " + e.message);
+-      throw new Error("Couldn't write to database, login not modified.");
+-    } finally {
+-      if (stmt) {
+-        stmt.reset();
+-      }
+-    }
+-
+-    LoginHelper.notifyStorageChanged("modifyLogin", [oldStoredLogin, newLogin]);
+-  },
+-
+-
+-  /**
+-   * Returns an array of nsILoginInfo.
+-   */
+-  getAllLogins(count) {
+-    let [logins, ids] = this._searchLogins({});
+-
+-    // decrypt entries for caller.
+-    logins = this._decryptLogins(logins);
+-
+-    this.log("_getAllLogins: returning " + logins.length + " logins.");
+-    if (count)
+-      count.value = logins.length; // needed for XPCOM
+-    return logins;
+-  },
+-
+-
+-  /**
+-   * Public wrapper around _searchLogins to convert the nsIPropertyBag to a
+-   * JavaScript object and decrypt the results.
+-   *
+-   * @return {nsILoginInfo[]} which are decrypted.
+-   */
+-  searchLogins(count, matchData) {
+-    let realMatchData = {};
+-    let options = {};
+-    // Convert nsIPropertyBag to normal JS object
+-    let propEnum = matchData.enumerator;
+-    while (propEnum.hasMoreElements()) {
+-      let prop = propEnum.getNext().QueryInterface(Ci.nsIProperty);
+-      switch (prop.name) {
+-        // Some property names aren't field names but are special options to affect the search.
+-        case "schemeUpgrades": {
+-          options[prop.name] = prop.value;
+-          break;
+-        }
+-        default: {
+-          realMatchData[prop.name] = prop.value;
+-          break;
+-        }
+-      }
+-    }
+-
+-    let [logins, ids] = this._searchLogins(realMatchData, options);
+-
+-    // Decrypt entries found for the caller.
+-    logins = this._decryptLogins(logins);
+-
+-    count.value = logins.length; // needed for XPCOM
+-    return logins;
+-  },
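// Illustrative sketch (not part of the original module): one plausible way a
// caller could drive searchLogins() above through the standard property-bag
// API. The hostname value and the `storage` variable are assumptions made for
// this example only.
let matchData = Cc["@mozilla.org/hash-property-bag;1"]
                  .createInstance(Ci.nsIWritablePropertyBag);
matchData.setProperty("hostname", "https://example.com");  // field to match on
matchData.setProperty("schemeUpgrades", true);             // special search option, not a field
let count = {};                                            // XPCOM out-parameter
let logins = storage.searchLogins(count, matchData);       // decrypted nsILoginInfo results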
+-
+-
+-  /**
+-   * Private method to perform arbitrary searches on any field. Decryption is
+-   * left to the caller.
+-   *
+-   * Returns [logins, ids] for logins that match the arguments, where logins
+-   * is an array of encrypted nsLoginInfo and ids is an array of associated
+-   * ids in the database.
+-   */
+-  _searchLogins(matchData, aOptions = {
+-    schemeUpgrades: false,
+-  }) {
+-    let conditions = [], params = {};
+-
+-    for (let field in matchData) {
+-      let value = matchData[field];
+-      let condition = "";
+-      switch (field) {
+-        case "formSubmitURL":
+-          if (value != null) {
+-            // Historical compatibility requires this special case
+-            condition = "formSubmitURL = '' OR ";
+-          }
+-          // Fall through
+-        case "hostname":
+-          if (value != null) {
+-            condition += `${field} = :${field}`;
+-            params[field] = value;
+-            let valueURI;
+-            try {
+-              if (aOptions.schemeUpgrades && (valueURI = Services.io.newURI(value)) &&
+-                  valueURI.scheme == "https") {
+-                condition += ` OR ${field} = :http${field}`;
+-                params["http" + field] = "http://" + valueURI.hostPort;
+-              }
+-            } catch (ex) {
+-              // newURI will throw for some values (e.g. chrome://FirefoxAccounts)
+-              // but those URLs wouldn't support upgrades anyways.
+-            }
+-            break;
+-          }
+-          // Fall through
+-        // Normal cases.
+-        case "httpRealm":
+-        case "id":
+-        case "usernameField":
+-        case "passwordField":
+-        case "encryptedUsername":
+-        case "encryptedPassword":
+-        case "guid":
+-        case "encType":
+-        case "timeCreated":
+-        case "timeLastUsed":
+-        case "timePasswordChanged":
+-        case "timesUsed":
+-          if (value == null) {
+-            condition = field + " isnull";
+-          } else {
+-            condition = field + " = :" + field;
+-            params[field] = value;
+-          }
+-          break;
+-        // Fail if caller requests an unknown property.
+-        default:
+-          throw new Error("Unexpected field: " + field);
+-      }
+-      if (condition) {
+-        conditions.push(condition);
+-      }
+-    }
+-
+-    // Build query
+-    let query = "SELECT * FROM moz_logins";
+-    if (conditions.length) {
+-      conditions = conditions.map(c => "(" + c + ")");
+-      query += " WHERE " + conditions.join(" AND ");
+-    }
+-
+-    let stmt;
+-    let logins = [], ids = [];
+-    try {
+-      stmt = this._dbCreateStatement(query, params);
+-      // We can't execute as usual here, since we're iterating over rows
+-      while (stmt.executeStep()) {
+-        // Create the new nsLoginInfo object, push to array
+-        let login = Cc["@mozilla.org/login-manager/loginInfo;1"].
+-                    createInstance(Ci.nsILoginInfo);
+-        login.init(stmt.row.hostname, stmt.row.formSubmitURL,
+-                   stmt.row.httpRealm, stmt.row.encryptedUsername,
+-                   stmt.row.encryptedPassword, stmt.row.usernameField,
+-                   stmt.row.passwordField);
+-        // set nsILoginMetaInfo values
+-        login.QueryInterface(Ci.nsILoginMetaInfo);
+-        login.guid = stmt.row.guid;
+-        login.timeCreated = stmt.row.timeCreated;
+-        login.timeLastUsed = stmt.row.timeLastUsed;
+-        login.timePasswordChanged = stmt.row.timePasswordChanged;
+-        login.timesUsed = stmt.row.timesUsed;
+-        logins.push(login);
+-        ids.push(stmt.row.id);
+-      }
+-    } catch (e) {
+-      this.log("_searchLogins failed: " + e.name + " : " + e.message);
+-    } finally {
+-      if (stmt) {
+-        stmt.reset();
+-      }
+-    }
+-
+-    this.log("_searchLogins: returning " + logins.length + " logins");
+-    return [logins, ids];
+-  },
+-
+-  /**
+-   * Moves a login to the deleted logins table
+-   */
+-  storeDeletedLogin(aLogin) {
+-    let stmt = null;
+-    try {
+-      this.log("Storing " + aLogin.guid + " in deleted passwords\n");
+-      let query = "INSERT INTO moz_deleted_logins (guid, timeDeleted) VALUES (:guid, :timeDeleted)";
+-      let params = { guid: aLogin.guid,
+-                     timeDeleted: Date.now() };
+-      let stmt = this._dbCreateStatement(query, params);
+-      stmt.execute();
+-    } catch (ex) {
+-      throw ex;
+-    } finally {
+-      if (stmt)
+-        stmt.reset();
+-    }
+-  },
+-
+-
+-  /**
+-   * Removes all logins from storage.
+-   */
+-  removeAllLogins() {
+-    this.log("Removing all logins");
+-    let query;
+-    let stmt;
+-    let transaction = new Transaction(this._dbConnection);
+-
+-    // Disabled hosts kept, as one presumably doesn't want to erase those.
+-    // TODO: Add these items to the deleted items table once we've sorted
+-    //       out the issues from bug 756701
+-    query = "DELETE FROM moz_logins";
+-    try {
+-      stmt = this._dbCreateStatement(query);
+-      stmt.execute();
+-      transaction.commit();
+-    } catch (e) {
+-      this.log("_removeAllLogins failed: " + e.name + " : " + e.message);
+-      transaction.rollback();
+-      throw new Error("Couldn't write to database");
+-    } finally {
+-      if (stmt) {
+-        stmt.reset();
+-      }
+-    }
+-
+-    LoginHelper.notifyStorageChanged("removeAllLogins", null);
+-  },
+-
+-
+-  findLogins(count, hostname, formSubmitURL, httpRealm) {
+-    let loginData = {
+-      hostname,
+-      formSubmitURL,
+-      httpRealm
+-    };
+-    let matchData = { };
+-    for (let field of ["hostname", "formSubmitURL", "httpRealm"])
+-      if (loginData[field] != "")
+-        matchData[field] = loginData[field];
+-    let [logins, ids] = this._searchLogins(matchData);
+-
+-    // Decrypt entries found for the caller.
+-    logins = this._decryptLogins(logins);
+-
+-    this.log("_findLogins: returning " + logins.length + " logins");
+-    count.value = logins.length; // needed for XPCOM
+-    return logins;
+-  },
+-
+-
+-  countLogins(hostname, formSubmitURL, httpRealm) {
+-
+-    let _countLoginsHelper = (hostname, formSubmitURL, httpRealm) => {
+-      // Do checks for null and empty strings, adjust conditions and params
+-      let [conditions, params] =
+-          this._buildConditionsAndParams(hostname, formSubmitURL, httpRealm);
+-
+-      let query = "SELECT COUNT(1) AS numLogins FROM moz_logins";
+-      if (conditions.length) {
+-        conditions = conditions.map(c => "(" + c + ")");
+-        query += " WHERE " + conditions.join(" AND ");
+-      }
+-
+-      let stmt, numLogins;
+-      try {
+-        stmt = this._dbCreateStatement(query, params);
+-        stmt.executeStep();
+-        numLogins = stmt.row.numLogins;
+-      } catch (e) {
+-        this.log("_countLogins failed: " + e.name + " : " + e.message);
+-      } finally {
+-        if (stmt) {
+-          stmt.reset();
+-        }
+-      }
+-      return numLogins;
+-    };
+-
+-    let resultLogins = _countLoginsHelper(hostname, formSubmitURL, httpRealm);
+-    this.log("_countLogins: counted logins: " + resultLogins);
+-    return resultLogins;
+-  },
+-
+-
+-  get uiBusy() {
+-    return this._crypto.uiBusy;
+-  },
+-
+-
+-  get isLoggedIn() {
+-    return this._crypto.isLoggedIn;
+-  },
+-
+-
+-  /**
+-   * Returns an array with two items: [id, login]. If the login was not
+-   * found, both items will be null. The returned login contains the actual
+-   * stored login (useful for looking at the actual nsILoginMetaInfo values).
+-   */
+-  _getIdForLogin(login) {
+-    let matchData = { };
+-    for (let field of ["hostname", "formSubmitURL", "httpRealm"])
+-      if (login[field] != "")
+-        matchData[field] = login[field];
+-    let [logins, ids] = this._searchLogins(matchData);
+-
+-    let id = null;
+-    let foundLogin = null;
+-
+-    // The specified login isn't encrypted, so we need to ensure
+-    // the logins we're comparing with are decrypted. We decrypt one entry
+-    // at a time, lest _decryptLogins return fewer entries and screw up
+-    // indices between the two.
+-    for (let i = 0; i < logins.length; i++) {
+-      let [decryptedLogin] = this._decryptLogins([logins[i]]);
+-
+-      if (!decryptedLogin || !decryptedLogin.equals(login))
+-        continue;
+-
+-      // We've found a match, set id and break
+-      foundLogin = decryptedLogin;
+-      id = ids[i];
+-      break;
+-    }
+-
+-    return [id, foundLogin];
+-  },
+-
+-
+-  /**
+-   * Adjusts the WHERE conditions and parameters for statements prior to the
+-   * statement being created. This fixes the cases where nulls are involved
+-   * and the empty string is supposed to be a wildcard match
+-   */
+-  _buildConditionsAndParams(hostname, formSubmitURL, httpRealm) {
+-    let conditions = [], params = {};
+-
+-    if (hostname == null) {
+-      conditions.push("hostname isnull");
+-    } else if (hostname != "") {
+-      conditions.push("hostname = :hostname");
+-      params.hostname = hostname;
+-    }
+-
+-    if (formSubmitURL == null) {
+-      conditions.push("formSubmitURL isnull");
+-    } else if (formSubmitURL != "") {
+-      conditions.push("formSubmitURL = :formSubmitURL OR formSubmitURL = ''");
+-      params.formSubmitURL = formSubmitURL;
+-    }
+-
+-    if (httpRealm == null) {
+-      conditions.push("httpRealm isnull");
+-    } else if (httpRealm != "") {
+-      conditions.push("httpRealm = :httpRealm");
+-      params.httpRealm = httpRealm;
+-    }
+-
+-    return [conditions, params];
+-  },
+-
+-
+-  /**
+-   * Checks to see if the specified GUID already exists.
+-   */
+-  _isGuidUnique(guid) {
+-    let query = "SELECT COUNT(1) AS numLogins FROM moz_logins WHERE guid = :guid";
+-    let params = { guid };
+-
+-    let stmt, numLogins;
+-    try {
+-      stmt = this._dbCreateStatement(query, params);
+-      stmt.executeStep();
+-      numLogins = stmt.row.numLogins;
+-    } catch (e) {
+-      this.log("_isGuidUnique failed: " + e.name + " : " + e.message);
+-    } finally {
+-      if (stmt) {
+-        stmt.reset();
+-      }
+-    }
+-
+-    return (numLogins == 0);
+-  },
+-
+-
+-  /**
+-   * Returns the encrypted username, password, and encryption type for the specified
+-   * login. Can throw if the user cancels a master password entry.
+-   */
+-  _encryptLogin(login) {
+-    let encUsername = this._crypto.encrypt(login.username);
+-    let encPassword = this._crypto.encrypt(login.password);
+-    let encType     = this._crypto.defaultEncType;
+-
+-    return [encUsername, encPassword, encType];
+-  },
+-
+-
+-  /**
+-   * Decrypts username and password fields in the provided array of
+-   * logins.
+-   *
+-   * The entries specified by the array will be decrypted, if possible.
+-   * An array of successfully decrypted logins will be returned. The return
+-   * value should be given to external callers (since still-encrypted
+-   * entries are useless), whereas internal callers generally don't want
+-   * to lose unencrypted entries (eg, because the user clicked Cancel
+-   * instead of entering their master password)
+-   */
+-  _decryptLogins(logins) {
+-    let result = [];
+-
+-    for (let login of logins) {
+-      try {
+-        login.username = this._crypto.decrypt(login.username);
+-        login.password = this._crypto.decrypt(login.password);
+-      } catch (e) {
+-        // If decryption failed (corrupt entry?), just skip it.
+-        // Rethrow other errors (like canceling entry of a master pw)
+-        if (e.result == Cr.NS_ERROR_FAILURE)
+-          continue;
+-        throw e;
+-      }
+-      result.push(login);
+-    }
+-
+-    return result;
+-  },
+-
+-
+-  // Database Creation & Access
+-
+-  /**
+-   * Creates a statement, wraps it, and then does parameter replacement
+-   * Returns the wrapped statement for execution.  Will use memoization
+-   * so that statements can be reused.
+-   */
+-  _dbCreateStatement(query, params) {
+-    let wrappedStmt = this._dbStmts[query];
+-    // Memoize the statements
+-    if (!wrappedStmt) {
+-      this.log("Creating new statement for query: " + query);
+-      wrappedStmt = this._dbConnection.createStatement(query);
+-      this._dbStmts[query] = wrappedStmt;
+-    }
+-    // Replace parameters, must be done 1 at a time
+-    if (params)
+-      for (let i in params)
+-        wrappedStmt.params[i] = params[i];
+-    return wrappedStmt;
+-  },
+-
+-
+-  /**
+-   * Attempts to initialize the database. This creates the file if it doesn't
+-   * exist, performs any migrations, etc. Returns whether this is the first run.
+-   */
+-  _dbInit() {
+-    this.log("Initializing Database");
+-    let isFirstRun = false;
+-    try {
+-      this._dbConnection = Services.storage.openDatabase(this._signonsFile);
+-      // Get the version of the schema in the file. It will be 0 if the
+-      // database has not been created yet.
+-      let version = this._dbConnection.schemaVersion;
+-      if (version == 0) {
+-        this._dbCreate();
+-        isFirstRun = true;
+-      } else if (version != DB_VERSION) {
+-        this._dbMigrate(version);
+-      }
+-    } catch (e) {
+-      if (e.result == Cr.NS_ERROR_FILE_CORRUPTED) {
+-        // Database is corrupted, so we backup the database, then throw
+-        // causing initialization to fail and a new db to be created next use
+-        this._dbCleanup(true);
+-      }
+-      throw e;
+-    }
+-
+-    Services.obs.addObserver(this, "profile-before-change");
+-    return isFirstRun;
+-  },
+-
+-  observe(subject, topic, data) {
+-    switch (topic) {
+-      case "profile-before-change":
+-        Services.obs.removeObserver(this, "profile-before-change");
+-        this._dbClose();
+-      break;
+-    }
+-  },
+-
+-  _dbCreate() {
+-    this.log("Creating Database");
+-    this._dbCreateSchema();
+-    this._dbConnection.schemaVersion = DB_VERSION;
+-  },
+-
+-
+-  _dbCreateSchema() {
+-    this._dbCreateTables();
+-    this._dbCreateIndices();
+-  },
+-
+-
+-  _dbCreateTables() {
+-    this.log("Creating Tables");
+-    for (let name in this._dbSchema.tables)
+-      this._dbConnection.createTable(name, this._dbSchema.tables[name]);
+-  },
+-
+-
+-  _dbCreateIndices() {
+-    this.log("Creating Indices");
+-    for (let name in this._dbSchema.indices) {
+-      let index = this._dbSchema.indices[name];
+-      let statement = "CREATE INDEX IF NOT EXISTS " + name + " ON " + index.table +
+-                      "(" + index.columns.join(", ") + ")";
+-      this._dbConnection.executeSimpleSQL(statement);
+-    }
+-  },
+-
+-
+-  _dbMigrate(oldVersion) {
+-    this.log("Attempting to migrate from version " + oldVersion);
+-
+-    if (oldVersion > DB_VERSION) {
+-      this.log("Downgrading to version " + DB_VERSION);
+-      // User's DB is newer. Sanity check that our expected columns are
+-      // present, and if so mark the lower version and merrily continue
+-      // on. If the columns are borked, something is wrong so blow away
+-      // the DB and start from scratch. [Future incompatible upgrades
+-      // should switch to a different table or file.]
+-
+-      if (!this._dbAreExpectedColumnsPresent())
+-        throw Components.Exception("DB is missing expected columns",
+-                                   Cr.NS_ERROR_FILE_CORRUPTED);
+-
+-      // Change the stored version to the current version. If the user
+-      // runs the newer code again, it will see the lower version number
+-      // and re-upgrade (to fixup any entries the old code added).
+-      this._dbConnection.schemaVersion = DB_VERSION;
+-      return;
+-    }
+-
+-    // Upgrade to newer version...
+-
+-    let transaction = new Transaction(this._dbConnection);
+-
+-    try {
+-      for (let v = oldVersion + 1; v <= DB_VERSION; v++) {
+-        this.log("Upgrading to version " + v + "...");
+-        let migrateFunction = "_dbMigrateToVersion" + v;
+-        this[migrateFunction]();
+-      }
+-    } catch (e) {
+-      this.log("Migration failed: " + e);
+-      transaction.rollback();
+-      throw e;
+-    }
+-
+-    this._dbConnection.schemaVersion = DB_VERSION;
+-    transaction.commit();
+-    this.log("DB migration completed.");
+-  },
+-
+-
+-  /**
+-   * Version 2 adds a GUID column. Existing logins are assigned a random GUID.
+-   */
+-  _dbMigrateToVersion2() {
+-    // Check to see if GUID column already exists, add if needed
+-    let query;
+-    if (!this._dbColumnExists("guid")) {
+-      query = "ALTER TABLE moz_logins ADD COLUMN guid TEXT";
+-      this._dbConnection.executeSimpleSQL(query);
+-
+-      query = "CREATE INDEX IF NOT EXISTS moz_logins_guid_index ON moz_logins (guid)";
+-      this._dbConnection.executeSimpleSQL(query);
+-    }
+-
+-    // Get a list of IDs for existing logins
+-    let ids = [];
+-    query = "SELECT id FROM moz_logins WHERE guid isnull";
+-    let stmt;
+-    try {
+-      stmt = this._dbCreateStatement(query);
+-      while (stmt.executeStep())
+-        ids.push(stmt.row.id);
+-    } catch (e) {
+-      this.log("Failed getting IDs: " + e);
+-      throw e;
+-    } finally {
+-      if (stmt) {
+-        stmt.reset();
+-      }
+-    }
+-
+-    // Generate a GUID for each login and update the DB.
+-    query = "UPDATE moz_logins SET guid = :guid WHERE id = :id";
+-    for (let id of ids) {
+-      let params = {
+-        id,
+-        guid: this._uuidService.generateUUID().toString()
+-      };
+-
+-      try {
+-        stmt = this._dbCreateStatement(query, params);
+-        stmt.execute();
+-      } catch (e) {
+-        this.log("Failed setting GUID: " + e);
+-        throw e;
+-      } finally {
+-        if (stmt) {
+-          stmt.reset();
+-        }
+-      }
+-    }
+-  },
+-
+-
+-  /**
+-   * Version 3 adds an encType column.
+-   */
+-  _dbMigrateToVersion3() {
+-    // Check to see if encType column already exists, add if needed
+-    let query;
+-    if (!this._dbColumnExists("encType")) {
+-      query = "ALTER TABLE moz_logins ADD COLUMN encType INTEGER";
+-      this._dbConnection.executeSimpleSQL(query);
+-
+-      query = "CREATE INDEX IF NOT EXISTS " +
+-                  "moz_logins_encType_index ON moz_logins (encType)";
+-      this._dbConnection.executeSimpleSQL(query);
+-    }
+-
+-    // Get a list of existing logins
+-    let logins = [];
+-    let stmt;
+-    query = "SELECT id, encryptedUsername, encryptedPassword " +
+-                "FROM moz_logins WHERE encType isnull";
+-    try {
+-      stmt = this._dbCreateStatement(query);
+-      while (stmt.executeStep()) {
+-        let params = { id: stmt.row.id };
+-        // We will tag base64 logins correctly, but no longer support their use.
+-        if (stmt.row.encryptedUsername.charAt(0) == "~" ||
+-            stmt.row.encryptedPassword.charAt(0) == "~")
+-          params.encType = Ci.nsILoginManagerCrypto.ENCTYPE_BASE64;
+-        else
+-          params.encType = Ci.nsILoginManagerCrypto.ENCTYPE_SDR;
+-        logins.push(params);
+-      }
+-    } catch (e) {
+-      this.log("Failed getting logins: " + e);
+-      throw e;
+-    } finally {
+-      if (stmt) {
+-        stmt.reset();
+-      }
+-    }
+-
+-    // Determine encryption type for each login and update the DB.
+-    query = "UPDATE moz_logins SET encType = :encType WHERE id = :id";
+-    for (let params of logins) {
+-      try {
+-        stmt = this._dbCreateStatement(query, params);
+-        stmt.execute();
+-      } catch (e) {
+-        this.log("Failed setting encType: " + e);
+-        throw e;
+-      } finally {
+-        if (stmt) {
+-          stmt.reset();
+-        }
+-      }
+-    }
+-  },
+-
+-
+-  /**
+-   * Version 4 adds timeCreated, timeLastUsed, timePasswordChanged,
+-   * and timesUsed columns
+-   */
+-  _dbMigrateToVersion4() {
+-    let query;
+-    // Add the new columns, if needed.
+-    for (let column of ["timeCreated", "timeLastUsed", "timePasswordChanged", "timesUsed"]) {
+-      if (!this._dbColumnExists(column)) {
+-        query = "ALTER TABLE moz_logins ADD COLUMN " + column + " INTEGER";
+-        this._dbConnection.executeSimpleSQL(query);
+-      }
+-    }
+-
+-    // Get a list of IDs for existing logins.
+-    let ids = [];
+-    let stmt;
+-    query = "SELECT id FROM moz_logins WHERE timeCreated isnull OR " +
+-            "timeLastUsed isnull OR timePasswordChanged isnull OR timesUsed isnull";
+-    try {
+-      stmt = this._dbCreateStatement(query);
+-      while (stmt.executeStep())
+-        ids.push(stmt.row.id);
+-    } catch (e) {
+-      this.log("Failed getting IDs: " + e);
+-      throw e;
+-    } finally {
+-      if (stmt) {
+-        stmt.reset();
+-      }
+-    }
+-
+-    // Initialize logins with current time.
+-    query = "UPDATE moz_logins SET timeCreated = :initTime, timeLastUsed = :initTime, " +
+-            "timePasswordChanged = :initTime, timesUsed = 1 WHERE id = :id";
+-    let params = {
+-      id:       null,
+-      initTime: Date.now()
+-    };
+-    for (let id of ids) {
+-      params.id = id;
+-      try {
+-        stmt = this._dbCreateStatement(query, params);
+-        stmt.execute();
+-      } catch (e) {
+-        this.log("Failed setting timestamps: " + e);
+-        throw e;
+-      } finally {
+-        if (stmt) {
+-          stmt.reset();
+-        }
+-      }
+-    }
+-  },
+-
+-
+-  /**
+-   * Version 5 adds the moz_deleted_logins table
+-   */
+-  _dbMigrateToVersion5() {
+-    if (!this._dbConnection.tableExists("moz_deleted_logins")) {
+-      this._dbConnection.createTable("moz_deleted_logins", this._dbSchema.tables.moz_deleted_logins);
+-    }
+-  },
+-
+-  /**
+-   * Version 6 migrates all the hosts from
+-   * moz_disabledHosts to the permission manager.
+-   */
+-  _dbMigrateToVersion6() {
+-    let disabledHosts = [];
+-    let query = "SELECT hostname FROM moz_disabledHosts";
+-    let stmt;
+-
+-    try {
+-      stmt = this._dbCreateStatement(query);
+-
+-      while (stmt.executeStep()) {
+-        disabledHosts.push(stmt.row.hostname);
+-      }
+-
+-      for (let host of disabledHosts) {
+-        try {
+-          let uri = Services.io.newURI(host);
+-          Services.perms.add(uri, PERMISSION_SAVE_LOGINS, Services.perms.DENY_ACTION);
+-        } catch (e) {
+-          Cu.reportError(e);
+-        }
+-      }
+-    } catch (e) {
+-      this.log(`_dbMigrateToVersion6 failed: ${e.name} : ${e.message}`);
+-    } finally {
+-      if (stmt) {
+-        stmt.reset();
+-      }
+-    }
+-
+-    query = "DELETE FROM moz_disabledHosts";
+-    this._dbConnection.executeSimpleSQL(query);
+-  },
+-
+-  /**
+-   * Sanity check to ensure that the columns this version of the code expects
+-   * are present in the DB we're using.
+-   */
+-  _dbAreExpectedColumnsPresent() {
+-    let query = "SELECT " +
+-                   "id, " +
+-                   "hostname, " +
+-                   "httpRealm, " +
+-                   "formSubmitURL, " +
+-                   "usernameField, " +
+-                   "passwordField, " +
+-                   "encryptedUsername, " +
+-                   "encryptedPassword, " +
+-                   "guid, " +
+-                   "encType, " +
+-                   "timeCreated, " +
+-                   "timeLastUsed, " +
+-                   "timePasswordChanged, " +
+-                   "timesUsed " +
+-                "FROM moz_logins";
+-    try {
+-      let stmt = this._dbConnection.createStatement(query);
+-      // (no need to execute statement, if it compiled we're good)
+-      stmt.finalize();
+-    } catch (e) {
+-      return false;
+-    }
+-
+-    query = "SELECT " +
+-               "id, " +
+-               "hostname " +
+-            "FROM moz_disabledHosts";
+-    try {
+-      let stmt = this._dbConnection.createStatement(query);
+-      // (no need to execute statement, if it compiled we're good)
+-      stmt.finalize();
+-    } catch (e) {
+-      return false;
+-    }
+-
+-    this.log("verified that expected columns are present in DB.");
+-    return true;
+-  },
+-
+-
+-  /**
+-   * Checks to see if the named column already exists.
+-   */
+-  _dbColumnExists(columnName) {
+-    let query = "SELECT " + columnName + " FROM moz_logins";
+-    try {
+-      let stmt = this._dbConnection.createStatement(query);
+-      // (no need to execute statement, if it compiled we're good)
+-      stmt.finalize();
+-      return true;
+-    } catch (e) {
+-      return false;
+-    }
+-  },
+-
+-  _dbClose() {
+-    this.log("Closing the DB connection.");
+-    // Finalize all statements to free memory, avoid errors later
+-    for (let query in this._dbStmts) {
+-      let stmt = this._dbStmts[query];
+-      stmt.finalize();
+-    }
+-    this._dbStmts = {};
+-
+-    if (this._dbConnection !== null) {
+-      try {
+-        this._dbConnection.close();
+-      } catch (e) {
+-        Cu.reportError(e);
+-      }
+-    }
+-    this._dbConnection = null;
+-  },
+-
+-  /**
+-   * Called when database creation fails. Finalizes database statements,
+-   * closes the database connection, deletes the database file.
+-   */
+-  _dbCleanup(backup) {
+-    this.log("Cleaning up DB file - close & remove & backup=" + backup);
+-
+-    // Create backup file
+-    if (backup) {
+-      let backupFile = this._signonsFile.leafName + ".corrupt";
+-      Services.storage.backupDatabaseFile(this._signonsFile, backupFile);
+-    }
+-
+-    this._dbClose();
+-    this._signonsFile.remove(false);
+-  }
+-
+-}; // end of nsLoginManagerStorage_mozStorage implementation
+-
+-XPCOMUtils.defineLazyGetter(this.LoginManagerStorage_mozStorage.prototype, "log", () => {
+-  let logger = LoginHelper.createLogger("Login storage");
+-  return logger.log.bind(logger);
+-});
+-
+-var component = [LoginManagerStorage_mozStorage];
+-this.NSGetFactory = XPCOMUtils.generateNSGetFactory(component);
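Aside: the storage-mozStorage.js module removed above upgraded its SQLite schema by walking from the on-disk schema version up to DB_VERSION and dispatching to per-version methods by name (see its _dbMigrate). A stripped-down sketch of that dispatch pattern, with hypothetical method bodies:

const DB_VERSION = 3;

const migrator = {
  migrate(conn, oldVersion) {
    for (let v = oldVersion + 1; v <= DB_VERSION; v++) {
      let step = this["migrateToVersion" + v];  // e.g. migrateToVersion2
      if (typeof step != "function") {
        throw new Error("Missing migration step for version " + v);
      }
      step.call(this, conn);
    }
    conn.schemaVersion = DB_VERSION;  // record that the schema is now current
  },
  migrateToVersion2(conn) { /* e.g. ALTER TABLE to add a column */ },
  migrateToVersion3(conn) { /* e.g. backfill data for the new column */ },
};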
+diff --git a/toolkit/components/passwordmgr/test/unit/head.js b/toolkit/components/passwordmgr/test/unit/head.js
+--- a/toolkit/components/passwordmgr/test/unit/head.js
++++ b/toolkit/components/passwordmgr/test/unit/head.js
+@@ -60,22 +60,16 @@ add_task(async function test_common_init
+   // Before initializing the service for the first time, we should copy the key
+   // file required to decrypt the logins contained in the SQLite databases used
+   // by migration tests.  This file is not required for the other tests.
+   await OS.File.copy(do_get_file("data/key3.db").path,
+                      OS.Path.join(OS.Constants.Path.profileDir, "key3.db"));
+ 
+   // Ensure that the service and the storage module are initialized.
+   await Services.logins.initializationPromise;
+-
+-  // Ensure that every test file starts with an empty database.
+-  LoginTestUtils.clearData();
+-
+-  // Clean up after every test.
+-  registerCleanupFunction(() => LoginTestUtils.clearData());
+ });
+ 
+ /**
+  * Compare two FormLike to see if they represent the same information. Elements
+  * are compared using their @id attribute.
+  */
+ function formLikeEqual(a, b) {
+   Assert.strictEqual(Object.keys(a).length, Object.keys(b).length,
+diff --git a/toolkit/components/passwordmgr/test/unit/test_storage_mozStorage.js b/toolkit/components/passwordmgr/test/unit/test_storage_mozStorage.js
+deleted file mode 100644
+--- a/toolkit/components/passwordmgr/test/unit/test_storage_mozStorage.js
++++ /dev/null
+@@ -1,507 +0,0 @@
+-/*
+- * This test interfaces directly with the mozStorage password storage module,
+- * bypassing the normal password manager usage.
+- */
+-
+-
+-const ENCTYPE_BASE64 = 0;
+-const ENCTYPE_SDR = 1;
+-const PERMISSION_SAVE_LOGINS = "login-saving";
+-
+-// Current schema version used by storage-mozStorage.js. This will need to be
+-// kept in sync with the version there (or else the tests fail).
+-const CURRENT_SCHEMA = 6;
+-
+-async function copyFile(aLeafName)
+-{
+-  await OS.File.copy(OS.Path.join(do_get_file("data").path, aLeafName),
+-                     OS.Path.join(OS.Constants.Path.profileDir, aLeafName));
+-}
+-
+-function openDB(aLeafName)
+-{
+-  var dbFile = new FileUtils.File(OS.Constants.Path.profileDir);
+-  dbFile.append(aLeafName);
+-
+-  return Services.storage.openDatabase(dbFile);
+-}
+-
+-function deleteFile(pathname, filename)
+-{
+-  var file = new FileUtils.File(pathname);
+-  file.append(filename);
+-
+-  // Suppress failures; this happens in the mozstorage tests on Windows
+-  // because the module may still be holding onto the DB. (We don't
+-  // have a way to explicitly shutdown/GC the module).
+-  try {
+-    if (file.exists())
+-      file.remove(false);
+-  } catch (e) {}
+-}
+-
+-function reloadStorage(aInputPathName, aInputFileName)
+-{
+-  var inputFile = null;
+-  if (aInputFileName) {
+-      inputFile  = Cc["@mozilla.org/file/local;1"].
+-                       createInstance(Ci.nsIFile);
+-      inputFile.initWithPath(aInputPathName);
+-      inputFile.append(aInputFileName);
+-  }
+-
+-  let storage = Cc["@mozilla.org/login-manager/storage/mozStorage;1"]
+-                  .createInstance(Ci.nsILoginManagerStorage);
+-  storage.QueryInterface(Ci.nsIInterfaceRequestor)
+-         .getInterface(Ci.nsIVariant)
+-         .initWithFile(inputFile);
+-
+-  return storage;
+-}
+-
+-function checkStorageData(storage, ref_disabledHosts, ref_logins)
+-{
+-  LoginTestUtils.assertLoginListsEqual(storage.getAllLogins(), ref_logins);
+-  LoginTestUtils.assertDisabledHostsEqual(getAllDisabledHostsFromPermissionManager(),
+-                                          ref_disabledHosts);
+-}
+-
+-function getAllDisabledHostsFromPermissionManager() {
+-  let disabledHosts = [];
+-  let enumerator = Services.perms.enumerator;
+-
+-  while (enumerator.hasMoreElements()) {
+-    let perm = enumerator.getNext();
+-    if (perm.type == PERMISSION_SAVE_LOGINS && perm.capability == Services.perms.DENY_ACTION) {
+-      disabledHosts.push(perm.principal.URI.prePath);
+-    }
+-  }
+-
+-  return disabledHosts;
+-}
+-
+-function setLoginSavingEnabled(origin, enabled) {
+-  let uri = Services.io.newURI(origin);
+-
+-  if (enabled) {
+-    Services.perms.remove(uri, PERMISSION_SAVE_LOGINS);
+-  } else {
+-    Services.perms.add(uri, PERMISSION_SAVE_LOGINS, Services.perms.DENY_ACTION);
+-  }
+-}
+-
+-add_task(async function test_execute()
+-{
+-
+-const OUTDIR = OS.Constants.Path.profileDir;
+-
+-try {
+-
+-var isGUID = /^\{[0-9a-f\d]{8}-[0-9a-f\d]{4}-[0-9a-f\d]{4}-[0-9a-f\d]{4}-[0-9a-f\d]{12}\}$/;
+-function getGUIDforID(conn, id) {
+-    var stmt = conn.createStatement("SELECT guid from moz_logins WHERE id = " + id);
+-    stmt.executeStep();
+-    var guid = stmt.getString(0);
+-    stmt.finalize();
+-    return guid;
+-}
+-
+-function getEncTypeForID(conn, id) {
+-    var stmt = conn.createStatement("SELECT encType from moz_logins WHERE id = " + id);
+-    stmt.executeStep();
+-    var encType = stmt.row.encType;
+-    stmt.finalize();
+-    return encType;
+-}
+-
+-function getAllDisabledHostsFromMozStorage(conn) {
+-    let disabledHosts = [];
+-    let stmt = conn.createStatement("SELECT hostname from moz_disabledHosts");
+-
+-    while (stmt.executeStep()) {
+-      disabledHosts.push(stmt.row.hostname);
+-    }
+-
+-    return disabledHosts;
+-}
+-
+-var storage;
+-var dbConnection;
+-var testnum = 0;
+-var testdesc = "Setup of nsLoginInfo test-users";
+-var nsLoginInfo = new Components.Constructor(
+-                    "@mozilla.org/login-manager/loginInfo;1",
+-                    Ci.nsILoginInfo);
+-Assert.ok(nsLoginInfo != null);
+-
+-var testuser1 = new nsLoginInfo;
+-testuser1.init("http://test.com", "http://test.com", null,
+-               "testuser1", "testpass1", "u1", "p1");
+-var testuser1B = new nsLoginInfo;
+-testuser1B.init("http://test.com", "http://test.com", null,
+-                "testuser1B", "testpass1B", "u1", "p1");
+-var testuser2 = new nsLoginInfo;
+-testuser2.init("http://test.org", "http://test.org", null,
+-               "testuser2", "testpass2", "u2", "p2");
+-var testuser3 = new nsLoginInfo;
+-testuser3.init("http://test.gov", "http://test.gov", null,
+-               "testuser3", "testpass3", "u3", "p3");
+-var testuser4 = new nsLoginInfo;
+-testuser4.init("http://test.gov", "http://test.gov", null,
+-               "testuser1", "testpass2", "u4", "p4");
+-var testuser5 = new nsLoginInfo;
+-testuser5.init("http://test.gov", "http://test.gov", null,
+-               "testuser2", "testpass1", "u5", "p5");
+-
+-
+-/* ========== 1 ========== */
+-testnum++;
+-testdesc = "Test downgrade from v999 storage";
+-
+-await copyFile("signons-v999.sqlite");
+-// Verify the schema version in the test file.
+-dbConnection = openDB("signons-v999.sqlite");
+-Assert.equal(999, dbConnection.schemaVersion);
+-dbConnection.close();
+-
+-storage = reloadStorage(OUTDIR, "signons-v999.sqlite");
+-setLoginSavingEnabled("https://disabled.net", false);
+-checkStorageData(storage, ["https://disabled.net"], [testuser1]);
+-
+-// Check to make sure we downgraded the schema version.
+-dbConnection = openDB("signons-v999.sqlite");
+-Assert.equal(CURRENT_SCHEMA, dbConnection.schemaVersion);
+-dbConnection.close();
+-
+-deleteFile(OUTDIR, "signons-v999.sqlite");
+-
+-/* ========== 2 ========== */
+-testnum++;
+-testdesc = "Test downgrade from incompat v999 storage";
+-// This file has a testuser999/testpass999, but is missing an expected column
+-
+-var origFile = OS.Path.join(OUTDIR, "signons-v999-2.sqlite");
+-var failFile = OS.Path.join(OUTDIR, "signons-v999-2.sqlite.corrupt");
+-
+-// Make sure we always start in a clean state.
+-await copyFile("signons-v999-2.sqlite");
+-await OS.File.remove(failFile);
+-
+-Assert.throws(() => reloadStorage(OUTDIR, "signons-v999-2.sqlite"),
+-              /Initialization failed/);
+-
+-// Check to ensure the DB file was renamed to .corrupt.
+-Assert.equal(false, await OS.File.exists(origFile));
+-Assert.equal(false, await OS.File.exists(failFile));
+-
+-await OS.File.remove(failFile);
+-
+-/* ========== 3 ========== */
+-testnum++;
+-testdesc = "Test upgrade from v1->v2 storage";
+-
+-await copyFile("signons-v1.sqlite");
+-// Sanity check the test file.
+-dbConnection = openDB("signons-v1.sqlite");
+-Assert.equal(1, dbConnection.schemaVersion);
+-dbConnection.close();
+-
+-storage = reloadStorage(OUTDIR, "signons-v1.sqlite");
+-checkStorageData(storage, ["https://disabled.net"], [testuser1, testuser2]);
+-
+-// Check to see that we added a GUIDs to the logins.
+-dbConnection = openDB("signons-v1.sqlite");
+-Assert.equal(CURRENT_SCHEMA, dbConnection.schemaVersion);
+-var guid = getGUIDforID(dbConnection, 1);
+-Assert.ok(isGUID.test(guid));
+-guid = getGUIDforID(dbConnection, 2);
+-Assert.ok(isGUID.test(guid));
+-dbConnection.close();
+-
+-deleteFile(OUTDIR, "signons-v1.sqlite");
+-
+-/* ========== 4 ========== */
+-testnum++;
+-testdesc = "Test upgrade v2->v1 storage";
+-// This is the case where a v2 DB has been accessed with v1 code, and now we
+-// are upgrading it again. Any logins added by the v1 code must be properly
+-// upgraded.
+-
+-await copyFile("signons-v1v2.sqlite");
+-// Sanity check the test file.
+-dbConnection = openDB("signons-v1v2.sqlite");
+-Assert.equal(1, dbConnection.schemaVersion);
+-dbConnection.close();
+-
+-storage = reloadStorage(OUTDIR, "signons-v1v2.sqlite");
+-checkStorageData(storage, ["https://disabled.net"], [testuser1, testuser2, testuser3]);
+-
+-// While we're here, try modifying a login, to ensure that doing so doesn't
+-// change the existing GUID.
+-storage.modifyLogin(testuser1, testuser1B);
+-checkStorageData(storage, ["https://disabled.net"], [testuser1B, testuser2, testuser3]);
+-
+-// Check the GUIDs. Logins 1 and 2 should retain their original GUID, login 3
+-// should have one created (because it didn't have one previously).
+-dbConnection = openDB("signons-v1v2.sqlite");
+-Assert.equal(CURRENT_SCHEMA, dbConnection.schemaVersion);
+-guid = getGUIDforID(dbConnection, 1);
+-Assert.equal("{655c7358-f1d6-6446-adab-53f98ac5d80f}", guid);
+-guid = getGUIDforID(dbConnection, 2);
+-Assert.equal("{13d9bfdc-572a-4d4e-9436-68e9803e84c1}", guid);
+-guid = getGUIDforID(dbConnection, 3);
+-Assert.ok(isGUID.test(guid));
+-dbConnection.close();
+-
+-deleteFile(OUTDIR, "signons-v1v2.sqlite");
+-
+-/* ========== 5 ========== */
+-testnum++;
+-testdesc = "Test upgrade from v2->v3 storage";
+-
+-await copyFile("signons-v2.sqlite");
+-// Sanity check the test file.
+-dbConnection = openDB("signons-v2.sqlite");
+-Assert.equal(2, dbConnection.schemaVersion);
+-
+-storage = reloadStorage(OUTDIR, "signons-v2.sqlite");
+-
+-// Check to see that we added the correct encType to the logins.
+-Assert.equal(CURRENT_SCHEMA, dbConnection.schemaVersion);
+-var encTypes = [ENCTYPE_BASE64, ENCTYPE_SDR, ENCTYPE_BASE64, ENCTYPE_BASE64];
+-for (let i = 0; i < encTypes.length; i++)
+-    Assert.equal(encTypes[i], getEncTypeForID(dbConnection, i + 1));
+-dbConnection.close();
+-
+-// There are 4 logins, but 3 will be invalid because we can no longer decrypt
+-// base64-encoded items. (testuser1/4/5)
+-checkStorageData(storage, ["https://disabled.net"],
+-    [testuser2]);
+-
+-deleteFile(OUTDIR, "signons-v2.sqlite");
+-
+-/* ========== 6 ========== */
+-testnum++;
+-testdesc = "Test upgrade v3->v2 storage";
+-// This is the case where a v3 DB has been accessed with v2 code, and now we
+-// are upgrading it again. Any logins added by the v2 code must be properly
+-// upgraded.
+-
+-await copyFile("signons-v2v3.sqlite");
+-// Sanity check the test file.
+-dbConnection = openDB("signons-v2v3.sqlite");
+-Assert.equal(2, dbConnection.schemaVersion);
+-encTypes = [ENCTYPE_BASE64, ENCTYPE_SDR, ENCTYPE_BASE64, ENCTYPE_BASE64, null];
+-for (let i = 0; i < encTypes.length; i++)
+-    Assert.equal(encTypes[i], getEncTypeForID(dbConnection, i + 1));
+-
+-// Reload storage, check that the new login now has encType=1, others untouched
+-storage = reloadStorage(OUTDIR, "signons-v2v3.sqlite");
+-Assert.equal(CURRENT_SCHEMA, dbConnection.schemaVersion);
+-
+-encTypes = [ENCTYPE_BASE64, ENCTYPE_SDR, ENCTYPE_BASE64, ENCTYPE_BASE64, ENCTYPE_SDR];
+-for (let i = 0; i < encTypes.length; i++)
+-    Assert.equal(encTypes[i], getEncTypeForID(dbConnection, i + 1));
+-
+-// Sanity check that the data gets migrated
+-// There are 5 logins, but 3 will be invalid because we can no longer decrypt
+-// base64-encoded items. (testuser1/4/5). We no longer reencrypt with SDR.
+-checkStorageData(storage, ["https://disabled.net"], [testuser2, testuser3]);
+-encTypes = [ENCTYPE_BASE64, ENCTYPE_SDR, ENCTYPE_BASE64, ENCTYPE_BASE64, ENCTYPE_SDR];
+-for (let i = 0; i < encTypes.length; i++)
+-    Assert.equal(encTypes[i], getEncTypeForID(dbConnection, i + 1));
+-dbConnection.close();
+-
+-deleteFile(OUTDIR, "signons-v2v3.sqlite");
+-
+-
+-/* ========== 7 ========== */
+-testnum++;
+-testdesc = "Test upgrade from v3->v4 storage";
+-
+-await copyFile("signons-v3.sqlite");
+-// Sanity check the test file.
+-dbConnection = openDB("signons-v3.sqlite");
+-Assert.equal(3, dbConnection.schemaVersion);
+-
+-storage = reloadStorage(OUTDIR, "signons-v3.sqlite");
+-Assert.equal(CURRENT_SCHEMA, dbConnection.schemaVersion);
+-
+-// Remove old entry from permission manager.
+-setLoginSavingEnabled("https://disabled.net", true);
+-
+-// Check that timestamps and counts were initialized correctly
+-checkStorageData(storage, [], [testuser1, testuser2]);
+-
+-var logins = storage.getAllLogins();
+-for (var i = 0; i < 2; i++) {
+-    Assert.ok(logins[i] instanceof Ci.nsILoginMetaInfo);
+-    Assert.equal(1, logins[i].timesUsed);
+-    LoginTestUtils.assertTimeIsAboutNow(logins[i].timeCreated);
+-    LoginTestUtils.assertTimeIsAboutNow(logins[i].timeLastUsed);
+-    LoginTestUtils.assertTimeIsAboutNow(logins[i].timePasswordChanged);
+-}
+-
+-/* ========== 8 ========== */
+-testnum++;
+-testdesc = "Test upgrade from v3->v4->v3 storage";
+-
+-await copyFile("signons-v3v4.sqlite");
+-// Sanity check the test file.
+-dbConnection = openDB("signons-v3v4.sqlite");
+-Assert.equal(3, dbConnection.schemaVersion);
+-
+-storage = reloadStorage(OUTDIR, "signons-v3v4.sqlite");
+-Assert.equal(CURRENT_SCHEMA, dbConnection.schemaVersion);
+-
+-// testuser1 already has timestamps, testuser2 does not.
+-checkStorageData(storage, [], [testuser1, testuser2]);
+-
+-logins = storage.getAllLogins();
+-
+-var t1, t2;
+-if (logins[0].username == "testuser1") {
+-    t1 = logins[0];
+-    t2 = logins[1];
+-} else {
+-    t1 = logins[1];
+-    t2 = logins[0];
+-}
+-
+-Assert.ok(t1 instanceof Ci.nsILoginMetaInfo);
+-Assert.ok(t2 instanceof Ci.nsILoginMetaInfo);
+-
+-Assert.equal(9, t1.timesUsed);
+-Assert.equal(1262049951275, t1.timeCreated);
+-Assert.equal(1262049951275, t1.timeLastUsed);
+-Assert.equal(1262049951275, t1.timePasswordChanged);
+-
+-Assert.equal(1, t2.timesUsed);
+-LoginTestUtils.assertTimeIsAboutNow(t2.timeCreated);
+-LoginTestUtils.assertTimeIsAboutNow(t2.timeLastUsed);
+-LoginTestUtils.assertTimeIsAboutNow(t2.timePasswordChanged);
+-
+-
+-/* ========== 9 ========== */
+-testnum++;
+-testdesc = "Test upgrade from v4 storage";
+-
+-await copyFile("signons-v4.sqlite");
+-// Sanity check the test file.
+-dbConnection = openDB("signons-v4.sqlite");
+-Assert.equal(4, dbConnection.schemaVersion);
+-Assert.ok(!dbConnection.tableExists("moz_deleted_logins"));
+-
+-storage = reloadStorage(OUTDIR, "signons-v4.sqlite");
+-Assert.equal(CURRENT_SCHEMA, dbConnection.schemaVersion);
+-Assert.ok(dbConnection.tableExists("moz_deleted_logins"));
+-
+-
+-/* ========== 10 ========== */
+-testnum++;
+-testdesc = "Test upgrade from v4->v5->v4 storage";
+-
+-await copyFile("signons-v4v5.sqlite");
+-// Sanity check the test file.
+-dbConnection = openDB("signons-v4v5.sqlite");
+-Assert.equal(4, dbConnection.schemaVersion);
+-Assert.ok(dbConnection.tableExists("moz_deleted_logins"));
+-
+-storage = reloadStorage(OUTDIR, "signons-v4v5.sqlite");
+-Assert.equal(CURRENT_SCHEMA, dbConnection.schemaVersion);
+-Assert.ok(dbConnection.tableExists("moz_deleted_logins"));
+-
+-/* ========== 11 ========== */
+-testnum++;
+-testdesc = "Test upgrade from v5->v6 storage";
+-
+-await copyFile("signons-v5v6.sqlite");
+-
+-// Sanity check the test file.
+-dbConnection = openDB("signons-v5v6.sqlite");
+-Assert.equal(5, dbConnection.schemaVersion);
+-Assert.ok(dbConnection.tableExists("moz_disabledHosts"));
+-
+-// Initial disabled hosts inside signons-v5v6.sqlite
+-var disabledHosts = [
+-  "http://disabled1.example.com",
+-  "http://大.net",
+-  "http://xn--19g.com"
+-];
+-
+-LoginTestUtils.assertDisabledHostsEqual(disabledHosts, getAllDisabledHostsFromMozStorage(dbConnection));
+-
+-// Reload storage
+-storage = reloadStorage(OUTDIR, "signons-v5v6.sqlite");
+-Assert.equal(CURRENT_SCHEMA, dbConnection.schemaVersion);
+-
+-// moz_disabledHosts should now be empty after migration.
+-LoginTestUtils.assertDisabledHostsEqual([], getAllDisabledHostsFromMozStorage(dbConnection));
+-
+-// Get all the other hosts currently saved in the permission manager.
+-let hostsInPermissionManager = getAllDisabledHostsFromPermissionManager();
+-
+-// All disabledHosts should have migrated to the permission manager
+-LoginTestUtils.assertDisabledHostsEqual(disabledHosts, hostsInPermissionManager);
+-
+-// Remove all disabled hosts from the permission manager before test ends
+-for (let host of disabledHosts) {
+-  setLoginSavingEnabled(host, true);
+-}
+-
+-/* ========== 12 ========== */
+-testnum++;
+-testdesc = "Create nsILoginInfo instances for testing with";
+-
+-testuser1 = new nsLoginInfo;
+-testuser1.init("http://dummyhost.mozilla.org", "", null,
+-    "dummydude", "itsasecret", "put_user_here", "put_pw_here");
+-
+-
+-/*
+- * ---------------------- DB Corruption ----------------------
+- * Try to initialize with a corrupt database file. This should create a backup
+- * file, then upon next use create a new database file.
+- */
+-
+-/* ========== 13 ========== */
+-testnum++;
+-testdesc = "Corrupt database and backup";
+-
+-const filename = "signons-c.sqlite";
+-const filepath = OS.Path.join(OS.Constants.Path.profileDir, filename);
+-
+-await OS.File.copy(do_get_file("data/corruptDB.sqlite").path, filepath);
+-
+-// will init mozStorage module with corrupt database, init should fail
+-Assert.throws(
+-  () => reloadStorage(OS.Constants.Path.profileDir, filename),
+-  /Initialization failed/);
+-
+-// check that the backup file exists
+-Assert.ok(await OS.File.exists(filepath + ".corrupt"));
+-
+-// check that the original corrupt file has been deleted
+-Assert.equal(false, await OS.File.exists(filepath));
+-
+-// initialize the storage module again
+-storage = reloadStorage(OS.Constants.Path.profileDir, filename);
+-
+-// use the storage module again, should work now
+-storage.addLogin(testuser1);
+-checkStorageData(storage, [], [testuser1]);
+-
+-// check the file exists
+-var file = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile);
+-file.initWithPath(OS.Constants.Path.profileDir);
+-file.append(filename);
+-Assert.ok(file.exists());
+-
+-deleteFile(OS.Constants.Path.profileDir, filename + ".corrupt");
+-deleteFile(OS.Constants.Path.profileDir, filename);
+-
+-} catch (e) {
+-    throw new Error("FAILED in test #" + testnum + " -- " + testdesc + ": " + e);
+-}
+-
+-});
+diff --git a/toolkit/components/passwordmgr/test/unit/xpcshell.ini b/toolkit/components/passwordmgr/test/unit/xpcshell.ini
+--- a/toolkit/components/passwordmgr/test/unit/xpcshell.ini
++++ b/toolkit/components/passwordmgr/test/unit/xpcshell.ini
+@@ -5,21 +5,17 @@ support-files = data/**
+ # Test JSON file access and import from SQLite, not applicable to Android.
+ [test_module_LoginImport.js]
+ skip-if = os == "android"
+ [test_module_LoginStore.js]
+ skip-if = os == "android"
+ [test_removeLegacySignonFiles.js]
+ skip-if = os == "android"
+ 
+-# Test SQLite database backup and migration, applicable to Android only.
+-[test_storage_mozStorage.js]
+-skip-if = true || os != "android" # Bug 1171687: Needs fixing on Android
+-
+-# The following tests apply to any storage back-end.
++# The following tests apply to any storage back-end that supports add/modify/remove.
+ [test_context_menu.js]
+ skip-if = os == "android" # The context menu isn't used on Android.
+ # LoginManagerContextMenu is only included for MOZ_BUILD_APP == 'browser'.
+ run-if = buildapp == "browser"
+ [test_dedupeLogins.js]
+ [test_disabled_hosts.js]
+ [test_getFormFields.js]
+ [test_getPasswordFields.js]
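
The test removed in the hunks above exercised two behaviours of the old mozStorage back-end: bumping the signons database to CURRENT_SCHEMA on load, and moving an unreadable database aside as a ".corrupt" backup before starting fresh. For reference only, a minimal Python sketch of that pattern; the real code is JavaScript on top of mozIStorageConnection, whose schemaVersion corresponds to SQLite's "PRAGMA user_version", and the target version below is assumed:

    import shutil
    import sqlite3

    CURRENT_SCHEMA = 6  # assumed target version, standing in for the test's CURRENT_SCHEMA

    def open_signons_db(path):
        """Open the database; keep a .corrupt backup and start over if it is unreadable."""
        db = sqlite3.connect(path)
        try:
            version = db.execute("PRAGMA user_version").fetchone()[0]
        except sqlite3.DatabaseError:
            db.close()
            shutil.move(path, path + ".corrupt")  # back up the corrupt file
            db = sqlite3.connect(path)            # a brand new, empty database
            version = 0
        if version < CURRENT_SCHEMA:
            db.execute("PRAGMA user_version = %d" % CURRENT_SCHEMA)
        return db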

+ 6 - 6
mozilla-release/patches/1604360-7-73a1.patch

@@ -2,7 +2,7 @@
 # User Andrew Halberstadt <ahalberstadt@mozilla.com>
 # Date 1576713266 0
 # Node ID c6a0928caab6a65d1511fca8c44927a0cf8a2a4c
-# Parent  80d46e8f12d137ee02b68b8f982ce27fac932a18
+# Parent  7a86559342dc2d418bf3c70dd914169a556fa8ce
 Bug 1604360 - [manifestparser] Properly merge [DEFAULT] section of manifest with parent defaults r=gbrown
 
 Previously the [DEFAULT] section of a manifest would simply overwrite whatever
@@ -15,7 +15,7 @@ Differential Revision: https://phabricator.services.mozilla.com/D57410
 diff --git a/testing/mozbase/manifestparser/manifestparser/ini.py b/testing/mozbase/manifestparser/manifestparser/ini.py
 --- a/testing/mozbase/manifestparser/manifestparser/ini.py
 +++ b/testing/mozbase/manifestparser/manifestparser/ini.py
-@@ -17,31 +17,32 @@ class IniParseError(Exception):
+@@ -19,31 +19,32 @@ class IniParseError(Exception):
          elif hasattr(fp, 'name'):
              path = fp.name
          else:
@@ -48,10 +48,10 @@ diff --git a/testing/mozbase/manifestparser/manifestparser/ini.py b/testing/mozb
      sections = []
      key = value = None
      section_names = set()
-     if isinstance(fp, basestring):
-         fp = file(fp)
+     if isinstance(fp, string_types):
+         fp = open(fp)
  
-@@ -82,17 +83,17 @@ def read_ini(fp, variables=None, default
+@@ -84,17 +85,17 @@ def read_ini(fp, variables=None, default
              section = stripped[1:-1].strip()
              key = value = key_indent = None
  
@@ -70,7 +70,7 @@ diff --git a/testing/mozbase/manifestparser/manifestparser/ini.py b/testing/mozb
                      section, section_names)
  
              section_names.add(section)
-@@ -115,42 +116,47 @@ def read_ini(fp, variables=None, default
+@@ -117,42 +118,47 @@ def read_ini(fp, variables=None, default
          # (key, value) pair
          for separator in separators:
              if separator in stripped:
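
The updated parent hash and hunk offsets above rebase 1604360-7-73a1.patch on the surrounding changes; its substance is that a manifest's [DEFAULT] section is now merged with the defaults inherited from its parent manifest instead of overwriting them. A rough Python illustration of that merge (names are illustrative, not manifestparser's actual API):

    def merge_defaults(parent_defaults, child_defaults):
        """Layer the child's [DEFAULT] keys over the inherited parent defaults."""
        merged = dict(parent_defaults)  # start from what the parent provides
        merged.update(child_defaults)   # child keys win on conflict, the rest is kept
        return merged

    parent = {"head": "head.js", "skip-if": "os == 'android'"}
    child = {"support-files": "data/**"}
    assert merge_defaults(parent, child) == {
        "head": "head.js",
        "skip-if": "os == 'android'",
        "support-files": "data/**",
    }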

+ 7 - 14
mozilla-release/patches/1616989-75a1.patch

@@ -2,7 +2,7 @@
 # User Ricky Stewart <rstewart@mozilla.com>
 # Date 1582820546 0
 # Node ID b77c81cc3d735be321af7ae0ff87a8350d4ea36f
-# Parent  6252618ad538e581ef704b3eb790fa2164372f0b
+# Parent  21462c123ea965586e8e9870de391417d0714499
 Bug 1616989 - mozbuild/frontend/reader.py supports Python 3 r=firefox-build-system-reviewers,mshal
 
 Differential Revision: https://phabricator.services.mozilla.com/D63566
@@ -111,7 +111,7 @@ diff --git a/python/mozbuild/mozbuild/test/python2.ini b/python/mozbuild/mozbuil
 diff --git a/testing/mozbase/manifestparser/manifestparser/ini.py b/testing/mozbase/manifestparser/manifestparser/ini.py
 --- a/testing/mozbase/manifestparser/manifestparser/ini.py
 +++ b/testing/mozbase/manifestparser/manifestparser/ini.py
-@@ -1,17 +1,20 @@
+@@ -1,14 +1,15 @@
  # This Source Code Form is subject to the terms of the Mozilla Public
  # License, v. 2.0. If a copy of the MPL was not distributed with this file,
  # You can obtain one at http://mozilla.org/MPL/2.0/.
@@ -122,18 +122,12 @@ diff --git a/testing/mozbase/manifestparser/manifestparser/ini.py b/testing/mozb
  import os
  import sys
  
-+from six import string_types
-+
+ from six import string_types
+ 
  __all__ = ['read_ini', 'combine_fields']
  
  
- class IniParseError(Exception):
-     def __init__(self, fp, linenum, msg):
-         if isinstance(fp, string_types):
-             path = fp
-         elif hasattr(fp, 'name'):
-@@ -38,18 +41,18 @@ def read_ini(fp, defaults=None, default=
-     # variables
+@@ -41,17 +42,17 @@ def read_ini(fp, defaults=None, default=
      defaults = defaults or {}
      default_section = {}
      comments = comments or ('#',)
@@ -141,9 +135,8 @@ diff --git a/testing/mozbase/manifestparser/manifestparser/ini.py b/testing/mozb
      sections = []
      key = value = None
      section_names = set()
--    if isinstance(fp, basestring):
--        fp = file(fp)
-+    if isinstance(fp, string_types):
+     if isinstance(fp, string_types):
+-        fp = open(fp)
 +        fp = io.open(fp, encoding='utf-8')
  
      # read the lines
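
The adjusted hunks above rebase 1616989-75a1.patch so that the string_types import is already present as context and only the io.open() switch remains. The Python 2/3 portability pattern involved is simply the following (assuming the six package, as the patched module does):

    import io
    from six import string_types  # str on Python 3, str/unicode on Python 2

    def open_manifest(fp):
        """Accept either an open file object or a path, as read_ini() does."""
        if isinstance(fp, string_types):
            fp = io.open(fp, encoding="utf-8")  # explicit decoding instead of Python 2's file()
        return fp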

+ 207 - 0
mozilla-release/patches/1648336-120a1.patch

@@ -0,0 +1,207 @@
+# HG changeset patch
+# User Spiros Vita <vitaspiros@gmail.com>
+# Date 1696508945 0
+# Node ID 11b3d438788a84aecc7e53efdda19bb0814e13be
+# Parent  370abfd61be3c385a7914955dc555442f74ac3a7
+Bug 1013947 - Removed signons.sqlite references r=mak,credential-management-reviewers,dimi
+
+Added functionality to remove signons.sqlite.corrupt and fixed code
+
+Differential Revision: https://phabricator.services.mozilla.com/D189349
+
+diff --git a/browser/components/migration/FirefoxProfileMigrator.js b/browser/components/migration/FirefoxProfileMigrator.js
+--- a/browser/components/migration/FirefoxProfileMigrator.js
++++ b/browser/components/migration/FirefoxProfileMigrator.js
+@@ -123,19 +123,22 @@ FirefoxProfileMigrator.prototype._getRes
+       },
+     };
+   };
+ 
+   let types = MigrationUtils.resourceTypes;
+   let places = getFileResource(types.HISTORY, ["places.sqlite", "places.sqlite-wal"]);
+   let favicons = getFileResource(types.HISTORY, ["favicons.sqlite", "favicons.sqlite-wal"]);
+   let cookies = getFileResource(types.COOKIES, ["cookies.sqlite", "cookies.sqlite-wal"]);
+-  let passwords = getFileResource(types.PASSWORDS,
+-    ["signons.sqlite", "logins.json", "key3.db", "key4.db",
+-     "signedInUser.json"]);
++  let passwords = getFileResource(types.PASSWORDS, [
++    "logins.json",
++    "key3.db",
++    "key4.db",
++    "signedInUser.json",
++  ]);
+   let formData = getFileResource(types.FORMDATA, [
+     "formhistory.sqlite",
+     "autofill-profiles.json",
+   ]);
+   let bookmarksBackups = getFileResource(types.OTHERDATA,
+     [PlacesBackups.profileRelativeFolderPath]);
+   let dictionary = getFileResource(types.OTHERDATA, ["persdict.dat"]);
+ 
+diff --git a/browser/components/nsBrowserGlue.js b/browser/components/nsBrowserGlue.js
+--- a/browser/components/nsBrowserGlue.js
++++ b/browser/components/nsBrowserGlue.js
+@@ -1676,17 +1676,17 @@ BrowserGlue.prototype = {
+       if (toolbarIsCustomized || getToolbarFolderCount() > NUM_TOOLBAR_BOOKMARKS_TO_UNHIDE) {
+         xulStore.setValue(BROWSER_DOCURL, "PersonalToolbar", "collapsed", "false");
+       }
+     }
+   },
+ 
+   // eslint-disable-next-line complexity
+   _migrateUI: function BG__migrateUI() {
+-    const UI_VERSION = 56;
++    const UI_VERSION = 57;
+     const BROWSER_DOCURL = "chrome://browser/content/browser.xul";
+ 
+     let currentUIVersion;
+     if (Services.prefs.prefHasUserValue("browser.migration.version")) {
+       currentUIVersion = Services.prefs.getIntPref("browser.migration.version");
+     } else {
+       // This is a new profile, nothing to migrate.
+       Services.prefs.setIntPref("browser.migration.version", UI_VERSION);
+@@ -2089,16 +2089,25 @@ BrowserGlue.prototype = {
+             } catch (e) { /* Don't panic if the value is not a valid locale code. */ }
+           }
+         }
+         Services.prefs.clearUserPref(SELECTED_LOCALE_PREF);
+         Services.prefs.clearUserPref(MATCHOS_LOCALE_PREF);
+       }
+     }
+ 
++    if (currentUIVersion < 57) {
++      for (const filename of ["signons.sqlite", "signons.sqlite.corrupt"]) {
++        let signonFile = Services.dirsvc.get("ProfD", Ci.nsIFile);
++        signonFile.append(filename);
++        OS.File.remove(signonFile.path, {ignoreAbsent: true})
++          .catch(ex => Cu.reportError(ex));
++      }
++    }
++
+     // Update the migration version.
+     Services.prefs.setIntPref("browser.migration.version", UI_VERSION);
+   },
+ 
+   // ------------------------------
+   // public nsIBrowserGlue members
+   // ------------------------------
+ 
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v1.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v1.sqlite
+deleted file mode 100644
+index fe030b61fddfff4cd685479bdb41600eb7db8906..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v1v2.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v1v2.sqlite
+deleted file mode 100644
+index 729512a12b0c9a9b11fa76c6e2087e83e3e73d02..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v2.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v2.sqlite
+deleted file mode 100644
+index a6c72b31e8973a9dde6ad2e03a4ce86bd0636ab7..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v2v3.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v2v3.sqlite
+deleted file mode 100644
+index 359df5d31130414f0bb9a31fe5af1501b0fc73fe..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v3.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v3.sqlite
+deleted file mode 100644
+index 918f4142fe0529d103d53580d03a138abdcefe07..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v3v4.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v3v4.sqlite
+deleted file mode 100644
+index e06c33aae33a5ff2e267daffb4a576b76fa7da9c..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v4.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v4.sqlite
+deleted file mode 100644
+index 227c09c816b9a36c4df6aff86a9937acd7a4e599..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v4v5.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v4v5.sqlite
+deleted file mode 100644
+index 4534cf255332b380f45efcd5519c4eed8cb3bc69..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v5v6.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v5v6.sqlite
+deleted file mode 100644
+index eb4ee6d01e0d9c89d4447515eab1a7beffc3c89c..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v999-2.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v999-2.sqlite
+deleted file mode 100644
+index e09c4f710080422e2bfcc3aadcbae99294e5de14..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/passwordmgr/test/unit/data/signons-v999.sqlite b/toolkit/components/passwordmgr/test/unit/data/signons-v999.sqlite
+deleted file mode 100644
+index 0328a1a02ad410ebfe85c23dc6707d5639b977c3..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+Hc$@<O00001
+
+diff --git a/toolkit/components/telemetry/core/Telemetry.cpp b/toolkit/components/telemetry/core/Telemetry.cpp
+--- a/toolkit/components/telemetry/core/Telemetry.cpp
++++ b/toolkit/components/telemetry/core/Telemetry.cpp
+@@ -1281,17 +1281,16 @@ static constexpr TrackedDBEntry kTracked
+   TRACKEDDB_ENTRY("favicons.sqlite"),
+   TRACKEDDB_ENTRY("formhistory.sqlite"),
+   TRACKEDDB_ENTRY("index.sqlite"),
+   TRACKEDDB_ENTRY("netpredictions.sqlite"),
+   TRACKEDDB_ENTRY("permissions.sqlite"),
+   TRACKEDDB_ENTRY("places.sqlite"),
+   TRACKEDDB_ENTRY("reading-list.sqlite"),
+   TRACKEDDB_ENTRY("search.sqlite"),
+-  TRACKEDDB_ENTRY("signons.sqlite"),
+   TRACKEDDB_ENTRY("urlclassifier3.sqlite"),
+   TRACKEDDB_ENTRY("webappsstore.sqlite")
+ };
+ 
+ // A whitelist of database name prefixes. If the database name begins with
+ // one of these prefixes then its SQL statements will always be recorded.
+ static const TrackedDBEntry kTrackedDBPrefixes[] = {
+   TRACKEDDB_ENTRY("indexedDB-")
+diff --git a/toolkit/forgetaboutsite/test/unit/head_forgetaboutsite.js b/toolkit/forgetaboutsite/test/unit/head_forgetaboutsite.js
+--- a/toolkit/forgetaboutsite/test/unit/head_forgetaboutsite.js
++++ b/toolkit/forgetaboutsite/test/unit/head_forgetaboutsite.js
+@@ -10,17 +10,16 @@ var profileDir = do_get_profile();
+ 
+ /**
+  * Removes any files that could make our tests fail.
+  */
+ function cleanUp() {
+   let files = [
+     "places.sqlite",
+     "cookies.sqlite",
+-    "signons.sqlite",
+     "permissions.sqlite"
+   ];
+ 
+   for (let i = 0; i < files.length; i++) {
+     let file = Services.dirsvc.get("ProfD", Ci.nsIFile);
+     file.append(files[i]);
+     if (file.exists())
+       file.remove(false);
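
The nsBrowserGlue.js hunk in this patch is a one-shot, versioned cleanup: the UI migration version is bumped to 57, and profiles below that version get the two legacy signons files deleted, silently ignoring the case where they are already gone. Sketched here in Python rather than the patch's JavaScript (paths and the stored version are placeholders):

    from pathlib import Path

    UI_VERSION = 57  # new migration version introduced by the patch

    def migrate_profile(profile_dir, current_version):
        """Below version 57, drop the legacy signons databases from the profile."""
        if current_version < 57:
            for name in ("signons.sqlite", "signons.sqlite.corrupt"):
                # equivalent of OS.File.remove(..., {ignoreAbsent: true}); needs Python 3.8+
                Path(profile_dir, name).unlink(missing_ok=True)
        return UI_VERSION  # the caller persists this as the new migration version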

+ 536 - 0
mozilla-release/patches/1648336-79a1.patch

@@ -0,0 +1,536 @@
+# HG changeset patch
+# User Matthew Noorenberghe <mozilla@noorenberghe.ca>
+# Date 1593107921 0
+# Node ID b334a875e83bb973d524ab195df08e4c0ccfcfe1
+# Parent  a3a63cde3a47253b4af2ebc309c0ad0d332cff8c
+Bug 1648336 - Stop migrating logins from SQLite to JSON via LoginImport.jsm. r=severin
+
+Differential Revision: https://phabricator.services.mozilla.com/D81011
+
+diff --git a/toolkit/components/passwordmgr/LoginImport.jsm b/toolkit/components/passwordmgr/LoginImport.jsm
+deleted file mode 100644
+--- a/toolkit/components/passwordmgr/LoginImport.jsm
++++ /dev/null
+@@ -1,167 +0,0 @@
+-/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+-/* vim: set ts=2 et sw=2 tw=80 filetype=javascript: */
+-/* This Source Code Form is subject to the terms of the Mozilla Public
+- * License, v. 2.0. If a copy of the MPL was not distributed with this
+- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+-
+-/**
+- * Provides an object that has a method to import login-related data from the
+- * previous SQLite storage format.
+- */
+-
+-"use strict";
+-
+-var EXPORTED_SYMBOLS = [
+-  "LoginImport",
+-];
+-
+-// Globals
+-
+-ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm");
+-
+-ChromeUtils.defineModuleGetter(this, "OS",
+-                               "resource://gre/modules/osfile.jsm");
+-ChromeUtils.defineModuleGetter(this, "Sqlite",
+-                               "resource://gre/modules/Sqlite.jsm");
+-ChromeUtils.defineModuleGetter(this, "NetUtil",
+-                               "resource://gre/modules/NetUtil.jsm");
+-
+-// LoginImport
+-
+-/**
+- * Provides an object that has a method to import login-related data from the
+- * previous SQLite storage format.
+- *
+- * @param aStore
+- *        LoginStore object where imported data will be added.
+- * @param aPath
+- *        String containing the file path of the SQLite login database.
+- */
+-var LoginImport = function(aStore, aPath) {
+-  this.store = aStore;
+-  this.path = aPath;
+-};
+-
+-this.LoginImport.prototype = {
+-  /**
+-   * LoginStore object where imported data will be added.
+-   */
+-  store: null,
+-
+-  /**
+-   * String containing the file path of the SQLite login database.
+-   */
+-  path: null,
+-
+-  /**
+-   * Imports login-related data from the previous SQLite storage format.
+-   */
+-  async import() {
+-    // We currently migrate data directly from the database to the JSON store at
+-    // first run, then we set a preference to prevent repeating the import.
+-    // Thus, merging with existing data is not a use case we support.  This
+-    // restriction might be removed to support re-importing passwords set by an
+-    // old version by flipping the import preference and restarting.
+-    if (this.store.data.logins.length > 0 ||
+-        this.store.data.disabledHosts.length > 0) {
+-      throw new Error("Unable to import saved passwords because some data " +
+-                      "has already been imported or saved.");
+-    }
+-
+-    // When a timestamp is not specified, we will use the same reference time.
+-    let referenceTimeMs = Date.now();
+-
+-    let connection = await Sqlite.openConnection({ path: this.path });
+-    try {
+-      let schemaVersion = await connection.getSchemaVersion();
+-
+-      // We support importing database schema versions from 3 onwards.
+-      // Version 3 was implemented in bug 316084 (Firefox 3.6, March 2009).
+-      // Version 4 was implemented in bug 465636 (Firefox 4, March 2010).
+-      // Version 5 was implemented in bug 718817 (Firefox 13, February 2012).
+-      if (schemaVersion < 3) {
+-        throw new Error("Unable to import saved passwords because " +
+-                        "the existing profile is too old.");
+-      }
+-
+-      let rows = await connection.execute("SELECT * FROM moz_logins");
+-      for (let row of rows) {
+-        try {
+-          let hostname = row.getResultByName("hostname");
+-          let httpRealm = row.getResultByName("httpRealm");
+-          let formSubmitURL = row.getResultByName("formSubmitURL");
+-          let usernameField = row.getResultByName("usernameField");
+-          let passwordField = row.getResultByName("passwordField");
+-          let encryptedUsername = row.getResultByName("encryptedUsername");
+-          let encryptedPassword = row.getResultByName("encryptedPassword");
+-
+-          // The "guid" field was introduced in schema version 2, and the
+-          // "enctype" field was introduced in schema version 3.  We don't
+-          // support upgrading from older versions of the database.
+-          let guid = row.getResultByName("guid");
+-          let encType = row.getResultByName("encType");
+-
+-          // The time and count fields were introduced in schema version 4.
+-          let timeCreated = null;
+-          let timeLastUsed = null;
+-          let timePasswordChanged = null;
+-          let timesUsed = null;
+-          try {
+-            timeCreated = row.getResultByName("timeCreated");
+-            timeLastUsed = row.getResultByName("timeLastUsed");
+-            timePasswordChanged = row.getResultByName("timePasswordChanged");
+-            timesUsed = row.getResultByName("timesUsed");
+-          } catch (ex) { }
+-
+-          // These columns may be null either because they were not present in
+-          // the database or because the record was created on a new schema
+-          // version by an old application version.
+-          if (!timeCreated) {
+-            timeCreated = referenceTimeMs;
+-          }
+-          if (!timeLastUsed) {
+-            timeLastUsed = referenceTimeMs;
+-          }
+-          if (!timePasswordChanged) {
+-            timePasswordChanged = referenceTimeMs;
+-          }
+-          if (!timesUsed) {
+-            timesUsed = 1;
+-          }
+-
+-          this.store.data.logins.push({
+-            id: this.store.data.nextId++,
+-            hostname,
+-            httpRealm,
+-            formSubmitURL,
+-            usernameField,
+-            passwordField,
+-            encryptedUsername,
+-            encryptedPassword,
+-            guid,
+-            encType,
+-            timeCreated,
+-            timeLastUsed,
+-            timePasswordChanged,
+-            timesUsed,
+-          });
+-        } catch (ex) {
+-          Cu.reportError("Error importing login: " + ex);
+-        }
+-      }
+-
+-      rows = await connection.execute("SELECT * FROM moz_disabledHosts");
+-      for (let row of rows) {
+-        try {
+-          let hostname = row.getResultByName("hostname");
+-
+-          this.store.data.disabledHosts.push(hostname);
+-        } catch (ex) {
+-          Cu.reportError("Error importing disabled host: " + ex);
+-        }
+-      }
+-    } finally {
+-      await connection.close();
+-    }
+-  },
+-};
+diff --git a/toolkit/components/passwordmgr/moz.build b/toolkit/components/passwordmgr/moz.build
+--- a/toolkit/components/passwordmgr/moz.build
++++ b/toolkit/components/passwordmgr/moz.build
+@@ -36,17 +36,16 @@ EXTRA_COMPONENTS += [
+     'nsLoginManagerPrompter.js',
+     'passwordmgr.manifest',
+     'storage-json.js',
+ ]
+ 
+ EXTRA_JS_MODULES += [
+     'InsecurePasswordUtils.jsm',
+     'LoginHelper.jsm',
+-    'LoginImport.jsm',
+     'LoginManagerContent.jsm',
+     'LoginManagerParent.jsm',
+     'LoginRecipes.jsm',
+     'LoginStore.jsm',
+     'OSCrypto.jsm',
+ ]
+ 
+ if CONFIG['OS_TARGET'] == 'WINNT':
+diff --git a/toolkit/components/passwordmgr/storage-json.js b/toolkit/components/passwordmgr/storage-json.js
+--- a/toolkit/components/passwordmgr/storage-json.js
++++ b/toolkit/components/passwordmgr/storage-json.js
+@@ -8,18 +8,16 @@
+ 
+ "use strict";
+ 
+ ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm");
+ ChromeUtils.import("resource://gre/modules/Services.jsm");
+ 
+ ChromeUtils.defineModuleGetter(this, "LoginHelper",
+                                "resource://gre/modules/LoginHelper.jsm");
+-ChromeUtils.defineModuleGetter(this, "LoginImport",
+-                               "resource://gre/modules/LoginImport.jsm");
+ ChromeUtils.defineModuleGetter(this, "LoginStore",
+                                "resource://gre/modules/LoginStore.jsm");
+ ChromeUtils.defineModuleGetter(this, "OS",
+                                "resource://gre/modules/osfile.jsm");
+ 
+ XPCOMUtils.defineLazyServiceGetter(this, "gUUIDGenerator",
+                                    "@mozilla.org/uuid-generator;1",
+                                    "nsIUUIDGenerator");
+@@ -48,44 +46,16 @@ this.LoginManagerStorage_json.prototype 
+       let jsonPath = OS.Path.join(OS.Constants.Path.profileDir,
+                                   "logins.json");
+       this._store = new LoginStore(jsonPath);
+ 
+       return (async () => {
+         // Load the data asynchronously.
+         this.log("Opening database at", this._store.path);
+         await this._store.load();
+-
+-        // The import from previous versions operates the first time
+-        // that this built-in storage back-end is used.  This may be
+-        // later than expected, in case add-ons have registered an
+-        // alternate storage that disabled the default one.
+-        try {
+-          if (Services.prefs.getBoolPref("signon.importedFromSqlite")) {
+-            return;
+-          }
+-        } catch (ex) {
+-          // If the preference does not exist, we need to import.
+-        }
+-
+-        // Import only happens asynchronously.
+-        let sqlitePath = OS.Path.join(OS.Constants.Path.profileDir,
+-                                      "signons.sqlite");
+-        if (await OS.File.exists(sqlitePath)) {
+-          let loginImport = new LoginImport(this._store, sqlitePath);
+-          // Failures during import, for example due to a corrupt
+-          // file or a schema version that is too old, will not
+-          // prevent us from marking the operation as completed.
+-          // At the next startup, we will not try the import again.
+-          await loginImport.import().catch(Cu.reportError);
+-          this._store.saveSoon();
+-        }
+-
+-        // We won't attempt import again on next startup.
+-        Services.prefs.setBoolPref("signon.importedFromSqlite", true);
+       })().catch(Cu.reportError);
+     } catch (e) {
+       this.log("Initialization failed:", e);
+       throw new Error("Initialization failed");
+     }
+   },
+ 
+   /**
+diff --git a/toolkit/components/passwordmgr/test/unit/test_module_LoginImport.js b/toolkit/components/passwordmgr/test/unit/test_module_LoginImport.js
+deleted file mode 100644
+--- a/toolkit/components/passwordmgr/test/unit/test_module_LoginImport.js
++++ /dev/null
+@@ -1,242 +0,0 @@
+-/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+-/* vim: set ts=2 et sw=2 tw=80: */
+-/* Any copyright is dedicated to the Public Domain.
+- * http://creativecommons.org/publicdomain/zero/1.0/ */
+-
+-/**
+- * Tests the LoginImport object.
+- */
+-
+-"use strict";
+-
+-// Globals
+-
+-
+-ChromeUtils.defineModuleGetter(this, "LoginHelper",
+-                               "resource://gre/modules/LoginHelper.jsm");
+-ChromeUtils.defineModuleGetter(this, "LoginImport",
+-                               "resource://gre/modules/LoginImport.jsm");
+-ChromeUtils.defineModuleGetter(this, "LoginStore",
+-                               "resource://gre/modules/LoginStore.jsm");
+-ChromeUtils.defineModuleGetter(this, "Sqlite",
+-                               "resource://gre/modules/Sqlite.jsm");
+-
+-XPCOMUtils.defineLazyServiceGetter(this, "gLoginManagerCrypto",
+-                                   "@mozilla.org/login-manager/crypto/SDR;1",
+-                                   "nsILoginManagerCrypto");
+-XPCOMUtils.defineLazyServiceGetter(this, "gUUIDGenerator",
+-                                   "@mozilla.org/uuid-generator;1",
+-                                   "nsIUUIDGenerator");
+-
+-/**
+- * Creates empty login data tables in the given SQLite connection, resembling
+- * the most recent schema version (excluding indices).
+- */
+-function promiseCreateDatabaseSchema(aConnection)
+-{
+-  return (async function() {
+-    await aConnection.setSchemaVersion(5);
+-    await aConnection.execute("CREATE TABLE moz_logins (" +
+-                              "id                  INTEGER PRIMARY KEY," +
+-                              "hostname            TEXT NOT NULL," +
+-                              "httpRealm           TEXT," +
+-                              "formSubmitURL       TEXT," +
+-                              "usernameField       TEXT NOT NULL," +
+-                              "passwordField       TEXT NOT NULL," +
+-                              "encryptedUsername   TEXT NOT NULL," +
+-                              "encryptedPassword   TEXT NOT NULL," +
+-                              "guid                TEXT," +
+-                              "encType             INTEGER," +
+-                              "timeCreated         INTEGER," +
+-                              "timeLastUsed        INTEGER," +
+-                              "timePasswordChanged INTEGER," +
+-                              "timesUsed           INTEGER)");
+-    await aConnection.execute("CREATE TABLE moz_disabledHosts (" +
+-                              "id                  INTEGER PRIMARY KEY," +
+-                              "hostname            TEXT UNIQUE)");
+-    await aConnection.execute("CREATE TABLE moz_deleted_logins (" +
+-                              "id                  INTEGER PRIMARY KEY," +
+-                              "guid                TEXT," +
+-                              "timeDeleted         INTEGER)");
+-  })();
+-}
+-
+-/**
+- * Inserts a new entry in the database resembling the given nsILoginInfo object.
+- */
+-function promiseInsertLoginInfo(aConnection, aLoginInfo)
+-{
+-  aLoginInfo.QueryInterface(Ci.nsILoginMetaInfo);
+-
+-  // We can't use the aLoginInfo object directly in the execute statement
+-  // because the bind code in Sqlite.jsm doesn't allow objects with extra
+-  // properties beyond those being binded. So we might as well use an array as
+-  // it is simpler.
+-  let values = [
+-    aLoginInfo.hostname,
+-    aLoginInfo.httpRealm,
+-    aLoginInfo.formSubmitURL,
+-    aLoginInfo.usernameField,
+-    aLoginInfo.passwordField,
+-    gLoginManagerCrypto.encrypt(aLoginInfo.username),
+-    gLoginManagerCrypto.encrypt(aLoginInfo.password),
+-    aLoginInfo.guid,
+-    aLoginInfo.encType,
+-    aLoginInfo.timeCreated,
+-    aLoginInfo.timeLastUsed,
+-    aLoginInfo.timePasswordChanged,
+-    aLoginInfo.timesUsed,
+-  ];
+-
+-  return aConnection.execute("INSERT INTO moz_logins (hostname, " +
+-                             "httpRealm, formSubmitURL, usernameField, " +
+-                             "passwordField, encryptedUsername, " +
+-                             "encryptedPassword, guid, encType, timeCreated, " +
+-                             "timeLastUsed, timePasswordChanged, timesUsed) " +
+-                             "VALUES (?" + ",?".repeat(12) + ")", values);
+-}
+-
+-/**
+- * Inserts a new disabled host entry in the database.
+- */
+-function promiseInsertDisabledHost(aConnection, aHostname)
+-{
+-  return aConnection.execute("INSERT INTO moz_disabledHosts (hostname) " +
+-                             "VALUES (?)", [aHostname]);
+-}
+-
+-// Tests
+-
+-/**
+- * Imports login data from a SQLite file constructed using the test data.
+- */
+-add_task(async function test_import()
+-{
+-  let store = new LoginStore(getTempFile("test-import.json").path);
+-  let loginsSqlite = getTempFile("test-logins.sqlite").path;
+-
+-  // Prepare the logins to be imported, including the nsILoginMetaInfo data.
+-  let loginList = TestData.loginList();
+-  for (let loginInfo of loginList) {
+-    loginInfo.QueryInterface(Ci.nsILoginMetaInfo);
+-    loginInfo.guid = gUUIDGenerator.generateUUID().toString();
+-    loginInfo.timeCreated = Date.now();
+-    loginInfo.timeLastUsed = Date.now();
+-    loginInfo.timePasswordChanged = Date.now();
+-    loginInfo.timesUsed = 1;
+-  }
+-
+-  // Create and populate the SQLite database first.
+-  let connection = await Sqlite.openConnection({ path: loginsSqlite });
+-  try {
+-    await promiseCreateDatabaseSchema(connection);
+-    for (let loginInfo of loginList) {
+-      await promiseInsertLoginInfo(connection, loginInfo);
+-    }
+-    await promiseInsertDisabledHost(connection, "http://www.example.com");
+-    await promiseInsertDisabledHost(connection, "https://www.example.org");
+-  } finally {
+-    await connection.close();
+-  }
+-
+-  // The "load" method must be called before importing data.
+-  await store.load();
+-  await new LoginImport(store, loginsSqlite).import();
+-
+-  // Verify that every login in the test data has a matching imported row.
+-  Assert.equal(loginList.length, store.data.logins.length);
+-  Assert.ok(loginList.every(function(loginInfo) {
+-    return store.data.logins.some(function(loginDataItem) {
+-      let username = gLoginManagerCrypto.decrypt(loginDataItem.encryptedUsername);
+-      let password = gLoginManagerCrypto.decrypt(loginDataItem.encryptedPassword);
+-      return loginDataItem.hostname == loginInfo.hostname &&
+-             loginDataItem.httpRealm == loginInfo.httpRealm &&
+-             loginDataItem.formSubmitURL == loginInfo.formSubmitURL &&
+-             loginDataItem.usernameField == loginInfo.usernameField &&
+-             loginDataItem.passwordField == loginInfo.passwordField &&
+-             username == loginInfo.username &&
+-             password == loginInfo.password &&
+-             loginDataItem.guid == loginInfo.guid &&
+-             loginDataItem.encType == loginInfo.encType &&
+-             loginDataItem.timeCreated == loginInfo.timeCreated &&
+-             loginDataItem.timeLastUsed == loginInfo.timeLastUsed &&
+-             loginDataItem.timePasswordChanged == loginInfo.timePasswordChanged &&
+-             loginDataItem.timesUsed == loginInfo.timesUsed;
+-    });
+-  }));
+-
+-  // Verify that disabled hosts have been imported.
+-  Assert.equal(store.data.disabledHosts.length, 2);
+-  Assert.ok(store.data.disabledHosts.includes("http://www.example.com"));
+-  Assert.ok(store.data.disabledHosts.includes("https://www.example.org"));
+-});
+-
+-/**
+- * Tests imports of NULL values due to a downgraded database.
+- */
+-add_task(async function test_import_downgraded()
+-{
+-  let store = new LoginStore(getTempFile("test-import-downgraded.json").path);
+-  let loginsSqlite = getTempFile("test-logins-downgraded.sqlite").path;
+-
+-  // Create and populate the SQLite database first.
+-  let connection = await Sqlite.openConnection({ path: loginsSqlite });
+-  try {
+-    await promiseCreateDatabaseSchema(connection);
+-    await connection.setSchemaVersion(3);
+-    await promiseInsertLoginInfo(connection, TestData.formLogin({
+-      guid: gUUIDGenerator.generateUUID().toString(),
+-      timeCreated: null,
+-      timeLastUsed: null,
+-      timePasswordChanged: null,
+-      timesUsed: 0,
+-    }));
+-  } finally {
+-    await connection.close();
+-  }
+-
+-  // The "load" method must be called before importing data.
+-  await store.load();
+-  await new LoginImport(store, loginsSqlite).import();
+-
+-  // Verify that the missing metadata was generated correctly.
+-  let loginItem = store.data.logins[0];
+-  let creationTime = loginItem.timeCreated;
+-  LoginTestUtils.assertTimeIsAboutNow(creationTime);
+-  Assert.equal(loginItem.timeLastUsed, creationTime);
+-  Assert.equal(loginItem.timePasswordChanged, creationTime);
+-  Assert.equal(loginItem.timesUsed, 1);
+-});
+-
+-/**
+- * Verifies that importing from a SQLite file with database version 2 fails.
+- */
+-add_task(async function test_import_v2()
+-{
+-  let store = new LoginStore(getTempFile("test-import-v2.json").path);
+-  let loginsSqlite = do_get_file("data/signons-v2.sqlite").path;
+-
+-  // The "load" method must be called before importing data.
+-  await store.load();
+-  try {
+-    await new LoginImport(store, loginsSqlite).import();
+-    do_throw("The operation should have failed.");
+-  } catch (ex) { }
+-});
+-
+-/**
+- * Imports login data from a SQLite file, with database version 3.
+- */
+-add_task(async function test_import_v3()
+-{
+-  let store = new LoginStore(getTempFile("test-import-v3.json").path);
+-  let loginsSqlite = do_get_file("data/signons-v3.sqlite").path;
+-
+-  // The "load" method must be called before importing data.
+-  await store.load();
+-  await new LoginImport(store, loginsSqlite).import();
+-
+-  // We only execute basic integrity checks.
+-  Assert.equal(store.data.logins[0].usernameField, "u1");
+-  Assert.equal(store.data.disabledHosts.length, 0);
+-});
+diff --git a/toolkit/components/passwordmgr/test/unit/xpcshell.ini b/toolkit/components/passwordmgr/test/unit/xpcshell.ini
+--- a/toolkit/components/passwordmgr/test/unit/xpcshell.ini
++++ b/toolkit/components/passwordmgr/test/unit/xpcshell.ini
+@@ -1,15 +1,13 @@
+ [DEFAULT]
+ head = head.js
+ support-files = data/**
+ 
+-# Test JSON file access and import from SQLite, not applicable to Android.
+-[test_module_LoginImport.js]
+-skip-if = os == "android"
++# Test logins.json file access, not applicable to Android.
+ [test_module_LoginStore.js]
+ skip-if = os == "android"
+ [test_removeLegacySignonFiles.js]
+ skip-if = os == "android"
+ 
+ # The following tests apply to any storage back-end that supports add/modify/remove.
+ [test_context_menu.js]
+ skip-if = os == "android" # The context menu isn't used on Android.
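
The deleted LoginImport.jsm performed a one-time copy of the old signons.sqlite contents (moz_logins and moz_disabledHosts) into the JSON login store, gated on the signon.importedFromSqlite pref. For orientation only, a compressed Python sketch of that import step; the column names come from the deleted module, everything else (paths, error handling) is illustrative:

    import json
    import sqlite3
    import time

    def import_signons(sqlite_path, json_path):
        """One-time copy of signons.sqlite rows into a JSON store (values stay encrypted)."""
        db = sqlite3.connect(sqlite_path)
        db.row_factory = sqlite3.Row
        if db.execute("PRAGMA user_version").fetchone()[0] < 3:
            raise RuntimeError("profile too old to import")  # schema v3 was the minimum supported
        now_ms = int(time.time() * 1000)  # shared reference time for rows lacking timestamps
        logins = []
        for row in db.execute("SELECT * FROM moz_logins"):
            item = {key: row[key] for key in row.keys()}
            for key, fallback in (("timeCreated", now_ms), ("timeLastUsed", now_ms),
                                  ("timePasswordChanged", now_ms), ("timesUsed", 1)):
                if not item.get(key):  # column missing or NULL in old schemas
                    item[key] = fallback
            logins.append(item)
        disabled = [r["hostname"] for r in db.execute("SELECT hostname FROM moz_disabledHosts")]
        db.close()
        with open(json_path, "w") as fh:
            json.dump({"logins": logins, "disabledHosts": disabled}, fh)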

+ 24 - 0
mozilla-release/patches/1691957-87a1.patch

@@ -0,0 +1,24 @@
+# HG changeset patch
+# User Petr Sumbera <petr.sumbera@oracle.com>
+# Date 1613033783 0
+#      Thu Feb 11 08:56:23 2021 +0000
+# Node ID c138550a43f9e6558fb087b293012d930a35d3c1
+# Parent  f11a5a0bf4083b9729267c9372394dfd4bffff58
+Bug 1691957 - Snappy 1.1.8 now requires SNAPPY_IS_BIG_ENDIAN on big endian systems r=dom-storage-reviewers,sg
+
+Differential Revision: https://phabricator.services.mozilla.com/D104686
+
+diff --git a/other-licenses/snappy/moz.build b/other-licenses/snappy/moz.build
+--- a/other-licenses/snappy/moz.build
++++ b/other-licenses/snappy/moz.build
+@@ -25,9 +25,9 @@ FINAL_LIBRARY = 'xul'
+ # Suppress warnings in third-party code.
+ if CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+     CXXFLAGS += [
+         '-Wno-sign-compare',
+         '-Wno-unused-function'
+     ]
+ 
+ if CONFIG['TARGET_ENDIANNESS'] == 'big':
+-    DEFINES['IS_BIG_ENDIAN'] = 1
++    DEFINES['SNAPPY_IS_BIG_ENDIAN'] = 1

+ 6061 - 0
mozilla-release/patches/1743947-104a1.patch

@@ -0,0 +1,6061 @@
+# HG changeset patch
+# User Ryan VanderMeulen <ryanvm@gmail.com>
+# Date 1657640303 0
+#      Tue Jul 12 15:38:23 2022 +0000
+# Node ID 0e2eb956bfbdf2404a973e2a9661a8c54ed28549
+# Parent  0ee4444a6d715655fcfe54d09b77b456e959c0fb
+Bug 1743947 - Update Snappy to version 1.1.9. r=dom-storage-reviewers,janv
+
+Differential Revision: https://phabricator.services.mozilla.com/D132651
+
+diff --git a/dom/cache/FileUtils.cpp b/dom/cache/FileUtils.cpp
+--- a/dom/cache/FileUtils.cpp
++++ b/dom/cache/FileUtils.cpp
+@@ -17,21 +17,24 @@
+ #include "nsIFile.h"
+ #include "nsIUUIDGenerator.h"
+ #include "nsNetCID.h"
+ #include "nsNetUtil.h"
+ #include "nsISimpleEnumerator.h"
+ #include "nsServiceManagerUtils.h"
+ #include "nsString.h"
+ #include "nsThreadUtils.h"
++#include "snappy/snappy.h"
+ 
+ namespace mozilla {
+ namespace dom {
+ namespace cache {
+ 
++static_assert(SNAPPY_VERSION == 0x010109, "Check snappy version");
++
+ using mozilla::dom::quota::FileInputStream;
+ using mozilla::dom::quota::FileOutputStream;
+ using mozilla::dom::quota::PERSISTENCE_TYPE_DEFAULT;
+ using mozilla::dom::quota::QuotaManager;
+ using mozilla::dom::quota::QuotaObject;
+ 
+ namespace {
+ 
+diff --git a/dom/indexedDB/ActorsParent.cpp b/dom/indexedDB/ActorsParent.cpp
+--- a/dom/indexedDB/ActorsParent.cpp
++++ b/dom/indexedDB/ActorsParent.cpp
+@@ -132,17 +132,17 @@ namespace mozilla {
+ 
+ MOZ_TYPE_SPECIFIC_SCOPED_POINTER_TEMPLATE(ScopedPRFileDesc,
+                                           PRFileDesc,
+                                           PR_Close);
+ 
+ namespace dom {
+ namespace indexedDB {
+ 
+-static_assert(SNAPPY_VERSION == 0x010108);
++static_assert(SNAPPY_VERSION == 0x010109, "Check snappy version");
+ 
+ using namespace mozilla::dom::quota;
+ using namespace mozilla::ipc;
+ 
+ namespace {
+ 
+ class ConnectionPool;
+ class Cursor;
+diff --git a/dom/localstorage/SnappyUtils.cpp.1743947.later b/dom/localstorage/SnappyUtils.cpp.1743947.later
+new file mode 100644
+--- /dev/null
++++ b/dom/localstorage/SnappyUtils.cpp.1743947.later
+@@ -0,0 +1,21 @@
++--- SnappyUtils.cpp
+++++ SnappyUtils.cpp
++@@ -11,16 +11,18 @@
++ #include "mozilla/CheckedInt.h"
++ #include "mozilla/fallible.h"
++ #include "nsDebug.h"
++ #include "nsString.h"
++ #include "snappy/snappy.h"
++ 
++ namespace mozilla::dom {
++ 
+++static_assert(SNAPPY_VERSION == 0x010109);
+++
++ bool SnappyCompress(const nsACString& aSource, nsACString& aDest) {
++   MOZ_ASSERT(!aSource.IsVoid());
++ 
++   size_t uncompressedLength = aSource.Length();
++ 
++   if (uncompressedLength <= 16) {
++     aDest.SetIsVoid(true);
++     return true;
+diff --git a/other-licenses/snappy/README b/other-licenses/snappy/README
+--- a/other-licenses/snappy/README
++++ b/other-licenses/snappy/README
+@@ -1,24 +1,26 @@
+ See src/README for the README that ships with snappy.
+ 
+ Mozilla does not modify the actual snappy source with the exception of the
+ 'snappy-stubs-public.h' header. We have replaced its build system with our own.
+ 
+ Snappy comes from:
+   https://github.com/google/snappy
+ 
+-We are currently using revision: 1.1.8
++We are currently using revision: 1.1.9
+ 
+ To upgrade to a newer version:
+   1. Check out the new code using subversion.
+   2. Update 'snappy-stubs-public.h' in this directory with any changes that were
+      made to 'snappy-stubs-public.h.in' in the new source.
+   3. Copy the major/minor/patch versions from 'CMakeLists.txt' into
+      'snappy-stubs-public.h'.
+   4. Copy all source files from the new version into the src subdirectory. The
+      following are not needed:
+-       - 'CMakeLists.txt' file
++       - 'CMakeLists.txt', 'snappy_benchmark.cc', 'snappy_test_data.cc',
++         'snappy_test_data.h', and 'snappy_test_tool.cc' files
+        - 'cmake' subdirectory
+        - 'docs' subdirectory
+        - 'testdata' subdirectory
++       - 'third_party' subdirectory
+   5. Update the revision stamp in this file.
+ 
+diff --git a/other-licenses/snappy/snappy-stubs-public.h b/other-licenses/snappy/snappy-stubs-public.h
+--- a/other-licenses/snappy/snappy-stubs-public.h
++++ b/other-licenses/snappy/snappy-stubs-public.h
+@@ -31,59 +31,26 @@
+ //
+ // This file cannot include config.h, as it is included from snappy.h,
+ // which is a public header. Instead, snappy-stubs-public.h is generated by
+ // from snappy-stubs-public.h.in at configure time.
+ 
+ #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+ #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+ 
+-#include <stdint.h>
+-
+-#if defined IS_BIG_ENDIAN || defined __BIG_ENDIAN__
+-#  define WORDS_BIGENDIAN
+-#endif
++#include <cstddef>
+ 
+ #define SNAPPY_MAJOR 1
+ #define SNAPPY_MINOR 1
+-#define SNAPPY_PATCHLEVEL 8
++#define SNAPPY_PATCHLEVEL 9
+ #define SNAPPY_VERSION \
+   ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
+ 
+-#include <string>
+-
+ namespace snappy {
+ 
+-typedef int8_t int8;
+-typedef uint8_t uint8;
+-typedef int16_t int16;
+-typedef uint16_t uint16;
+-typedef int32_t int32;
+-typedef uint32_t uint32;
+-typedef int64_t int64;
+-typedef uint64_t uint64;
+-
+-typedef std::string string;
+-
+-#ifndef DISALLOW_COPY_AND_ASSIGN
+-#  define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+-    TypeName(const TypeName&);               \
+-    void operator=(const TypeName&)
+-#endif
+-
+ struct iovec {
+   void* iov_base;
+   size_t iov_len;
+ };
+ 
+-#if defined(_WIN32) || defined(_WIN64)
+-#  if defined(_WIN64)
+-typedef __int64 LONG_PTR;
+-#  else
+-typedef long LONG_PTR;
+-#  endif
+-typedef LONG_PTR SSIZE_T;
+-typedef SSIZE_T ssize_t;
+-#endif
+-
+ }  // namespace snappy
+ 
+ #endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+diff --git a/other-licenses/snappy/src/CONTRIBUTING.md b/other-licenses/snappy/src/CONTRIBUTING.md
+--- a/other-licenses/snappy/src/CONTRIBUTING.md
++++ b/other-licenses/snappy/src/CONTRIBUTING.md
+@@ -1,13 +1,33 @@
+ # How to Contribute
+ 
+ We'd love to accept your patches and contributions to this project. There are
+ just a few small guidelines you need to follow.
+ 
++## Project Goals
++
++In addition to the aims listed at the top of the [README](README.md) Snappy
++explicitly supports the following:
++
++1. C++11
++2. Clang (gcc and MSVC are best-effort).
++3. Low level optimizations (e.g. assembly or equivalent intrinsics) for:
++  1. [x86](https://en.wikipedia.org/wiki/X86)
++  2. [x86-64](https://en.wikipedia.org/wiki/X86-64)
++  3. ARMv7 (32-bit)
++  4. ARMv8 (AArch64)
++4. Supports only the Snappy compression scheme as described in
++  [format_description.txt](format_description.txt).
++5. CMake for building
++
++Changes adding features or dependencies outside of the core area of focus listed
++above might not be accepted. If in doubt post a message to the
++[Snappy discussion mailing list](https://groups.google.com/g/snappy-compression).
++
+ ## Contributor License Agreement
+ 
+ Contributions to this project must be accompanied by a Contributor License
+ Agreement. You (or your employer) retain the copyright to your contribution,
+ this simply gives us permission to use and redistribute your contributions as
+ part of the project. Head over to <https://cla.developers.google.com/> to see
+ your current agreements on file or to sign a new one.
+ 
+diff --git a/other-licenses/snappy/src/NEWS b/other-licenses/snappy/src/NEWS
+--- a/other-licenses/snappy/src/NEWS
++++ b/other-licenses/snappy/src/NEWS
+@@ -1,8 +1,14 @@
++Snappy v1.1.9, May 4th 2021:
++
++  * Performance improvements.
++
++  * Google Test and Google Benchmark are now bundled in third_party/.
++
+ Snappy v1.1.8, January 15th 2020:
+ 
+   * Small performance improvements.
+ 
+   * Removed snappy::string alias for std::string.
+ 
+   * Improved CMake configuration.
+ 
+diff --git a/other-licenses/snappy/src/README.md b/other-licenses/snappy/src/README.md
+--- a/other-licenses/snappy/src/README.md
++++ b/other-licenses/snappy/src/README.md
+@@ -1,10 +1,12 @@
+ Snappy, a fast compressor/decompressor.
+ 
++[![Build Status](https://travis-ci.org/google/snappy.svg?branch=master)](https://travis-ci.org/google/snappy)
++[![Build status](https://ci.appveyor.com/api/projects/status/t9nubcqkwo8rw8yn/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb)
+ 
+ Introduction
+ ============
+ 
+ Snappy is a compression/decompression library. It does not aim for maximum
+ compression, or compatibility with any other compression library; instead,
+ it aims for very high speeds and reasonable compression. For instance,
+ compared to the fastest mode of zlib, Snappy is an order of magnitude faster
+@@ -64,16 +66,17 @@ are of course most welcome; see "Contact
+ 
+ Building
+ ========
+ 
+ You need the CMake version specified in [CMakeLists.txt](./CMakeLists.txt)
+ or later to build:
+ 
+ ```bash
++git submodule update --init
+ mkdir build
+ cd build && cmake ../ && make
+ ```
+ 
+ Usage
+ =====
+ 
+ Note that Snappy, both the implementation and the main interface,
+@@ -102,47 +105,36 @@ where "input" and "output" are both inst
+ There are other interfaces that are more flexible in various ways, including
+ support for custom (non-array) input sources. See the header file for more
+ information.
+ 
+ 
+ Tests and benchmarks
+ ====================
+ 
+-When you compile Snappy, snappy_unittest is compiled in addition to the
+-library itself. You do not need it to use the compressor from your own library,
+-but it contains several useful components for Snappy development.
+-
+-First of all, it contains unit tests, verifying correctness on your machine in
+-various scenarios. If you want to change or optimize Snappy, please run the
+-tests to verify you have not broken anything. Note that if you have the
+-Google Test library installed, unit test behavior (especially failures) will be
+-significantly more user-friendly. You can find Google Test at
+-
+-  https://github.com/google/googletest
+-
+-You probably also want the gflags library for handling of command-line flags;
+-you can find it at
++When you compile Snappy, the following binaries are compiled in addition to the
++library itself. You do not need them to use the compressor from your own
++library, but they are useful for Snappy development.
+ 
+-  https://gflags.github.io/gflags/
+-
+-In addition to the unit tests, snappy contains microbenchmarks used to
+-tune compression and decompression performance. These are automatically run
+-before the unit tests, but you can disable them using the flag
+---run_microbenchmarks=false if you have gflags installed (otherwise you will
+-need to edit the source).
++* `snappy_benchmark` contains microbenchmarks used to tune compression and
++  decompression performance.
++* `snappy_unittests` contains unit tests, verifying correctness on your machine
++  in various scenarios.
++* `snappy_test_tool` can benchmark Snappy against a few other compression
++  libraries (zlib, LZO, LZF, and QuickLZ), if they were detected at configure
++  time. To benchmark using a given file, give the compression algorithm you want
++  to test Snappy against (e.g. --zlib) and then a list of one or more file names
++  on the command line.
+ 
+-Finally, snappy can benchmark Snappy against a few other compression libraries
+-(zlib, LZO, LZF, and QuickLZ), if they were detected at configure time.
+-To benchmark using a given file, give the compression algorithm you want to test
+-Snappy against (e.g. --zlib) and then a list of one or more file names on the
+-command line. The testdata/ directory contains the files used by the
+-microbenchmark, which should provide a reasonably balanced starting point for
+-benchmarking. (Note that baddata[1-3].snappy are not intended as benchmarks; they
+-are used to verify correctness in the presence of corrupted data in the unit
+-test.)
++If you want to change or optimize Snappy, please run the tests and benchmarks to
++verify you have not broken anything.
++
++The testdata/ directory contains the files used by the microbenchmarks, which
++should provide a reasonably balanced starting point for benchmarking. (Note that
++baddata[1-3].snappy are not intended as benchmarks; they are used to verify
++correctness in the presence of corrupted data in the unit test.)
+ 
+ 
+ Contact
+ =======
+ 
+-Snappy is distributed through GitHub. For the latest version, a bug tracker,
+-and other information, see https://github.com/google/snappy.
++Snappy is distributed through GitHub. For the latest version and other
++information, see https://github.com/google/snappy.
+diff --git a/other-licenses/snappy/src/snappy-internal.h b/other-licenses/snappy/src/snappy-internal.h
+--- a/other-licenses/snappy/src/snappy-internal.h
++++ b/other-licenses/snappy/src/snappy-internal.h
+@@ -41,26 +41,26 @@ namespace internal {
+ class WorkingMemory {
+  public:
+   explicit WorkingMemory(size_t input_size);
+   ~WorkingMemory();
+ 
+   // Allocates and clears a hash table using memory in "*this",
+   // stores the number of buckets in "*table_size" and returns a pointer to
+   // the base of the hash table.
+-  uint16* GetHashTable(size_t fragment_size, int* table_size) const;
++  uint16_t* GetHashTable(size_t fragment_size, int* table_size) const;
+   char* GetScratchInput() const { return input_; }
+   char* GetScratchOutput() const { return output_; }
+ 
+  private:
+-  char* mem_;      // the allocated memory, never nullptr
+-  size_t size_;    // the size of the allocated memory, never 0
+-  uint16* table_;  // the pointer to the hashtable
+-  char* input_;    // the pointer to the input scratch buffer
+-  char* output_;   // the pointer to the output scratch buffer
++  char* mem_;        // the allocated memory, never nullptr
++  size_t size_;      // the size of the allocated memory, never 0
++  uint16_t* table_;  // the pointer to the hashtable
++  char* input_;      // the pointer to the input scratch buffer
++  char* output_;     // the pointer to the output scratch buffer
+ 
+   // No copying
+   WorkingMemory(const WorkingMemory&);
+   void operator=(const WorkingMemory&);
+ };
+ 
+ // Flat array compression that does not emit the "uncompressed length"
+ // prefix. Compresses "input" string to the "*op" buffer.
+@@ -71,104 +71,188 @@ class WorkingMemory {
+ // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+ // REQUIRES: "table_size" is a power of two
+ //
+ // Returns an "end" pointer into "op" buffer.
+ // "end - op" is the compressed size of "input".
+ char* CompressFragment(const char* input,
+                        size_t input_length,
+                        char* op,
+-                       uint16* table,
++                       uint16_t* table,
+                        const int table_size);
+ 
+ // Find the largest n such that
+ //
+ //   s1[0,n-1] == s2[0,n-1]
+ //   and n <= (s2_limit - s2).
+ //
+ // Return make_pair(n, n < 8).
+ // Does not read *s2_limit or beyond.
+ // Does not read *(s1 + (s2_limit - s2)) or beyond.
+ // Requires that s2_limit >= s2.
+ //
++// In addition populate *data with the next 5 bytes from the end of the match.
++// This is only done if 8 bytes are available (s2_limit - s2 >= 8). The point is
++// that on some arch's this can be done faster in this routine than subsequent
++// loading from s2 + n.
++//
+ // Separate implementation for 64-bit, little-endian cpus.
+ #if !defined(SNAPPY_IS_BIG_ENDIAN) && \
+-    (defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM))
++    (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || defined(ARCH_ARM))
+ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
+                                                       const char* s2,
+-                                                      const char* s2_limit) {
++                                                      const char* s2_limit,
++                                                      uint64_t* data) {
+   assert(s2_limit >= s2);
+   size_t matched = 0;
+ 
+   // This block isn't necessary for correctness; we could just start looping
+   // immediately.  As an optimization though, it is useful.  It creates some not
+   // uncommon code paths that determine, without extra effort, whether the match
+   // length is less than 8.  In short, we are hoping to avoid a conditional
+   // branch, and perhaps get better code layout from the C++ compiler.
+-  if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
+-    uint64 a1 = UNALIGNED_LOAD64(s1);
+-    uint64 a2 = UNALIGNED_LOAD64(s2);
+-    if (a1 != a2) {
+-      return std::pair<size_t, bool>(Bits::FindLSBSetNonZero64(a1 ^ a2) >> 3,
+-                                     true);
++  if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
++    uint64_t a1 = UNALIGNED_LOAD64(s1);
++    uint64_t a2 = UNALIGNED_LOAD64(s2);
++    if (SNAPPY_PREDICT_TRUE(a1 != a2)) {
++      // This code is critical for performance. The reason is that it determines
++      // how much to advance `ip` (s2). This obviously depends on both the loads
++      // from the `candidate` (s1) and `ip`. Furthermore the next `candidate`
++      // depends on the advanced `ip` calculated here through a load, hash and
++      // new candidate hash lookup (a lot of cycles). This makes s1 (ie.
++      // `candidate`) the variable that limits throughput. This is the reason we
++      // go through hoops to have this function update `data` for the next iter.
++      // The straightforward code would use *data, given by
++      //
++      // *data = UNALIGNED_LOAD64(s2 + matched_bytes) (Latency of 5 cycles),
++      //
++      // as input for the hash table lookup to find next candidate. However
++      // this forces the load on the data dependency chain of s1, because
++      // matched_bytes directly depends on s1. However matched_bytes is 0..7, so
++      // we can also calculate *data by
++      //
++      // *data = AlignRight(UNALIGNED_LOAD64(s2), UNALIGNED_LOAD64(s2 + 8),
++      //                    matched_bytes);
++      //
++      // The loads do not depend on s1 anymore and are thus off the bottleneck.
++      // The straightforward implementation on x86_64 would be to use
++      //
++      // shrd rax, rdx, cl  (cl being matched_bytes * 8)
++      //
++      // unfortunately shrd with a variable shift has a 4 cycle latency. So this
++      // only wins 1 cycle. The BMI2 shrx instruction is a 1 cycle variable
++      // shift instruction but can only shift 64 bits. If we focus on just
++      // obtaining the least significant 4 bytes, we can obtain this by
++      //
++      // *data = ConditionalMove(matched_bytes < 4, UNALIGNED_LOAD64(s2),
++      //     UNALIGNED_LOAD64(s2 + 4) >> ((matched_bytes & 3) * 8);
++      //
++      // Written like the above, this is not a big win: the conditional move would be
++      // a cmp followed by a cmov (2 cycles) followed by a shift (1 cycle).
++      // However matched_bytes < 4 is equal to
++      // static_cast<uint32_t>(xorval) != 0. Written that way, the conditional
++      // move (2 cycles) can execute in parallel with FindLSBSetNonZero64
++      // (tzcnt), which takes 3 cycles.
++      uint64_t xorval = a1 ^ a2;
++      int shift = Bits::FindLSBSetNonZero64(xorval);
++      size_t matched_bytes = shift >> 3;
++#ifndef __x86_64__
++      *data = UNALIGNED_LOAD64(s2 + matched_bytes);
++#else
++      // Ideally this would just be
++      //
++      // a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
++      //
++      // However clang correctly infers that the above statement participates on
++      // a critical data dependency chain and thus, unfortunately, refuses to
++      // use a conditional move (it's tuned to cut data dependencies). In this
++      // case there is a longer parallel chain anyway AND this will be fairly
++      // unpredictable.
++      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
++      asm("testl %k2, %k2\n\t"
++          "cmovzq %1, %0\n\t"
++          : "+r"(a2)
++          : "r"(a3), "r"(xorval));
++      *data = a2 >> (shift & (3 * 8));
++#endif
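For reference, a portable sketch of what the testl/cmovzq sequence in the x86-64 branch above computes, using the same local variables but no inline asm; this is an illustration rather than a drop-in replacement:

// If the first four bytes already match (the low 32 bits of xorval are zero),
// use the load from s2 + 4 instead of the one from s2, then shift so that the
// four bytes starting at s2 + matched_bytes land in the low 32 bits of *data.
uint64_t base = (static_cast<uint32_t>(xorval) == 0) ? a3 : a2;
*data = base >> (shift & (3 * 8));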
++      return std::pair<size_t, bool>(matched_bytes, true);
+     } else {
+       matched = 8;
+       s2 += 8;
+     }
+   }
+ 
+   // Find out how long the match is. We loop over the data 64 bits at a
+   // time until we find a 64-bit block that doesn't match; then we find
+   // the first non-matching bit and use that to calculate the total
+   // length of the match.
+-  while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
+-    if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
++  while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
++    uint64_t a1 = UNALIGNED_LOAD64(s1 + matched);
++    uint64_t a2 = UNALIGNED_LOAD64(s2);
++    if (a1 == a2) {
+       s2 += 8;
+       matched += 8;
+     } else {
+-      uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
+-      int matching_bits = Bits::FindLSBSetNonZero64(x);
+-      matched += matching_bits >> 3;
++      uint64_t xorval = a1 ^ a2;
++      int shift = Bits::FindLSBSetNonZero64(xorval);
++      size_t matched_bytes = shift >> 3;
++#ifndef __x86_64__
++      *data = UNALIGNED_LOAD64(s2 + matched_bytes);
++#else
++      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
++      asm("testl %k2, %k2\n\t"
++          "cmovzq %1, %0\n\t"
++          : "+r"(a2)
++          : "r"(a3), "r"(xorval));
++      *data = a2 >> (shift & (3 * 8));
++#endif
++      matched += matched_bytes;
+       assert(matched >= 8);
+       return std::pair<size_t, bool>(matched, false);
+     }
+   }
+   while (SNAPPY_PREDICT_TRUE(s2 < s2_limit)) {
+     if (s1[matched] == *s2) {
+       ++s2;
+       ++matched;
+     } else {
++      if (s2 <= s2_limit - 8) {
++        *data = UNALIGNED_LOAD64(s2);
++      }
+       return std::pair<size_t, bool>(matched, matched < 8);
+     }
+   }
+   return std::pair<size_t, bool>(matched, matched < 8);
+ }
+ #else
+ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
+                                                       const char* s2,
+-                                                      const char* s2_limit) {
++                                                      const char* s2_limit,
++                                                      uint64_t* data) {
+   // Implementation based on the x86-64 version, above.
+   assert(s2_limit >= s2);
+   int matched = 0;
+ 
+   while (s2 <= s2_limit - 4 &&
+          UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
+     s2 += 4;
+     matched += 4;
+   }
+   if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
+-    uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
++    uint32_t x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+     int matching_bits = Bits::FindLSBSetNonZero(x);
+     matched += matching_bits >> 3;
++    s2 += matching_bits >> 3;
+   } else {
+     while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+       ++s2;
+       ++matched;
+     }
+   }
++  if (s2 <= s2_limit - 8) *data = LittleEndian::Load64(s2);
+   return std::pair<size_t, bool>(matched, matched < 8);
+ }
+ #endif
+ 
+ // Lookup tables for decompression code.  Give --snappy_dump_decompression_table
+ // to the unit test to recompute char_table.
+ 
+ enum {
+@@ -185,17 +269,18 @@ static const int kMaximumTagLength = 5; 
+ //      1..64   0..7            Literal/copy length encoded in opcode byte
+ //      0..7    8..10           Copy offset encoded in opcode byte / 256
+ //      0..4    11..13          Extra bytes after opcode
+ //
+ // We use eight bits for the length even though 7 would have sufficed
+ // because of efficiency reasons:
+ //      (1) Extracting a byte is faster than a bit-field
+ //      (2) It properly aligns copy offset so we do not need a <<8
+-static const uint16 char_table[256] = {
++static constexpr uint16_t char_table[256] = {
++    // clang-format off
+   0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
+   0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
+   0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
+   0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
+   0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
+   0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
+   0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
+   0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
+@@ -217,15 +302,16 @@ static const uint16 char_table[256] = {
+   0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
+   0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
+   0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
+   0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
+   0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
+   0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
+   0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
+   0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
+-  0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
++  0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040,
++    // clang-format on
+ };
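Given the layout described above (length in bits 0..7, opcode-encoded copy offset in bits 8..10, trailing bytes in bits 11..13), a decoder would unpack one entry roughly as in this sketch; the variable names are illustrative:

uint16_t entry = char_table[tag_byte];
uint32_t length      = entry & 0xff;         // 1..64
uint32_t offset_high = (entry >> 8) & 0x7;   // high bits of the copy offset (offset / 256)
uint32_t extra_bytes = (entry >> 11) & 0x7;  // 0..4 bytes following the tag byte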
+ 
+ }  // end namespace internal
+ }  // end namespace snappy
+ 
+ #endif  // THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
+diff --git a/other-licenses/snappy/src/snappy-sinksource.cc b/other-licenses/snappy/src/snappy-sinksource.cc
+--- a/other-licenses/snappy/src/snappy-sinksource.cc
++++ b/other-licenses/snappy/src/snappy-sinksource.cc
+@@ -21,46 +21,54 @@
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ 
+-#include <string.h>
++#include <stddef.h>
++#include <cstring>
+ 
+ #include "snappy-sinksource.h"
+ 
+ namespace snappy {
+ 
+-Source::~Source() { }
++Source::~Source() = default;
+ 
+-Sink::~Sink() { }
++Sink::~Sink() = default;
+ 
+ char* Sink::GetAppendBuffer(size_t length, char* scratch) {
++  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++  (void)length;
++
+   return scratch;
+ }
+ 
+ char* Sink::GetAppendBufferVariable(
+       size_t min_size, size_t desired_size_hint, char* scratch,
+       size_t scratch_size, size_t* allocated_size) {
++  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++  (void)min_size;
++  (void)desired_size_hint;
++
+   *allocated_size = scratch_size;
+   return scratch;
+ }
+ 
+ void Sink::AppendAndTakeOwnership(
+     char* bytes, size_t n,
+     void (*deleter)(void*, const char*, size_t),
+     void *deleter_arg) {
+   Append(bytes, n);
+   (*deleter)(deleter_arg, bytes, n);
+ }
+ 
+-ByteArraySource::~ByteArraySource() { }
++ByteArraySource::~ByteArraySource() = default;
+ 
+ size_t ByteArraySource::Available() const { return left_; }
+ 
+ const char* ByteArraySource::Peek(size_t* len) {
+   *len = left_;
+   return ptr_;
+ }
+ 
+@@ -69,36 +77,45 @@ void ByteArraySource::Skip(size_t n) {
+   ptr_ += n;
+ }
+ 
+ UncheckedByteArraySink::~UncheckedByteArraySink() { }
+ 
+ void UncheckedByteArraySink::Append(const char* data, size_t n) {
+   // Do no copying if the caller filled in the result of GetAppendBuffer()
+   if (data != dest_) {
+-    memcpy(dest_, data, n);
++    std::memcpy(dest_, data, n);
+   }
+   dest_ += n;
+ }
+ 
+ char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
++  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++  (void)len;
++  (void)scratch;
++
+   return dest_;
+ }
+ 
+ void UncheckedByteArraySink::AppendAndTakeOwnership(
+-    char* data, size_t n,
++    char* bytes, size_t n,
+     void (*deleter)(void*, const char*, size_t),
+     void *deleter_arg) {
+-  if (data != dest_) {
+-    memcpy(dest_, data, n);
+-    (*deleter)(deleter_arg, data, n);
++  if (bytes != dest_) {
++    std::memcpy(dest_, bytes, n);
++    (*deleter)(deleter_arg, bytes, n);
+   }
+   dest_ += n;
+ }
+ 
+ char* UncheckedByteArraySink::GetAppendBufferVariable(
+       size_t min_size, size_t desired_size_hint, char* scratch,
+       size_t scratch_size, size_t* allocated_size) {
++  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++  (void)min_size;
++  (void)scratch;
++  (void)scratch_size;
++
+   *allocated_size = desired_size_hint;
+   return dest_;
+ }
+ 
+ }  // namespace snappy
+diff --git a/other-licenses/snappy/src/snappy-sinksource.h b/other-licenses/snappy/src/snappy-sinksource.h
+--- a/other-licenses/snappy/src/snappy-sinksource.h
++++ b/other-licenses/snappy/src/snappy-sinksource.h
+@@ -141,38 +141,38 @@ class Source {
+   Source(const Source&);
+   void operator=(const Source&);
+ };
+ 
+ // A Source implementation that yields the contents of a flat array
+ class ByteArraySource : public Source {
+  public:
+   ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
+-  virtual ~ByteArraySource();
+-  virtual size_t Available() const;
+-  virtual const char* Peek(size_t* len);
+-  virtual void Skip(size_t n);
++  ~ByteArraySource() override;
++  size_t Available() const override;
++  const char* Peek(size_t* len) override;
++  void Skip(size_t n) override;
+  private:
+   const char* ptr_;
+   size_t left_;
+ };
+ 
+ // A Sink implementation that writes to a flat array without any bound checks.
+ class UncheckedByteArraySink : public Sink {
+  public:
+   explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
+-  virtual ~UncheckedByteArraySink();
+-  virtual void Append(const char* data, size_t n);
+-  virtual char* GetAppendBuffer(size_t len, char* scratch);
+-  virtual char* GetAppendBufferVariable(
++  ~UncheckedByteArraySink() override;
++  void Append(const char* data, size_t n) override;
++  char* GetAppendBuffer(size_t len, char* scratch) override;
++  char* GetAppendBufferVariable(
+       size_t min_size, size_t desired_size_hint, char* scratch,
+-      size_t scratch_size, size_t* allocated_size);
+-  virtual void AppendAndTakeOwnership(
++      size_t scratch_size, size_t* allocated_size) override;
++  void AppendAndTakeOwnership(
+       char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
+-      void *deleter_arg);
++      void *deleter_arg) override;
+ 
+   // Return the current output pointer so that a caller can see how
+   // many bytes were produced.
+   // Note: this is not a Sink method.
+   char* CurrentDestination() const { return dest_; }
+  private:
+   char* dest_;
+ };
+diff --git a/other-licenses/snappy/src/snappy-stubs-internal.cc b/other-licenses/snappy/src/snappy-stubs-internal.cc
+--- a/other-licenses/snappy/src/snappy-stubs-internal.cc
++++ b/other-licenses/snappy/src/snappy-stubs-internal.cc
+@@ -28,15 +28,15 @@
+ 
+ #include <algorithm>
+ #include <string>
+ 
+ #include "snappy-stubs-internal.h"
+ 
+ namespace snappy {
+ 
+-void Varint::Append32(std::string* s, uint32 value) {
++void Varint::Append32(std::string* s, uint32_t value) {
+   char buf[Varint::kMax32];
+   const char* p = Varint::Encode32(buf, value);
+   s->append(buf, p - buf);
+ }
+ 
+ }  // namespace snappy
+diff --git a/other-licenses/snappy/src/snappy-stubs-internal.h b/other-licenses/snappy/src/snappy-stubs-internal.h
+--- a/other-licenses/snappy/src/snappy-stubs-internal.h
++++ b/other-licenses/snappy/src/snappy-stubs-internal.h
+@@ -30,21 +30,23 @@
+ 
+ #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+ #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+ 
+ #ifdef HAVE_CONFIG_H
+ #include "config.h"
+ #endif
+ 
+-#include <string>
++#include <stdint.h>
+ 
+-#include <assert.h>
+-#include <stdlib.h>
+-#include <string.h>
++#include <cassert>
++#include <cstdlib>
++#include <cstring>
++#include <limits>
++#include <string>
+ 
+ #ifdef HAVE_SYS_MMAN_H
+ #include <sys/mman.h>
+ #endif
+ 
+ #ifdef HAVE_UNISTD_H
+ #include <unistd.h>
+ #endif
+@@ -62,522 +64,406 @@
+ #define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+     __msan_unpoison((address), (size))
+ #else
+ #define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */
+ #endif  // __has_feature(memory_sanitizer)
+ 
+ #include "snappy-stubs-public.h"
+ 
+-#if defined(__x86_64__)
+-
+-// Enable 64-bit optimized versions of some routines.
+-#define ARCH_K8 1
+-
+-#elif defined(__ppc64__)
+-
++// Used to enable 64-bit optimized versions of some routines.
++#if defined(__PPC64__) || defined(__powerpc64__)
+ #define ARCH_PPC 1
+-
+-#elif defined(__aarch64__)
+-
++#elif defined(__aarch64__) || defined(_M_ARM64)
+ #define ARCH_ARM 1
+-
+ #endif
+ 
+ // Needed by OS X, among others.
+ #ifndef MAP_ANONYMOUS
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif
+ 
+ // The size of an array, if known at compile-time.
+ // Will give unexpected results if used on a pointer.
+ // We undefine it first, since some compilers already have a definition.
+ #ifdef ARRAYSIZE
+ #undef ARRAYSIZE
+ #endif
+-#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
++#define ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}
+ 
+ // Static prediction hints.
+ #ifdef HAVE_BUILTIN_EXPECT
+ #define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+ #define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+ #else
+ #define SNAPPY_PREDICT_FALSE(x) x
+ #define SNAPPY_PREDICT_TRUE(x) x
+ #endif
+ 
+-// This is only used for recomputing the tag byte table used during
+-// decompression; for simplicity we just remove it from the open-source
+-// version (anyone who wants to regenerate it can just do the call
+-// themselves within main()).
+-#define DEFINE_bool(flag_name, default_value, description) \
+-  bool FLAGS_ ## flag_name = default_value
+-#define DECLARE_bool(flag_name) \
+-  extern bool FLAGS_ ## flag_name
++// Inlining hints.
++#ifdef HAVE_ATTRIBUTE_ALWAYS_INLINE
++#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
++#else
++#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
++#endif
++
++// Stubbed version of ABSL_FLAG.
++//
++// In the open source version, flags can only be changed at compile time.
++#define SNAPPY_FLAG(flag_type, flag_name, default_value, help) \
++  flag_type FLAGS_ ## flag_name = default_value
+ 
+ namespace snappy {
+ 
+-static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
+-static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
++// Stubbed version of absl::GetFlag().
++template <typename T>
++inline T GetFlag(T flag) { return flag; }
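A sketch of how the two stubs above are meant to be used together; the flag name snappy_example_flag is purely illustrative:

// Declared once at namespace scope; the value is fixed at compile time.
SNAPPY_FLAG(bool, snappy_example_flag, false, "An illustrative flag.");

// Read wherever needed, mirroring absl::GetFlag().
if (snappy::GetFlag(FLAGS_snappy_example_flag)) {
  // ...
}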
++
++static const uint32_t kuint32max = std::numeric_limits<uint32_t>::max();
++static const int64_t kint64max = std::numeric_limits<int64_t>::max();
+ 
+ // Potentially unaligned loads and stores.
+ 
+-// x86, PowerPC, and ARM64 can simply do these loads and stores native.
+-
+-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
+-    defined(__aarch64__)
+-
+-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+-#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+-
+-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+-#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+-
+-// ARMv7 and newer support native unaligned accesses, but only of 16-bit
+-// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
+-// do an unaligned read and rotate the words around a bit, or do the reads very
+-// slowly (trip through kernel mode). There's no simple #define that says just
+-// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
+-// sub-architectures.
+-//
+-// This is a mess, but there's not much we can do about it.
+-//
+-// To further complicate matters, only LDR instructions (single reads) are
+-// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
+-// explicitly tell the compiler that these accesses can be unaligned, it can and
+-// will combine accesses. On armcc, the way to signal this is done by accessing
+-// through the type (uint32 __packed *), but GCC has no such attribute
+-// (it ignores __attribute__((packed)) on individual variables). However,
+-// we can tell it that a _struct_ is unaligned, which has the same effect,
+-// so we do that.
+-
+-#elif defined(__arm__) && \
+-      !defined(__ARM_ARCH_4__) && \
+-      !defined(__ARM_ARCH_4T__) && \
+-      !defined(__ARM_ARCH_5__) && \
+-      !defined(__ARM_ARCH_5T__) && \
+-      !defined(__ARM_ARCH_5TE__) && \
+-      !defined(__ARM_ARCH_5TEJ__) && \
+-      !defined(__ARM_ARCH_6__) && \
+-      !defined(__ARM_ARCH_6J__) && \
+-      !defined(__ARM_ARCH_6K__) && \
+-      !defined(__ARM_ARCH_6Z__) && \
+-      !defined(__ARM_ARCH_6ZK__) && \
+-      !defined(__ARM_ARCH_6T2__)
+-
+-#if __GNUC__
+-#define ATTRIBUTE_PACKED __attribute__((__packed__))
+-#else
+-#define ATTRIBUTE_PACKED
+-#endif
+-
+-namespace base {
+-namespace internal {
+-
+-struct Unaligned16Struct {
+-  uint16 value;
+-  uint8 dummy;  // To make the size non-power-of-two.
+-} ATTRIBUTE_PACKED;
+-
+-struct Unaligned32Struct {
+-  uint32 value;
+-  uint8 dummy;  // To make the size non-power-of-two.
+-} ATTRIBUTE_PACKED;
+-
+-}  // namespace internal
+-}  // namespace base
+-
+-#define UNALIGNED_LOAD16(_p) \
+-    ((reinterpret_cast<const ::snappy::base::internal::Unaligned16Struct *>(_p))->value)
+-#define UNALIGNED_LOAD32(_p) \
+-    ((reinterpret_cast<const ::snappy::base::internal::Unaligned32Struct *>(_p))->value)
+-
+-#define UNALIGNED_STORE16(_p, _val) \
+-    ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
+-         (_val))
+-#define UNALIGNED_STORE32(_p, _val) \
+-    ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
+-         (_val))
+-
+-// TODO: NEON supports unaligned 64-bit loads and stores.
+-// See if that would be more efficient on platforms supporting it,
+-// at least for copies.
+-
+-inline uint64 UNALIGNED_LOAD64(const void *p) {
+-  uint64 t;
+-  memcpy(&t, p, sizeof t);
+-  return t;
++inline uint16_t UNALIGNED_LOAD16(const void *p) {
++  // Compiles to a single movzx/ldrh on clang/gcc/msvc.
++  uint16_t v;
++  std::memcpy(&v, p, sizeof(v));
++  return v;
+ }
+ 
+-inline void UNALIGNED_STORE64(void *p, uint64 v) {
+-  memcpy(p, &v, sizeof v);
++inline uint32_t UNALIGNED_LOAD32(const void *p) {
++  // Compiles to a single mov/ldr on clang/gcc/msvc.
++  uint32_t v;
++  std::memcpy(&v, p, sizeof(v));
++  return v;
+ }
+ 
+-#else
+-
+-// These functions are provided for architectures that don't support
+-// unaligned loads and stores.
+-
+-inline uint16 UNALIGNED_LOAD16(const void *p) {
+-  uint16 t;
+-  memcpy(&t, p, sizeof t);
+-  return t;
+-}
+-
+-inline uint32 UNALIGNED_LOAD32(const void *p) {
+-  uint32 t;
+-  memcpy(&t, p, sizeof t);
+-  return t;
+-}
+-
+-inline uint64 UNALIGNED_LOAD64(const void *p) {
+-  uint64 t;
+-  memcpy(&t, p, sizeof t);
+-  return t;
+-}
+-
+-inline void UNALIGNED_STORE16(void *p, uint16 v) {
+-  memcpy(p, &v, sizeof v);
+-}
+-
+-inline void UNALIGNED_STORE32(void *p, uint32 v) {
+-  memcpy(p, &v, sizeof v);
+-}
+-
+-inline void UNALIGNED_STORE64(void *p, uint64 v) {
+-  memcpy(p, &v, sizeof v);
++inline uint64_t UNALIGNED_LOAD64(const void *p) {
++  // Compiles to a single mov/ldr on clang/gcc/msvc.
++  uint64_t v;
++  std::memcpy(&v, p, sizeof(v));
++  return v;
+ }
+ 
+-#endif
+-
+-// The following guarantees declaration of the byte swap functions.
+-#if defined(SNAPPY_IS_BIG_ENDIAN)
+-
+-#ifdef HAVE_SYS_BYTEORDER_H
+-#include <sys/byteorder.h>
+-#endif
+-
+-#ifdef HAVE_SYS_ENDIAN_H
+-#include <sys/endian.h>
+-#endif
+-
+-#ifdef _MSC_VER
+-#include <stdlib.h>
+-#define bswap_16(x) _byteswap_ushort(x)
+-#define bswap_32(x) _byteswap_ulong(x)
+-#define bswap_64(x) _byteswap_uint64(x)
+-
+-#elif defined(__APPLE__)
+-// Mac OS X / Darwin features
+-#include <libkern/OSByteOrder.h>
+-#define bswap_16(x) OSSwapInt16(x)
+-#define bswap_32(x) OSSwapInt32(x)
+-#define bswap_64(x) OSSwapInt64(x)
+-
+-#elif defined(HAVE_BYTESWAP_H)
+-#include <byteswap.h>
+-
+-#elif defined(bswap32)
+-// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
+-#define bswap_16(x) bswap16(x)
+-#define bswap_32(x) bswap32(x)
+-#define bswap_64(x) bswap64(x)
+-
+-#elif defined(BSWAP_64)
+-// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
+-#define bswap_16(x) BSWAP_16(x)
+-#define bswap_32(x) BSWAP_32(x)
+-#define bswap_64(x) BSWAP_64(x)
+-
+-#else
+-
+-inline uint16 bswap_16(uint16 x) {
+-  return (x << 8) | (x >> 8);
++inline void UNALIGNED_STORE16(void *p, uint16_t v) {
++  // Compiles to a single mov/strh on clang/gcc/msvc.
++  std::memcpy(p, &v, sizeof(v));
+ }
+ 
+-inline uint32 bswap_32(uint32 x) {
+-  x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
+-  return (x >> 16) | (x << 16);
++inline void UNALIGNED_STORE32(void *p, uint32_t v) {
++  // Compiles to a single mov/str on clang/gcc/msvc.
++  std::memcpy(p, &v, sizeof(v));
+ }
+ 
+-inline uint64 bswap_64(uint64 x) {
+-  x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
+-  x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
+-  return (x >> 32) | (x << 32);
++inline void UNALIGNED_STORE64(void *p, uint64_t v) {
++  // Compiles to a single mov/str on clang/gcc/msvc.
++  std::memcpy(p, &v, sizeof(v));
+ }
+ 
+-#endif
+-
+-#endif  // defined(SNAPPY_IS_BIG_ENDIAN)
+-
+ // Convert to little-endian storage, opposite of network format.
+ // Convert x from host to little endian: x = LittleEndian.FromHost(x);
+ // convert x from little endian to host: x = LittleEndian.ToHost(x);
+ //
+ //  Store values into unaligned memory converting to little endian order:
+ //    LittleEndian.Store16(p, x);
+ //
+ //  Load unaligned values stored in little endian converting to host order:
+ //    x = LittleEndian.Load16(p);
+ class LittleEndian {
+  public:
+-  // Conversion functions.
+-#if defined(SNAPPY_IS_BIG_ENDIAN)
+-
+-  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
+-  static uint16 ToHost16(uint16 x) { return bswap_16(x); }
++  // Functions to do unaligned loads and stores in little-endian order.
++  static inline uint16_t Load16(const void *ptr) {
++    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+ 
+-  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
+-  static uint32 ToHost32(uint32 x) { return bswap_32(x); }
++    // Compiles to a single movzx/ldrh on recent clang and gcc.
++    return (static_cast<uint16_t>(buffer[0])) |
++            (static_cast<uint16_t>(buffer[1]) << 8);
++  }
+ 
+-  static bool IsLittleEndian() { return false; }
+-
+-#else  // !defined(SNAPPY_IS_BIG_ENDIAN)
++  static inline uint32_t Load32(const void *ptr) {
++    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+ 
+-  static uint16 FromHost16(uint16 x) { return x; }
+-  static uint16 ToHost16(uint16 x) { return x; }
++    // Compiles to a single mov/ldr on recent clang and gcc.
++    return (static_cast<uint32_t>(buffer[0])) |
++            (static_cast<uint32_t>(buffer[1]) << 8) |
++            (static_cast<uint32_t>(buffer[2]) << 16) |
++            (static_cast<uint32_t>(buffer[3]) << 24);
++  }
+ 
+-  static uint32 FromHost32(uint32 x) { return x; }
+-  static uint32 ToHost32(uint32 x) { return x; }
++  static inline uint64_t Load64(const void *ptr) {
++    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+ 
+-  static bool IsLittleEndian() { return true; }
+-
+-#endif  // !defined(SNAPPY_IS_BIG_ENDIAN)
+-
+-  // Functions to do unaligned loads and stores in little-endian order.
+-  static uint16 Load16(const void *p) {
+-    return ToHost16(UNALIGNED_LOAD16(p));
++    // Compiles to a single mov/ldr on recent clang and gcc.
++    return (static_cast<uint64_t>(buffer[0])) |
++            (static_cast<uint64_t>(buffer[1]) << 8) |
++            (static_cast<uint64_t>(buffer[2]) << 16) |
++            (static_cast<uint64_t>(buffer[3]) << 24) |
++            (static_cast<uint64_t>(buffer[4]) << 32) |
++            (static_cast<uint64_t>(buffer[5]) << 40) |
++            (static_cast<uint64_t>(buffer[6]) << 48) |
++            (static_cast<uint64_t>(buffer[7]) << 56);
+   }
+ 
+-  static void Store16(void *p, uint16 v) {
+-    UNALIGNED_STORE16(p, FromHost16(v));
++  static inline void Store16(void *dst, uint16_t value) {
++    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
++
++    // Compiles to a single mov/strh on recent clang and gcc.
++    buffer[0] = static_cast<uint8_t>(value);
++    buffer[1] = static_cast<uint8_t>(value >> 8);
++  }
++
++  static void Store32(void *dst, uint32_t value) {
++    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
++
++    // Compiles to a single mov/str on recent clang and gcc.
++    buffer[0] = static_cast<uint8_t>(value);
++    buffer[1] = static_cast<uint8_t>(value >> 8);
++    buffer[2] = static_cast<uint8_t>(value >> 16);
++    buffer[3] = static_cast<uint8_t>(value >> 24);
+   }
+ 
+-  static uint32 Load32(const void *p) {
+-    return ToHost32(UNALIGNED_LOAD32(p));
++  static void Store64(void* dst, uint64_t value) {
++    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
++
++    // Compiles to a single mov/str on recent clang and gcc.
++    buffer[0] = static_cast<uint8_t>(value);
++    buffer[1] = static_cast<uint8_t>(value >> 8);
++    buffer[2] = static_cast<uint8_t>(value >> 16);
++    buffer[3] = static_cast<uint8_t>(value >> 24);
++    buffer[4] = static_cast<uint8_t>(value >> 32);
++    buffer[5] = static_cast<uint8_t>(value >> 40);
++    buffer[6] = static_cast<uint8_t>(value >> 48);
++    buffer[7] = static_cast<uint8_t>(value >> 56);
+   }
+ 
+-  static void Store32(void *p, uint32 v) {
+-    UNALIGNED_STORE32(p, FromHost32(v));
++  static inline constexpr bool IsLittleEndian() {
++#if defined(SNAPPY_IS_BIG_ENDIAN)
++    return false;
++#else
++    return true;
++#endif  // defined(SNAPPY_IS_BIG_ENDIAN)
+   }
+ };
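A small round-trip sketch with the helpers above; the result is the same on big- and little-endian hosts:

unsigned char buffer[4];
LittleEndian::Store32(buffer, 0x11223344u);
// buffer now holds 0x44 0x33 0x22 0x11 (least significant byte first).
assert(LittleEndian::Load32(buffer) == 0x11223344u);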
+ 
+ // Some bit-manipulation functions.
+ class Bits {
+  public:
+   // Return floor(log2(n)) for positive integer n.
+-  static int Log2FloorNonZero(uint32 n);
++  static int Log2FloorNonZero(uint32_t n);
+ 
+   // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
+-  static int Log2Floor(uint32 n);
++  static int Log2Floor(uint32_t n);
+ 
+   // Return the first set least / most significant bit, 0-indexed.  Returns an
+   // undefined value if n == 0.  FindLSBSetNonZero() is similar to ffs() except
+   // that it's 0-indexed.
+-  static int FindLSBSetNonZero(uint32 n);
++  static int FindLSBSetNonZero(uint32_t n);
+ 
+-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+-  static int FindLSBSetNonZero64(uint64 n);
+-#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
++  static int FindLSBSetNonZero64(uint64_t n);
+ 
+  private:
+   // No copying
+   Bits(const Bits&);
+   void operator=(const Bits&);
+ };
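A few sample values for the helpers above, as a sanity check:

assert(Bits::Log2Floor(0) == -1);
assert(Bits::Log2FloorNonZero(32) == 5);                     // floor(log2(32))
assert(Bits::FindLSBSetNonZero(40) == 3);                    // 40 == 0b101000
assert(Bits::FindLSBSetNonZero64(uint64_t{1} << 40) == 40);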
+ 
+-#ifdef HAVE_BUILTIN_CTZ
++#if defined(HAVE_BUILTIN_CTZ)
+ 
+-inline int Bits::Log2FloorNonZero(uint32 n) {
++inline int Bits::Log2FloorNonZero(uint32_t n) {
+   assert(n != 0);
+   // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
+   // represents subtraction in base 2 and observes that there's no carry.
+   //
+   // GCC and Clang represent __builtin_clz on x86 as 31 ^ _bit_scan_reverse(x).
+   // Using "31 ^" here instead of "31 -" allows the optimizer to strip the
+   // function body down to _bit_scan_reverse(x).
+   return 31 ^ __builtin_clz(n);
+ }
+ 
+-inline int Bits::Log2Floor(uint32 n) {
++inline int Bits::Log2Floor(uint32_t n) {
+   return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
+ }
+ 
+-inline int Bits::FindLSBSetNonZero(uint32 n) {
++inline int Bits::FindLSBSetNonZero(uint32_t n) {
+   assert(n != 0);
+   return __builtin_ctz(n);
+ }
+ 
+-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+-inline int Bits::FindLSBSetNonZero64(uint64 n) {
+-  assert(n != 0);
+-  return __builtin_ctzll(n);
+-}
+-#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+-
+ #elif defined(_MSC_VER)
+ 
+-inline int Bits::Log2FloorNonZero(uint32 n) {
++inline int Bits::Log2FloorNonZero(uint32_t n) {
+   assert(n != 0);
++  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+   unsigned long where;
+   _BitScanReverse(&where, n);
+   return static_cast<int>(where);
+ }
+ 
+-inline int Bits::Log2Floor(uint32 n) {
++inline int Bits::Log2Floor(uint32_t n) {
++  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+   unsigned long where;
+   if (_BitScanReverse(&where, n))
+     return static_cast<int>(where);
+   return -1;
+ }
+ 
+-inline int Bits::FindLSBSetNonZero(uint32 n) {
++inline int Bits::FindLSBSetNonZero(uint32_t n) {
+   assert(n != 0);
++  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+   unsigned long where;
+   if (_BitScanForward(&where, n))
+     return static_cast<int>(where);
+   return 32;
+ }
+ 
+-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+-inline int Bits::FindLSBSetNonZero64(uint64 n) {
+-  assert(n != 0);
+-  unsigned long where;
+-  if (_BitScanForward64(&where, n))
+-    return static_cast<int>(where);
+-  return 64;
+-}
+-#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+-
+ #else  // Portable versions.
+ 
+-inline int Bits::Log2FloorNonZero(uint32 n) {
++inline int Bits::Log2FloorNonZero(uint32_t n) {
+   assert(n != 0);
+ 
+   int log = 0;
+-  uint32 value = n;
++  uint32_t value = n;
+   for (int i = 4; i >= 0; --i) {
+     int shift = (1 << i);
+-    uint32 x = value >> shift;
++    uint32_t x = value >> shift;
+     if (x != 0) {
+       value = x;
+       log += shift;
+     }
+   }
+   assert(value == 1);
+   return log;
+ }
+ 
+-inline int Bits::Log2Floor(uint32 n) {
++inline int Bits::Log2Floor(uint32_t n) {
+   return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
+ }
+ 
+-inline int Bits::FindLSBSetNonZero(uint32 n) {
++inline int Bits::FindLSBSetNonZero(uint32_t n) {
+   assert(n != 0);
+ 
+   int rc = 31;
+   for (int i = 4, shift = 1 << 4; i >= 0; --i) {
+-    const uint32 x = n << shift;
++    const uint32_t x = n << shift;
+     if (x != 0) {
+       n = x;
+       rc -= shift;
+     }
+     shift >>= 1;
+   }
+   return rc;
+ }
+ 
+-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
++#endif  // End portable versions.
++
++#if defined(HAVE_BUILTIN_CTZ)
++
++inline int Bits::FindLSBSetNonZero64(uint64_t n) {
++  assert(n != 0);
++  return __builtin_ctzll(n);
++}
++
++#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
++// _BitScanForward64() is only available on x64 and ARM64.
++
++inline int Bits::FindLSBSetNonZero64(uint64_t n) {
++  assert(n != 0);
++  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
++  unsigned long where;
++  if (_BitScanForward64(&where, n))
++    return static_cast<int>(where);
++  return 64;
++}
++
++#else  // Portable version.
++
+ // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
+-inline int Bits::FindLSBSetNonZero64(uint64 n) {
++inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+   assert(n != 0);
+ 
+-  const uint32 bottombits = static_cast<uint32>(n);
++  const uint32_t bottombits = static_cast<uint32_t>(n);
+   if (bottombits == 0) {
+-    // Bottom bits are zero, so scan in top bits
+-    return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
++    // Bottom bits are zero, so scan the top bits.
++    return 32 + FindLSBSetNonZero(static_cast<uint32_t>(n >> 32));
+   } else {
+     return FindLSBSetNonZero(bottombits);
+   }
+ }
+-#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+ 
+-#endif  // End portable versions.
++#endif  // End portable version.
+ 
+ // Variable-length integer encoding.
+ class Varint {
+  public:
+-  // Maximum lengths of varint encoding of uint32.
++  // Maximum lengths of varint encoding of uint32_t.
+   static const int kMax32 = 5;
+ 
+   // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
+   // Never reads a character at or beyond limit.  If a valid/terminated varint32
+   // was found in the range, stores it in *OUTPUT and returns a pointer just
+   // past the last byte of the varint32. Else returns NULL.  On success,
+   // "result <= limit".
+   static const char* Parse32WithLimit(const char* ptr, const char* limit,
+-                                      uint32* OUTPUT);
++                                      uint32_t* OUTPUT);
+ 
+   // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
+   // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
+   //            byte just past the last encoded byte.
+-  static char* Encode32(char* ptr, uint32 v);
++  static char* Encode32(char* ptr, uint32_t v);
+ 
+   // EFFECTS    Appends the varint representation of "value" to "*s".
+-  static void Append32(std::string* s, uint32 value);
++  static void Append32(std::string* s, uint32_t value);
+ };
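A sketch of an encode/parse round trip with the helpers declared above:

char buf[Varint::kMax32];
char* end = Varint::Encode32(buf, 300);  // writes the two bytes 0xac 0x02
uint32_t value = 0;
const char* next = Varint::Parse32WithLimit(buf, end, &value);
// On success next == end and value == 300; next is NULL for malformed input.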
+ 
+ inline const char* Varint::Parse32WithLimit(const char* p,
+                                             const char* l,
+-                                            uint32* OUTPUT) {
++                                            uint32_t* OUTPUT) {
+   const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
+   const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
+-  uint32 b, result;
++  uint32_t b, result;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result = b & 127;          if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) <<  7; if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
+   return NULL;       // Value is too long to be a varint32
+  done:
+   *OUTPUT = result;
+   return reinterpret_cast<const char*>(ptr);
+ }
+ 
+-inline char* Varint::Encode32(char* sptr, uint32 v) {
++inline char* Varint::Encode32(char* sptr, uint32_t v) {
+   // Operate on characters as unsigneds
+-  unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
+-  static const int B = 128;
+-  if (v < (1<<7)) {
+-    *(ptr++) = v;
+-  } else if (v < (1<<14)) {
+-    *(ptr++) = v | B;
+-    *(ptr++) = v>>7;
+-  } else if (v < (1<<21)) {
+-    *(ptr++) = v | B;
+-    *(ptr++) = (v>>7) | B;
+-    *(ptr++) = v>>14;
+-  } else if (v < (1<<28)) {
+-    *(ptr++) = v | B;
+-    *(ptr++) = (v>>7) | B;
+-    *(ptr++) = (v>>14) | B;
+-    *(ptr++) = v>>21;
++  uint8_t* ptr = reinterpret_cast<uint8_t*>(sptr);
++  static const uint8_t B = 128;
++  if (v < (1 << 7)) {
++    *(ptr++) = static_cast<uint8_t>(v);
++  } else if (v < (1 << 14)) {
++    *(ptr++) = static_cast<uint8_t>(v | B);
++    *(ptr++) = static_cast<uint8_t>(v >> 7);
++  } else if (v < (1 << 21)) {
++    *(ptr++) = static_cast<uint8_t>(v | B);
++    *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
++    *(ptr++) = static_cast<uint8_t>(v >> 14);
++  } else if (v < (1 << 28)) {
++    *(ptr++) = static_cast<uint8_t>(v | B);
++    *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
++    *(ptr++) = static_cast<uint8_t>((v >> 14) | B);
++    *(ptr++) = static_cast<uint8_t>(v >> 21);
+   } else {
+-    *(ptr++) = v | B;
+-    *(ptr++) = (v>>7) | B;
+-    *(ptr++) = (v>>14) | B;
+-    *(ptr++) = (v>>21) | B;
+-    *(ptr++) = v>>28;
++    *(ptr++) = static_cast<uint8_t>(v | B);
++    *(ptr++) = static_cast<uint8_t>((v>>7) | B);
++    *(ptr++) = static_cast<uint8_t>((v>>14) | B);
++    *(ptr++) = static_cast<uint8_t>((v>>21) | B);
++    *(ptr++) = static_cast<uint8_t>(v >> 28);
+   }
+   return reinterpret_cast<char*>(ptr);
+ }
+ 
+ // If you know the internal layout of the std::string in use, you can
+ // replace this function with one that resizes the string without
+ // filling the new space with zeros (if applicable) --
+ // it will be non-portable but faster.
+diff --git a/other-licenses/snappy/src/snappy-stubs-public.h.in b/other-licenses/snappy/src/snappy-stubs-public.h.in
+--- a/other-licenses/snappy/src/snappy-stubs-public.h.in
++++ b/other-licenses/snappy/src/snappy-stubs-public.h.in
+@@ -31,40 +31,29 @@
+ // This file cannot include config.h, as it is included from snappy.h,
+ // which is a public header. Instead, snappy-stubs-public.h is generated by
+ // from snappy-stubs-public.h.in at configure time.
+ 
+ #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+ #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+ 
+ #include <cstddef>
+-#include <cstdint>
+-#include <string>
+ 
+ #if ${HAVE_SYS_UIO_H_01}  // HAVE_SYS_UIO_H
+ #include <sys/uio.h>
+ #endif  // HAVE_SYS_UIO_H
+ 
+ #define SNAPPY_MAJOR ${PROJECT_VERSION_MAJOR}
+ #define SNAPPY_MINOR ${PROJECT_VERSION_MINOR}
+ #define SNAPPY_PATCHLEVEL ${PROJECT_VERSION_PATCH}
+ #define SNAPPY_VERSION \
+     ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
+ 
+ namespace snappy {
+ 
+-using int8 = std::int8_t;
+-using uint8 = std::uint8_t;
+-using int16 = std::int16_t;
+-using uint16 = std::uint16_t;
+-using int32 = std::int32_t;
+-using uint32 = std::uint32_t;
+-using int64 = std::int64_t;
+-using uint64 = std::uint64_t;
+-
+ #if !${HAVE_SYS_UIO_H_01}  // !HAVE_SYS_UIO_H
+ // Windows does not have an iovec type, yet the concept is universally useful.
+ // It is simple to define it ourselves, so we put it inside our own namespace.
+ struct iovec {
+   void* iov_base;
+   size_t iov_len;
+ };
+ #endif  // !HAVE_SYS_UIO_H
+diff --git a/other-licenses/snappy/src/snappy-test.cc b/other-licenses/snappy/src/snappy-test.cc
+--- a/other-licenses/snappy/src/snappy-test.cc
++++ b/other-licenses/snappy/src/snappy-test.cc
+@@ -23,248 +23,138 @@
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ //
+ // Various stubs for the unit tests for the open-source version of Snappy.
+ 
+-#ifdef HAVE_CONFIG_H
+-#include "config.h"
+-#endif
+-
+-#ifdef HAVE_WINDOWS_H
+-// Needed to be able to use std::max without workarounds in the source code.
+-// https://support.microsoft.com/en-us/help/143208/prb-using-stl-in-windows-program-can-cause-min-max-conflicts
+-#define NOMINMAX
+-#include <windows.h>
+-#endif
+-
+ #include "snappy-test.h"
+ 
+ #include <algorithm>
++#include <cstdarg>
++#include <cstdio>
++#include <cstdlib>
++#include <iostream>
++#include <string>
+ 
+-DEFINE_bool(run_microbenchmarks, true,
+-            "Run microbenchmarks before doing anything else.");
++namespace file {
++
++OptionsStub::OptionsStub() = default;
++OptionsStub::~OptionsStub() = default;
++
++const OptionsStub &Defaults() {
++  static OptionsStub defaults;
++  return defaults;
++}
++
++StatusStub::StatusStub() = default;
++StatusStub::StatusStub(const StatusStub &) = default;
++StatusStub &StatusStub::operator=(const StatusStub &) = default;
++StatusStub::~StatusStub() = default;
++
++bool StatusStub::ok() { return true; }
++
++StatusStub GetContents(const std::string &filename, std::string *output,
++                       const OptionsStub & /* options */) {
++  std::FILE *fp = std::fopen(filename.c_str(), "rb");
++  if (fp == nullptr) {
++    std::perror(filename.c_str());
++    std::exit(1);
++  }
++
++  output->clear();
++  while (!std::feof(fp)) {
++    char buffer[4096];
++    size_t bytes_read = std::fread(buffer, 1, sizeof(buffer), fp);
++    if (bytes_read == 0 && std::ferror(fp)) {
++      std::perror("fread");
++      std::exit(1);
++    }
++    output->append(buffer, bytes_read);
++  }
++
++  std::fclose(fp);
++  return StatusStub();
++}
++
++StatusStub SetContents(const std::string &file_name, const std::string &content,
++                       const OptionsStub & /* options */) {
++  std::FILE *fp = std::fopen(file_name.c_str(), "wb");
++  if (fp == nullptr) {
++    std::perror(file_name.c_str());
++    std::exit(1);
++  }
++
++  size_t bytes_written = std::fwrite(content.data(), 1, content.size(), fp);
++  if (bytes_written != content.size()) {
++    std::perror("fwrite");
++    std::exit(1);
++  }
++
++  std::fclose(fp);
++  return StatusStub();
++}
++
++}  // namespace file
+ 
+ namespace snappy {
+ 
+ std::string ReadTestDataFile(const std::string& base, size_t size_limit) {
+   std::string contents;
+   const char* srcdir = getenv("srcdir");  // This is set by Automake.
+   std::string prefix;
+   if (srcdir) {
+     prefix = std::string(srcdir) + "/";
+   }
+   file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults()
+-      ).CheckSuccess();
++      ).ok();
+   if (size_limit > 0) {
+     contents = contents.substr(0, size_limit);
+   }
+   return contents;
+ }
+ 
+-std::string ReadTestDataFile(const std::string& base) {
+-  return ReadTestDataFile(base, 0);
+-}
+-
+ std::string StrFormat(const char* format, ...) {
+-  char buf[4096];
+-  va_list ap;
++  char buffer[4096];
++  std::va_list ap;
+   va_start(ap, format);
+-  vsnprintf(buf, sizeof(buf), format, ap);
++  std::vsnprintf(buffer, sizeof(buffer), format, ap);
+   va_end(ap);
+-  return buf;
++  return buffer;
+ }
+ 
+-bool benchmark_running = false;
+-int64 benchmark_real_time_us = 0;
+-int64 benchmark_cpu_time_us = 0;
+-std::string* benchmark_label = nullptr;
+-int64 benchmark_bytes_processed = 0;
+-
+-void ResetBenchmarkTiming() {
+-  benchmark_real_time_us = 0;
+-  benchmark_cpu_time_us = 0;
+-}
++LogMessage::~LogMessage() { std::cerr << std::endl; }
+ 
+-#ifdef WIN32
+-LARGE_INTEGER benchmark_start_real;
+-FILETIME benchmark_start_cpu;
+-#else  // WIN32
+-struct timeval benchmark_start_real;
+-struct rusage benchmark_start_cpu;
+-#endif  // WIN32
+-
+-void StartBenchmarkTiming() {
+-#ifdef WIN32
+-  QueryPerformanceCounter(&benchmark_start_real);
+-  FILETIME dummy;
+-  CHECK(GetProcessTimes(
+-      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
+-#else
+-  gettimeofday(&benchmark_start_real, NULL);
+-  if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
+-    perror("getrusage(RUSAGE_SELF)");
+-    exit(1);
+-  }
+-#endif
+-  benchmark_running = true;
++LogMessage &LogMessage::operator<<(const std::string &message) {
++  std::cerr << message;
++  return *this;
+ }
+ 
+-void StopBenchmarkTiming() {
+-  if (!benchmark_running) {
+-    return;
+-  }
+-
+-#ifdef WIN32
+-  LARGE_INTEGER benchmark_stop_real;
+-  LARGE_INTEGER benchmark_frequency;
+-  QueryPerformanceCounter(&benchmark_stop_real);
+-  QueryPerformanceFrequency(&benchmark_frequency);
+-
+-  double elapsed_real = static_cast<double>(
+-      benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
+-      benchmark_frequency.QuadPart;
+-  benchmark_real_time_us += elapsed_real * 1e6 + 0.5;
+-
+-  FILETIME benchmark_stop_cpu, dummy;
+-  CHECK(GetProcessTimes(
+-      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));
+-
+-  ULARGE_INTEGER start_ulargeint;
+-  start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
+-  start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;
+-
+-  ULARGE_INTEGER stop_ulargeint;
+-  stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
+-  stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;
+-
+-  benchmark_cpu_time_us +=
+-      (stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
+-#else  // WIN32
+-  struct timeval benchmark_stop_real;
+-  gettimeofday(&benchmark_stop_real, NULL);
+-  benchmark_real_time_us +=
+-      1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
+-  benchmark_real_time_us +=
+-      (benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);
+-
+-  struct rusage benchmark_stop_cpu;
+-  if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
+-    perror("getrusage(RUSAGE_SELF)");
+-    exit(1);
+-  }
+-  benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
+-                                      benchmark_start_cpu.ru_utime.tv_sec);
+-  benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
+-                            benchmark_start_cpu.ru_utime.tv_usec);
+-#endif  // WIN32
+-
+-  benchmark_running = false;
+-}
+-
+-void SetBenchmarkLabel(const std::string& str) {
+-  if (benchmark_label) {
+-    delete benchmark_label;
+-  }
+-  benchmark_label = new std::string(str);
+-}
+-
+-void SetBenchmarkBytesProcessed(int64 bytes) {
+-  benchmark_bytes_processed = bytes;
++LogMessage &LogMessage::operator<<(int number) {
++  std::cerr << number;
++  return *this;
+ }
+ 
+-struct BenchmarkRun {
+-  int64 real_time_us;
+-  int64 cpu_time_us;
+-};
+-
+-struct BenchmarkCompareCPUTime {
+-  bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
+-    return a.cpu_time_us < b.cpu_time_us;
+-  }
+-};
+-
+-void Benchmark::Run() {
+-  for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
+-    // Run a few iterations first to find out approximately how fast
+-    // the benchmark is.
+-    const int kCalibrateIterations = 100;
+-    ResetBenchmarkTiming();
+-    StartBenchmarkTiming();
+-    (*function_)(kCalibrateIterations, test_case_num);
+-    StopBenchmarkTiming();
+-
+-    // Let each test case run for about 200ms, but at least as many
+-    // as we used to calibrate.
+-    // Run five times and pick the median.
+-    const int kNumRuns = 5;
+-    const int kMedianPos = kNumRuns / 2;
+-    int num_iterations = 0;
+-    if (benchmark_real_time_us > 0) {
+-      num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
+-    }
+-    num_iterations = std::max(num_iterations, kCalibrateIterations);
+-    BenchmarkRun benchmark_runs[kNumRuns];
+-
+-    for (int run = 0; run < kNumRuns; ++run) {
+-      ResetBenchmarkTiming();
+-      StartBenchmarkTiming();
+-      (*function_)(num_iterations, test_case_num);
+-      StopBenchmarkTiming();
+-
+-      benchmark_runs[run].real_time_us = benchmark_real_time_us;
+-      benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
+-    }
++#ifdef _MSC_VER
++// ~LogMessageCrash calls std::abort() and therefore never exits. This is by
++// design, so temporarily disable warning C4722.
++#pragma warning(push)
++#pragma warning(disable : 4722)
++#endif
+ 
+-    std::string heading = StrFormat("%s/%d", name_.c_str(), test_case_num);
+-    std::string human_readable_speed;
++LogMessageCrash::~LogMessageCrash() {
++  std::cerr << std::endl;
++  std::abort();
++}
+ 
+-    std::nth_element(benchmark_runs,
+-                     benchmark_runs + kMedianPos,
+-                     benchmark_runs + kNumRuns,
+-                     BenchmarkCompareCPUTime());
+-    int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
+-    int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
+-    if (cpu_time_us <= 0) {
+-      human_readable_speed = "?";
+-    } else {
+-      int64 bytes_per_second =
+-          benchmark_bytes_processed * 1000000 / cpu_time_us;
+-      if (bytes_per_second < 1024) {
+-        human_readable_speed =
+-            StrFormat("%dB/s", static_cast<int>(bytes_per_second));
+-      } else if (bytes_per_second < 1024 * 1024) {
+-        human_readable_speed = StrFormat(
+-            "%.1fkB/s", bytes_per_second / 1024.0f);
+-      } else if (bytes_per_second < 1024 * 1024 * 1024) {
+-        human_readable_speed = StrFormat(
+-            "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
+-      } else {
+-        human_readable_speed = StrFormat(
+-            "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
+-      }
+-    }
+-
+-    fprintf(stderr,
+-#ifdef WIN32
+-            "%-18s %10I64d %10I64d %10d %s  %s\n",
+-#else
+-            "%-18s %10lld %10lld %10d %s  %s\n",
++#ifdef _MSC_VER
++#pragma warning(pop)
+ #endif
+-            heading.c_str(),
+-            static_cast<long long>(real_time_us * 1000 / num_iterations),
+-            static_cast<long long>(cpu_time_us * 1000 / num_iterations),
+-            num_iterations,
+-            human_readable_speed.c_str(),
+-            benchmark_label->c_str());
+-  }
+-}
+ 
+ #ifdef HAVE_LIBZ
+ 
+ ZLib::ZLib()
+     : comp_init_(false),
+       uncomp_init_(false) {
+   Reinit();
+ }
+diff --git a/other-licenses/snappy/src/snappy-test.h b/other-licenses/snappy/src/snappy-test.h
+--- a/other-licenses/snappy/src/snappy-test.h
++++ b/other-licenses/snappy/src/snappy-test.h
+@@ -26,258 +26,200 @@
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ //
+ // Various stubs for the unit tests for the open-source version of Snappy.
+ 
+ #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
+ #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
+ 
+-#include <iostream>
+-#include <string>
++#ifdef HAVE_CONFIG_H
++#include "config.h"
++#endif
+ 
+ #include "snappy-stubs-internal.h"
+ 
+-#include <stdio.h>
+-#include <stdarg.h>
+-
+ #ifdef HAVE_SYS_MMAN_H
+ #include <sys/mman.h>
+ #endif
+ 
+ #ifdef HAVE_SYS_RESOURCE_H
+ #include <sys/resource.h>
+ #endif
+ 
+ #ifdef HAVE_SYS_TIME_H
+ #include <sys/time.h>
+ #endif
+ 
+ #ifdef HAVE_WINDOWS_H
++// Needed to be able to use std::max without workarounds in the source code.
++// https://support.microsoft.com/en-us/help/143208/prb-using-stl-in-windows-program-can-cause-min-max-conflicts
++#define NOMINMAX
+ #include <windows.h>
+ #endif
+ 
+-#ifdef HAVE_GTEST
+-
+-#include <gtest/gtest.h>
+-#undef TYPED_TEST
+-#define TYPED_TEST TEST
+-#define INIT_GTEST(argc, argv) ::testing::InitGoogleTest(argc, *argv)
+-
+-#else
+-
+-// Stubs for if the user doesn't have Google Test installed.
+-
+-#define TEST(test_case, test_subcase) \
+-  void Test_ ## test_case ## _ ## test_subcase()
+-#define INIT_GTEST(argc, argv)
+-
+-#define TYPED_TEST TEST
+-#define EXPECT_EQ CHECK_EQ
+-#define EXPECT_NE CHECK_NE
+-#define EXPECT_FALSE(cond) CHECK(!(cond))
+-
+-#endif
+-
+-#ifdef HAVE_GFLAGS
+-
+-#include <gflags/gflags.h>
+-
+-// This is tricky; both gflags and Google Test want to look at the command line
+-// arguments. Google Test seems to be the most happy with unknown arguments,
+-// though, so we call it first and hope for the best.
+-#define InitGoogle(argv0, argc, argv, remove_flags) \
+-  INIT_GTEST(argc, argv); \
+-  google::ParseCommandLineFlags(argc, argv, remove_flags);
+-
+-#else
+-
+-// If we don't have the gflags package installed, these can only be
+-// changed at compile time.
+-#define DEFINE_int32(flag_name, default_value, description) \
+-  static int FLAGS_ ## flag_name = default_value;
+-
+-#define InitGoogle(argv0, argc, argv, remove_flags) \
+-  INIT_GTEST(argc, argv)
+-
+-#endif
++#define InitGoogle(argv0, argc, argv, remove_flags) ((void)(0))
+ 
+ #ifdef HAVE_LIBZ
+ #include "zlib.h"
+ #endif
+ 
+ #ifdef HAVE_LIBLZO2
+ #include "lzo/lzo1x.h"
+ #endif
+ 
+-namespace {
++#ifdef HAVE_LIBLZ4
++#include "lz4.h"
++#endif
+ 
+ namespace file {
+-  int Defaults() { return 0; }
+-
+-  class DummyStatus {
+-   public:
+-    void CheckSuccess() { }
+-  };
+ 
+-  DummyStatus GetContents(
+-      const std::string& filename, std::string* data, int unused) {
+-    FILE* fp = fopen(filename.c_str(), "rb");
+-    if (fp == NULL) {
+-      perror(filename.c_str());
+-      exit(1);
+-    }
++// Stubs the class file::Options.
++//
++// This class should not be instantiated explicitly. It should only be used by
++// passing file::Defaults() to file::GetContents() / file::SetContents().
++class OptionsStub {
++ public:
++  OptionsStub();
++  OptionsStub(const OptionsStub &) = delete;
++  OptionsStub &operator=(const OptionsStub &) = delete;
++  ~OptionsStub();
++};
+ 
+-    data->clear();
+-    while (!feof(fp)) {
+-      char buf[4096];
+-      size_t ret = fread(buf, 1, 4096, fp);
+-      if (ret == 0 && ferror(fp)) {
+-        perror("fread");
+-        exit(1);
+-      }
+-      data->append(std::string(buf, ret));
+-    }
++const OptionsStub &Defaults();
+ 
+-    fclose(fp);
+-
+-    return DummyStatus();
+-  }
+-
+-  inline DummyStatus SetContents(
+-      const std::string& filename, const std::string& str, int unused) {
+-    FILE* fp = fopen(filename.c_str(), "wb");
+-    if (fp == NULL) {
+-      perror(filename.c_str());
+-      exit(1);
+-    }
++// Stubs the class absl::Status.
++//
++// This class should not be instantiated explicitly. It should only be used by
++// passing the result of file::GetContents() / file::SetContents() to
++// CHECK_OK().
++class StatusStub {
++ public:
++  StatusStub();
++  StatusStub(const StatusStub &);
++  StatusStub &operator=(const StatusStub &);
++  ~StatusStub();
+ 
+-    int ret = fwrite(str.data(), str.size(), 1, fp);
+-    if (ret != 1) {
+-      perror("fwrite");
+-      exit(1);
+-    }
++  bool ok();
++};
+ 
+-    fclose(fp);
++StatusStub GetContents(const std::string &file_name, std::string *output,
++                       const OptionsStub & /* options */);
+ 
+-    return DummyStatus();
+-  }
++StatusStub SetContents(const std::string &file_name, const std::string &content,
++                       const OptionsStub & /* options */);
++
+ }  // namespace file
+ 
+-}  // namespace
+-
+ namespace snappy {
+ 
+ #define FLAGS_test_random_seed 301
+-using TypeParam = std::string;
+-
+-void Test_CorruptedTest_VerifyCorrupted();
+-void Test_Snappy_SimpleTests();
+-void Test_Snappy_MaxBlowup();
+-void Test_Snappy_RandomData();
+-void Test_Snappy_FourByteOffset();
+-void Test_SnappyCorruption_TruncatedVarint();
+-void Test_SnappyCorruption_UnterminatedVarint();
+-void Test_SnappyCorruption_OverflowingVarint();
+-void Test_Snappy_ReadPastEndOfBuffer();
+-void Test_Snappy_FindMatchLength();
+-void Test_Snappy_FindMatchLengthRandom();
+ 
+ std::string ReadTestDataFile(const std::string& base, size_t size_limit);
+ 
+-std::string ReadTestDataFile(const std::string& base);
+-
+-// A sprintf() variant that returns a std::string.
++// A std::sprintf() variant that returns a std::string.
+ // Not safe for general use due to truncation issues.
+ std::string StrFormat(const char* format, ...);
+ 
+ // A wall-time clock. This stub is not super-accurate, nor resistant to the
+ // system time changing.
+ class CycleTimer {
+  public:
+-  CycleTimer() : real_time_us_(0) {}
++  inline CycleTimer() : real_time_us_(0) {}
++  inline ~CycleTimer() = default;
+ 
+-  void Start() {
++  inline void Start() {
+ #ifdef WIN32
+     QueryPerformanceCounter(&start_);
+ #else
+-    gettimeofday(&start_, NULL);
++    ::gettimeofday(&start_, nullptr);
+ #endif
+   }
+ 
+-  void Stop() {
++  inline void Stop() {
+ #ifdef WIN32
+     LARGE_INTEGER stop;
+     LARGE_INTEGER frequency;
+     QueryPerformanceCounter(&stop);
+     QueryPerformanceFrequency(&frequency);
+ 
+     double elapsed = static_cast<double>(stop.QuadPart - start_.QuadPart) /
+         frequency.QuadPart;
+     real_time_us_ += elapsed * 1e6 + 0.5;
+ #else
+-    struct timeval stop;
+-    gettimeofday(&stop, NULL);
++    struct ::timeval stop;
++    ::gettimeofday(&stop, nullptr);
+ 
+     real_time_us_ += 1000000 * (stop.tv_sec - start_.tv_sec);
+     real_time_us_ += (stop.tv_usec - start_.tv_usec);
+ #endif
+   }
+ 
+-  double Get() {
+-    return real_time_us_ * 1e-6;
+-  }
++  inline double Get() { return real_time_us_ * 1e-6; }
+ 
+  private:
+-  int64 real_time_us_;
++  int64_t real_time_us_;
+ #ifdef WIN32
+   LARGE_INTEGER start_;
+ #else
+-  struct timeval start_;
++  struct ::timeval start_;
+ #endif
+ };
+ 
+-// Minimalistic microbenchmark framework.
++// Logging.
+ 
+-typedef void (*BenchmarkFunction)(int, int);
++class LogMessage {
++ public:
++  inline LogMessage() = default;
++  ~LogMessage();
+ 
+-class Benchmark {
++  LogMessage &operator<<(const std::string &message);
++  LogMessage &operator<<(int number);
++};
++
++class LogMessageCrash : public LogMessage {
+  public:
+-  Benchmark(const std::string& name, BenchmarkFunction function)
+-      : name_(name), function_(function) {}
++  inline LogMessageCrash() = default;
++  ~LogMessageCrash();
++};
+ 
+-  Benchmark* DenseRange(int start, int stop) {
+-    start_ = start;
+-    stop_ = stop;
+-    return this;
+-  }
++// This class is used to explicitly ignore values in the conditional
++// logging macros.  This avoids compiler warnings like "value computed
++// is not used" and "statement has no effect".
+ 
+-  void Run();
++class LogMessageVoidify {
++ public:
++  inline LogMessageVoidify() = default;
++  inline ~LogMessageVoidify() = default;
+ 
+- private:
+-  const std::string name_;
+-  const BenchmarkFunction function_;
+-  int start_, stop_;
++  // This has to be an operator with a precedence lower than << but
++  // higher than ?:
++  inline void operator&(const LogMessage &) {}
+ };
+-#define BENCHMARK(benchmark_name) \
+-  Benchmark* Benchmark_ ## benchmark_name = \
+-          (new Benchmark(#benchmark_name, benchmark_name))
++
++// Asserts, both versions activated in debug mode only,
++// and ones that are always active.
++
++#define CRASH_UNLESS(condition)  \
++  SNAPPY_PREDICT_TRUE(condition) \
++      ? (void)0                  \
++      : snappy::LogMessageVoidify() & snappy::LogMessageCrash()
+ 
+-extern Benchmark* Benchmark_BM_UFlat;
+-extern Benchmark* Benchmark_BM_UIOVec;
+-extern Benchmark* Benchmark_BM_UValidate;
+-extern Benchmark* Benchmark_BM_ZFlat;
+-extern Benchmark* Benchmark_BM_ZFlatAll;
+-extern Benchmark* Benchmark_BM_ZFlatIncreasingTableSize;
++#define LOG(level) LogMessage()
++#define VLOG(level) \
++  true ? (void)0 : snappy::LogMessageVoidify() & snappy::LogMessage()
+ 
+-void ResetBenchmarkTiming();
+-void StartBenchmarkTiming();
+-void StopBenchmarkTiming();
+-void SetBenchmarkLabel(const std::string& str);
+-void SetBenchmarkBytesProcessed(int64 bytes);
++#define CHECK(cond) CRASH_UNLESS(cond)
++#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
++#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
++#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
++#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
++#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
++#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
++#define CHECK_OK(cond) (cond).ok()
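A standalone sketch (not from the patch, hypothetical names Msg, Voidify, MY_CHECK) of why the voidify helper above relies on operator&: it binds more loosely than << but more tightly than ?:, so the streamed message groups with the temporary logger inside the branch that is only evaluated when the condition fails.

#include <iostream>

// Hypothetical stand-ins for the patch's LogMessage / LogMessageVoidify.
struct Msg {
  ~Msg() { std::cerr << std::endl; }
  Msg& operator<<(const char* s) { std::cerr << s; return *this; }
};
struct Voidify {
  // Precedence below << and above ?: -- the same trick as in the macros above.
  void operator&(const Msg&) {}
};

// Parses as: (cond) ? (void)0 : (Voidify() & (Msg() << "...")).
#define MY_CHECK(cond) (cond) ? (void)0 : Voidify() & Msg()

int main() {
  MY_CHECK(1 + 1 == 2) << "false branch not evaluated; prints nothing";
  MY_CHECK(1 + 1 == 3) << "condition failed; this message is printed";
  return 0;
}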
+ 
+ #ifdef HAVE_LIBZ
+ 
+ // Object-oriented wrapper around zlib.
+ class ZLib {
+  public:
+   ZLib();
+   ~ZLib();
+@@ -392,134 +334,9 @@ class ZLib {
+   // These are used only with chunked compression.
+   bool first_chunk_;       // true if we need to emit headers with this chunk
+ };
+ 
+ #endif  // HAVE_LIBZ
+ 
+ }  // namespace snappy
+ 
+-DECLARE_bool(run_microbenchmarks);
+-
+-static inline void RunSpecifiedBenchmarks() {
+-  if (!FLAGS_run_microbenchmarks) {
+-    return;
+-  }
+-
+-  fprintf(stderr, "Running microbenchmarks.\n");
+-#ifndef NDEBUG
+-  fprintf(stderr, "WARNING: Compiled with assertions enabled, will be slow.\n");
+-#endif
+-#ifndef __OPTIMIZE__
+-  fprintf(stderr, "WARNING: Compiled without optimization, will be slow.\n");
+-#endif
+-  fprintf(stderr, "Benchmark            Time(ns)    CPU(ns) Iterations\n");
+-  fprintf(stderr, "---------------------------------------------------\n");
+-
+-  snappy::Benchmark_BM_UFlat->Run();
+-  snappy::Benchmark_BM_UIOVec->Run();
+-  snappy::Benchmark_BM_UValidate->Run();
+-  snappy::Benchmark_BM_ZFlat->Run();
+-  snappy::Benchmark_BM_ZFlatAll->Run();
+-  snappy::Benchmark_BM_ZFlatIncreasingTableSize->Run();
+-
+-  fprintf(stderr, "\n");
+-}
+-
+-#ifndef HAVE_GTEST
+-
+-static inline int RUN_ALL_TESTS() {
+-  fprintf(stderr, "Running correctness tests.\n");
+-  snappy::Test_CorruptedTest_VerifyCorrupted();
+-  snappy::Test_Snappy_SimpleTests();
+-  snappy::Test_Snappy_MaxBlowup();
+-  snappy::Test_Snappy_RandomData();
+-  snappy::Test_Snappy_FourByteOffset();
+-  snappy::Test_SnappyCorruption_TruncatedVarint();
+-  snappy::Test_SnappyCorruption_UnterminatedVarint();
+-  snappy::Test_SnappyCorruption_OverflowingVarint();
+-  snappy::Test_Snappy_ReadPastEndOfBuffer();
+-  snappy::Test_Snappy_FindMatchLength();
+-  snappy::Test_Snappy_FindMatchLengthRandom();
+-  fprintf(stderr, "All tests passed.\n");
+-
+-  return 0;
+-}
+-
+-#endif  // HAVE_GTEST
+-
+-// For main().
+-namespace snappy {
+-
+-// Logging.
+-
+-#define LOG(level) LogMessage()
+-#define VLOG(level) true ? (void)0 : \
+-    snappy::LogMessageVoidify() & snappy::LogMessage()
+-
+-class LogMessage {
+- public:
+-  LogMessage() { }
+-  ~LogMessage() {
+-    std::cerr << std::endl;
+-  }
+-
+-  LogMessage& operator<<(const std::string& msg) {
+-    std::cerr << msg;
+-    return *this;
+-  }
+-  LogMessage& operator<<(int x) {
+-    std::cerr << x;
+-    return *this;
+-  }
+-};
+-
+-// Asserts, both versions activated in debug mode only,
+-// and ones that are always active.
+-
+-#define CRASH_UNLESS(condition) \
+-    SNAPPY_PREDICT_TRUE(condition) ? (void)0 : \
+-    snappy::LogMessageVoidify() & snappy::LogMessageCrash()
+-
+-#ifdef _MSC_VER
+-// ~LogMessageCrash calls abort() and therefore never exits. This is by design
+-// so temporarily disable warning C4722.
+-#pragma warning(push)
+-#pragma warning(disable:4722)
+-#endif
+-
+-class LogMessageCrash : public LogMessage {
+- public:
+-  LogMessageCrash() { }
+-  ~LogMessageCrash() {
+-    std::cerr << std::endl;
+-    abort();
+-  }
+-};
+-
+-#ifdef _MSC_VER
+-#pragma warning(pop)
+-#endif
+-
+-// This class is used to explicitly ignore values in the conditional
+-// logging macros.  This avoids compiler warnings like "value computed
+-// is not used" and "statement has no effect".
+-
+-class LogMessageVoidify {
+- public:
+-  LogMessageVoidify() { }
+-  // This has to be an operator with a precedence lower than << but
+-  // higher than ?:
+-  void operator&(const LogMessage&) { }
+-};
+-
+-#define CHECK(cond) CRASH_UNLESS(cond)
+-#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
+-#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
+-#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
+-#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
+-#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
+-#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
+-#define CHECK_OK(cond) (cond).CheckSuccess()
+-
+-}  // namespace snappy
+-
+ #endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
+diff --git a/other-licenses/snappy/src/snappy.cc b/other-licenses/snappy/src/snappy.cc
+--- a/other-licenses/snappy/src/snappy.cc
++++ b/other-licenses/snappy/src/snappy.cc
+@@ -21,19 +21,19 @@
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ 
+-#include "snappy.h"
+ #include "snappy-internal.h"
+ #include "snappy-sinksource.h"
++#include "snappy.h"
+ 
+ #if !defined(SNAPPY_HAVE_SSSE3)
+ // __SSSE3__ is defined by GCC and Clang. Visual Studio doesn't target SIMD
+ // support between SSE2 and AVX (so SSSE3 instructions require AVX support), and
+ // defines __AVX__ when AVX support is available.
+ #if defined(__SSSE3__) || defined(__AVX__)
+ #define SNAPPY_HAVE_SSSE3 1
+ #else
+@@ -63,44 +63,101 @@
+ #endif
+ 
+ #if SNAPPY_HAVE_BMI2
+ // Please do not replace with <x86intrin.h>. or with headers that assume more
+ // advanced SSE versions without checking with all the OWNERS.
+ #include <immintrin.h>
+ #endif
+ 
+-#include <stdio.h>
+-
+ #include <algorithm>
++#include <array>
++#include <cstddef>
++#include <cstdint>
++#include <cstdio>
++#include <cstring>
+ #include <string>
++#include <utility>
+ #include <vector>
+ 
+ namespace snappy {
+ 
++namespace {
++
++// The amount of slop bytes writers are using for unconditional copies.
++constexpr int kSlopBytes = 64;
++
++using internal::char_table;
+ using internal::COPY_1_BYTE_OFFSET;
+ using internal::COPY_2_BYTE_OFFSET;
++using internal::COPY_4_BYTE_OFFSET;
++using internal::kMaximumTagLength;
+ using internal::LITERAL;
+-using internal::char_table;
+-using internal::kMaximumTagLength;
++
++// We translate the information encoded in a tag through a lookup table to a
++// format that requires fewer instructions to decode. Effectively we store
++// the length minus the tag part of the offset. The lowest significant byte
++// thus stores the length. While total length - offset is given by
++// entry - ExtractOffset(type). The nice thing is that the subtraction
++// immediately sets the flags for the necessary check that offset >= length.
++// This folds the cmp with sub. We engineer the long literals and copy-4 to
++// always fail this check, so their presence doesn't affect the fast path.
++// To prevent literals from triggering the guard against offset < length (offset
++// does not apply to literals) the table is giving them a spurious offset of
++// 256.
++inline constexpr int16_t MakeEntry(int16_t len, int16_t offset) {
++  return len - (offset << 8);
++}
++
++inline constexpr int16_t LengthMinusOffset(int data, int type) {
++  return type == 3   ? 0xFF                    // copy-4 (or type == 3)
++         : type == 2 ? MakeEntry(data + 1, 0)  // copy-2
++         : type == 1 ? MakeEntry((data & 7) + 4, data >> 3)  // copy-1
++         : data < 60 ? MakeEntry(data + 1, 1)  // note spurious offset.
++                     : 0xFF;                   // long literal
++}
++
++inline constexpr int16_t LengthMinusOffset(uint8_t tag) {
++  return LengthMinusOffset(tag >> 2, tag & 3);
++}
++
++template <size_t... Ints>
++struct index_sequence {};
++
++template <std::size_t N, size_t... Is>
++struct make_index_sequence : make_index_sequence<N - 1, N - 1, Is...> {};
++
++template <size_t... Is>
++struct make_index_sequence<0, Is...> : index_sequence<Is...> {};
++
++template <size_t... seq>
++constexpr std::array<int16_t, 256> MakeTable(index_sequence<seq...>) {
++  return std::array<int16_t, 256>{LengthMinusOffset(seq)...};
++}
++
++// We maximally co-locate the two tables so that only one register needs to be
++// reserved for the table address.
++struct {
++  alignas(64) const std::array<int16_t, 256> length_minus_offset;
++  uint32_t extract_masks[4];  // Used for extracting offset based on tag type.
++} table = {MakeTable(make_index_sequence<256>{}), {0, 0xFF, 0xFFFF, 0}};
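A self-contained sketch (not from the patch) that mirrors the MakeEntry / LengthMinusOffset helpers above and checks how one copy-1 tag decodes. For length 7 and offset 300 the tag byte is 0x2D and the following input byte holds the low offset bits 0x2C; the entry's low byte recovers the length, and subtracting the extracted offset byte yields length - offset, which is what the offset >= length check described in the comment consumes.

#include <cstdint>

// Mirrors the constexpr helpers defined in the hunk above.
constexpr int16_t MakeEntry(int16_t len, int16_t offset) {
  return len - (offset << 8);
}
constexpr int16_t LengthMinusOffset(int data, int type) {
  return type == 3   ? 0xFF
         : type == 2 ? MakeEntry(data + 1, 0)
         : type == 1 ? MakeEntry((data & 7) + 4, data >> 3)
         : data < 60 ? MakeEntry(data + 1, 1)
                     : 0xFF;
}

// Copy-1 tag for length 7, offset 300: low 2 bits = 01 (copy-1),
// bits 2..4 = len - 4 = 3, bits 5..7 = offset >> 8 = 1, so the tag is 0x2D;
// the next input byte carries offset & 0xFF = 0x2C.
constexpr uint8_t kTag = 0x2D;
constexpr int16_t kEntry = LengthMinusOffset(kTag >> 2, kTag & 3);
static_assert(kEntry == 7 - 256, "entry = len - (high offset bits << 8)");
static_assert((kEntry & 0xFF) == 7, "low byte of the entry is the copy length");
static_assert(kEntry - 0x2C == 7 - 300, "entry - extracted offset byte = len - offset");

int main() { return 0; }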
+ 
+ // Any hash function will produce a valid compressed bitstream, but a good
+ // hash function reduces the number of collisions and thus yields better
+ // compression for compressible input, and more speed for incompressible
+ // input. Of course, it doesn't hurt if the hash function is reasonably fast
+ // either, as it gets called a lot.
+-static inline uint32 HashBytes(uint32 bytes, int shift) {
+-  uint32 kMul = 0x1e35a7bd;
+-  return (bytes * kMul) >> shift;
+-}
+-static inline uint32 Hash(const char* p, int shift) {
+-  return HashBytes(UNALIGNED_LOAD32(p), shift);
++inline uint32_t HashBytes(uint32_t bytes, uint32_t mask) {
++  constexpr uint32_t kMagic = 0x1e35a7bd;
++  return ((kMagic * bytes) >> (32 - kMaxHashTableBits)) & mask;
+ }
+ 
+-size_t MaxCompressedLength(size_t source_len) {
++}  // namespace
++
++size_t MaxCompressedLength(size_t source_bytes) {
+   // Compressed data can be defined as:
+   //    compressed := item* literal*
+   //    item       := literal* copy
+   //
+   // The trailing literal sequence has a space blowup of at most 62/60
+   // since a literal of length 60 needs one tag byte + one extra byte
+   // for length information.
+   //
+@@ -111,93 +168,238 @@ size_t MaxCompressedLength(size_t source
+   // to at most the 62/60 blowup for representing literals.
+   //
+   // Suppose the "copy" op copies 5 bytes of data.  If the offset is big
+   // enough, it will take 5 bytes to encode the copy op.  Therefore the
+   // worst case here is a one-byte literal followed by a five-byte copy.
+   // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
+   //
+   // This last factor dominates the blowup, so the final estimate is:
+-  return 32 + source_len + source_len/6;
++  return 32 + source_bytes + source_bytes / 6;
+ }
+ 
+ namespace {
+ 
+ void UnalignedCopy64(const void* src, void* dst) {
+   char tmp[8];
+-  memcpy(tmp, src, 8);
+-  memcpy(dst, tmp, 8);
++  std::memcpy(tmp, src, 8);
++  std::memcpy(dst, tmp, 8);
+ }
+ 
+ void UnalignedCopy128(const void* src, void* dst) {
+-  // memcpy gets vectorized when the appropriate compiler options are used.
+-  // For example, x86 compilers targeting SSE2+ will optimize to an SSE2 load
+-  // and store.
++  // std::memcpy() gets vectorized when the appropriate compiler options are
++  // used. For example, x86 compilers targeting SSE2+ will optimize to an SSE2
++  // load and store.
+   char tmp[16];
+-  memcpy(tmp, src, 16);
+-  memcpy(dst, tmp, 16);
++  std::memcpy(tmp, src, 16);
++  std::memcpy(dst, tmp, 16);
++}
++
++template <bool use_16bytes_chunk>
++inline void ConditionalUnalignedCopy128(const char* src, char* dst) {
++  if (use_16bytes_chunk) {
++    UnalignedCopy128(src, dst);
++  } else {
++    UnalignedCopy64(src, dst);
++    UnalignedCopy64(src + 8, dst + 8);
++  }
+ }
+ 
+ // Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) a byte at a time. Used
+ // for handling COPY operations where the input and output regions may overlap.
+ // For example, suppose:
+ //    src       == "ab"
+ //    op        == src + 2
+ //    op_limit  == op + 20
+ // After IncrementalCopySlow(src, op, op_limit), the result will have eleven
+ // copies of "ab"
+ //    ababababababababababab
+-// Note that this does not match the semantics of either memcpy() or memmove().
++// Note that this does not match the semantics of either std::memcpy() or
++// std::memmove().
+ inline char* IncrementalCopySlow(const char* src, char* op,
+                                  char* const op_limit) {
+   // TODO: Remove pragma when LLVM is aware this
+   // function is only called in cold regions and when cold regions don't get
+   // vectorized or unrolled.
+ #ifdef __clang__
+ #pragma clang loop unroll(disable)
+ #endif
+   while (op < op_limit) {
+     *op++ = *src++;
+   }
+   return op_limit;
+ }
+ 
+ #if SNAPPY_HAVE_SSSE3
+ 
+-// This is a table of shuffle control masks that can be used as the source
++// Computes the bytes for shuffle control mask (please read comments on
++// 'pattern_generation_masks' as well) for the given index_offset and
++// pattern_size. For example, when the 'offset' is 6, it will generate a
++// repeating pattern of size 6. So, the first 16 byte indexes will correspond to
++// the pattern-bytes {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3} and the
++// next 16 byte indexes will correspond to the pattern-bytes {4, 5, 0, 1, 2, 3,
++// 4, 5, 0, 1, 2, 3, 4, 5, 0, 1}. These byte index sequences are generated by
++// calling MakePatternMaskBytes(0, 6, index_sequence<16>()) and
++// MakePatternMaskBytes(16, 6, index_sequence<16>()) respectively.
++template <size_t... indexes>
++inline constexpr std::array<char, sizeof...(indexes)> MakePatternMaskBytes(
++    int index_offset, int pattern_size, index_sequence<indexes...>) {
++  return {static_cast<char>((index_offset + indexes) % pattern_size)...};
++}
++
++// Computes the shuffle control mask bytes array for given pattern-sizes and
++// returns an array.
++template <size_t... pattern_sizes_minus_one>
++inline constexpr std::array<std::array<char, sizeof(__m128i)>,
++                            sizeof...(pattern_sizes_minus_one)>
++MakePatternMaskBytesTable(int index_offset,
++                          index_sequence<pattern_sizes_minus_one...>) {
++  return {MakePatternMaskBytes(
++      index_offset, pattern_sizes_minus_one + 1,
++      make_index_sequence</*indexes=*/sizeof(__m128i)>())...};
++}
++
++// This is an array of shuffle control masks that can be used as the source
+ // operand for PSHUFB to permute the contents of the destination XMM register
+ // into a repeating byte pattern.
+-alignas(16) const char pshufb_fill_patterns[7][16] = {
+-  {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-  {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1},
+-  {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0},
+-  {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},
+-  {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0},
+-  {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3},
+-  {0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1},
+-};
++alignas(16) constexpr std::array<std::array<char, sizeof(__m128i)>,
++                                 16> pattern_generation_masks =
++    MakePatternMaskBytesTable(
++        /*index_offset=*/0,
++        /*pattern_sizes_minus_one=*/make_index_sequence<16>());
++
++// Similar to 'pattern_generation_masks', this table is used to "rotate" the
++// pattern so that we can copy the *next 16 bytes* consistent with the pattern.
++// Basically, pattern_reshuffle_masks is a continuation of
++// pattern_generation_masks. It follows that pattern_reshuffle_masks is the same as
++// pattern_generation_masks for offsets 1, 2, 4, 8 and 16.
++alignas(16) constexpr std::array<std::array<char, sizeof(__m128i)>,
++                                 16> pattern_reshuffle_masks =
++    MakePatternMaskBytesTable(
++        /*index_offset=*/16,
++        /*pattern_sizes_minus_one=*/make_index_sequence<16>());
++
++SNAPPY_ATTRIBUTE_ALWAYS_INLINE
++static inline __m128i LoadPattern(const char* src, const size_t pattern_size) {
++  __m128i generation_mask = _mm_load_si128(reinterpret_cast<const __m128i*>(
++      pattern_generation_masks[pattern_size - 1].data()));
++  // Uninitialized bytes are masked out by the shuffle mask.
++  // TODO: remove annotation and macro defs once MSan is fixed.
++  SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(src + pattern_size, 16 - pattern_size);
++  return _mm_shuffle_epi8(
++      _mm_loadu_si128(reinterpret_cast<const __m128i*>(src)), generation_mask);
++}
++
++SNAPPY_ATTRIBUTE_ALWAYS_INLINE
++static inline std::pair<__m128i /* pattern */, __m128i /* reshuffle_mask */>
++LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) {
++  __m128i pattern = LoadPattern(src, pattern_size);
++
++  // This mask will generate the next 16 bytes in-place. Doing so enables us to
++  // write data by at most 4 _mm_storeu_si128.
++  //
++  // For example, suppose pattern is:        abcdefabcdefabcd
++  // Shuffling with this mask will generate: efabcdefabcdefab
++  // Shuffling again will generate:          cdefabcdefabcdef
++  __m128i reshuffle_mask = _mm_load_si128(reinterpret_cast<const __m128i*>(
++      pattern_reshuffle_masks[pattern_size - 1].data()));
++  return {pattern, reshuffle_mask};
++}
+ 
+ #endif  // SNAPPY_HAVE_SSSE3
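A scalar, SSE-free sketch (not from the patch) of the indexing rule the two mask tables encode: byte i of 16-byte block k takes pattern byte (16*k + i) % pattern_size. It reproduces the two example sequences from the comments above for a pattern of size 6.

#include <cstdio>

int main() {
  const char pattern[] = "abcdef";   // pattern_size == 6, as in the comments above
  const int pattern_size = 6;
  char out[32];
  // Block 0 uses indexes (0 + i) % 6, block 1 uses indexes (16 + i) % 6.
  for (int i = 0; i < 32; ++i) out[i] = pattern[i % pattern_size];
  std::printf("%.16s\n", out);       // abcdefabcdefabcd  (generation mask)
  std::printf("%.16s\n", out + 16);  // efabcdefabcdefab  (reshuffle mask)
  return 0;
}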
+ 
+-// Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) but faster than
++// Fallback for when we need to copy while extending the pattern, for example
++// copying 10 bytes from 3 positions back abc -> abcabcabcabca.
++//
++// REQUIRES: [dst - offset, dst + 64) is a valid address range.
++SNAPPY_ATTRIBUTE_ALWAYS_INLINE
++static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
++#if SNAPPY_HAVE_SSSE3
++  if (SNAPPY_PREDICT_TRUE(offset <= 16)) {
++    switch (offset) {
++      case 0:
++        return false;
++      case 1: {
++        std::memset(dst, dst[-1], 64);
++        return true;
++      }
++      case 2:
++      case 4:
++      case 8:
++      case 16: {
++        __m128i pattern = LoadPattern(dst - offset, offset);
++        for (int i = 0; i < 4; i++) {
++          _mm_storeu_si128(reinterpret_cast<__m128i*>(dst + 16 * i), pattern);
++        }
++        return true;
++      }
++      default: {
++        auto pattern_and_reshuffle_mask =
++            LoadPatternAndReshuffleMask(dst - offset, offset);
++        __m128i pattern = pattern_and_reshuffle_mask.first;
++        __m128i reshuffle_mask = pattern_and_reshuffle_mask.second;
++        for (int i = 0; i < 4; i++) {
++          _mm_storeu_si128(reinterpret_cast<__m128i*>(dst + 16 * i), pattern);
++          pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
++        }
++        return true;
++      }
++    }
++  }
++#else
++  if (SNAPPY_PREDICT_TRUE(offset < 16)) {
++    if (SNAPPY_PREDICT_FALSE(offset == 0)) return false;
++    // Extend the pattern to the first 16 bytes.
++    for (int i = 0; i < 16; i++) dst[i] = (dst - offset)[i];
++    // Find a multiple of pattern >= 16.
++    static std::array<uint8_t, 16> pattern_sizes = []() {
++      std::array<uint8_t, 16> res;
++      for (int i = 1; i < 16; i++) res[i] = (16 / i + 1) * i;
++      return res;
++    }();
++    offset = pattern_sizes[offset];
++    for (int i = 1; i < 4; i++) {
++      std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
++    }
++    return true;
++  }
++#endif  // SNAPPY_HAVE_SSSE3
++
++  // Very rare.
++  for (int i = 0; i < 4; i++) {
++    std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
++  }
++  return true;
++}
++
++// Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than
+ // IncrementalCopySlow. buf_limit is the address past the end of the writable
+ // region of the buffer.
+ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
+                              char* const buf_limit) {
++#if SNAPPY_HAVE_SSSE3
++  constexpr int big_pattern_size_lower_bound = 16;
++#else
++  constexpr int big_pattern_size_lower_bound = 8;
++#endif
++
+   // Terminology:
+   //
+   // slop = buf_limit - op
+   // pat  = op - src
+-  // len  = limit - op
++  // len  = op_limit - op
+   assert(src < op);
+-  assert(op <= op_limit);
++  assert(op < op_limit);
+   assert(op_limit <= buf_limit);
+-  // NOTE: The compressor always emits 4 <= len <= 64. It is ok to assume that
+-  // to optimize this function but we have to also handle other cases in case
+-  // the input does not satisfy these conditions.
++  // NOTE: The copy tags use 3 or 6 bits to store the copy length, so len <= 64.
++  assert(op_limit - op <= 64);
++  // NOTE: In practice the compressor always emits len >= 4, so it is ok to
++  // assume that to optimize this function, but this is not guaranteed by the
++  // compression format, so we have to also handle len < 4 in case the input
++  // does not satisfy these conditions.
+ 
+   size_t pattern_size = op - src;
+   // The cases are split into different branches to allow the branch predictor,
+   // FDO, and static prediction hints to work better. For each input we list the
+   // ratio of invocations that match each condition.
+   //
+   // input        slop < 16   pat < 8  len > 16
+   // ------------------------------------------
+@@ -211,53 +413,88 @@ inline char* IncrementalCopy(const char*
+   //
+   // It is very rare that we don't have enough slop for doing block copies. It
+   // is also rare that we need to expand a pattern. Small patterns are common
+   // for incompressible formats and for those we are plenty fast already.
+   // Lengths are normally not greater than 16 but they vary depending on the
+   // input. In general if we always predict len <= 16 it would be an ok
+   // prediction.
+   //
+-  // In order to be fast we want a pattern >= 8 bytes and an unrolled loop
+-  // copying 2x 8 bytes at a time.
++  // In order to be fast we want a pattern >= 16 bytes (or 8 bytes in non-SSE)
++  // and an unrolled loop copying 1x 16 bytes (or 2x 8 bytes in non-SSE) at a
++  // time.
+ 
+-  // Handle the uncommon case where pattern is less than 8 bytes.
+-  if (SNAPPY_PREDICT_FALSE(pattern_size < 8)) {
++  // Handle the uncommon case where pattern is less than 16 (or 8 in non-SSE)
++  // bytes.
++  if (pattern_size < big_pattern_size_lower_bound) {
+ #if SNAPPY_HAVE_SSSE3
+     // Load the first eight bytes into an 128-bit XMM register, then use PSHUFB
+     // to permute the register's contents in-place into a repeating sequence of
+     // the first "pattern_size" bytes.
+     // For example, suppose:
+     //    src       == "abc"
+     //    op        == op + 3
+     // After _mm_shuffle_epi8(), "pattern" will have five copies of "abc"
+     // followed by one byte of slop: abcabcabcabcabca.
+     //
+     // The non-SSE fallback implementation suffers from store-forwarding stalls
+     // because its loads and stores partly overlap. By expanding the pattern
+     // in-place, we avoid the penalty.
+-    if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 16)) {
+-      const __m128i shuffle_mask = _mm_load_si128(
+-          reinterpret_cast<const __m128i*>(pshufb_fill_patterns)
+-          + pattern_size - 1);
+-      const __m128i pattern = _mm_shuffle_epi8(
+-          _mm_loadl_epi64(reinterpret_cast<const __m128i*>(src)), shuffle_mask);
+-      // Uninitialized bytes are masked out by the shuffle mask.
+-      // TODO: remove annotation and macro defs once MSan is fixed.
+-      SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(&pattern, sizeof(pattern));
+-      pattern_size *= 16 / pattern_size;
+-      char* op_end = std::min(op_limit, buf_limit - 15);
+-      while (op < op_end) {
++
++    // Typically, the op_limit is the gating factor so try to simplify the loop
++    // based on that.
++    if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
++      auto pattern_and_reshuffle_mask =
++          LoadPatternAndReshuffleMask(src, pattern_size);
++      __m128i pattern = pattern_and_reshuffle_mask.first;
++      __m128i reshuffle_mask = pattern_and_reshuffle_mask.second;
++
++      // There is at least one, and at most four 16-byte blocks. Writing four
++      // conditionals instead of a loop allows FDO to layout the code with
++      // respect to the actual probabilities of each length.
++      // TODO: Replace with loop with trip count hint.
++      _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
++
++      if (op + 16 < op_limit) {
++        pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
++        _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 16), pattern);
++      }
++      if (op + 32 < op_limit) {
++        pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
++        _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 32), pattern);
++      }
++      if (op + 48 < op_limit) {
++        pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
++        _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 48), pattern);
++      }
++      return op_limit;
++    }
++    char* const op_end = buf_limit - 15;
++    if (SNAPPY_PREDICT_TRUE(op < op_end)) {
++      auto pattern_and_reshuffle_mask =
++          LoadPatternAndReshuffleMask(src, pattern_size);
++      __m128i pattern = pattern_and_reshuffle_mask.first;
++      __m128i reshuffle_mask = pattern_and_reshuffle_mask.second;
++
++      // This code path is relatively cold however so we save code size
++      // by avoiding unrolling and vectorizing.
++      //
++      // TODO: Remove pragma when cold regions don't get
++      // vectorized or unrolled.
++#ifdef __clang__
++#pragma clang loop unroll(disable)
++#endif
++      do {
+         _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
+-        op += pattern_size;
+-      }
+-      if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
++        pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
++        op += 16;
++      } while (SNAPPY_PREDICT_TRUE(op < op_end));
+     }
+-    return IncrementalCopySlow(src, op, op_limit);
+-#else  // !SNAPPY_HAVE_SSSE3
++    return IncrementalCopySlow(op - pattern_size, op, op_limit);
++#else   // !SNAPPY_HAVE_SSSE3
+     // If plenty of buffer space remains, expand the pattern to at least 8
+     // bytes. The way the following loop is written, we need 8 bytes of buffer
+     // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10
+     // bytes if pattern_size is 2.  Precisely encoding that is probably not
+     // worthwhile; instead, invoke the slow path if we cannot write 11 bytes
+     // (because 11 are required in the worst case).
+     if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 11)) {
+       while (pattern_size < 8) {
+@@ -266,91 +503,83 @@ inline char* IncrementalCopy(const char*
+         pattern_size *= 2;
+       }
+       if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
+     } else {
+       return IncrementalCopySlow(src, op, op_limit);
+     }
+ #endif  // SNAPPY_HAVE_SSSE3
+   }
+-  assert(pattern_size >= 8);
++  assert(pattern_size >= big_pattern_size_lower_bound);
++  constexpr bool use_16bytes_chunk = big_pattern_size_lower_bound == 16;
+ 
+-  // Copy 2x 8 bytes at a time. Because op - src can be < 16, a single
+-  // UnalignedCopy128 might overwrite data in op. UnalignedCopy64 is safe
+-  // because expanding the pattern to at least 8 bytes guarantees that
+-  // op - src >= 8.
++  // Copy 1x 16 bytes (or 2x 8 bytes in non-SSE) at a time. Because op - src can
++  // be < 16 in non-SSE, a single UnalignedCopy128 might overwrite data in op.
++  // UnalignedCopy64 is safe because expanding the pattern to at least 8 bytes
++  // guarantees that op - src >= 8.
+   //
+   // Typically, the op_limit is the gating factor so try to simplify the loop
+   // based on that.
+-  if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 16)) {
++  if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
+     // There is at least one, and at most four 16-byte blocks. Writing four
+     // conditionals instead of a loop allows FDO to layout the code with respect
+     // to the actual probabilities of each length.
+     // TODO: Replace with loop with trip count hint.
+-    UnalignedCopy64(src, op);
+-    UnalignedCopy64(src + 8, op + 8);
+-
++    ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
+     if (op + 16 < op_limit) {
+-      UnalignedCopy64(src + 16, op + 16);
+-      UnalignedCopy64(src + 24, op + 24);
++      ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 16, op + 16);
+     }
+     if (op + 32 < op_limit) {
+-      UnalignedCopy64(src + 32, op + 32);
+-      UnalignedCopy64(src + 40, op + 40);
++      ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 32, op + 32);
+     }
+     if (op + 48 < op_limit) {
+-      UnalignedCopy64(src + 48, op + 48);
+-      UnalignedCopy64(src + 56, op + 56);
++      ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 48, op + 48);
+     }
+     return op_limit;
+   }
+ 
+   // Fall back to doing as much as we can with the available slop in the
+   // buffer. This code path is relatively cold however so we save code size by
+   // avoiding unrolling and vectorizing.
+   //
+   // TODO: Remove pragma when cold regions don't get vectorized
+   // or unrolled.
+ #ifdef __clang__
+ #pragma clang loop unroll(disable)
+ #endif
+-  for (char *op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
+-    UnalignedCopy64(src, op);
+-    UnalignedCopy64(src + 8, op + 8);
++  for (char* op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
++    ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
+   }
+-  if (op >= op_limit)
+-    return op_limit;
++  if (op >= op_limit) return op_limit;
+ 
+   // We only take this branch if we didn't have enough slop and we can do a
+   // single 8 byte copy.
+   if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) {
+     UnalignedCopy64(src, op);
+     src += 8;
+     op += 8;
+   }
+   return IncrementalCopySlow(src, op, op_limit);
+ }
+ 
+ }  // namespace
+ 
+ template <bool allow_fast_path>
+-static inline char* EmitLiteral(char* op,
+-                                const char* literal,
+-                                int len) {
++static inline char* EmitLiteral(char* op, const char* literal, int len) {
+   // The vast majority of copies are below 16 bytes, for which a
+-  // call to memcpy is overkill. This fast path can sometimes
++  // call to std::memcpy() is overkill. This fast path can sometimes
+   // copy up to 15 bytes too much, but that is okay in the
+   // main loop, since we have a bit to go on for both sides:
+   //
+   //   - The input will always have kInputMarginBytes = 15 extra
+   //     available bytes, as long as we're in the main loop, and
+   //     if not, allow_fast_path = false.
+   //   - The output will always have 32 spare bytes (see
+   //     MaxCompressedLength).
+-  assert(len > 0);      // Zero-length literals are disallowed
++  assert(len > 0);  // Zero-length literals are disallowed
+   int n = len - 1;
+   if (allow_fast_path && len <= 16) {
+     // Fits in tag byte
+     *op++ = LITERAL | (n << 2);
+ 
+     UnalignedCopy128(literal, op);
+     return op + len;
+   }
+@@ -361,40 +590,47 @@ static inline char* EmitLiteral(char* op
+   } else {
+     int count = (Bits::Log2Floor(n) >> 3) + 1;
+     assert(count >= 1);
+     assert(count <= 4);
+     *op++ = LITERAL | ((59 + count) << 2);
+     // Encode in upcoming bytes.
+     // Write 4 bytes, though we may care about only 1 of them. The output buffer
+     // is guaranteed to have at least 3 more spaces left as 'len >= 61' holds
+-    // here and there is a memcpy of size 'len' below.
++    // here and there is a std::memcpy() of size 'len' below.
+     LittleEndian::Store32(op, n);
+     op += count;
+   }
+-  memcpy(op, literal, len);
++  std::memcpy(op, literal, len);
+   return op + len;
+ }
+ 
+ template <bool len_less_than_12>
+ static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
+   assert(len <= 64);
+   assert(len >= 4);
+   assert(offset < 65536);
+   assert(len_less_than_12 == (len < 12));
+ 
+-  if (len_less_than_12 && SNAPPY_PREDICT_TRUE(offset < 2048)) {
+-    // offset fits in 11 bits.  The 3 highest go in the top of the first byte,
+-    // and the rest go in the second byte.
+-    *op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0);
+-    *op++ = offset & 0xff;
++  if (len_less_than_12) {
++    uint32_t u = (len << 2) + (offset << 8);
++    uint32_t copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0);
++    uint32_t copy2 = COPY_2_BYTE_OFFSET - (1 << 2);
++    // It turns out that offset < 2048 is a difficult to predict branch.
++    // `perf record` shows this is the highest percentage of branch misses in
++    // benchmarks. This code produces branch free code, the data dependency
++    // chain that bottlenecks the throughput is so long that a few extra
++    // instructions are completely free (IPC << 6 because of data deps).
++    u += offset < 2048 ? copy1 : copy2;
++    LittleEndian::Store32(op, u);
++    op += offset < 2048 ? 2 : 3;
+   } else {
+     // Write 4 bytes, though we only care about 3 of them.  The output buffer
+     // is required to have some slack, so the extra byte won't overrun it.
+-    uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
++    uint32_t u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
+     LittleEndian::Store32(op, u);
+     op += 3;
+   }
+   return op;
+ }
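A sketch (not from the patch) checking that the branch-free copy-1 emission above produces the same two bytes as the old explicit encoding for the len = 7, offset = 300 example; it assumes the usual tag-type values LITERAL = 0, COPY_1_BYTE_OFFSET = 1, COPY_2_BYTE_OFFSET = 2 from snappy-internal.h (not shown in this hunk) and a little-endian host.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Assumed tag-type constants (defined in snappy-internal.h, not in this hunk).
constexpr uint32_t COPY_1_BYTE_OFFSET = 1;
constexpr uint32_t COPY_2_BYTE_OFFSET = 2;

int main() {
  const uint32_t len = 7, offset = 300;   // same example as the tag-table sketch above
  uint32_t u = (len << 2) + (offset << 8);
  uint32_t copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0);
  uint32_t copy2 = COPY_2_BYTE_OFFSET - (1 << 2);
  u += offset < 2048 ? copy1 : copy2;     // branch-free selection, as above
  unsigned char buf[4];
  std::memcpy(buf, &u, 4);                // stands in for LittleEndian::Store32
  std::printf("%02x %02x\n", buf[0], buf[1]);  // 2d 2c: tag byte + low offset byte
  return 0;
}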
+ 
+ template <bool len_less_than_12>
+ static inline char* EmitCopy(char* op, size_t offset, size_t len) {
+@@ -423,28 +659,28 @@ static inline char* EmitCopy(char* op, s
+     } else {
+       op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, len);
+     }
+     return op;
+   }
+ }
+ 
+ bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
+-  uint32 v = 0;
++  uint32_t v = 0;
+   const char* limit = start + n;
+   if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
+     *result = v;
+     return true;
+   } else {
+     return false;
+   }
+ }
+ 
+ namespace {
+-uint32 CalculateTableSize(uint32 input_size) {
++uint32_t CalculateTableSize(uint32_t input_size) {
+   static_assert(
+       kMaxHashTableSize >= kMinHashTableSize,
+       "kMaxHashTableSize should be greater or equal to kMinHashTableSize.");
+   if (input_size > kMaxHashTableSize) {
+     return kMaxHashTableSize;
+   }
+   if (input_size < kMinHashTableSize) {
+     return kMinHashTableSize;
+@@ -457,112 +693,65 @@ uint32 CalculateTableSize(uint32 input_s
+ 
+ namespace internal {
+ WorkingMemory::WorkingMemory(size_t input_size) {
+   const size_t max_fragment_size = std::min(input_size, kBlockSize);
+   const size_t table_size = CalculateTableSize(max_fragment_size);
+   size_ = table_size * sizeof(*table_) + max_fragment_size +
+           MaxCompressedLength(max_fragment_size);
+   mem_ = std::allocator<char>().allocate(size_);
+-  table_ = reinterpret_cast<uint16*>(mem_);
++  table_ = reinterpret_cast<uint16_t*>(mem_);
+   input_ = mem_ + table_size * sizeof(*table_);
+   output_ = input_ + max_fragment_size;
+ }
+ 
+ WorkingMemory::~WorkingMemory() {
+   std::allocator<char>().deallocate(mem_, size_);
+ }
+ 
+-uint16* WorkingMemory::GetHashTable(size_t fragment_size,
+-                                    int* table_size) const {
++uint16_t* WorkingMemory::GetHashTable(size_t fragment_size,
++                                      int* table_size) const {
+   const size_t htsize = CalculateTableSize(fragment_size);
+   memset(table_, 0, htsize * sizeof(*table_));
+   *table_size = htsize;
+   return table_;
+ }
+ }  // end namespace internal
+ 
+-// For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
+-// equal UNALIGNED_LOAD32(p + offset).  Motivation: On x86-64 hardware we have
+-// empirically found that overlapping loads such as
+-//  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
+-// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
+-//
+-// We have different versions for 64- and 32-bit; ideally we would avoid the
+-// two functions and just inline the UNALIGNED_LOAD64 call into
+-// GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
+-// enough to avoid loading the value multiple times then. For 64-bit, the load
+-// is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
+-// done at GetUint32AtOffset() time.
+-
+-#ifdef ARCH_K8
+-
+-typedef uint64 EightBytesReference;
+-
+-static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+-  return UNALIGNED_LOAD64(ptr);
+-}
+-
+-static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
+-  assert(offset >= 0);
+-  assert(offset <= 4);
+-  return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
+-}
+-
+-#else
+-
+-typedef const char* EightBytesReference;
+-
+-static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+-  return ptr;
+-}
+-
+-static inline uint32 GetUint32AtOffset(const char* v, int offset) {
+-  assert(offset >= 0);
+-  assert(offset <= 4);
+-  return UNALIGNED_LOAD32(v + offset);
+-}
+-
+-#endif
+-
+ // Flat array compression that does not emit the "uncompressed length"
+ // prefix. Compresses "input" string to the "*op" buffer.
+ //
+ // REQUIRES: "input" is at most "kBlockSize" bytes long.
+ // REQUIRES: "op" points to an array of memory that is at least
+ // "MaxCompressedLength(input.size())" in size.
+ // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+ // REQUIRES: "table_size" is a power of two
+ //
+ // Returns an "end" pointer into "op" buffer.
+ // "end - op" is the compressed size of "input".
+ namespace internal {
+-char* CompressFragment(const char* input,
+-                       size_t input_size,
+-                       char* op,
+-                       uint16* table,
+-                       const int table_size) {
++char* CompressFragment(const char* input, size_t input_size, char* op,
++                       uint16_t* table, const int table_size) {
+   // "ip" is the input pointer, and "op" is the output pointer.
+   const char* ip = input;
+   assert(input_size <= kBlockSize);
+   assert((table_size & (table_size - 1)) == 0);  // table must be power of two
+-  const int shift = 32 - Bits::Log2Floor(table_size);
+-  assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
++  const uint32_t mask = table_size - 1;
+   const char* ip_end = input + input_size;
+   const char* base_ip = ip;
+-  // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
+-  // [next_emit, ip_end) after the main loop.
+-  const char* next_emit = ip;
+ 
+   const size_t kInputMarginBytes = 15;
+   if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
+     const char* ip_limit = input + input_size - kInputMarginBytes;
+ 
+-    for (uint32 next_hash = Hash(++ip, shift); ; ) {
+-      assert(next_emit < ip);
++    for (uint32_t preload = LittleEndian::Load32(ip + 1);;) {
++      // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
++      // [next_emit, ip_end) after the main loop.
++      const char* next_emit = ip++;
++      uint64_t data = LittleEndian::Load64(ip);
+       // The body of this loop calls EmitLiteral once and then EmitCopy one or
+       // more times.  (The exception is that when we're close to exhausting
+       // the input we goto emit_remainder.)
+       //
+       // In the first iteration of this loop we're just starting, so
+       // there's nothing to copy, so calling EmitLiteral once is
+       // necessary.  And we only start a new iteration when the
+       // current iteration has determined that a call to EmitLiteral will
+@@ -578,123 +767,182 @@ char* CompressFragment(const char* input
+       // loss (~5% performance, ~0.1% density) for compressible data due to more
+       // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
+       // win since the compressor quickly "realizes" the data is incompressible
+       // and doesn't bother looking for matches everywhere.
+       //
+       // The "skip" variable keeps track of how many bytes there are since the
+       // last match; dividing it by 32 (ie. right-shifting by five) gives the
+       // number of bytes to move ahead for each iteration.
+-      uint32 skip = 32;
++      uint32_t skip = 32;
+ 
+-      const char* next_ip = ip;
+       const char* candidate;
+-      do {
+-        ip = next_ip;
+-        uint32 hash = next_hash;
+-        assert(hash == Hash(ip, shift));
+-        uint32 bytes_between_hash_lookups = skip >> 5;
++      if (ip_limit - ip >= 16) {
++        auto delta = ip - base_ip;
++        for (int j = 0; j < 4; ++j) {
++          for (int k = 0; k < 4; ++k) {
++            int i = 4 * j + k;
++            // These for-loops are meant to be unrolled. So we can freely
++            // special case the first iteration to use the value already
++            // loaded in preload.
++            uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data);
++            assert(dword == LittleEndian::Load32(ip + i));
++            uint32_t hash = HashBytes(dword, mask);
++            candidate = base_ip + table[hash];
++            assert(candidate >= base_ip);
++            assert(candidate < ip + i);
++            table[hash] = delta + i;
++            if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) {
++              *op = LITERAL | (i << 2);
++              UnalignedCopy128(next_emit, op + 1);
++              ip += i;
++              op = op + i + 2;
++              goto emit_match;
++            }
++            data >>= 8;
++          }
++          data = LittleEndian::Load64(ip + 4 * j + 4);
++        }
++        ip += 16;
++        skip += 16;
++      }
++      while (true) {
++        assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
++        uint32_t hash = HashBytes(data, mask);
++        uint32_t bytes_between_hash_lookups = skip >> 5;
+         skip += bytes_between_hash_lookups;
+-        next_ip = ip + bytes_between_hash_lookups;
++        const char* next_ip = ip + bytes_between_hash_lookups;
+         if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
++          ip = next_emit;
+           goto emit_remainder;
+         }
+-        next_hash = Hash(next_ip, shift);
+         candidate = base_ip + table[hash];
+         assert(candidate >= base_ip);
+         assert(candidate < ip);
+ 
+         table[hash] = ip - base_ip;
+-      } while (SNAPPY_PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
+-                                 UNALIGNED_LOAD32(candidate)));
++        if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
++                                LittleEndian::Load32(candidate))) {
++          break;
++        }
++        data = LittleEndian::Load32(next_ip);
++        ip = next_ip;
++      }
+ 
+       // Step 2: A 4-byte match has been found.  We'll later see if more
+       // than 4 bytes match.  But, prior to the match, input
+       // bytes [next_emit, ip) are unmatched.  Emit them as "literal bytes."
+       assert(next_emit + 16 <= ip_end);
+       op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, ip - next_emit);
+ 
+       // Step 3: Call EmitCopy, and then see if another EmitCopy could
+       // be our next move.  Repeat until we find no match for the
+       // input immediately after what was consumed by the last EmitCopy call.
+       //
+       // If we exit this loop normally then we need to call EmitLiteral next,
+       // though we don't yet know how big the literal will be.  We handle that
+       // by proceeding to the next iteration of the main loop.  We also can exit
+       // this loop via goto if we get close to exhausting the input.
+-      EightBytesReference input_bytes;
+-      uint32 candidate_bytes = 0;
+-
++    emit_match:
+       do {
+         // We have a 4-byte match at ip, and no need to emit any
+         // "literal bytes" prior to ip.
+         const char* base = ip;
+         std::pair<size_t, bool> p =
+-            FindMatchLength(candidate + 4, ip + 4, ip_end);
++            FindMatchLength(candidate + 4, ip + 4, ip_end, &data);
+         size_t matched = 4 + p.first;
+         ip += matched;
+         size_t offset = base - candidate;
+         assert(0 == memcmp(base, candidate, matched));
+         if (p.second) {
+           op = EmitCopy</*len_less_than_12=*/true>(op, offset, matched);
+         } else {
+           op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched);
+         }
+-        next_emit = ip;
+         if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
+           goto emit_remainder;
+         }
++        // Expect 5 bytes to match
++        assert((data & 0xFFFFFFFFFF) ==
++               (LittleEndian::Load64(ip) & 0xFFFFFFFFFF));
+         // We are now looking for a 4-byte match again.  We read
+         // table[Hash(ip, shift)] for that.  To improve compression,
+-        // we also update table[Hash(ip - 1, shift)] and table[Hash(ip, shift)].
+-        input_bytes = GetEightBytesAt(ip - 1);
+-        uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
+-        table[prev_hash] = ip - base_ip - 1;
+-        uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
+-        candidate = base_ip + table[cur_hash];
+-        candidate_bytes = UNALIGNED_LOAD32(candidate);
+-        table[cur_hash] = ip - base_ip;
+-      } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
+-
+-      next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
+-      ++ip;
++        // we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)].
++        table[HashBytes(LittleEndian::Load32(ip - 1), mask)] = ip - base_ip - 1;
++        uint32_t hash = HashBytes(data, mask);
++        candidate = base_ip + table[hash];
++        table[hash] = ip - base_ip;
++        // Measurements on the benchmarks have shown the following probabilities
++        // for the loop to exit (ie. avg. number of iterations is reciprocal).
++        // BM_Flat/6  txt1    p = 0.3-0.4
++        // BM_Flat/7  txt2    p = 0.35
++        // BM_Flat/8  txt3    p = 0.3-0.4
++        // BM_Flat/9  txt3    p = 0.34-0.4
++        // BM_Flat/10 pb      p = 0.4
++        // BM_Flat/11 gaviota p = 0.1
++        // BM_Flat/12 cp      p = 0.5
++        // BM_Flat/13 c       p = 0.3
++      } while (static_cast<uint32_t>(data) == LittleEndian::Load32(candidate));
++      // Because the least significant 5 bytes matched, we can utilize data
++      // for the next iteration.
++      preload = data >> 8;
+     }
+   }
+ 
+- emit_remainder:
++emit_remainder:
+   // Emit the remaining bytes as a literal
+-  if (next_emit < ip_end) {
+-    op = EmitLiteral</*allow_fast_path=*/false>(op, next_emit,
+-                                                ip_end - next_emit);
++  if (ip < ip_end) {
++    op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip);
+   }
+ 
+   return op;
+ }
+ }  // end namespace internal
+ 
+ // Called back at every compression call to trace parameters and sizes.
+ static inline void Report(const char *algorithm, size_t compressed_size,
+-                          size_t uncompressed_size) {}
++                          size_t uncompressed_size) {
++  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++  (void)algorithm;
++  (void)compressed_size;
++  (void)uncompressed_size;
++}
+ 
+ // Signature of output types needed by decompression code.
+ // The decompression code is templatized on a type that obeys this
+ // signature so that we do not pay virtual function call overhead in
+ // the middle of a tight decompression loop.
+ //
+ // class DecompressionWriter {
+ //  public:
+ //   // Called before decompression
+ //   void SetExpectedLength(size_t length);
+ //
++//   // For performance a writer may choose to donate the cursor variable to the
++//   // decompression function. The decompression will inject it in all its
++//   // function calls to the writer. Keeping the important output cursor as a
++//   // function local stack variable allows the compiler to keep it in
++//   // register, which greatly aids performance by avoiding loads and stores of
++//   // this variable in the fast path loop iterations.
++//   T GetOutputPtr() const;
++//
++//   // At end of decompression the loop donates the ownership of the cursor
++//   // variable back to the writer by calling this function.
++//   void SetOutputPtr(T op);
++//
+ //   // Called after decompression
+ //   bool CheckLength() const;
+ //
+ //   // Called repeatedly during decompression
+-//   bool Append(const char* ip, size_t length);
+-//   bool AppendFromSelf(uint32 offset, size_t length);
++//   // Each function get a pointer to the op (output pointer), that the writer
++//   // can use and update. Note it's important that these functions get fully
++//   // inlined so that no actual address of the local variable needs to be
++//   // taken.
++//   bool Append(const char* ip, size_t length, T* op);
++//   bool AppendFromSelf(uint32_t offset, size_t length, T* op);
+ //
+ //   // The rules for how TryFastAppend differs from Append are somewhat
+ //   // convoluted:
+ //   //
+ //   //  - TryFastAppend is allowed to decline (return false) at any
+ //   //    time, for any reason -- just "return false" would be
+ //   //    a perfectly legal implementation of TryFastAppend.
+ //   //    The intention is for TryFastAppend to allow a fast path
+@@ -706,308 +954,527 @@ static inline void Report(const char *al
+ //   //    afterwards, so that there is always enough space to read the
+ //   //    next tag without checking for a refill.
+ //   //  - TryFastAppend must always return decline (return false)
+ //   //    if <length> is 61 or more, as in this case the literal length is not
+ //   //    decoded fully. In practice, this should not be a big problem,
+ //   //    as it is unlikely that one would implement a fast path accepting
+ //   //    this much data.
+ //   //
+-//   bool TryFastAppend(const char* ip, size_t available, size_t length);
++//   bool TryFastAppend(const char* ip, size_t available, size_t length, T* op);
+ // };
+ 
+-static inline uint32 ExtractLowBytes(uint32 v, int n) {
++static inline uint32_t ExtractLowBytes(uint32_t v, int n) {
+   assert(n >= 0);
+   assert(n <= 4);
+ #if SNAPPY_HAVE_BMI2
+   return _bzhi_u32(v, 8 * n);
+ #else
+-  // This needs to be wider than uint32 otherwise `mask << 32` will be
++  // This needs to be wider than uint32_t otherwise `mask << 32` will be
+   // undefined.
+-  uint64 mask = 0xffffffff;
++  uint64_t mask = 0xffffffff;
+   return v & ~(mask << (8 * n));
+ #endif
+ }
+ 
+-static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
++static inline bool LeftShiftOverflows(uint8_t value, uint32_t shift) {
+   assert(shift < 32);
+-  static const uint8 masks[] = {
++  static const uint8_t masks[] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
+       0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe};
+   return (value & masks[shift]) != 0;
+ }
+ 
++inline bool Copy64BytesWithPatternExtension(ptrdiff_t dst, size_t offset) {
++  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++  (void)dst;
++  return offset != 0;
++}
++
++void MemCopy(char* dst, const uint8_t* src, size_t size) {
++  std::memcpy(dst, src, size);
++}
++
++void MemCopy(ptrdiff_t dst, const uint8_t* src, size_t size) {
++  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++  (void)dst;
++  (void)src;
++  (void)size;
++}
++
++void MemMove(char* dst, const void* src, size_t size) {
++  std::memmove(dst, src, size);
++}
++
++void MemMove(ptrdiff_t dst, const void* src, size_t size) {
++  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++  (void)dst;
++  (void)src;
++  (void)size;
++}
++
++SNAPPY_ATTRIBUTE_ALWAYS_INLINE
++size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) {
++  const uint8_t*& ip = *ip_p;
++  // This section is crucial for the throughput of the decompression loop.
++  // The latency of an iteration is fundamentally constrained by the
++  // following data chain on ip.
++  // ip -> c = Load(ip) -> ip1 = ip + 1 + (c & 3) -> ip = ip1 or ip2
++  //                       ip2 = ip + 2 + (c >> 2)
++  // This amounts to 8 cycles.
++  // 5 (load) + 1 (c & 3) + 1 (lea ip1, [ip + (c & 3) + 1]) + 1 (cmov)
++  size_t literal_len = *tag >> 2;
++  size_t tag_type = *tag;
++  bool is_literal;
++#if defined(__GNUC__) && defined(__x86_64__) && defined(__GCC_ASM_FLAG_OUTPUTS__)
++  // TODO clang misses the fact that the (c & 3) already correctly
++  // sets the zero flag.
++  asm("and $3, %k[tag_type]\n\t"
++      : [tag_type] "+r"(tag_type), "=@ccz"(is_literal));
++#else
++  tag_type &= 3;
++  is_literal = (tag_type == 0);
++#endif
++  // TODO
++  // This code is subtle. Loading the values first and then cmov has less
++  // latency than cmov ip and then load. However clang would move the loads
++  // in an optimization phase, volatile prevents this transformation.
++  // Note that we have enough slop bytes (64) that the loads are always valid.
++  size_t tag_literal =
++      static_cast<const volatile uint8_t*>(ip)[1 + literal_len];
++  size_t tag_copy = static_cast<const volatile uint8_t*>(ip)[tag_type];
++  *tag = is_literal ? tag_literal : tag_copy;
++  const uint8_t* ip_copy = ip + 1 + tag_type;
++  const uint8_t* ip_literal = ip + 2 + literal_len;
++  ip = is_literal ? ip_literal : ip_copy;
++#if defined(__GNUC__) && defined(__x86_64__)
++  // TODO Clang is "optimizing" zero-extension (a totally free operation);
++  // this means that after the cmov of tag, it emits another movzb tag,
++  // byte(tag). It really matters as it's on the core chain. This dummy asm
++  // persuades clang to do the zero-extension at the load (it's automatic),
++  // removing the expensive movzb.
++  asm("" ::"r"(tag_copy));
++#endif
++  return tag_type;
++}
++
++// Extract the offset for copy-1 and copy-2 returns 0 for literals or copy-4.
++inline uint32_t ExtractOffset(uint32_t val, size_t tag_type) {
++  return val & table.extract_masks[tag_type];
++};
++
++// Core decompression loop, when there is enough data available.
++// Decompresses the input buffer [ip, ip_limit) into the output buffer
++// [op, op_limit_min_slop). It returns when we are too close to the end of
++// the input buffer, when we exceed op_limit_min_slop, or when an exceptional
++// tag is encountered (a literal of length > 60, or a copy-4).
++// Returns {ip, op} at the points it stopped decoding.
++// TODO This function probably does not need to be inlined, as it
++// should decode large chunks at a time. This allows runtime dispatch to
++// implementations based on CPU capability (BMI2 / perhaps 32 / 64 byte memcpy).
++template <typename T>
++std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless(
++    const uint8_t* ip, const uint8_t* ip_limit, ptrdiff_t op, T op_base,
++    ptrdiff_t op_limit_min_slop) {
++  // We unroll the inner loop twice so we need twice the spare room.
++  op_limit_min_slop -= kSlopBytes;
++  if (2 * (kSlopBytes + 1) < ip_limit - ip && op < op_limit_min_slop) {
++    const uint8_t* const ip_limit_min_slop = ip_limit - 2 * kSlopBytes - 1;
++    ip++;
++    // ip points just past the tag, and we touch at most kSlopBytes
++    // in an iteration.
++    size_t tag = ip[-1];
++    do {
++      // The throughput is limited by instructions; unrolling the inner loop
++      // twice reduces the number of instructions checking limits and also
++      // leads to fewer movs.
++      for (int i = 0; i < 2; i++) {
++        const uint8_t* old_ip = ip;
++        assert(tag == ip[-1]);
++        // For literals tag_type = 0, hence we will always obtain 0 from
++        // ExtractLowBytes. For literals offset will thus be kLiteralOffset.
++        ptrdiff_t len_min_offset = table.length_minus_offset[tag];
++        size_t tag_type = AdvanceToNextTag(&ip, &tag);
++        uint32_t next = LittleEndian::Load32(old_ip);
++        size_t len = len_min_offset & 0xFF;
++        len_min_offset -= ExtractOffset(next, tag_type);
++        if (SNAPPY_PREDICT_FALSE(len_min_offset > 0)) {
++          if (SNAPPY_PREDICT_FALSE(len & 0x80)) {
++            // Exceptional case (long literal or copy 4).
++            // Actually doing the copy here is negatively impacting the main
++            // loop due to compiler incorrectly allocating a register for
++            // this fallback. Hence we just break.
++          break_loop:
++            ip = old_ip;
++            goto exit;
++          }
++          // Only copy-1 or copy-2 tags can get here.
++          assert(tag_type == 1 || tag_type == 2);
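++          // For copy tags len_min_offset equals len - offset, so reaching
++          // this branch means offset < len: the copy is either invalid
++          // (offset == 0 or it reaches before the buffer start) or needs
++          // its repeated pattern extended.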
++          std::ptrdiff_t delta = op + len_min_offset - len;
++          // Guard against copies before the buffer start.
++          if (SNAPPY_PREDICT_FALSE(delta < 0 ||
++                                  !Copy64BytesWithPatternExtension(
++                                      op_base + op, len - len_min_offset))) {
++            goto break_loop;
++          }
++          op += len;
++          continue;
++        }
++        std::ptrdiff_t delta = op + len_min_offset - len;
++        if (SNAPPY_PREDICT_FALSE(delta < 0)) {
++#if defined(__GNUC__) && defined(__x86_64__)
++          // TODO
++          // When validating, both code paths reduce to `op += len`. I.e.,
++          // this becomes effectively
++          //
++          // if (delta < 0) if (tag_type != 0) goto break_loop;
++          // op += len;
++          //
++          // The compiler interchanges the predictable and almost always false
++          // first if-statement with the completely unpredictable second
++          // if-statement, putting an unpredictable branch on every iteration.
++          // This empty asm is worth almost 2x, which I think qualifies for an
++          // award for the most load-bearing empty statement.
++          asm("");
++#endif
++
++          // Due to the spurious offset that literals have, this will trigger
++          // at the start of a block when op is still smaller than 256.
++          if (tag_type != 0) goto break_loop;
++          MemCopy(op_base + op, old_ip, 64);
++          op += len;
++          continue;
++        }
++
++        // For copies we need to copy from op_base + delta, for literals
++        // we need to copy from ip instead of from the stream.
++        const void* from =
++            tag_type ? reinterpret_cast<void*>(op_base + delta) : old_ip;
++        MemMove(op_base + op, from, 64);
++        op += len;
++      }
++    } while (ip < ip_limit_min_slop && op < op_limit_min_slop);
++  exit:
++    ip--;
++    assert(ip <= ip_limit);
++  }
++  return {ip, op};
++}
++
+ // Helper class for decompression
+ class SnappyDecompressor {
+  private:
+-  Source*       reader_;         // Underlying source of bytes to decompress
+-  const char*   ip_;             // Points to next buffered byte
+-  const char*   ip_limit_;       // Points just past buffered bytes
+-  uint32        peeked_;         // Bytes peeked from reader (need to skip)
+-  bool          eof_;            // Hit end of input without an error?
+-  char          scratch_[kMaximumTagLength];  // See RefillTag().
++  Source* reader_;        // Underlying source of bytes to decompress
++  const char* ip_;        // Points to next buffered byte
++  const char* ip_limit_;  // Points just past buffered bytes
++  // If ip < ip_limit_min_maxtaglen_ it's safe to read kMaxTagLength from
++  // buffer.
++  const char* ip_limit_min_maxtaglen_;
++  uint32_t peeked_;                  // Bytes peeked from reader (need to skip)
++  bool eof_;                         // Hit end of input without an error?
++  char scratch_[kMaximumTagLength];  // See RefillTag().
+ 
+   // Ensure that all of the tag metadata for the next tag is available
+   // in [ip_..ip_limit_-1].  Also ensures that [ip,ip+4] is readable even
+   // if (ip_limit_ - ip_ < 5).
+   //
+   // Returns true on success, false on error or end of input.
+   bool RefillTag();
+ 
++  void ResetLimit(const char* ip) {
++    ip_limit_min_maxtaglen_ =
++        ip_limit_ - std::min<ptrdiff_t>(ip_limit_ - ip, kMaximumTagLength - 1);
++  }
++
+  public:
+   explicit SnappyDecompressor(Source* reader)
+-      : reader_(reader),
+-        ip_(NULL),
+-        ip_limit_(NULL),
+-        peeked_(0),
+-        eof_(false) {
+-  }
++      : reader_(reader), ip_(NULL), ip_limit_(NULL), peeked_(0), eof_(false) {}
+ 
+   ~SnappyDecompressor() {
+     // Advance past any bytes we peeked at from the reader
+     reader_->Skip(peeked_);
+   }
+ 
+   // Returns true iff we have hit the end of the input without an error.
+-  bool eof() const {
+-    return eof_;
+-  }
++  bool eof() const { return eof_; }
+ 
+   // Read the uncompressed length stored at the start of the compressed data.
+   // On success, stores the length in *result and returns true.
+   // On failure, returns false.
+-  bool ReadUncompressedLength(uint32* result) {
+-    assert(ip_ == NULL);       // Must not have read anything yet
++  bool ReadUncompressedLength(uint32_t* result) {
++    assert(ip_ == NULL);  // Must not have read anything yet
+     // Length is encoded in 1..5 bytes
+     *result = 0;
+-    uint32 shift = 0;
++    uint32_t shift = 0;
+     while (true) {
+       if (shift >= 32) return false;
+       size_t n;
+       const char* ip = reader_->Peek(&n);
+       if (n == 0) return false;
+       const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+       reader_->Skip(1);
+-      uint32 val = c & 0x7f;
+-      if (LeftShiftOverflows(static_cast<uint8>(val), shift)) return false;
++      uint32_t val = c & 0x7f;
++      if (LeftShiftOverflows(static_cast<uint8_t>(val), shift)) return false;
+       *result |= val << shift;
+       if (c < 128) {
+         break;
+       }
+       shift += 7;
+     }
+     return true;
+   }
+ 
+   // Process the next item found in the input.
+   // Returns true if successful, false on error or end of input.
+   template <class Writer>
+ #if defined(__GNUC__) && defined(__x86_64__)
+   __attribute__((aligned(32)))
+ #endif
+-  void DecompressAllTags(Writer* writer) {
+-    // In x86, pad the function body to start 16 bytes later. This function has
+-    // a couple of hotspots that are highly sensitive to alignment: we have
+-    // observed regressions by more than 20% in some metrics just by moving the
+-    // exact same code to a different position in the benchmark binary.
+-    //
+-    // Putting this code on a 32-byte-aligned boundary + 16 bytes makes us hit
+-    // the "lucky" case consistently. Unfortunately, this is a very brittle
+-    // workaround, and future differences in code generation may reintroduce
+-    // this regression. If you experience a big, difficult to explain, benchmark
+-    // performance regression here, first try removing this hack.
+-#if defined(__GNUC__) && defined(__x86_64__)
+-    // Two 8-byte "NOP DWORD ptr [EAX + EAX*1 + 00000000H]" instructions.
+-    asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
+-    asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
+-#endif
+-
++  void
++  DecompressAllTags(Writer* writer) {
+     const char* ip = ip_;
++    ResetLimit(ip);
++    auto op = writer->GetOutputPtr();
+     // We could have put this refill fragment only at the beginning of the loop.
+     // However, duplicating it at the end of each branch gives the compiler more
+     // scope to optimize the <ip_limit_ - ip> expression based on the local
+     // context, which overall increases speed.
+-    #define MAYBE_REFILL() \
+-        if (ip_limit_ - ip < kMaximumTagLength) { \
+-          ip_ = ip; \
+-          if (!RefillTag()) return; \
+-          ip = ip_; \
++#define MAYBE_REFILL()                                      \
++  if (SNAPPY_PREDICT_FALSE(ip >= ip_limit_min_maxtaglen_)) { \
++    ip_ = ip;                                               \
++    if (SNAPPY_PREDICT_FALSE(!RefillTag())) goto exit;       \
++    ip = ip_;                                               \
++    ResetLimit(ip);                                         \
++  }                                                         \
++  preload = static_cast<uint8_t>(*ip)
++
++    // At the start of the for loop below the least significant byte of preload
++    // contains the tag.
++    uint32_t preload;
++    MAYBE_REFILL();
++    for (;;) {
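++      // Hand as much as possible to the branchless bulk decoder first; it
++      // returns once it nears either buffer end or meets an exceptional tag
++      // (long literal or copy-4). Writers without a flat output buffer
++      // (GetBase() returns null) skip this and use only the scalar loop.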
++      {
++        ptrdiff_t op_limit_min_slop;
++        auto op_base = writer->GetBase(&op_limit_min_slop);
++        if (op_base) {
++          auto res =
++              DecompressBranchless(reinterpret_cast<const uint8_t*>(ip),
++                                   reinterpret_cast<const uint8_t*>(ip_limit_),
++                                   op - op_base, op_base, op_limit_min_slop);
++          ip = reinterpret_cast<const char*>(res.first);
++          op = op_base + res.second;
++          MAYBE_REFILL();
+         }
+-
+-    MAYBE_REFILL();
+-    for ( ;; ) {
+-      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
++      }
++      const uint8_t c = static_cast<uint8_t>(preload);
++      ip++;
+ 
+       // Ratio of iterations that have LITERAL vs non-LITERAL for different
+       // inputs.
+       //
+       // input          LITERAL  NON_LITERAL
+       // -----------------------------------
+       // html|html4|cp   23%        77%
+       // urls            36%        64%
+       // jpg             47%        53%
+       // pdf             19%        81%
+       // txt[1-4]        25%        75%
+       // pb              24%        76%
+       // bin             24%        76%
+       if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
+         size_t literal_length = (c >> 2) + 1u;
+-        if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
++        if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length, &op)) {
+           assert(literal_length < 61);
+           ip += literal_length;
+           // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
+           // will not return true unless there's already at least five spare
+           // bytes in addition to the literal.
++          preload = static_cast<uint8_t>(*ip);
+           continue;
+         }
+         if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
+           // Long literal.
+           const size_t literal_length_length = literal_length - 60;
+           literal_length =
+               ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) +
+               1;
+           ip += literal_length_length;
+         }
+ 
+         size_t avail = ip_limit_ - ip;
+         while (avail < literal_length) {
+-          if (!writer->Append(ip, avail)) return;
++          if (!writer->Append(ip, avail, &op)) goto exit;
+           literal_length -= avail;
+           reader_->Skip(peeked_);
+           size_t n;
+           ip = reader_->Peek(&n);
+           avail = n;
+           peeked_ = avail;
+-          if (avail == 0) return;  // Premature end of input
++          if (avail == 0) goto exit;
+           ip_limit_ = ip + avail;
++          ResetLimit(ip);
+         }
+-        if (!writer->Append(ip, literal_length)) {
+-          return;
+-        }
++        if (!writer->Append(ip, literal_length, &op)) goto exit;
+         ip += literal_length;
+         MAYBE_REFILL();
+       } else {
+-        const size_t entry = char_table[c];
+-        const size_t trailer =
+-            ExtractLowBytes(LittleEndian::Load32(ip), entry >> 11);
+-        const size_t length = entry & 0xff;
+-        ip += entry >> 11;
++        if (SNAPPY_PREDICT_FALSE((c & 3) == COPY_4_BYTE_OFFSET)) {
++          const size_t copy_offset = LittleEndian::Load32(ip);
++          const size_t length = (c >> 2) + 1;
++          ip += 4;
+ 
+-        // copy_offset/256 is encoded in bits 8..10.  By just fetching
+-        // those bits, we get copy_offset (since the bit-field starts at
+-        // bit 8).
+-        const size_t copy_offset = entry & 0x700;
+-        if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
+-          return;
++          if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
++        } else {
++          const ptrdiff_t entry = table.length_minus_offset[c];
++          preload = LittleEndian::Load32(ip);
++          const uint32_t trailer = ExtractLowBytes(preload, c & 3);
++          const uint32_t length = entry & 0xff;
++          assert(length > 0);
++
++          // copy_offset/256 is encoded in bits 8..10.  By just fetching
++          // those bits, we get copy_offset (since the bit-field starts at
++          // bit 8).
++          const uint32_t copy_offset = trailer - entry + length;
++          if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
++
++          ip += (c & 3);
++          // By using the result of the previous load we reduce the critical
++          // dependency chain of ip to 4 cycles.
++          preload >>= (c & 3) * 8;
++          if (ip < ip_limit_min_maxtaglen_) continue;
+         }
+         MAYBE_REFILL();
+       }
+     }
+-
+ #undef MAYBE_REFILL
++  exit:
++    writer->SetOutputPtr(op);
+   }
+ };
+ 
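++// Returns how many bytes of tag metadata must be contiguous for tag byte
++// `tag`: the tag itself plus its immediate operand bytes. The constant
++// 0x05030201 packs that count per tag type (literal=1, copy-1=2, copy-2=3,
++// copy-4=5); long literals (length code 60..63) instead need the tag plus
++// 1..4 extra length bytes, i.e. (tag >> 2) - 58 in total.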
++constexpr uint32_t CalculateNeeded(uint8_t tag) {
++  return ((tag & 3) == 0 && tag >= (60 * 4))
++             ? (tag >> 2) - 58
++             : (0x05030201 >> ((tag * 8) & 31)) & 0xFF;
++}
++
++#if __cplusplus >= 201402L
++constexpr bool VerifyCalculateNeeded() {
++  for (int i = 0; i < 1; i++) {
++    if (CalculateNeeded(i) != (char_table[i] >> 11) + 1) return false;
++  }
++  return true;
++}
++
++// Make sure CalculateNeeded is correct by verifying it against the established
++// table encoding the number of added bytes needed.
++static_assert(VerifyCalculateNeeded(), "");
++#endif  // c++14
++
+ bool SnappyDecompressor::RefillTag() {
+   const char* ip = ip_;
+   if (ip == ip_limit_) {
+     // Fetch a new fragment from the reader
+-    reader_->Skip(peeked_);   // All peeked bytes are used up
++    reader_->Skip(peeked_);  // All peeked bytes are used up
+     size_t n;
+     ip = reader_->Peek(&n);
+     peeked_ = n;
+     eof_ = (n == 0);
+     if (eof_) return false;
+     ip_limit_ = ip + n;
+   }
+ 
+   // Read the tag character
+   assert(ip < ip_limit_);
+   const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+-  const uint32 entry = char_table[c];
+-  const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
++  // At this point make sure that the data for the next tag is consecutive.
++  // For copy 1 this means the next 2 bytes (tag and 1 byte offset)
++  // For copy 2 the next 3 bytes (tag and 2 byte offset)
++  // For copy 4 the next 5 bytes (tag and 4 byte offset)
++  // For all small literals we only need 1 byte, but for literals 60...63 the
++  // length is encoded in 1...4 extra bytes.
++  const uint32_t needed = CalculateNeeded(c);
+   assert(needed <= sizeof(scratch_));
+ 
+   // Read more bytes from reader if needed
+-  uint32 nbuf = ip_limit_ - ip;
++  uint32_t nbuf = ip_limit_ - ip;
+   if (nbuf < needed) {
+     // Stitch together bytes from ip and reader to form the word
+     // contents.  We store the needed bytes in "scratch_".  They
+     // will be consumed immediately by the caller since we do not
+     // read more than we need.
+-    memmove(scratch_, ip, nbuf);
++    std::memmove(scratch_, ip, nbuf);
+     reader_->Skip(peeked_);  // All peeked bytes are used up
+     peeked_ = 0;
+     while (nbuf < needed) {
+       size_t length;
+       const char* src = reader_->Peek(&length);
+       if (length == 0) return false;
+-      uint32 to_add = std::min<uint32>(needed - nbuf, length);
+-      memcpy(scratch_ + nbuf, src, to_add);
++      uint32_t to_add = std::min<uint32_t>(needed - nbuf, length);
++      std::memcpy(scratch_ + nbuf, src, to_add);
+       nbuf += to_add;
+       reader_->Skip(to_add);
+     }
+     assert(nbuf == needed);
+     ip_ = scratch_;
+     ip_limit_ = scratch_ + needed;
+   } else if (nbuf < kMaximumTagLength) {
+     // Have enough bytes, but move into scratch_ so that we do not
+     // read past end of input
+-    memmove(scratch_, ip, nbuf);
++    std::memmove(scratch_, ip, nbuf);
+     reader_->Skip(peeked_);  // All peeked bytes are used up
+     peeked_ = 0;
+     ip_ = scratch_;
+     ip_limit_ = scratch_ + nbuf;
+   } else {
+     // Pass pointer to buffer returned by reader_.
+     ip_ = ip;
+   }
+   return true;
+ }
+ 
+ template <typename Writer>
+ static bool InternalUncompress(Source* r, Writer* writer) {
+   // Read the uncompressed length from the front of the compressed input
+   SnappyDecompressor decompressor(r);
+-  uint32 uncompressed_len = 0;
++  uint32_t uncompressed_len = 0;
+   if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
+ 
+   return InternalUncompressAllTags(&decompressor, writer, r->Available(),
+                                    uncompressed_len);
+ }
+ 
+ template <typename Writer>
+ static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
+-                                      Writer* writer,
+-                                      uint32 compressed_len,
+-                                      uint32 uncompressed_len) {
++                                      Writer* writer, uint32_t compressed_len,
++                                      uint32_t uncompressed_len) {
+   Report("snappy_uncompress", compressed_len, uncompressed_len);
+ 
+   writer->SetExpectedLength(uncompressed_len);
+ 
+   // Process the entire input
+   decompressor->DecompressAllTags(writer);
+   writer->Flush();
+   return (decompressor->eof() && writer->CheckLength());
+ }
+ 
+-bool GetUncompressedLength(Source* source, uint32* result) {
++bool GetUncompressedLength(Source* source, uint32_t* result) {
+   SnappyDecompressor decompressor(source);
+   return decompressor.ReadUncompressedLength(result);
+ }
+ 
+ size_t Compress(Source* reader, Sink* writer) {
+   size_t written = 0;
+   size_t N = reader->Available();
+   const size_t uncompressed_size = N;
+   char ulength[Varint::kMax32];
+   char* p = Varint::Encode32(ulength, N);
+-  writer->Append(ulength, p-ulength);
++  writer->Append(ulength, p - ulength);
+   written += (p - ulength);
+ 
+   internal::WorkingMemory wmem(N);
+ 
+   while (N > 0) {
+     // Get next block to compress (without copying if possible)
+     size_t fragment_size;
+     const char* fragment = reader->Peek(&fragment_size);
+@@ -1017,35 +1484,35 @@ size_t Compress(Source* reader, Sink* wr
+ 
+     size_t pending_advance = 0;
+     if (bytes_read >= num_to_read) {
+       // Buffer returned by reader is large enough
+       pending_advance = num_to_read;
+       fragment_size = num_to_read;
+     } else {
+       char* scratch = wmem.GetScratchInput();
+-      memcpy(scratch, fragment, bytes_read);
++      std::memcpy(scratch, fragment, bytes_read);
+       reader->Skip(bytes_read);
+ 
+       while (bytes_read < num_to_read) {
+         fragment = reader->Peek(&fragment_size);
+         size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
+-        memcpy(scratch + bytes_read, fragment, n);
++        std::memcpy(scratch + bytes_read, fragment, n);
+         bytes_read += n;
+         reader->Skip(n);
+       }
+       assert(bytes_read == num_to_read);
+       fragment = scratch;
+       fragment_size = num_to_read;
+     }
+     assert(fragment_size == num_to_read);
+ 
+     // Get encoding table for compression
+     int table_size;
+-    uint16* table = wmem.GetHashTable(num_to_read, &table_size);
++    uint16_t* table = wmem.GetHashTable(num_to_read, &table_size);
+ 
+     // Compress input_fragment and append to dest
+     const int max_output = MaxCompressedLength(num_to_read);
+ 
+     // Need a scratch buffer for the output, in case the byte sink doesn't
+     // have room for us directly.
+ 
+     // Since we encode kBlockSize regions followed by a region
+@@ -1110,74 +1577,79 @@ class SnappyIOVecWriter {
+ #if !defined(NDEBUG)
+         output_iov_(iov),
+ #endif  // !defined(NDEBUG)
+         curr_iov_(iov),
+         curr_iov_output_(iov_count ? reinterpret_cast<char*>(iov->iov_base)
+                                    : nullptr),
+         curr_iov_remaining_(iov_count ? iov->iov_len : 0),
+         total_written_(0),
+-        output_limit_(-1) {}
+-
+-  inline void SetExpectedLength(size_t len) {
+-    output_limit_ = len;
++        output_limit_(-1) {
+   }
+ 
+-  inline bool CheckLength() const {
+-    return total_written_ == output_limit_;
+-  }
++  inline void SetExpectedLength(size_t len) { output_limit_ = len; }
+ 
+-  inline bool Append(const char* ip, size_t len) {
++  inline bool CheckLength() const { return total_written_ == output_limit_; }
++
++  inline bool Append(const char* ip, size_t len, char**) {
+     if (total_written_ + len > output_limit_) {
+       return false;
+     }
+ 
+     return AppendNoCheck(ip, len);
+   }
+ 
++  char* GetOutputPtr() { return nullptr; }
++  char* GetBase(ptrdiff_t*) { return nullptr; }
++  void SetOutputPtr(char* op) {
++    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++    (void)op;
++  }
++
+   inline bool AppendNoCheck(const char* ip, size_t len) {
+     while (len > 0) {
+       if (curr_iov_remaining_ == 0) {
+         // This iovec is full. Go to the next one.
+         if (curr_iov_ + 1 >= output_iov_end_) {
+           return false;
+         }
+         ++curr_iov_;
+         curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
+         curr_iov_remaining_ = curr_iov_->iov_len;
+       }
+ 
+       const size_t to_write = std::min(len, curr_iov_remaining_);
+-      memcpy(curr_iov_output_, ip, to_write);
++      std::memcpy(curr_iov_output_, ip, to_write);
+       curr_iov_output_ += to_write;
+       curr_iov_remaining_ -= to_write;
+       total_written_ += to_write;
+       ip += to_write;
+       len -= to_write;
+     }
+ 
+     return true;
+   }
+ 
+-  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
++  inline bool TryFastAppend(const char* ip, size_t available, size_t len,
++                            char**) {
+     const size_t space_left = output_limit_ - total_written_;
+     if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
+         curr_iov_remaining_ >= 16) {
+       // Fast path, used for the majority (about 95%) of invocations.
+       UnalignedCopy128(ip, curr_iov_output_);
+       curr_iov_output_ += len;
+       curr_iov_remaining_ -= len;
+       total_written_ += len;
+       return true;
+     }
+ 
+     return false;
+   }
+ 
+-  inline bool AppendFromSelf(size_t offset, size_t len) {
++  inline bool AppendFromSelf(size_t offset, size_t len, char**) {
+     // See SnappyArrayWriter::AppendFromSelf for an explanation of
+     // the "offset - 1u" trick.
+     if (offset - 1u >= total_written_) {
+       return false;
+     }
+     const size_t space_left = output_limit_ - total_written_;
+     if (len > space_left) {
+       return false;
+@@ -1223,16 +1695,17 @@ class SnappyIOVecWriter {
+           ++curr_iov_;
+           curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
+           curr_iov_remaining_ = curr_iov_->iov_len;
+           continue;
+         }
+         if (to_copy > len) {
+           to_copy = len;
+         }
++        assert(to_copy > 0);
+ 
+         IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset),
+                         curr_iov_output_, curr_iov_output_ + to_copy,
+                         curr_iov_output_ + curr_iov_remaining_);
+         curr_iov_output_ += to_copy;
+         curr_iov_remaining_ -= to_copy;
+         from_iov_offset += to_copy;
+         total_written_ += to_copy;
+@@ -1265,147 +1738,175 @@ bool RawUncompressToIOVec(Source* compre
+ // A type that writes to a flat array.
+ // Note that this is not a "ByteSink", but a type that matches the
+ // Writer template argument to SnappyDecompressor::DecompressAllTags().
+ class SnappyArrayWriter {
+  private:
+   char* base_;
+   char* op_;
+   char* op_limit_;
++  // If op < op_limit_min_slop_ then it's safe to unconditionally write
++  // kSlopBytes starting at op.
++  char* op_limit_min_slop_;
+ 
+  public:
+   inline explicit SnappyArrayWriter(char* dst)
+       : base_(dst),
+         op_(dst),
+-        op_limit_(dst) {
+-  }
++        op_limit_(dst),
++        op_limit_min_slop_(dst) {}  // Safe default see invariant.
+ 
+   inline void SetExpectedLength(size_t len) {
+     op_limit_ = op_ + len;
+-  }
+-
+-  inline bool CheckLength() const {
+-    return op_ == op_limit_;
++    // Prevent pointer from being past the buffer.
++    op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, len);
+   }
+ 
+-  inline bool Append(const char* ip, size_t len) {
+-    char* op = op_;
++  inline bool CheckLength() const { return op_ == op_limit_; }
++
++  char* GetOutputPtr() { return op_; }
++  char* GetBase(ptrdiff_t* op_limit_min_slop) {
++    *op_limit_min_slop = op_limit_min_slop_ - base_;
++    return base_;
++  }
++  void SetOutputPtr(char* op) { op_ = op; }
++
++  inline bool Append(const char* ip, size_t len, char** op_p) {
++    char* op = *op_p;
+     const size_t space_left = op_limit_ - op;
+-    if (space_left < len) {
+-      return false;
+-    }
+-    memcpy(op, ip, len);
+-    op_ = op + len;
++    if (space_left < len) return false;
++    std::memcpy(op, ip, len);
++    *op_p = op + len;
+     return true;
+   }
+ 
+-  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+-    char* op = op_;
++  inline bool TryFastAppend(const char* ip, size_t available, size_t len,
++                            char** op_p) {
++    char* op = *op_p;
+     const size_t space_left = op_limit_ - op;
+     if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
+       // Fast path, used for the majority (about 95%) of invocations.
+       UnalignedCopy128(ip, op);
+-      op_ = op + len;
++      *op_p = op + len;
+       return true;
+     } else {
+       return false;
+     }
+   }
+ 
+-  inline bool AppendFromSelf(size_t offset, size_t len) {
+-    char* const op_end = op_ + len;
++  SNAPPY_ATTRIBUTE_ALWAYS_INLINE
++  inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
++    assert(len > 0);
++    char* const op = *op_p;
++    assert(op >= base_);
++    char* const op_end = op + len;
+ 
+     // Check if we try to append from before the start of the buffer.
+-    // Normally this would just be a check for "produced < offset",
+-    // but "produced <= offset - 1u" is equivalent for every case
+-    // except the one where offset==0, where the right side will wrap around
+-    // to a very big number. This is convenient, as offset==0 is another
+-    // invalid case that we also want to catch, so that we do not go
+-    // into an infinite loop.
+-    if (Produced() <= offset - 1u || op_end > op_limit_) return false;
+-    op_ = IncrementalCopy(op_ - offset, op_, op_end, op_limit_);
++    if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - base_) < offset))
++      return false;
+ 
++    if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
++                            op >= op_limit_min_slop_ || offset < len)) {
++      if (op_end > op_limit_ || offset == 0) return false;
++      *op_p = IncrementalCopy(op - offset, op, op_end, op_limit_);
++      return true;
++    }
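++    // Fast path: offset >= len and op is below op_limit_min_slop_, so one
++    // unconditional kSlopBytes-wide memmove is safe; any bytes written past
++    // len are scratch that later appends overwrite.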
++    std::memmove(op, op - offset, kSlopBytes);
++    *op_p = op_end;
+     return true;
+   }
+   inline size_t Produced() const {
+     assert(op_ >= base_);
+     return op_ - base_;
+   }
+   inline void Flush() {}
+ };
+ 
+-bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
+-  ByteArraySource reader(compressed, n);
++bool RawUncompress(const char* compressed, size_t compressed_length,
++                   char* uncompressed) {
++  ByteArraySource reader(compressed, compressed_length);
+   return RawUncompress(&reader, uncompressed);
+ }
+ 
+ bool RawUncompress(Source* compressed, char* uncompressed) {
+   SnappyArrayWriter output(uncompressed);
+   return InternalUncompress(compressed, &output);
+ }
+ 
+-bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) {
++bool Uncompress(const char* compressed, size_t compressed_length,
++                std::string* uncompressed) {
+   size_t ulength;
+-  if (!GetUncompressedLength(compressed, n, &ulength)) {
++  if (!GetUncompressedLength(compressed, compressed_length, &ulength)) {
+     return false;
+   }
+   // On 32-bit builds: max_size() < kuint32max.  Check for that instead
+   // of crashing (e.g., consider externally specified compressed data).
+   if (ulength > uncompressed->max_size()) {
+     return false;
+   }
+   STLStringResizeUninitialized(uncompressed, ulength);
+-  return RawUncompress(compressed, n, string_as_array(uncompressed));
++  return RawUncompress(compressed, compressed_length,
++                       string_as_array(uncompressed));
+ }
+ 
+ // A Writer that drops everything on the floor and just does validation
+ class SnappyDecompressionValidator {
+  private:
+   size_t expected_;
+   size_t produced_;
+ 
+  public:
+-  inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
+-  inline void SetExpectedLength(size_t len) {
+-    expected_ = len;
++  inline SnappyDecompressionValidator() : expected_(0), produced_(0) {}
++  inline void SetExpectedLength(size_t len) { expected_ = len; }
++  size_t GetOutputPtr() { return produced_; }
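++  // Returning a dummy non-null base of 1 with an effectively unbounded limit
++  // lets DecompressBranchless run in pure size-validation mode, where the
++  // ptrdiff_t overloads of MemCopy/MemMove turn every write into a no-op.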
++  size_t GetBase(ptrdiff_t* op_limit_min_slop) {
++    *op_limit_min_slop = std::numeric_limits<ptrdiff_t>::max() - kSlopBytes + 1;
++    return 1;
+   }
+-  inline bool CheckLength() const {
+-    return expected_ == produced_;
++  void SetOutputPtr(size_t op) { produced_ = op; }
++  inline bool CheckLength() const { return expected_ == produced_; }
++  inline bool Append(const char* ip, size_t len, size_t* produced) {
++    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++    (void)ip;
++
++    *produced += len;
++    return *produced <= expected_;
+   }
+-  inline bool Append(const char* ip, size_t len) {
+-    produced_ += len;
+-    return produced_ <= expected_;
+-  }
+-  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
++  inline bool TryFastAppend(const char* ip, size_t available, size_t length,
++                            size_t* produced) {
++    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++    (void)ip;
++    (void)available;
++    (void)length;
++    (void)produced;
++
+     return false;
+   }
+-  inline bool AppendFromSelf(size_t offset, size_t len) {
++  inline bool AppendFromSelf(size_t offset, size_t len, size_t* produced) {
+     // See SnappyArrayWriter::AppendFromSelf for an explanation of
+     // the "offset - 1u" trick.
+-    if (produced_ <= offset - 1u) return false;
+-    produced_ += len;
+-    return produced_ <= expected_;
++    if (*produced <= offset - 1u) return false;
++    *produced += len;
++    return *produced <= expected_;
+   }
+   inline void Flush() {}
+ };
+ 
+-bool IsValidCompressedBuffer(const char* compressed, size_t n) {
+-  ByteArraySource reader(compressed, n);
++bool IsValidCompressedBuffer(const char* compressed, size_t compressed_length) {
++  ByteArraySource reader(compressed, compressed_length);
+   SnappyDecompressionValidator writer;
+   return InternalUncompress(&reader, &writer);
+ }
+ 
+ bool IsValidCompressed(Source* compressed) {
+   SnappyDecompressionValidator writer;
+   return InternalUncompress(compressed, &writer);
+ }
+ 
+-void RawCompress(const char* input,
+-                 size_t input_length,
+-                 char* compressed,
++void RawCompress(const char* input, size_t input_length, char* compressed,
+                  size_t* compressed_length) {
+   ByteArraySource reader(input, input_length);
+   UncheckedByteArraySink writer(compressed);
+   Compress(&reader, &writer);
+ 
+   // Compute how many bytes were added
+   *compressed_length = (writer.CurrentDestination() - compressed);
+ }
+@@ -1438,185 +1939,216 @@ class SnappyScatteredWriter {
+   // All of the blocks except the last have length kBlockSize.
+   std::vector<char*> blocks_;
+   size_t expected_;
+ 
+   // Total size of all fully generated blocks so far
+   size_t full_size_;
+ 
+   // Pointer into current output block
+-  char* op_base_;       // Base of output block
+-  char* op_ptr_;        // Pointer to next unfilled byte in block
+-  char* op_limit_;      // Pointer just past block
++  char* op_base_;   // Base of output block
++  char* op_ptr_;    // Pointer to next unfilled byte in block
++  char* op_limit_;  // Pointer just past block
++  // If op < op_limit_min_slop_ then it's safe to unconditionally write
++  // kSlopBytes starting at op.
++  char* op_limit_min_slop_;
+ 
+-  inline size_t Size() const {
+-    return full_size_ + (op_ptr_ - op_base_);
+-  }
++  inline size_t Size() const { return full_size_ + (op_ptr_ - op_base_); }
+ 
+   bool SlowAppend(const char* ip, size_t len);
+   bool SlowAppendFromSelf(size_t offset, size_t len);
+ 
+  public:
+   inline explicit SnappyScatteredWriter(const Allocator& allocator)
+       : allocator_(allocator),
+         full_size_(0),
+         op_base_(NULL),
+         op_ptr_(NULL),
+-        op_limit_(NULL) {
++        op_limit_(NULL),
++        op_limit_min_slop_(NULL) {}
++  char* GetOutputPtr() { return op_ptr_; }
++  char* GetBase(ptrdiff_t* op_limit_min_slop) {
++    *op_limit_min_slop = op_limit_min_slop_ - op_base_;
++    return op_base_;
+   }
++  void SetOutputPtr(char* op) { op_ptr_ = op; }
+ 
+   inline void SetExpectedLength(size_t len) {
+     assert(blocks_.empty());
+     expected_ = len;
+   }
+ 
+-  inline bool CheckLength() const {
+-    return Size() == expected_;
+-  }
++  inline bool CheckLength() const { return Size() == expected_; }
+ 
+   // Return the number of bytes actually uncompressed so far
+-  inline size_t Produced() const {
+-    return Size();
+-  }
++  inline size_t Produced() const { return Size(); }
+ 
+-  inline bool Append(const char* ip, size_t len) {
+-    size_t avail = op_limit_ - op_ptr_;
++  inline bool Append(const char* ip, size_t len, char** op_p) {
++    char* op = *op_p;
++    size_t avail = op_limit_ - op;
+     if (len <= avail) {
+       // Fast path
+-      memcpy(op_ptr_, ip, len);
+-      op_ptr_ += len;
++      std::memcpy(op, ip, len);
++      *op_p = op + len;
+       return true;
+     } else {
+-      return SlowAppend(ip, len);
++      op_ptr_ = op;
++      bool res = SlowAppend(ip, len);
++      *op_p = op_ptr_;
++      return res;
+     }
+   }
+ 
+-  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
+-    char* op = op_ptr_;
++  inline bool TryFastAppend(const char* ip, size_t available, size_t length,
++                            char** op_p) {
++    char* op = *op_p;
+     const int space_left = op_limit_ - op;
+     if (length <= 16 && available >= 16 + kMaximumTagLength &&
+         space_left >= 16) {
+       // Fast path, used for the majority (about 95%) of invocations.
+       UnalignedCopy128(ip, op);
+-      op_ptr_ = op + length;
++      *op_p = op + length;
+       return true;
+     } else {
+       return false;
+     }
+   }
+ 
+-  inline bool AppendFromSelf(size_t offset, size_t len) {
+-    char* const op_end = op_ptr_ + len;
+-    // See SnappyArrayWriter::AppendFromSelf for an explanation of
+-    // the "offset - 1u" trick.
+-    if (SNAPPY_PREDICT_TRUE(offset - 1u < op_ptr_ - op_base_ &&
+-                          op_end <= op_limit_)) {
+-      // Fast path: src and dst in current block.
+-      op_ptr_ = IncrementalCopy(op_ptr_ - offset, op_ptr_, op_end, op_limit_);
++  inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
++    char* op = *op_p;
++    assert(op >= op_base_);
++    // Check if we try to append from before the start of the buffer.
++    if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
++                            static_cast<size_t>(op - op_base_) < offset ||
++                            op >= op_limit_min_slop_ || offset < len)) {
++      if (offset == 0) return false;
++      if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - op_base_) < offset ||
++                              op + len > op_limit_)) {
++        op_ptr_ = op;
++        bool res = SlowAppendFromSelf(offset, len);
++        *op_p = op_ptr_;
++        return res;
++      }
++      *op_p = IncrementalCopy(op - offset, op, op + len, op_limit_);
+       return true;
+     }
+-    return SlowAppendFromSelf(offset, len);
++    // Fast path
++    char* const op_end = op + len;
++    std::memmove(op, op - offset, kSlopBytes);
++    *op_p = op_end;
++    return true;
+   }
+ 
+   // Called at the end of the decompress. We ask the allocator
+   // write all blocks to the sink.
+   inline void Flush() { allocator_.Flush(Produced()); }
+ };
+ 
+-template<typename Allocator>
++template <typename Allocator>
+ bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
+   size_t avail = op_limit_ - op_ptr_;
+   while (len > avail) {
+     // Completely fill this block
+-    memcpy(op_ptr_, ip, avail);
++    std::memcpy(op_ptr_, ip, avail);
+     op_ptr_ += avail;
+     assert(op_limit_ - op_ptr_ == 0);
+     full_size_ += (op_ptr_ - op_base_);
+     len -= avail;
+     ip += avail;
+ 
+     // Bounds check
+-    if (full_size_ + len > expected_) {
+-      return false;
+-    }
++    if (full_size_ + len > expected_) return false;
+ 
+     // Make new block
+     size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
+     op_base_ = allocator_.Allocate(bsize);
+     op_ptr_ = op_base_;
+     op_limit_ = op_base_ + bsize;
++    op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, bsize);
++
+     blocks_.push_back(op_base_);
+     avail = bsize;
+   }
+ 
+-  memcpy(op_ptr_, ip, len);
++  std::memcpy(op_ptr_, ip, len);
+   op_ptr_ += len;
+   return true;
+ }
+ 
+-template<typename Allocator>
++template <typename Allocator>
+ bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
+                                                          size_t len) {
+   // Overflow check
+   // See SnappyArrayWriter::AppendFromSelf for an explanation of
+   // the "offset - 1u" trick.
+   const size_t cur = Size();
+   if (offset - 1u >= cur) return false;
+   if (expected_ - cur < len) return false;
+ 
+   // Currently we shouldn't ever hit this path because Compress() chops the
+   // input into blocks and does not create cross-block copies. However, it is
+   // nice if we do not rely on that, since we can get better compression if we
+   // allow cross-block copies and thus might want to change the compressor in
+   // the future.
++  // TODO Replace this with a properly optimized path. This is not
++  // triggered right now. But it is so slow that it would regress
++  // performance unacceptably if triggered.
+   size_t src = cur - offset;
++  char* op = op_ptr_;
+   while (len-- > 0) {
+-    char c = blocks_[src >> kBlockLog][src & (kBlockSize-1)];
+-    Append(&c, 1);
++    char c = blocks_[src >> kBlockLog][src & (kBlockSize - 1)];
++    if (!Append(&c, 1, &op)) {
++      op_ptr_ = op;
++      return false;
++    }
+     src++;
+   }
++  op_ptr_ = op;
+   return true;
+ }
+ 
+ class SnappySinkAllocator {
+  public:
+-  explicit SnappySinkAllocator(Sink* dest): dest_(dest) {}
++  explicit SnappySinkAllocator(Sink* dest) : dest_(dest) {}
+   ~SnappySinkAllocator() {}
+ 
+   char* Allocate(int size) {
+     Datablock block(new char[size], size);
+     blocks_.push_back(block);
+     return block.data;
+   }
+ 
+   // We flush only at the end, because the writer wants
+   // random access to the blocks and once we hand the
+   // block over to the sink, we can't access it anymore.
+   // Also we don't write more than has been actually written
+   // to the blocks.
+   void Flush(size_t size) {
+     size_t size_written = 0;
+-    size_t block_size;
+-    for (int i = 0; i < blocks_.size(); ++i) {
+-      block_size = std::min<size_t>(blocks_[i].size, size - size_written);
+-      dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
++    for (Datablock& block : blocks_) {
++      size_t block_size = std::min<size_t>(block.size, size - size_written);
++      dest_->AppendAndTakeOwnership(block.data, block_size,
+                                     &SnappySinkAllocator::Deleter, NULL);
+       size_written += block_size;
+     }
+     blocks_.clear();
+   }
+ 
+  private:
+   struct Datablock {
+     char* data;
+     size_t size;
+     Datablock(char* p, size_t s) : data(p), size(s) {}
+   };
+ 
+   static void Deleter(void* arg, const char* bytes, size_t size) {
++    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
++    (void)arg;
++    (void)size;
++
+     delete[] bytes;
+   }
+ 
+   Sink* dest_;
+   std::vector<Datablock> blocks_;
+ 
+   // Note: copying this object is allowed
+ };
+@@ -1626,25 +2158,25 @@ size_t UncompressAsMuchAsPossible(Source
+   SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
+   InternalUncompress(compressed, &writer);
+   return writer.Produced();
+ }
+ 
+ bool Uncompress(Source* compressed, Sink* uncompressed) {
+   // Read the uncompressed length from the front of the compressed input
+   SnappyDecompressor decompressor(compressed);
+-  uint32 uncompressed_len = 0;
++  uint32_t uncompressed_len = 0;
+   if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
+     return false;
+   }
+ 
+   char c;
+   size_t allocated_size;
+-  char* buf = uncompressed->GetAppendBufferVariable(
+-      1, uncompressed_len, &c, 1, &allocated_size);
++  char* buf = uncompressed->GetAppendBufferVariable(1, uncompressed_len, &c, 1,
++                                                    &allocated_size);
+ 
+   const size_t compressed_len = compressed->Available();
+   // If we can get a flat buffer, then use it, otherwise do block by block
+   // uncompression
+   if (allocated_size >= uncompressed_len) {
+     SnappyArrayWriter writer(buf);
+     bool result = InternalUncompressAllTags(&decompressor, &writer,
+                                             compressed_len, uncompressed_len);
+diff --git a/other-licenses/snappy/src/snappy.h b/other-licenses/snappy/src/snappy.h
+--- a/other-licenses/snappy/src/snappy.h
++++ b/other-licenses/snappy/src/snappy.h
+@@ -34,17 +34,19 @@
+ // with long repeated sequences or compressing data that is similar to
+ // other data, while still compressing fast, you might look at first
+ // using BMDiff and then compressing the output of BMDiff with
+ // Snappy.
+ 
+ #ifndef THIRD_PARTY_SNAPPY_SNAPPY_H__
+ #define THIRD_PARTY_SNAPPY_SNAPPY_H__
+ 
+-#include <cstddef>
++#include <stddef.h>
++#include <stdint.h>
++
+ #include <string>
+ 
+ #include "snappy-stubs-public.h"
+ 
+ namespace snappy {
+   class Source;
+   class Sink;
+ 
+@@ -58,17 +60,17 @@ namespace snappy {
+ 
+   // Find the uncompressed length of the given stream, as given by the header.
+   // Note that the true length could deviate from this; the stream could e.g.
+   // be truncated.
+   //
+   // Also note that this leaves "*source" in a state that is unsuitable for
+   // further operations, such as RawUncompress(). You will need to rewind
+   // or recreate the source yourself before attempting any further calls.
+-  bool GetUncompressedLength(Source* source, uint32* result);
++  bool GetUncompressedLength(Source* source, uint32_t* result);
+ 
+   // ------------------------------------------------------------------------
+   // Higher-level string based routines (should be sufficient for most users)
+   // ------------------------------------------------------------------------
+ 
+   // Sets "*compressed" to the compressed version of "input[0,input_length-1]".
+   // Original contents of *compressed are lost.
+   //
+diff --git a/other-licenses/snappy/src/snappy_compress_fuzzer.cc b/other-licenses/snappy/src/snappy_compress_fuzzer.cc
+--- a/other-licenses/snappy/src/snappy_compress_fuzzer.cc
++++ b/other-licenses/snappy/src/snappy_compress_fuzzer.cc
+@@ -23,19 +23,20 @@
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ //
+ // libFuzzer harness for fuzzing snappy compression code.
+ 
++#include <stddef.h>
++#include <stdint.h>
++
+ #include <cassert>
+-#include <cstddef>
+-#include <cstdint>
+ #include <string>
+ 
+ #include "snappy.h"
+ 
+ // Entry point for LibFuzzer.
+ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+   std::string input(reinterpret_cast<const char*>(data), size);
+ 
+diff --git a/other-licenses/snappy/src/snappy_uncompress_fuzzer.cc b/other-licenses/snappy/src/snappy_uncompress_fuzzer.cc
+--- a/other-licenses/snappy/src/snappy_uncompress_fuzzer.cc
++++ b/other-licenses/snappy/src/snappy_uncompress_fuzzer.cc
+@@ -23,19 +23,20 @@
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ //
+ // libFuzzer harness for fuzzing snappy's decompression code.
+ 
++#include <stddef.h>
++#include <stdint.h>
++
+ #include <cassert>
+-#include <cstddef>
+-#include <cstdint>
+ #include <string>
+ 
+ #include "snappy.h"
+ 
+ // Entry point for LibFuzzer.
+ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+   std::string input(reinterpret_cast<const char*>(data), size);
+ 
+diff --git a/other-licenses/snappy/src/snappy_unittest.cc b/other-licenses/snappy/src/snappy_unittest.cc
+--- a/other-licenses/snappy/src/snappy_unittest.cc
++++ b/other-licenses/snappy/src/snappy_unittest.cc
+@@ -21,53 +21,40 @@
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ 
+-#include <math.h>
+-#include <stdlib.h>
+-
+ #include <algorithm>
++#include <cmath>
++#include <cstdlib>
+ #include <random>
+ #include <string>
+ #include <utility>
+ #include <vector>
+ 
+-#include "snappy.h"
+-#include "snappy-internal.h"
+ #include "snappy-test.h"
+-#include "snappy-sinksource.h"
+ 
+-DEFINE_int32(start_len, -1,
+-             "Starting prefix size for testing (-1: just full file contents)");
+-DEFINE_int32(end_len, -1,
+-             "Starting prefix size for testing (-1: just full file contents)");
+-DEFINE_int32(bytes, 10485760,
+-             "How many bytes to compress/uncompress per file for timing");
++#include "gtest/gtest.h"
+ 
+-DEFINE_bool(zlib, false,
+-            "Run zlib compression (http://www.zlib.net)");
+-DEFINE_bool(lzo, false,
+-            "Run LZO compression (http://www.oberhumer.com/opensource/lzo/)");
+-DEFINE_bool(snappy, true, "Run snappy compression");
++#include "snappy-internal.h"
++#include "snappy-sinksource.h"
++#include "snappy.h"
++#include "snappy_test_data.h"
+ 
+-DEFINE_bool(write_compressed, false,
+-            "Write compressed versions of each file to <file>.comp");
+-DEFINE_bool(write_uncompressed, false,
+-            "Write uncompressed versions of each file to <file>.uncomp");
+-
+-DEFINE_bool(snappy_dump_decompression_table, false,
++SNAPPY_FLAG(bool, snappy_dump_decompression_table, false,
+             "If true, we print the decompression table during tests.");
+ 
+ namespace snappy {
+ 
++namespace {
++
+ #if defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF)
+ 
+ // To test against code that reads beyond its input, this class copies a
+ // string to a newly allocated group of pages, the last of which
+ // is made unreadable via mprotect. Note that we need to allocate the
+ // memory with mmap(), as POSIX allows mprotect() only on memory allocated
+ // with mmap(), and some malloc/posix_memalign implementations expect to
+ // be able to read previously allocated memory while doing heap allocations.
+@@ -79,17 +66,17 @@ class DataEndingAtUnreadablePage {
+     // Round up space for string to a multiple of page_size.
+     size_t space_for_string = (size + page_size - 1) & ~(page_size - 1);
+     alloc_size_ = space_for_string + page_size;
+     mem_ = mmap(NULL, alloc_size_,
+                 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+     CHECK_NE(MAP_FAILED, mem_);
+     protected_page_ = reinterpret_cast<char*>(mem_) + space_for_string;
+     char* dst = protected_page_ - size;
+-    memcpy(dst, s.data(), size);
++    std::memcpy(dst, s.data(), size);
+     data_ = dst;
+     size_ = size;
+     // Make guard page unreadable.
+     CHECK_EQ(0, mprotect(protected_page_, page_size, PROT_NONE));
+   }
+ 
+   ~DataEndingAtUnreadablePage() {
+     const size_t page_size = sysconf(_SC_PAGESIZE);
+@@ -111,275 +98,33 @@ class DataEndingAtUnreadablePage {
+ 
+ #else  // defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF)
+ 
+ // Fallback for systems without mmap.
+ using DataEndingAtUnreadablePage = std::string;
+ 
+ #endif
+ 
+-enum CompressorType {
+-  ZLIB, LZO, SNAPPY
+-};
+-
+-const char* names[] = {
+-  "ZLIB", "LZO", "SNAPPY"
+-};
+-
+-static size_t MinimumRequiredOutputSpace(size_t input_size,
+-                                         CompressorType comp) {
+-  switch (comp) {
+-#ifdef ZLIB_VERSION
+-    case ZLIB:
+-      return ZLib::MinCompressbufSize(input_size);
+-#endif  // ZLIB_VERSION
+-
+-#ifdef LZO_VERSION
+-    case LZO:
+-      return input_size + input_size/64 + 16 + 3;
+-#endif  // LZO_VERSION
+-
+-    case SNAPPY:
+-      return snappy::MaxCompressedLength(input_size);
+-
+-    default:
+-      LOG(FATAL) << "Unknown compression type number " << comp;
+-      return 0;
+-  }
+-}
+-
+-// Returns true if we successfully compressed, false otherwise.
+-//
+-// If compressed_is_preallocated is set, do not resize the compressed buffer.
+-// This is typically what you want for a benchmark, in order to not spend
+-// time in the memory allocator. If you do set this flag, however,
+-// "compressed" must be preinitialized to at least MinCompressbufSize(comp)
+-// number of bytes, and may contain junk bytes at the end after return.
+-static bool Compress(const char* input, size_t input_size, CompressorType comp,
+-                     std::string* compressed, bool compressed_is_preallocated) {
+-  if (!compressed_is_preallocated) {
+-    compressed->resize(MinimumRequiredOutputSpace(input_size, comp));
+-  }
+-
+-  switch (comp) {
+-#ifdef ZLIB_VERSION
+-    case ZLIB: {
+-      ZLib zlib;
+-      uLongf destlen = compressed->size();
+-      int ret = zlib.Compress(
+-          reinterpret_cast<Bytef*>(string_as_array(compressed)),
+-          &destlen,
+-          reinterpret_cast<const Bytef*>(input),
+-          input_size);
+-      CHECK_EQ(Z_OK, ret);
+-      if (!compressed_is_preallocated) {
+-        compressed->resize(destlen);
+-      }
+-      return true;
+-    }
+-#endif  // ZLIB_VERSION
+-
+-#ifdef LZO_VERSION
+-    case LZO: {
+-      unsigned char* mem = new unsigned char[LZO1X_1_15_MEM_COMPRESS];
+-      lzo_uint destlen;
+-      int ret = lzo1x_1_15_compress(
+-          reinterpret_cast<const uint8*>(input),
+-          input_size,
+-          reinterpret_cast<uint8*>(string_as_array(compressed)),
+-          &destlen,
+-          mem);
+-      CHECK_EQ(LZO_E_OK, ret);
+-      delete[] mem;
+-      if (!compressed_is_preallocated) {
+-        compressed->resize(destlen);
+-      }
+-      break;
+-    }
+-#endif  // LZO_VERSION
+-
+-    case SNAPPY: {
+-      size_t destlen;
+-      snappy::RawCompress(input, input_size,
+-                          string_as_array(compressed),
+-                          &destlen);
+-      CHECK_LE(destlen, snappy::MaxCompressedLength(input_size));
+-      if (!compressed_is_preallocated) {
+-        compressed->resize(destlen);
+-      }
+-      break;
+-    }
+-
+-    default: {
+-      return false;     // the asked-for library wasn't compiled in
+-    }
+-  }
+-  return true;
+-}
+-
+-static bool Uncompress(const std::string& compressed, CompressorType comp,
+-                       int size, std::string* output) {
+-  switch (comp) {
+-#ifdef ZLIB_VERSION
+-    case ZLIB: {
+-      output->resize(size);
+-      ZLib zlib;
+-      uLongf destlen = output->size();
+-      int ret = zlib.Uncompress(
+-          reinterpret_cast<Bytef*>(string_as_array(output)),
+-          &destlen,
+-          reinterpret_cast<const Bytef*>(compressed.data()),
+-          compressed.size());
+-      CHECK_EQ(Z_OK, ret);
+-      CHECK_EQ(static_cast<uLongf>(size), destlen);
+-      break;
+-    }
+-#endif  // ZLIB_VERSION
+-
+-#ifdef LZO_VERSION
+-    case LZO: {
+-      output->resize(size);
+-      lzo_uint destlen;
+-      int ret = lzo1x_decompress(
+-          reinterpret_cast<const uint8*>(compressed.data()),
+-          compressed.size(),
+-          reinterpret_cast<uint8*>(string_as_array(output)),
+-          &destlen,
+-          NULL);
+-      CHECK_EQ(LZO_E_OK, ret);
+-      CHECK_EQ(static_cast<lzo_uint>(size), destlen);
+-      break;
+-    }
+-#endif  // LZO_VERSION
+-
+-    case SNAPPY: {
+-      snappy::RawUncompress(compressed.data(), compressed.size(),
+-                            string_as_array(output));
+-      break;
+-    }
+-
+-    default: {
+-      return false;     // the asked-for library wasn't compiled in
+-    }
+-  }
+-  return true;
+-}
+-
+-static void Measure(const char* data,
+-                    size_t length,
+-                    CompressorType comp,
+-                    int repeats,
+-                    int block_size) {
+-  // Run tests a few time and pick median running times
+-  static const int kRuns = 5;
+-  double ctime[kRuns];
+-  double utime[kRuns];
+-  int compressed_size = 0;
+-
+-  {
+-    // Chop the input into blocks
+-    int num_blocks = (length + block_size - 1) / block_size;
+-    std::vector<const char*> input(num_blocks);
+-    std::vector<size_t> input_length(num_blocks);
+-    std::vector<std::string> compressed(num_blocks);
+-    std::vector<std::string> output(num_blocks);
+-    for (int b = 0; b < num_blocks; b++) {
+-      int input_start = b * block_size;
+-      int input_limit = std::min<int>((b+1)*block_size, length);
+-      input[b] = data+input_start;
+-      input_length[b] = input_limit-input_start;
+-
+-      // Pre-grow the output buffer so we don't measure string append time.
+-      compressed[b].resize(MinimumRequiredOutputSpace(block_size, comp));
+-    }
+-
+-    // First, try one trial compression to make sure the code is compiled in
+-    if (!Compress(input[0], input_length[0], comp, &compressed[0], true)) {
+-      LOG(WARNING) << "Skipping " << names[comp] << ": "
+-                   << "library not compiled in";
+-      return;
+-    }
+-
+-    for (int run = 0; run < kRuns; run++) {
+-      CycleTimer ctimer, utimer;
+-
+-      for (int b = 0; b < num_blocks; b++) {
+-        // Pre-grow the output buffer so we don't measure string append time.
+-        compressed[b].resize(MinimumRequiredOutputSpace(block_size, comp));
+-      }
+-
+-      ctimer.Start();
+-      for (int b = 0; b < num_blocks; b++)
+-        for (int i = 0; i < repeats; i++)
+-          Compress(input[b], input_length[b], comp, &compressed[b], true);
+-      ctimer.Stop();
+-
+-      // Compress once more, with resizing, so we don't leave junk
+-      // at the end that will confuse the decompressor.
+-      for (int b = 0; b < num_blocks; b++) {
+-        Compress(input[b], input_length[b], comp, &compressed[b], false);
+-      }
+-
+-      for (int b = 0; b < num_blocks; b++) {
+-        output[b].resize(input_length[b]);
+-      }
+-
+-      utimer.Start();
+-      for (int i = 0; i < repeats; i++)
+-        for (int b = 0; b < num_blocks; b++)
+-          Uncompress(compressed[b], comp, input_length[b], &output[b]);
+-      utimer.Stop();
+-
+-      ctime[run] = ctimer.Get();
+-      utime[run] = utimer.Get();
+-    }
+-
+-    compressed_size = 0;
+-    for (size_t i = 0; i < compressed.size(); i++) {
+-      compressed_size += compressed[i].size();
+-    }
+-  }
+-
+-  std::sort(ctime, ctime + kRuns);
+-  std::sort(utime, utime + kRuns);
+-  const int med = kRuns/2;
+-
+-  float comp_rate = (length / ctime[med]) * repeats / 1048576.0;
+-  float uncomp_rate = (length / utime[med]) * repeats / 1048576.0;
+-  std::string x = names[comp];
+-  x += ":";
+-  std::string urate = (uncomp_rate >= 0) ? StrFormat("%.1f", uncomp_rate)
+-                                         : std::string("?");
+-  printf("%-7s [b %dM] bytes %6d -> %6d %4.1f%%  "
+-         "comp %5.1f MB/s  uncomp %5s MB/s\n",
+-         x.c_str(),
+-         block_size/(1<<20),
+-         static_cast<int>(length), static_cast<uint32>(compressed_size),
+-         (compressed_size * 100.0) / std::max<int>(1, length),
+-         comp_rate,
+-         urate.c_str());
+-}
+-
+-static int VerifyString(const std::string& input) {
++int VerifyString(const std::string& input) {
+   std::string compressed;
+   DataEndingAtUnreadablePage i(input);
+   const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+   CHECK_EQ(written, compressed.size());
+   CHECK_LE(compressed.size(),
+            snappy::MaxCompressedLength(input.size()));
+   CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ 
+   std::string uncompressed;
+   DataEndingAtUnreadablePage c(compressed);
+   CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
+   CHECK_EQ(uncompressed, input);
+   return uncompressed.size();
+ }
+ 
+-static void VerifyStringSink(const std::string& input) {
++void VerifyStringSink(const std::string& input) {
+   std::string compressed;
+   DataEndingAtUnreadablePage i(input);
+   const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+   CHECK_EQ(written, compressed.size());
+   CHECK_LE(compressed.size(),
+            snappy::MaxCompressedLength(input.size()));
+   CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ 
+@@ -387,17 +132,17 @@ static void VerifyStringSink(const std::
+   uncompressed.resize(input.size());
+   snappy::UncheckedByteArraySink sink(string_as_array(&uncompressed));
+   DataEndingAtUnreadablePage c(compressed);
+   snappy::ByteArraySource source(c.data(), c.size());
+   CHECK(snappy::Uncompress(&source, &sink));
+   CHECK_EQ(uncompressed, input);
+ }
+ 
+-static void VerifyIOVec(const std::string& input) {
++void VerifyIOVec(const std::string& input) {
+   std::string compressed;
+   DataEndingAtUnreadablePage i(input);
+   const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+   CHECK_EQ(written, compressed.size());
+   CHECK_LE(compressed.size(),
+            snappy::MaxCompressedLength(input.size()));
+   CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ 
+@@ -406,17 +151,17 @@ static void VerifyIOVec(const std::strin
+   char* buf = new char[input.size()];
+   std::minstd_rand0 rng(input.size());
+   std::uniform_int_distribution<size_t> uniform_1_to_10(1, 10);
+   size_t num = uniform_1_to_10(rng);
+   if (input.size() < num) {
+     num = input.size();
+   }
+   struct iovec* iov = new iovec[num];
+-  int used_so_far = 0;
++  size_t used_so_far = 0;
+   std::bernoulli_distribution one_in_five(1.0 / 5);
+   for (size_t i = 0; i < num; ++i) {
+     assert(used_so_far < input.size());
+     iov[i].iov_base = buf + used_so_far;
+     if (i == num - 1) {
+       iov[i].iov_len = input.size() - used_so_far;
+     } else {
+       // Randomly choose to insert a 0 byte entry.
+@@ -434,29 +179,29 @@ static void VerifyIOVec(const std::strin
+       compressed.data(), compressed.size(), iov, num));
+   CHECK(!memcmp(buf, input.data(), input.size()));
+   delete[] iov;
+   delete[] buf;
+ }
+ 
+ // Test that data compressed by a compressor that does not
+ // obey block sizes is uncompressed properly.
+-static void VerifyNonBlockedCompression(const std::string& input) {
++void VerifyNonBlockedCompression(const std::string& input) {
+   if (input.length() > snappy::kBlockSize) {
+     // We cannot test larger blocks than the maximum block size, obviously.
+     return;
+   }
+ 
+   std::string prefix;
+   Varint::Append32(&prefix, input.size());
+ 
+   // Setup compression table
+   snappy::internal::WorkingMemory wmem(input.size());
+   int table_size;
+-  uint16* table = wmem.GetHashTable(input.size(), &table_size);
++  uint16_t* table = wmem.GetHashTable(input.size(), &table_size);
+ 
+   // Compress entire input in one shot
+   std::string compressed;
+   compressed += prefix;
+   compressed.resize(prefix.size()+snappy::MaxCompressedLength(input.size()));
+   char* dest = string_as_array(&compressed) + prefix.size();
+   char* end = snappy::internal::CompressFragment(input.data(), input.size(),
+                                                 dest, table, table_size);
+@@ -476,37 +221,37 @@ static void VerifyNonBlockedCompression(
+   CHECK_EQ(uncomp_str2, input);
+ 
+   // Uncompress into iovec
+   {
+     static const int kNumBlocks = 10;
+     struct iovec vec[kNumBlocks];
+     const int block_size = 1 + input.size() / kNumBlocks;
+     std::string iovec_data(block_size * kNumBlocks, 'x');
+-    for (int i = 0; i < kNumBlocks; i++) {
++    for (int i = 0; i < kNumBlocks; ++i) {
+       vec[i].iov_base = string_as_array(&iovec_data) + i * block_size;
+       vec[i].iov_len = block_size;
+     }
+     CHECK(snappy::RawUncompressToIOVec(compressed.data(), compressed.size(),
+                                        vec, kNumBlocks));
+     CHECK_EQ(std::string(iovec_data.data(), input.size()), input);
+   }
+ }
+ 
+ // Expand the input so that it is at least K times as big as block size
+-static std::string Expand(const std::string& input) {
++std::string Expand(const std::string& input) {
+   static const int K = 3;
+   std::string data = input;
+   while (data.size() < K * snappy::kBlockSize) {
+     data += input;
+   }
+   return data;
+ }
+ 
+-static int Verify(const std::string& input) {
++int Verify(const std::string& input) {
+   VLOG(1) << "Verifying input of size " << input.size();
+ 
+   // Compress using string based routines
+   const int result = VerifyString(input);
+ 
+   // Verify using sink based routines
+   VerifyStringSink(input);
+ 
+@@ -516,20 +261,20 @@ static int Verify(const std::string& inp
+     const std::string expanded = Expand(input);
+     VerifyNonBlockedCompression(expanded);
+     VerifyIOVec(input);
+   }
+ 
+   return result;
+ }
+ 
+-static bool IsValidCompressedBuffer(const std::string& c) {
++bool IsValidCompressedBuffer(const std::string& c) {
+   return snappy::IsValidCompressedBuffer(c.data(), c.size());
+ }
+-static bool Uncompress(const std::string& c, std::string* u) {
++bool Uncompress(const std::string& c, std::string* u) {
+   return snappy::Uncompress(c.data(), c.size(), u);
+ }
+ 
+ // This test checks to ensure that snappy doesn't coredump if it gets
+ // corrupted data.
+ TEST(CorruptedTest, VerifyCorrupted) {
+   std::string source = "making sure we don't crash with corrupted input";
+   VLOG(1) << source;
+@@ -544,18 +289,18 @@ TEST(CorruptedTest, VerifyCorrupted) {
+   dest[3]++;
+   // this really ought to fail.
+   CHECK(!IsValidCompressedBuffer(dest));
+   CHECK(!Uncompress(dest, &uncmp));
+ 
+   // This is testing for a security bug - a buffer that decompresses to 100k
+   // but we lie in the snappy header and only reserve 0 bytes of memory :)
+   source.resize(100000);
+-  for (size_t i = 0; i < source.length(); ++i) {
+-    source[i] = 'A';
++  for (char& source_char : source) {
++    source_char = 'A';
+   }
+   snappy::Compress(source.data(), source.size(), &dest);
+   dest[0] = dest[1] = dest[2] = dest[3] = 0;
+   CHECK(!IsValidCompressedBuffer(dest));
+   CHECK(!Uncompress(dest, &uncmp));
+ 
+   if (sizeof(void *) == 4) {
+     // Another security check; check a crazy big length can't DoS us with an
+@@ -583,30 +328,30 @@ TEST(CorruptedTest, VerifyCorrupted) {
+   for (int i = 1; i <= 3; ++i) {
+     std::string data =
+         ReadTestDataFile(StrFormat("baddata%d.snappy", i).c_str(), 0);
+     std::string uncmp;
+     // check that we don't return a crazy length
+     size_t ulen;
+     CHECK(!snappy::GetUncompressedLength(data.data(), data.size(), &ulen)
+           || (ulen < (1<<20)));
+-    uint32 ulen2;
++    uint32_t ulen2;
+     snappy::ByteArraySource source(data.data(), data.size());
+     CHECK(!snappy::GetUncompressedLength(&source, &ulen2) ||
+           (ulen2 < (1<<20)));
+     CHECK(!IsValidCompressedBuffer(data));
+     CHECK(!Uncompress(data, &uncmp));
+   }
+ }
+ 
+ // Helper routines to construct arbitrary compressed strings.
+ // These mirror the compression code in snappy.cc, but are copied
+ // here so that we can bypass some limitations in the how snappy.cc
+ // invokes these routines.
+-static void AppendLiteral(std::string* dst, const std::string& literal) {
++void AppendLiteral(std::string* dst, const std::string& literal) {
+   if (literal.empty()) return;
+   int n = literal.size() - 1;
+   if (n < 60) {
+     // Fit length in tag byte
+     dst->push_back(0 | (n << 2));
+   } else {
+     // Encode in upcoming bytes
+     char number[4];
+@@ -616,17 +361,17 @@ static void AppendLiteral(std::string* d
+       n >>= 8;
+     }
+     dst->push_back(0 | ((59+count) << 2));
+     *dst += std::string(number, count);
+   }
+   *dst += literal;
+ }
+ 
+-static void AppendCopy(std::string* dst, int offset, int length) {
++void AppendCopy(std::string* dst, int offset, int length) {
+   while (length > 0) {
+     // Figure out how much to copy in one shot
+     int to_copy;
+     if (length >= 68) {
+       to_copy = 64;
+     } else if (length > 64) {
+       to_copy = 60;
+     } else {
+@@ -660,42 +405,77 @@ TEST(Snappy, SimpleTests) {
+ 
+   Verify("aaaaaaa" + std::string(16, 'b') + std::string("aaaaa") + "abc");
+   Verify("aaaaaaa" + std::string(256, 'b') + std::string("aaaaa") + "abc");
+   Verify("aaaaaaa" + std::string(2047, 'b') + std::string("aaaaa") + "abc");
+   Verify("aaaaaaa" + std::string(65536, 'b') + std::string("aaaaa") + "abc");
+   Verify("abcaaaaaaa" + std::string(65536, 'b') + std::string("aaaaa") + "abc");
+ }
+ 
++// Regression test for cr/345340892.
++TEST(Snappy, AppendSelfPatternExtensionEdgeCases) {
++  Verify("abcabcabcabcabcabcab");
++  Verify("abcabcabcabcabcabcab0123456789ABCDEF");
++
++  Verify("abcabcabcabcabcabcabcabcabcabcabcabc");
++  Verify("abcabcabcabcabcabcabcabcabcabcabcabc0123456789ABCDEF");
++}
++
++// Regression test for cr/345340892.
++TEST(Snappy, AppendSelfPatternExtensionEdgeCasesExhaustive) {
++  std::mt19937 rng;
++  std::uniform_int_distribution<int> uniform_byte(0, 255);
++  for (int pattern_size = 1; pattern_size <= 18; ++pattern_size) {
++    for (int length = 1; length <= 64; ++length) {
++      for (int extra_bytes_after_pattern : {0, 1, 15, 16, 128}) {
++        const int size = pattern_size + length + extra_bytes_after_pattern;
++        std::string input;
++        input.resize(size);
++        for (int i = 0; i < pattern_size; ++i) {
++          input[i] = 'a' + i;
++        }
++        for (int i = 0; i < length; ++i) {
++          input[pattern_size + i] = input[i];
++        }
++        for (int i = 0; i < extra_bytes_after_pattern; ++i) {
++          input[pattern_size + length + i] =
++              static_cast<char>(uniform_byte(rng));
++        }
++        Verify(input);
++      }
++    }
++  }
++}
++
+ // Verify max blowup (lots of four-byte copies)
+ TEST(Snappy, MaxBlowup) {
+   std::mt19937 rng;
+   std::uniform_int_distribution<int> uniform_byte(0, 255);
+   std::string input;
+   for (int i = 0; i < 80000; ++i)
+     input.push_back(static_cast<char>(uniform_byte(rng)));
+ 
+   for (int i = 0; i < 80000; i += 4) {
+     std::string four_bytes(input.end() - i - 4, input.end() - i);
+     input.append(four_bytes);
+   }
+   Verify(input);
+ }
+ 
+ TEST(Snappy, RandomData) {
+-  std::minstd_rand0 rng(FLAGS_test_random_seed);
++  std::minstd_rand0 rng(snappy::GetFlag(FLAGS_test_random_seed));
+   std::uniform_int_distribution<int> uniform_0_to_3(0, 3);
+   std::uniform_int_distribution<int> uniform_0_to_8(0, 8);
+   std::uniform_int_distribution<int> uniform_byte(0, 255);
+   std::uniform_int_distribution<size_t> uniform_4k(0, 4095);
+   std::uniform_int_distribution<size_t> uniform_64k(0, 65535);
+   std::bernoulli_distribution one_in_ten(1.0 / 10);
+ 
+   constexpr int num_ops = 20000;
+-  for (int i = 0; i < num_ops; i++) {
++  for (int i = 0; i < num_ops; ++i) {
+     if ((i % 1000) == 0) {
+       VLOG(0) << "Random op " << i << " of " << num_ops;
+     }
+ 
+     std::string x;
+     size_t len = uniform_4k(rng);
+     if (i < 100) {
+       len = 65536 + uniform_64k(rng);
+@@ -733,24 +513,24 @@ TEST(Snappy, FourByteOffset) {
+ 
+   // The two fragments that make up the input string.
+   std::string fragment1 = "012345689abcdefghijklmnopqrstuvwxyz";
+   std::string fragment2 = "some other string";
+ 
+   // How many times each fragment is emitted.
+   const int n1 = 2;
+   const int n2 = 100000 / fragment2.size();
+-  const int length = n1 * fragment1.size() + n2 * fragment2.size();
++  const size_t length = n1 * fragment1.size() + n2 * fragment2.size();
+ 
+   std::string compressed;
+   Varint::Append32(&compressed, length);
+ 
+   AppendLiteral(&compressed, fragment1);
+   std::string src = fragment1;
+-  for (int i = 0; i < n2; i++) {
++  for (int i = 0; i < n2; ++i) {
+     AppendLiteral(&compressed, fragment2);
+     src += fragment2;
+   }
+   AppendCopy(&compressed, src.size(), fragment1.size());
+   src += fragment1;
+   CHECK_EQ(length, src.size());
+ 
+   std::string uncompressed;
+@@ -867,24 +647,23 @@ TEST(Snappy, IOVecCopyOverflow) {
+   CHECK(!snappy::RawUncompressToIOVec(
+       compressed.data(), compressed.size(), iov, ARRAYSIZE(iov)));
+ 
+   for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+     delete[] reinterpret_cast<char *>(iov[i].iov_base);
+   }
+ }
+ 
+-static bool CheckUncompressedLength(const std::string& compressed,
+-                                    size_t* ulength) {
++bool CheckUncompressedLength(const std::string& compressed, size_t* ulength) {
+   const bool result1 = snappy::GetUncompressedLength(compressed.data(),
+                                                      compressed.size(),
+                                                      ulength);
+ 
+   snappy::ByteArraySource source(compressed.data(), compressed.size());
+-  uint32 length;
++  uint32_t length;
+   const bool result2 = snappy::GetUncompressedLength(&source, &length);
+   CHECK_EQ(result1, result2);
+   return result1;
+ }
+ 
+ TEST(SnappyCorruption, TruncatedVarint) {
+   std::string compressed, uncompressed;
+   size_t ulength;
+@@ -949,27 +728,24 @@ TEST(Snappy, ZeroOffsetCopy) {
+ 
+ TEST(Snappy, ZeroOffsetCopyValidation) {
+   const char* compressed = "\x05\x12\x00\x00";
+   //  \x05              Length
+   //  \x12\x00\x00      Copy with offset==0, length==5
+   EXPECT_FALSE(snappy::IsValidCompressedBuffer(compressed, 4));
+ }
+ 
+-namespace {
+-
+ int TestFindMatchLength(const char* s1, const char *s2, unsigned length) {
++  uint64_t data;
+   std::pair<size_t, bool> p =
+-      snappy::internal::FindMatchLength(s1, s2, s2 + length);
++      snappy::internal::FindMatchLength(s1, s2, s2 + length, &data);
+   CHECK_EQ(p.first < 8, p.second);
+   return p.first;
+ }
+ 
+-}  // namespace
+-
+ TEST(Snappy, FindMatchLength) {
+   // Exercise all different code paths through the function.
+   // 64-bit version:
+ 
+   // Hit s1_limit in 64-bit loop, hit s1_limit in single-character loop.
+   EXPECT_EQ(6, TestFindMatchLength("012345", "012345", 6));
+   EXPECT_EQ(11, TestFindMatchLength("01234567abc", "01234567abc", 11));
+ 
+@@ -1053,454 +829,138 @@ TEST(Snappy, FindMatchLength) {
+   EXPECT_EQ(11, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd0?23", 14));
+   EXPECT_EQ(12, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd0132", 14));
+   EXPECT_EQ(13, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd012?", 14));
+ }
+ 
+ TEST(Snappy, FindMatchLengthRandom) {
+   constexpr int kNumTrials = 10000;
+   constexpr int kTypicalLength = 10;
+-  std::minstd_rand0 rng(FLAGS_test_random_seed);
++  std::minstd_rand0 rng(snappy::GetFlag(FLAGS_test_random_seed));
+   std::uniform_int_distribution<int> uniform_byte(0, 255);
+   std::bernoulli_distribution one_in_two(1.0 / 2);
+   std::bernoulli_distribution one_in_typical_length(1.0 / kTypicalLength);
+ 
+-  for (int i = 0; i < kNumTrials; i++) {
++  for (int i = 0; i < kNumTrials; ++i) {
+     std::string s, t;
+     char a = static_cast<char>(uniform_byte(rng));
+     char b = static_cast<char>(uniform_byte(rng));
+     while (!one_in_typical_length(rng)) {
+       s.push_back(one_in_two(rng) ? a : b);
+       t.push_back(one_in_two(rng) ? a : b);
+     }
+     DataEndingAtUnreadablePage u(s);
+     DataEndingAtUnreadablePage v(t);
+-    int matched = TestFindMatchLength(u.data(), v.data(), t.size());
++    size_t matched = TestFindMatchLength(u.data(), v.data(), t.size());
+     if (matched == t.size()) {
+       EXPECT_EQ(s, t);
+     } else {
+       EXPECT_NE(s[matched], t[matched]);
+-      for (int j = 0; j < matched; j++) {
++      for (size_t j = 0; j < matched; ++j) {
+         EXPECT_EQ(s[j], t[j]);
+       }
+     }
+   }
+ }
+ 
+-static uint16 MakeEntry(unsigned int extra,
+-                        unsigned int len,
+-                        unsigned int copy_offset) {
++uint16_t MakeEntry(unsigned int extra, unsigned int len,
++                   unsigned int copy_offset) {
+   // Check that all of the fields fit within the allocated space
+   assert(extra       == (extra & 0x7));          // At most 3 bits
+   assert(copy_offset == (copy_offset & 0x7));    // At most 3 bits
+   assert(len         == (len & 0x7f));           // At most 7 bits
+   return len | (copy_offset << 8) | (extra << 11);
+ }
+ 
+ // Check that the decompression table is correct, and optionally print out
+ // the computed one.
+ TEST(Snappy, VerifyCharTable) {
+   using snappy::internal::LITERAL;
+   using snappy::internal::COPY_1_BYTE_OFFSET;
+   using snappy::internal::COPY_2_BYTE_OFFSET;
+   using snappy::internal::COPY_4_BYTE_OFFSET;
+   using snappy::internal::char_table;
+ 
+-  uint16 dst[256];
++  uint16_t dst[256];
+ 
+   // Place invalid entries in all places to detect missing initialization
+   int assigned = 0;
+-  for (int i = 0; i < 256; i++) {
++  for (int i = 0; i < 256; ++i) {
+     dst[i] = 0xffff;
+   }
+ 
+   // Small LITERAL entries.  We store (len-1) in the top 6 bits.
+-  for (unsigned int len = 1; len <= 60; len++) {
+-    dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
++  for (uint8_t len = 1; len <= 60; ++len) {
++    dst[LITERAL | ((len - 1) << 2)] = MakeEntry(0, len, 0);
+     assigned++;
+   }
+ 
+   // Large LITERAL entries.  We use 60..63 in the high 6 bits to
+   // encode the number of bytes of length info that follow the opcode.
+-  for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
++  for (uint8_t extra_bytes = 1; extra_bytes <= 4; ++extra_bytes) {
+     // We set the length field in the lookup table to 1 because extra
+     // bytes encode len-1.
+-    dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
++    dst[LITERAL | ((extra_bytes + 59) << 2)] = MakeEntry(extra_bytes, 1, 0);
+     assigned++;
+   }
+ 
+   // COPY_1_BYTE_OFFSET.
+   //
+   // The tag byte in the compressed data stores len-4 in 3 bits, and
+   // offset/256 in 5 bits.  offset%256 is stored in the next byte.
+   //
+   // This format is used for length in range [4..11] and offset in
+   // range [0..2047]
+-  for (unsigned int len = 4; len < 12; len++) {
+-    for (unsigned int offset = 0; offset < 2048; offset += 256) {
+-      dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
+-        MakeEntry(1, len, offset>>8);
++  for (uint8_t len = 4; len < 12; ++len) {
++    for (uint16_t offset = 0; offset < 2048; offset += 256) {
++      uint8_t offset_high = static_cast<uint8_t>(offset >> 8);
++      dst[COPY_1_BYTE_OFFSET | ((len - 4) << 2) | (offset_high << 5)] =
++          MakeEntry(1, len, offset_high);
+       assigned++;
+     }
+   }
+ 
+   // COPY_2_BYTE_OFFSET.
+   // Tag contains len-1 in top 6 bits, and offset in next two bytes.
+-  for (unsigned int len = 1; len <= 64; len++) {
+-    dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
++  for (uint8_t len = 1; len <= 64; ++len) {
++    dst[COPY_2_BYTE_OFFSET | ((len - 1) << 2)] = MakeEntry(2, len, 0);
+     assigned++;
+   }
+ 
+   // COPY_4_BYTE_OFFSET.
+   // Tag contents len-1 in top 6 bits, and offset in next four bytes.
+-  for (unsigned int len = 1; len <= 64; len++) {
+-    dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
++  for (uint8_t len = 1; len <= 64; ++len) {
++    dst[COPY_4_BYTE_OFFSET | ((len - 1) << 2)] = MakeEntry(4, len, 0);
+     assigned++;
+   }
+ 
+   // Check that each entry was initialized exactly once.
+   EXPECT_EQ(256, assigned) << "Assigned only " << assigned << " of 256";
+-  for (int i = 0; i < 256; i++) {
++  for (int i = 0; i < 256; ++i) {
+     EXPECT_NE(0xffff, dst[i]) << "Did not assign byte " << i;
+   }
+ 
+-  if (FLAGS_snappy_dump_decompression_table) {
+-    printf("static const uint16 char_table[256] = {\n  ");
+-    for (int i = 0; i < 256; i++) {
+-      printf("0x%04x%s",
+-             dst[i],
+-             ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n  " : ", ")));
++  if (snappy::GetFlag(FLAGS_snappy_dump_decompression_table)) {
++    std::printf("static const uint16_t char_table[256] = {\n  ");
++    for (int i = 0; i < 256; ++i) {
++      std::printf("0x%04x%s",
++                  dst[i],
++                  ((i == 255) ? "\n" : (((i % 8) == 7) ? ",\n  " : ", ")));
+     }
+-    printf("};\n");
++    std::printf("};\n");
+   }
+ 
+   // Check that computed table matched recorded table.
+-  for (int i = 0; i < 256; i++) {
++  for (int i = 0; i < 256; ++i) {
+     EXPECT_EQ(dst[i], char_table[i]) << "Mismatch in byte " << i;
+   }
+ }
+ 
+-static void CompressFile(const char* fname) {
+-  std::string fullinput;
+-  CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults()));
+-
+-  std::string compressed;
+-  Compress(fullinput.data(), fullinput.size(), SNAPPY, &compressed, false);
+-
+-  CHECK_OK(file::SetContents(std::string(fname).append(".comp"), compressed,
+-                             file::Defaults()));
+-}
+-
+-static void UncompressFile(const char* fname) {
+-  std::string fullinput;
+-  CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults()));
+-
+-  size_t uncompLength;
+-  CHECK(CheckUncompressedLength(fullinput, &uncompLength));
+-
+-  std::string uncompressed;
+-  uncompressed.resize(uncompLength);
+-  CHECK(snappy::Uncompress(fullinput.data(), fullinput.size(), &uncompressed));
+-
+-  CHECK_OK(file::SetContents(std::string(fname).append(".uncomp"), uncompressed,
+-                             file::Defaults()));
+-}
+-
+-static void MeasureFile(const char* fname) {
+-  std::string fullinput;
+-  CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults()));
+-  printf("%-40s :\n", fname);
+-
+-  int start_len = (FLAGS_start_len < 0) ? fullinput.size() : FLAGS_start_len;
+-  int end_len = fullinput.size();
+-  if (FLAGS_end_len >= 0) {
+-    end_len = std::min<int>(fullinput.size(), FLAGS_end_len);
+-  }
+-  for (int len = start_len; len <= end_len; len++) {
+-    const char* const input = fullinput.data();
+-    int repeats = (FLAGS_bytes + len) / (len + 1);
+-    if (FLAGS_zlib)     Measure(input, len, ZLIB, repeats, 1024<<10);
+-    if (FLAGS_lzo)      Measure(input, len, LZO, repeats, 1024<<10);
+-    if (FLAGS_snappy)    Measure(input, len, SNAPPY, repeats, 4096<<10);
+-
+-    // For block-size based measurements
+-    if (0 && FLAGS_snappy) {
+-      Measure(input, len, SNAPPY, repeats, 8<<10);
+-      Measure(input, len, SNAPPY, repeats, 16<<10);
+-      Measure(input, len, SNAPPY, repeats, 32<<10);
+-      Measure(input, len, SNAPPY, repeats, 64<<10);
+-      Measure(input, len, SNAPPY, repeats, 256<<10);
+-      Measure(input, len, SNAPPY, repeats, 1024<<10);
+-    }
++TEST(Snappy, TestBenchmarkFiles) {
++  for (int i = 0; i < ARRAYSIZE(kTestDataFiles); ++i) {
++    Verify(ReadTestDataFile(kTestDataFiles[i].filename,
++                            kTestDataFiles[i].size_limit));
+   }
+ }
+ 
+-static struct {
+-  const char* label;
+-  const char* filename;
+-  size_t size_limit;
+-} files[] = {
+-  { "html", "html", 0 },
+-  { "urls", "urls.10K", 0 },
+-  { "jpg", "fireworks.jpeg", 0 },
+-  { "jpg_200", "fireworks.jpeg", 200 },
+-  { "pdf", "paper-100k.pdf", 0 },
+-  { "html4", "html_x_4", 0 },
+-  { "txt1", "alice29.txt", 0 },
+-  { "txt2", "asyoulik.txt", 0 },
+-  { "txt3", "lcet10.txt", 0 },
+-  { "txt4", "plrabn12.txt", 0 },
+-  { "pb", "geo.protodata", 0 },
+-  { "gaviota", "kppkn.gtb", 0 },
+-};
+-
+-static void BM_UFlat(int iters, int arg) {
+-  StopBenchmarkTiming();
+-
+-  // Pick file to process based on "arg"
+-  CHECK_GE(arg, 0);
+-  CHECK_LT(arg, ARRAYSIZE(files));
+-  std::string contents =
+-      ReadTestDataFile(files[arg].filename, files[arg].size_limit);
+-
+-  std::string zcontents;
+-  snappy::Compress(contents.data(), contents.size(), &zcontents);
+-  char* dst = new char[contents.size()];
+-
+-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+-                             static_cast<int64>(contents.size()));
+-  SetBenchmarkLabel(files[arg].label);
+-  StartBenchmarkTiming();
+-  while (iters-- > 0) {
+-    CHECK(snappy::RawUncompress(zcontents.data(), zcontents.size(), dst));
+-  }
+-  StopBenchmarkTiming();
+-
+-  delete[] dst;
+-}
+-BENCHMARK(BM_UFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
+-
+-static void BM_UValidate(int iters, int arg) {
+-  StopBenchmarkTiming();
+-
+-  // Pick file to process based on "arg"
+-  CHECK_GE(arg, 0);
+-  CHECK_LT(arg, ARRAYSIZE(files));
+-  std::string contents =
+-      ReadTestDataFile(files[arg].filename, files[arg].size_limit);
+-
+-  std::string zcontents;
+-  snappy::Compress(contents.data(), contents.size(), &zcontents);
+-
+-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+-                             static_cast<int64>(contents.size()));
+-  SetBenchmarkLabel(files[arg].label);
+-  StartBenchmarkTiming();
+-  while (iters-- > 0) {
+-    CHECK(snappy::IsValidCompressedBuffer(zcontents.data(), zcontents.size()));
+-  }
+-  StopBenchmarkTiming();
+-}
+-BENCHMARK(BM_UValidate)->DenseRange(0, 4);
+-
+-static void BM_UIOVec(int iters, int arg) {
+-  StopBenchmarkTiming();
+-
+-  // Pick file to process based on "arg"
+-  CHECK_GE(arg, 0);
+-  CHECK_LT(arg, ARRAYSIZE(files));
+-  std::string contents =
+-      ReadTestDataFile(files[arg].filename, files[arg].size_limit);
+-
+-  std::string zcontents;
+-  snappy::Compress(contents.data(), contents.size(), &zcontents);
+-
+-  // Uncompress into an iovec containing ten entries.
+-  const int kNumEntries = 10;
+-  struct iovec iov[kNumEntries];
+-  char *dst = new char[contents.size()];
+-  int used_so_far = 0;
+-  for (int i = 0; i < kNumEntries; ++i) {
+-    iov[i].iov_base = dst + used_so_far;
+-    if (used_so_far == contents.size()) {
+-      iov[i].iov_len = 0;
+-      continue;
+-    }
+-
+-    if (i == kNumEntries - 1) {
+-      iov[i].iov_len = contents.size() - used_so_far;
+-    } else {
+-      iov[i].iov_len = contents.size() / kNumEntries;
+-    }
+-    used_so_far += iov[i].iov_len;
+-  }
+-
+-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+-                             static_cast<int64>(contents.size()));
+-  SetBenchmarkLabel(files[arg].label);
+-  StartBenchmarkTiming();
+-  while (iters-- > 0) {
+-    CHECK(snappy::RawUncompressToIOVec(zcontents.data(), zcontents.size(), iov,
+-                                       kNumEntries));
+-  }
+-  StopBenchmarkTiming();
+-
+-  delete[] dst;
+-}
+-BENCHMARK(BM_UIOVec)->DenseRange(0, 4);
+-
+-static void BM_UFlatSink(int iters, int arg) {
+-  StopBenchmarkTiming();
+-
+-  // Pick file to process based on "arg"
+-  CHECK_GE(arg, 0);
+-  CHECK_LT(arg, ARRAYSIZE(files));
+-  std::string contents =
+-      ReadTestDataFile(files[arg].filename, files[arg].size_limit);
+-
+-  std::string zcontents;
+-  snappy::Compress(contents.data(), contents.size(), &zcontents);
+-  char* dst = new char[contents.size()];
+-
+-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+-                             static_cast<int64>(contents.size()));
+-  SetBenchmarkLabel(files[arg].label);
+-  StartBenchmarkTiming();
+-  while (iters-- > 0) {
+-    snappy::ByteArraySource source(zcontents.data(), zcontents.size());
+-    snappy::UncheckedByteArraySink sink(dst);
+-    CHECK(snappy::Uncompress(&source, &sink));
+-  }
+-  StopBenchmarkTiming();
+-
+-  std::string s(dst, contents.size());
+-  CHECK_EQ(contents, s);
+-
+-  delete[] dst;
+-}
+-
+-BENCHMARK(BM_UFlatSink)->DenseRange(0, ARRAYSIZE(files) - 1);
+-
+-static void BM_ZFlat(int iters, int arg) {
+-  StopBenchmarkTiming();
+-
+-  // Pick file to process based on "arg"
+-  CHECK_GE(arg, 0);
+-  CHECK_LT(arg, ARRAYSIZE(files));
+-  std::string contents =
+-      ReadTestDataFile(files[arg].filename, files[arg].size_limit);
+-
+-  char* dst = new char[snappy::MaxCompressedLength(contents.size())];
+-
+-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+-                             static_cast<int64>(contents.size()));
+-  StartBenchmarkTiming();
+-
+-  size_t zsize = 0;
+-  while (iters-- > 0) {
+-    snappy::RawCompress(contents.data(), contents.size(), dst, &zsize);
+-  }
+-  StopBenchmarkTiming();
+-  const double compression_ratio =
+-      static_cast<double>(zsize) / std::max<size_t>(1, contents.size());
+-  SetBenchmarkLabel(StrFormat("%s (%.2f %%)", files[arg].label,
+-                              100.0 * compression_ratio));
+-  VLOG(0) << StrFormat("compression for %s: %zd -> %zd bytes",
+-                       files[arg].label, static_cast<int>(contents.size()),
+-                       static_cast<int>(zsize));
+-  delete[] dst;
+-}
+-BENCHMARK(BM_ZFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
+-
+-static void BM_ZFlatAll(int iters, int arg) {
+-  StopBenchmarkTiming();
+-
+-  CHECK_EQ(arg, 0);
+-  const int num_files = ARRAYSIZE(files);
+-
+-  std::vector<std::string> contents(num_files);
+-  std::vector<char*> dst(num_files);
+-
+-  int64 total_contents_size = 0;
+-  for (int i = 0; i < num_files; ++i) {
+-    contents[i] = ReadTestDataFile(files[i].filename, files[i].size_limit);
+-    dst[i] = new char[snappy::MaxCompressedLength(contents[i].size())];
+-    total_contents_size += contents[i].size();
+-  }
+-
+-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) * total_contents_size);
+-  StartBenchmarkTiming();
+-
+-  size_t zsize = 0;
+-  while (iters-- > 0) {
+-    for (int i = 0; i < num_files; ++i) {
+-      snappy::RawCompress(contents[i].data(), contents[i].size(), dst[i],
+-                          &zsize);
+-    }
+-  }
+-  StopBenchmarkTiming();
+-
+-  for (int i = 0; i < num_files; ++i) {
+-    delete[] dst[i];
+-  }
+-  SetBenchmarkLabel(StrFormat("%d files", num_files));
+-}
+-BENCHMARK(BM_ZFlatAll)->DenseRange(0, 0);
+-
+-static void BM_ZFlatIncreasingTableSize(int iters, int arg) {
+-  StopBenchmarkTiming();
+-
+-  CHECK_EQ(arg, 0);
+-  CHECK_GT(ARRAYSIZE(files), 0);
+-  const std::string base_content =
+-      ReadTestDataFile(files[0].filename, files[0].size_limit);
+-
+-  std::vector<std::string> contents;
+-  std::vector<char*> dst;
+-  int64 total_contents_size = 0;
+-  for (int table_bits = kMinHashTableBits; table_bits <= kMaxHashTableBits;
+-       ++table_bits) {
+-    std::string content = base_content;
+-    content.resize(1 << table_bits);
+-    dst.push_back(new char[snappy::MaxCompressedLength(content.size())]);
+-    total_contents_size += content.size();
+-    contents.push_back(std::move(content));
+-  }
+-
+-  size_t zsize = 0;
+-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) * total_contents_size);
+-  StartBenchmarkTiming();
+-  while (iters-- > 0) {
+-    for (int i = 0; i < contents.size(); ++i) {
+-      snappy::RawCompress(contents[i].data(), contents[i].size(), dst[i],
+-                          &zsize);
+-    }
+-  }
+-  StopBenchmarkTiming();
+-
+-  for (int i = 0; i < dst.size(); ++i) {
+-    delete[] dst[i];
+-  }
+-  SetBenchmarkLabel(StrFormat("%zd tables", contents.size()));
+-}
+-BENCHMARK(BM_ZFlatIncreasingTableSize)->DenseRange(0, 0);
++}  // namespace
+ 
+ }  // namespace snappy
+-
+-int main(int argc, char** argv) {
+-  InitGoogle(argv[0], &argc, &argv, true);
+-  RunSpecifiedBenchmarks();
+-
+-  if (argc >= 2) {
+-    for (int arg = 1; arg < argc; arg++) {
+-      if (FLAGS_write_compressed) {
+-        snappy::CompressFile(argv[arg]);
+-      } else if (FLAGS_write_uncompressed) {
+-        snappy::UncompressFile(argv[arg]);
+-      } else {
+-        snappy::MeasureFile(argv[arg]);
+-      }
+-    }
+-    return 0;
+-  }
+-
+-  return RUN_ALL_TESTS();
+-}
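For reference, the VerifyString/VerifyStringSink/VerifyIOVec helpers kept by this patch all boil down to the same public snappy round trip. A minimal stand-alone sketch using only the calls visible in the hunks above (an illustration, not part of the patch; it assumes snappy's public snappy.h header):

#include <cassert>
#include <string>
#include <snappy.h>

int main() {
  const std::string input = "aaaaaaa" + std::string(256, 'b') + "aaaaa";

  // Compress, then check the invariants the tests above assert.
  std::string compressed;
  const std::size_t written =
      snappy::Compress(input.data(), input.size(), &compressed);
  assert(written == compressed.size());
  assert(compressed.size() <= snappy::MaxCompressedLength(input.size()));
  assert(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));

  // Decompress and verify the round trip.
  std::string uncompressed;
  assert(snappy::Uncompress(compressed.data(), compressed.size(), &uncompressed));
  assert(uncompressed == input);
  return 0;
}

Built with -DNDEBUG the asserts vanish, so a real harness would use the CHECK-style macros the test file itself relies on.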

+ 193 - 0
mozilla-release/patches/TOP-1846703-binutilsfix-11504.patch

@@ -0,0 +1,193 @@
+# HG changeset patch
+# User Paul Adenot <paul@paul.cx>
+# Date 1696236205 0
+# Node ID e024fe4fd62caae07e9415f4fdddba40f04c2aca
+# Parent  dc1c205b3d2ae16feb53411096d297f7eba54134
+Bug 1846703 - Backport a single revision from ffmpeg into our vendored copy, to fix the build with recent binutils. r=glandium, a=RyanVM
+
+Because this library is a rather complex, manually trimmed subset of ffmpeg,
+this doesn't use updatebot. We're currently investigating moving to it to at
+least reduce some of the manual work needed, but that hasn't happened yet. In
+lieu of adding the new patch to a yaml file, I've modified the Mozilla-specific README.
+
+Differential Revision: https://phabricator.services.mozilla.com/D189751
+
+diff --git a/media/ffvpx/1846703.patch b/media/ffvpx/1846703.patch
+new file mode 100644
+--- /dev/null
++++ b/media/ffvpx/1846703.patch
+@@ -0,0 +1,76 @@
++From effadce6c756247ea8bae32dc13bb3e6f464f0eb Mon Sep 17 00:00:00 2001
++From: =?utf8?q?R=C3=A9mi=20Denis-Courmont?= <remi@remlab.net>
++Date: Sun, 16 Jul 2023 18:18:02 +0300
++Subject: [PATCH] avcodec/x86/mathops: clip constants used with shift
++ instructions within inline assembly
++
++Fixes assembling with binutil as >= 2.41
++
++Signed-off-by: James Almer <jamrial@gmail.com>
++---
++ libavcodec/x86/mathops.h | 26 +++++++++++++++++++++++---
++ 1 file changed, 23 insertions(+), 3 deletions(-)
++
++diff --git a/libavcodec/x86/mathops.h b/libavcodec/x86/mathops.h
++index 6298f5ed19..ca7e2dffc1 100644
++--- a/libavcodec/x86/mathops.h
+++++ b/libavcodec/x86/mathops.h
++@@ -35,12 +35,20 @@
++ static av_always_inline av_const int MULL(int a, int b, unsigned shift)
++ {
++     int rt, dummy;
+++    if (__builtin_constant_p(shift))
++     __asm__ (
++         "imull %3               \n\t"
++         "shrdl %4, %%edx, %%eax \n\t"
++         :"=a"(rt), "=d"(dummy)
++-        :"a"(a), "rm"(b), "ci"((uint8_t)shift)
+++        :"a"(a), "rm"(b), "i"(shift & 0x1F)
++     );
+++    else
+++        __asm__ (
+++            "imull %3               \n\t"
+++            "shrdl %4, %%edx, %%eax \n\t"
+++            :"=a"(rt), "=d"(dummy)
+++            :"a"(a), "rm"(b), "c"((uint8_t)shift)
+++        );
++     return rt;
++ }
++ 
++@@ -113,19 +121,31 @@ __asm__ volatile(\
++ // avoid +32 for shift optimization (gcc should do that ...)
++ #define NEG_SSR32 NEG_SSR32
++ static inline  int32_t NEG_SSR32( int32_t a, int8_t s){
+++    if (__builtin_constant_p(s))
++     __asm__ ("sarl %1, %0\n\t"
++          : "+r" (a)
++-         : "ic" ((uint8_t)(-s))
+++         : "i" (-s & 0x1F)
++     );
+++    else
+++        __asm__ ("sarl %1, %0\n\t"
+++               : "+r" (a)
+++               : "c" ((uint8_t)(-s))
+++        );
++     return a;
++ }
++ 
++ #define NEG_USR32 NEG_USR32
++ static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
+++    if (__builtin_constant_p(s))
++     __asm__ ("shrl %1, %0\n\t"
++          : "+r" (a)
++-         : "ic" ((uint8_t)(-s))
+++         : "i" (-s & 0x1F)
++     );
+++    else
+++        __asm__ ("shrl %1, %0\n\t"
+++               : "+r" (a)
+++               : "c" ((uint8_t)(-s))
+++        );
++     return a;
++ }
++ 
++-- 
++2.30.2
++
+diff --git a/media/ffvpx/README_MOZILLA b/media/ffvpx/README_MOZILLA
+--- a/media/ffvpx/README_MOZILLA
++++ b/media/ffvpx/README_MOZILLA
+@@ -1,11 +1,18 @@
+ This directory contains files used in gecko builds from FFmpeg
+ (http://ffmpeg.org). The current files are from FFmpeg as of
+ revision n4.0.2-6-g2be51cbeea
++
++This copy has a single patch backported from a later revision to fix the build with recent binutils:
++
++http://git.videolan.org/?p=ffmpeg.git;a=patch;h=effadce6c756247ea8bae32dc13bb3e6f464f0eb
++that is available as `1846703.patch` in this directory. As usual this has other
++changes that don't come from upstream, in `changes.patch`
++
+ All source files match their path from the library's source archive.
+ 
+ Currently, we only use the vp8 and vp9 portion of the library, and only on x86
+ based platforms. If this changes, configuration files will most likely
+ need to be updated.
+ 
+ AVX2 must be disabled on Linux 32 bits due to the use of yasm 1.1 on the build bots.
+ Once yasm is upgraded to 1.2 or later, AVX2 code could be re-enabled.
+diff --git a/media/ffvpx/libavcodec/x86/mathops.h b/media/ffvpx/libavcodec/x86/mathops.h
+--- a/media/ffvpx/libavcodec/x86/mathops.h
++++ b/media/ffvpx/libavcodec/x86/mathops.h
+@@ -30,22 +30,30 @@
+ #if HAVE_INLINE_ASM
+ 
+ #if ARCH_X86_32
+ 
+ #define MULL MULL
+ static av_always_inline av_const int MULL(int a, int b, unsigned shift)
+ {
+     int rt, dummy;
++    if (__builtin_constant_p(shift))
+     __asm__ (
+         "imull %3               \n\t"
+         "shrdl %4, %%edx, %%eax \n\t"
+         :"=a"(rt), "=d"(dummy)
+-        :"a"(a), "rm"(b), "ci"((uint8_t)shift)
++        :"a"(a), "rm"(b), "i"(shift & 0x1F)
+     );
++    else
++        __asm__ (
++            "imull %3               \n\t"
++            "shrdl %4, %%edx, %%eax \n\t"
++            :"=a"(rt), "=d"(dummy)
++            :"a"(a), "rm"(b), "c"((uint8_t)shift)
++        );
+     return rt;
+ }
+ 
+ #define MULH MULH
+ static av_always_inline av_const int MULH(int a, int b)
+ {
+     int rt, dummy;
+     __asm__ (
+@@ -108,26 +116,38 @@ static inline av_const int mid_pred(int 
+     __asm__ ("cdq                    \n\t"      \
+              "xorl %1, %0            \n\t"      \
+              "subl %1, %0            \n\t"      \
+              : "+a"(level), "=&d"(mask))
+ 
+ // avoid +32 for shift optimization (gcc should do that ...)
+ #define NEG_SSR32 NEG_SSR32
+ static inline  int32_t NEG_SSR32( int32_t a, int8_t s){
++    if (__builtin_constant_p(s))
+     __asm__ ("sarl %1, %0\n\t"
+          : "+r" (a)
+-         : "ic" ((uint8_t)(-s))
++         : "i" (-s & 0x1F)
+     );
++    else
++        __asm__ ("sarl %1, %0\n\t"
++               : "+r" (a)
++               : "c" ((uint8_t)(-s))
++        );
+     return a;
+ }
+ 
+ #define NEG_USR32 NEG_USR32
+ static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
++    if (__builtin_constant_p(s))
+     __asm__ ("shrl %1, %0\n\t"
+          : "+r" (a)
+-         : "ic" ((uint8_t)(-s))
++         : "i" (-s & 0x1F)
+     );
++    else
++        __asm__ ("shrl %1, %0\n\t"
++               : "+r" (a)
++               : "c" ((uint8_t)(-s))
++        );
+     return a;
+ }
+ 
+ #endif /* HAVE_INLINE_ASM */
+ #endif /* AVCODEC_X86_MATHOPS_H */
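For reference, the constraint problem this backport works around can be reproduced outside of ffmpeg. A hedged stand-alone sketch (x86/x86-64 GCC or Clang inline asm only, not taken from the patch) of the same clip-the-shift-count idea:

#include <cstdint>
#include <cstdio>

// With the old "ic"/"ci" constraints a constant count outside 0..31, such as
// (uint8_t)(-8) == 248 in NEG_USR32, could reach the assembler as an immediate
// operand, which binutils >= 2.41 no longer accepts.  Masking the count keeps
// any immediate the compiler materialises in range.
static inline uint32_t shr32(uint32_t a, int8_t s) {
  __asm__("shrl %1, %0" : "+r"(a) : "ci"((uint8_t)(s & 0x1F)));
  return a;
}

int main() {
  std::printf("%u\n", (unsigned)shr32(0xF0000000u, 28));  // prints 15
  return 0;
}

The patched functions avoid the runtime mask instead: __builtin_constant_p selects an "i"-constrained asm with the count already clipped when the compiler knows the value, and falls back to the "c" (CL register) form otherwise.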

+ 23 - 0
mozilla-release/patches/series

@@ -8470,6 +8470,29 @@ TOP-NOBUG-revendor-25318.patch
 TOP-NOBUG-backout1440761-25318.patch
 TOP-NOBUG-remove-ftl-25318.patch
 TOP-NOBUG-test-fixes-25318.patch
+1434513-61a1.patch
+1484846-63a1.patch
+1488217-64a1.patch
+1489454-libmar-64a1.patch
+1458129-64a1.patch
+1497937-65a1.patch
+1468539-65a1.patch
+1468544-65a1.patch
+1468542-65a1.patch
+1468556-65a1.patch
+1468552-65a1.patch
+1511181-bspatch-65a1.patch
+1508782-66a1.patch
+1514532-1-66a1.patch
+1514532-2-66a1.patch
+1540142-68a1.patch
+1596660-PARTIAL-removeonly-73a1.patch
 1583854-75a1.patch
+1648336-79a1.patch
+1340901-87a1.patch
+1691957-87a1.patch
+1743947-104a1.patch
+1648336-120a1.patch
+TOP-1846703-binutilsfix-11504.patch
 1835524-sym-upload-fix-v1_2-25318.patch
 1835524-bugsplat-mr-v1_6-25318.patch