hash stuff

Frank-Rainer Grahl, 7 months ago
commit cd003fe9ac
24 changed files with 8197 additions and 3 deletions
  1. +227 -0   frg/work-js/mozilla-release/patches/1420975-60a1.patch
  2. +95 -0    frg/work-js/mozilla-release/patches/1442765-1-63a1.patch
  3. +769 -0   frg/work-js/mozilla-release/patches/1442765-2-63a1.patch
  4. +28 -0    frg/work-js/mozilla-release/patches/1452202-fix-61a1.patch
  5. +63 -0    frg/work-js/mozilla-release/patches/1452288-61a1.patch
  6. +44 -0    frg/work-js/mozilla-release/patches/1468514-63a1.patch
  7. +247 -0   frg/work-js/mozilla-release/patches/1475461-1-63a1.patch
  8. +418 -0   frg/work-js/mozilla-release/patches/1475461-2-63a1.patch
  9. +74 -0    frg/work-js/mozilla-release/patches/1475980-63a1.patch
  10. +90 -0   frg/work-js/mozilla-release/patches/1477626-1-63a1.patch
  11. +91 -0   frg/work-js/mozilla-release/patches/1477626-2-63a1.patch
  12. +818 -0  frg/work-js/mozilla-release/patches/1477626-3-63a1.patch
  13. +263 -0  frg/work-js/mozilla-release/patches/1477626-4-63a1.patch
  14. +72 -0   frg/work-js/mozilla-release/patches/1477626-5-63a1.patch
  15. +3506 -0 frg/work-js/mozilla-release/patches/1477626-6-63a1.patch
  16. +97 -0   frg/work-js/mozilla-release/patches/1477626-7-63a1.patch
  17. +284 -0  frg/work-js/mozilla-release/patches/1477626-8-63a1.patch
  18. +58 -0   frg/work-js/mozilla-release/patches/1477632-63a1.patch
  19. +87 -0   frg/work-js/mozilla-release/patches/L-1477626-9-63a1.patch
  20. +259 -0  frg/work-js/mozilla-release/patches/mozilla-esr68-push_449668.patch
  21. +424 -0  frg/work-js/mozilla-release/patches/mozilla-esr68-push_449669.patch
  22. +142 -0  frg/work-js/mozilla-release/patches/mozilla-esr68-push_449670.patch
  23. +23 -3   frg/work-js/mozilla-release/patches/series
  24. +18 -0   frg/work-js/mozilla-release/patches/series-test

+ 227 - 0
frg/work-js/mozilla-release/patches/1420975-60a1.patch

@@ -0,0 +1,227 @@
+# HG changeset patch
+# User Andrew McCreight <continuation@gmail.com>
+# Date 1515621765 28800
+# Node ID 5c7b21fa7315ca753684881b3dc3cee5ec235ca7
+# Parent  e16fc8abd345b6b4afb9c3e5541e7e684894b7c0
+Bug 1420975 - Add an environment variable to record JS stack for leaks. r=froydnj,mrbkap
+
+This patch adds a new environment variable, XPCOM_MEM_LOG_JS_STACK, that
+changes XPCOM leak checking to record a JS stack for all objects, in
+addition to a C++ stack. This is useful when a C++ object is being
+leaked because of JS. The JS stack will be printed when the object leaks,
+provided the variable is used in combination with XPCOM_MEM_BLOAT_LOG=1
+and XPCOM_MEM_LOG_CLASSES=nsFoo, where nsFoo is the class of interest.
+
+This patch moves a few XPConnect functions for recording the stack
+into xpcpublic.h so they can be called from nsTraceRefcnt.cpp.
+
+MozReview-Commit-ID: FX2QVCSXz4f
+
+diff --git a/js/xpconnect/src/xpcprivate.h b/js/xpconnect/src/xpcprivate.h
+--- a/js/xpconnect/src/xpcprivate.h
++++ b/js/xpconnect/src/xpcprivate.h
+@@ -2348,28 +2348,16 @@ extern JSObject*
+ xpc_NewIDObject(JSContext* cx, JS::HandleObject jsobj, const nsID& aID);
+ 
+ extern const nsID*
+ xpc_JSObjectToID(JSContext* cx, JSObject* obj);
+ 
+ extern bool
+ xpc_JSObjectIsID(JSContext* cx, JSObject* obj);
+ 
+-/***************************************************************************/
+-// in XPCDebug.cpp
+-
+-extern bool
+-xpc_DumpJSStack(bool showArgs, bool showLocals, bool showThisProps);
+-
+-// Return a newly-allocated string containing a representation of the
+-// current JS stack.
+-extern JS::UniqueChars
+-xpc_PrintJSStack(JSContext* cx, bool showArgs, bool showLocals,
+-                 bool showThisProps);
+-
+ /******************************************************************************
+  * Handles pre/post script processing.
+  */
+ class MOZ_RAII AutoScriptEvaluate
+ {
+ public:
+     /**
+      * Saves the JSContext as well as initializing our state
+diff --git a/js/xpconnect/src/xpcpublic.h b/js/xpconnect/src/xpcpublic.h
+--- a/js/xpconnect/src/xpcpublic.h
++++ b/js/xpconnect/src/xpcpublic.h
+@@ -249,16 +249,27 @@ xpc_MarkInCCGeneration(nsISupports* aVar
+ 
+ // If aWrappedJS is a JS wrapper, unmark its JSObject.
+ extern void
+ xpc_TryUnmarkWrappedGrayObject(nsISupports* aWrappedJS);
+ 
+ extern void
+ xpc_UnmarkSkippableJSHolders();
+ 
++// Defined in XPCDebug.cpp.
++extern bool
++xpc_DumpJSStack(bool showArgs, bool showLocals, bool showThisProps);
++
++// Return a newly-allocated string containing a representation of the
++// current JS stack. Defined in XPCDebug.cpp.
++extern JS::UniqueChars
++xpc_PrintJSStack(JSContext* cx, bool showArgs, bool showLocals,
++                 bool showThisProps);
++
++
+ // readable string conversions, static methods and members only
+ class XPCStringConvert
+ {
+ public:
+ 
+     // If the string shares the readable's buffer, that buffer will
+     // get assigned to *sharedBuffer.  Otherwise null will be
+     // assigned.
+diff --git a/xpcom/base/nsTraceRefcnt.cpp b/xpcom/base/nsTraceRefcnt.cpp
+--- a/xpcom/base/nsTraceRefcnt.cpp
++++ b/xpcom/base/nsTraceRefcnt.cpp
+@@ -29,16 +29,17 @@
+ #else
+ #include <unistd.h>
+ #endif
+ 
+ #include "mozilla/Atomics.h"
+ #include "mozilla/AutoRestore.h"
+ #include "mozilla/BlockingResourceBase.h"
+ #include "mozilla/PoisonIOInterposer.h"
++#include "mozilla/UniquePtr.h"
+ 
+ #include <string>
+ #include <vector>
+ 
+ #ifdef HAVE_DLOPEN
+ #include <dlfcn.h>
+ #endif
+ 
+@@ -78,16 +79,17 @@ struct MOZ_STACK_CLASS AutoTraceLogLock 
+ };
+ 
+ static PLHashTable* gBloatView;
+ static PLHashTable* gTypesToLog;
+ static PLHashTable* gObjectsToLog;
+ static PLHashTable* gSerialNumbers;
+ static intptr_t gNextSerialNumber;
+ static bool gDumpedStatistics = false;
++static bool gLogJSStacks = false;
+ 
+ // By default, debug builds only do bloat logging. Bloat logging
+ // only tries to record when an object is created or destroyed, so we
+ // optimize the common case in NS_LogAddRef and NS_LogRelease where
+ // only bloat logging is enabled and no logging needs to be done.
+ enum LoggingType
+ {
+   NoLogging,
+@@ -128,16 +130,37 @@ struct SerialNumberRecord
+ 
+   intptr_t serialNumber;
+   int32_t refCount;
+   int32_t COMPtrCount;
+   // We use std:: classes here rather than the XPCOM equivalents because the
+   // XPCOM equivalents do leak-checking, and if you try to leak-check while
+   // leak-checking, you're gonna have a bad time.
+   std::vector<void*> allocationStack;
++  mozilla::UniquePtr<char[]> jsStack;
++
++  void SaveJSStack() {
++    // If this thread isn't running JS, there's nothing to do.
++    if (!CycleCollectedJSContext::Get()) {
++      return;
++    }
++
++    JSContext* cx = nsContentUtils::GetCurrentJSContextForThread();
++    if (!cx) {
++      return;
++    }
++
++    JS::UniqueChars chars = xpc_PrintJSStack(cx,
++                                             /*showArgs=*/ false,
++                                             /*showLocals=*/ false,
++                                             /*showThisProps=*/ false);
++    size_t len = strlen(chars.get());
++    jsStack = MakeUnique<char[]>(len + 1);
++    memcpy(jsStack.get(), chars.get(), len + 1);
++  }
+ };
+ 
+ struct nsTraceRefcntStats
+ {
+   uint64_t mCreates;
+   uint64_t mDestroys;
+ 
+   bool HaveLeaks() const
+@@ -467,16 +490,25 @@ DumpSerialNumbers(PLHashEntry* aHashEntr
+     for (size_t i = 0, length = record->allocationStack.size();
+          i < length;
+          ++i) {
+       gCodeAddressService->GetLocation(i, record->allocationStack[i],
+                                        buf, bufLen);
+       fprintf(outputFile, "%s\n", buf);
+     }
+   }
++
++  if (gLogJSStacks) {
++    if (record->jsStack) {
++      fprintf(outputFile, "JS allocation stack:\n%s\n", record->jsStack.get());
++    } else {
++      fprintf(outputFile, "There is no JS context on the stack.\n");
++    }
++  }
++
+   return HT_ENUMERATE_NEXT;
+ }
+ 
+ 
+ template<>
+ class nsDefaultComparator<BloatEntry*, BloatEntry*>
+ {
+ public:
+@@ -582,16 +614,19 @@ GetSerialNumber(void* aPtr, bool aCreate
+   if (!aCreate) {
+     return 0;
+   }
+ 
+   SerialNumberRecord* record = new SerialNumberRecord();
+   WalkTheStackSavingLocations(record->allocationStack);
+   PL_HashTableRawAdd(gSerialNumbers, hep, HashNumber(aPtr),
+                      aPtr, static_cast<void*>(record));
++  if (gLogJSStacks) {
++    record->SaveJSStack();
++  }
+   return gNextSerialNumber;
+ }
+ 
+ static int32_t*
+ GetRefCount(void* aPtr)
+ {
+   PLHashEntry** hep = PL_HashTableRawLookup(gSerialNumbers,
+                                             HashNumber(aPtr),
+@@ -824,16 +859,20 @@ InitTraceLog()
+         }
+         *cm = ',';
+         cp = cm + 1;
+       }
+       fprintf(stdout, "\n");
+     }
+   }
+ 
++  if (getenv("XPCOM_MEM_LOG_JS_STACK")) {
++    fprintf(stdout, "### XPCOM_MEM_LOG_JS_STACK defined\n");
++    gLogJSStacks = true;
++  }
+ 
+   if (gBloatLog) {
+     gLogging = OnlyBloatLogging;
+   }
+ 
+   if (gRefcntsLog || gAllocLog || gCOMPtrLog) {
+     gLogging = FullLogging;
+   }
+
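
A rough illustration of how the three variables described in the commit message combine. This is a hedged sketch, not part of the patch: a hypothetical test-harness fragment using POSIX setenv(); in practice the variables are simply exported in the shell before launching a debug build, and nsFoo is the commit message's placeholder for the class of interest.

    #include <stdlib.h>

    int main() {
      // Hypothetical harness: enable XPCOM leak logging with JS stacks for nsFoo.
      setenv("XPCOM_MEM_BLOAT_LOG", "1", 1);        // bloat/leak logging on
      setenv("XPCOM_MEM_LOG_CLASSES", "nsFoo", 1);  // serial numbers for nsFoo only
      setenv("XPCOM_MEM_LOG_JS_STACK", "1", 1);     // new in this patch: save JS stacks
      // ...exec the debug build here; a leaked nsFoo is then reported with
      // "JS allocation stack:" whenever a JS context was on the stack.
      return 0;
    }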

+ 95 - 0
frg/work-js/mozilla-release/patches/1442765-1-63a1.patch

@@ -0,0 +1,95 @@
+# HG changeset patch
+# User Eric Rahm <erahm@mozilla.com>
+# Date 1521593536 25200
+# Node ID b60b9d07842ed57911c896477b37defd9c00fb8b
+# Parent  b9193e72be193ca5d706a16d8c7ab314b458cf9f
+Bug 1442765 - Part 1: Add intptr_t hashkey type. r=froydnj
+
+This adds a hashkey that operates on an intptr_t.
+
+diff --git a/xpcom/ds/nsHashKeys.h b/xpcom/ds/nsHashKeys.h
+--- a/xpcom/ds/nsHashKeys.h
++++ b/xpcom/ds/nsHashKeys.h
+@@ -15,16 +15,17 @@
+ #include "PLDHashTable.h"
+ #include <new>
+ 
+ #include "nsString.h"
+ #include "nsCRTGlue.h"
+ #include "nsUnicharUtils.h"
+ #include "nsPointerHashKeys.h"
+ 
++#include <stdint.h>
+ #include <stdlib.h>
+ #include <string.h>
+ 
+ #include "mozilla/HashFunctions.h"
+ #include "mozilla/Move.h"
+ 
+ namespace mozilla {
+ 
+@@ -49,16 +50,17 @@ HashString(const nsACString& aStr)
+  * classes follows the nsTHashtable::EntryType specification
+  *
+  * Lightweight keytypes provided here:
+  * nsStringHashKey
+  * nsCStringHashKey
+  * nsUint32HashKey
+  * nsUint64HashKey
+  * nsFloatHashKey
++ * IntPtrHashKey
+  * nsPtrHashKey
+  * nsClearingPtrHashKey
+  * nsVoidPtrHashKey
+  * nsClearingVoidPtrHashKey
+  * nsISupportsHashKey
+  * nsIDHashKey
+  * nsDepCharHashKey
+  * nsCharPtrHashKey
+@@ -280,16 +282,45 @@ public:
+   }
+   enum { ALLOW_MEMMOVE = true };
+ 
+ private:
+   const float mValue;
+ };
+ 
+ /**
++ * hashkey wrapper using intptr_t KeyType
++ *
++ * @see nsTHashtable::EntryType for specification
++ */
++class IntPtrHashKey : public PLDHashEntryHdr
++{
++public:
++  typedef const intptr_t& KeyType;
++  typedef const intptr_t* KeyTypePointer;
++
++  explicit IntPtrHashKey(KeyTypePointer aKey) : mValue(*aKey) {}
++  IntPtrHashKey(const IntPtrHashKey& aToCopy) : mValue(aToCopy.mValue) {}
++  ~IntPtrHashKey() {}
++
++  KeyType GetKey() const { return mValue; }
++  bool KeyEquals(KeyTypePointer aKey) const { return *aKey == mValue; }
++
++  static KeyTypePointer KeyToPointer(KeyType aKey) { return &aKey; }
++  static PLDHashNumber HashKey(KeyTypePointer aKey)
++  {
++    return mozilla::HashGeneric(*aKey);
++  }
++  enum { ALLOW_MEMMOVE = true };
++
++private:
++  const intptr_t mValue;
++};
++
++/**
+  * hashkey wrapper using nsISupports* KeyType
+  *
+  * @see nsTHashtable::EntryType for specification
+  */
+ class nsISupportsHashKey : public PLDHashEntryHdr
+ {
+ public:
+   typedef nsISupports* KeyType;
+
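
A minimal usage sketch for the new key type, assuming the nsTHashtable API that part 2 of this bug relies on (PutEntry and Contains, both visible in the next patch):

    #include "nsTHashtable.h"
    #include "nsHashKeys.h"

    // A set keyed on intptr_t, the same shape part 2 uses for gObjectsToLog.
    static bool Example() {
      nsTHashtable<IntPtrHashKey> objectsToLog(256);
      objectsToLog.PutEntry(intptr_t(42));         // record one serial number
      return objectsToLog.Contains(intptr_t(42));  // true
    }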

+ 769 - 0
frg/work-js/mozilla-release/patches/1442765-2-63a1.patch

@@ -0,0 +1,769 @@
+# HG changeset patch
+# User Eric Rahm <erahm@mozilla.com>
+# Date 1520297400 28800
+# Node ID c611225375ec87c61485093bda701d1b3f643730
+# Parent  2c4513379dba30de2b518f66e608630a9a1fb087
+Bug 1442765 - Part 2: Switch nsTraceRefcnt's hashtables to use xpcom hashtables. r=mccr8
+
+diff --git a/xpcom/base/nsTraceRefcnt.cpp b/xpcom/base/nsTraceRefcnt.cpp
+--- a/xpcom/base/nsTraceRefcnt.cpp
++++ b/xpcom/base/nsTraceRefcnt.cpp
+@@ -4,17 +4,19 @@
+  * License, v. 2.0. If a copy of the MPL was not distributed with this
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+ #include "nsTraceRefcnt.h"
+ #include "mozilla/IntegerPrintfMacros.h"
+ #include "mozilla/StaticPtr.h"
+ #include "nsXPCOMPrivate.h"
+ #include "nscore.h"
++#include "nsClassHashtable.h"
+ #include "nsISupports.h"
++#include "nsHashKeys.h"
+ #include "nsTArray.h"
+ #include "nsTHashtable.h"
+ #include "prenv.h"
+ #include "plstr.h"
+ #include "prlink.h"
+ #include "nsCRT.h"
+ #include <math.h>
+ #include "nsHashKeys.h"
+@@ -50,18 +52,16 @@
+ 
+ // dynamic_cast<void*> is not supported on Windows without RTTI.
+ #ifndef _WIN32
+ #define HAVE_CPP_DYNAMIC_CAST_TO_VOID_PTR
+ #endif
+ 
+ ////////////////////////////////////////////////////////////////////////////////
+ 
+-#include "plhash.h"
+-
+ #include "prthread.h"
+ 
+ // We use a spin lock instead of a regular mutex because this lock is usually
+ // only held for a very short time, and gets grabbed at a very high frequency
+ // (~100000 times per second). On Mac, the overhead of using a regular lock
+ // is very high, see bug 1137963.
+ static mozilla::Atomic<uintptr_t, mozilla::ReleaseAcquire> gTraceLogLocked;
+ 
+@@ -78,20 +78,29 @@ struct MOZ_STACK_CLASS AutoTraceLogLock 
+       while (!gTraceLogLocked.compareExchange(0, currentThread)) {
+         PR_Sleep(PR_INTERVAL_NO_WAIT); /* yield */
+       }
+     }
+   }
+   ~AutoTraceLogLock() { if (doRelease) gTraceLogLocked = 0; }
+ };
+ 
+-static PLHashTable* gBloatView;
+-static PLHashTable* gTypesToLog;
+-static PLHashTable* gObjectsToLog;
+-static PLHashTable* gSerialNumbers;
++class BloatEntry;
++struct SerialNumberRecord;
++
++using BloatHash = nsClassHashtable<nsDepCharHashKey, BloatEntry>;
++using CharPtrSet = nsTHashtable<nsCharPtrHashKey>;
++using IntPtrSet = nsTHashtable<IntPtrHashKey>;
++using SerialHash = nsClassHashtable<nsVoidPtrHashKey, SerialNumberRecord>;
++
++static StaticAutoPtr<BloatHash> gBloatView;
++static StaticAutoPtr<CharPtrSet> gTypesToLog;
++static StaticAutoPtr<IntPtrSet> gObjectsToLog;
++static StaticAutoPtr<SerialHash> gSerialNumbers;
++
+ static intptr_t gNextSerialNumber;
+ static bool gDumpedStatistics = false;
+ static bool gLogJSStacks = false;
+ 
+ // By default, debug builds only do bloat logging. Bloat logging
+ // only tries to record when an object is created or destroyed, so we
+ // optimize the common case in NS_LogAddRef and NS_LogRelease where
+ // only bloat logging is enabled and no logging needs to be done.
+@@ -203,66 +212,16 @@ AssertActivityIsLegal()
+ #  define ASSERT_ACTIVITY_IS_LEGAL              \
+   do {                                          \
+     AssertActivityIsLegal();                    \
+   } while(0)
+ #else
+ #  define ASSERT_ACTIVITY_IS_LEGAL do { } while(0)
+ #endif // DEBUG
+ 
+-// These functions are copied from nsprpub/lib/ds/plhash.c, with changes
+-// to the functions not called Default* to free the SerialNumberRecord or
+-// the BloatEntry.
+-
+-static void*
+-DefaultAllocTable(void* aPool, size_t aSize)
+-{
+-  return malloc(aSize);
+-}
+-
+-static void
+-DefaultFreeTable(void* aPool, void* aItem)
+-{
+-  free(aItem);
+-}
+-
+-static PLHashEntry*
+-DefaultAllocEntry(void* aPool, const void* aKey)
+-{
+-  return (PLHashEntry*) malloc(sizeof(PLHashEntry));
+-}
+-
+-static void
+-SerialNumberFreeEntry(void* aPool, PLHashEntry* aHashEntry, unsigned aFlag)
+-{
+-  if (aFlag == HT_FREE_ENTRY) {
+-    delete static_cast<SerialNumberRecord*>(aHashEntry->value);
+-    free(aHashEntry);
+-  }
+-}
+-
+-static void
+-TypesToLogFreeEntry(void* aPool, PLHashEntry* aHashEntry, unsigned aFlag)
+-{
+-  if (aFlag == HT_FREE_ENTRY) {
+-    free(const_cast<char*>(static_cast<const char*>(aHashEntry->key)));
+-    free(aHashEntry);
+-  }
+-}
+-
+-static const PLHashAllocOps serialNumberHashAllocOps = {
+-  DefaultAllocTable, DefaultFreeTable,
+-  DefaultAllocEntry, SerialNumberFreeEntry
+-};
+-
+-static const PLHashAllocOps typesToLogHashAllocOps = {
+-  DefaultAllocTable, DefaultFreeTable,
+-  DefaultAllocEntry, TypesToLogFreeEntry
+-};
+-
+ ////////////////////////////////////////////////////////////////////////////////
+ 
+ class CodeAddressServiceStringTable final
+ {
+ public:
+   CodeAddressServiceStringTable() : mSet(32) {}
+ 
+   const char* Intern(const char* aString)
+@@ -335,34 +294,16 @@ public:
+     mStats.mCreates++;
+   }
+ 
+   void Dtor()
+   {
+     mStats.mDestroys++;
+   }
+ 
+-  static int DumpEntry(PLHashEntry* aHashEntry, int aIndex, void* aArg)
+-  {
+-    BloatEntry* entry = (BloatEntry*)aHashEntry->value;
+-    if (entry) {
+-      static_cast<nsTArray<BloatEntry*>*>(aArg)->AppendElement(entry);
+-    }
+-    return HT_ENUMERATE_NEXT;
+-  }
+-
+-  static int TotalEntries(PLHashEntry* aHashEntry, int aIndex, void* aArg)
+-  {
+-    BloatEntry* entry = (BloatEntry*)aHashEntry->value;
+-    if (entry && nsCRT::strcmp(entry->mClassName, "TOTAL") != 0) {
+-      entry->Total((BloatEntry*)aArg);
+-    }
+-    return HT_ENUMERATE_NEXT;
+-  }
+-
+   void Total(BloatEntry* aTotal)
+   {
+     aTotal->mStats.mCreates += mStats.mCreates;
+     aTotal->mStats.mDestroys += mStats.mDestroys;
+     aTotal->mClassSize += mClassSize * mStats.mCreates;    // adjust for average in DumpTotal
+     aTotal->mTotalLeaked += mClassSize * mStats.NumLeaked();
+   }
+ 
+@@ -409,88 +350,60 @@ public:
+ protected:
+   char* mClassName;
+   double mClassSize; // This is stored as a double because of the way we compute the avg class size for total bloat.
+   int64_t mTotalLeaked; // Used only for TOTAL entry.
+   nsTraceRefcntStats mStats;
+ };
+ 
+ static void
+-BloatViewFreeEntry(void* aPool, PLHashEntry* aHashEntry, unsigned aFlag)
+-{
+-  if (aFlag == HT_FREE_ENTRY) {
+-    BloatEntry* entry = static_cast<BloatEntry*>(aHashEntry->value);
+-    delete entry;
+-    free(aHashEntry);
+-  }
+-}
+-
+-const static PLHashAllocOps bloatViewHashAllocOps = {
+-  DefaultAllocTable, DefaultFreeTable,
+-  DefaultAllocEntry, BloatViewFreeEntry
+-};
+-
+-static void
+ RecreateBloatView()
+ {
+-  gBloatView = PL_NewHashTable(256,
+-                               PL_HashString,
+-                               PL_CompareStrings,
+-                               PL_CompareValues,
+-                               &bloatViewHashAllocOps, nullptr);
++  gBloatView = new BloatHash(256);
+ }
+ 
+ static BloatEntry*
+ GetBloatEntry(const char* aTypeName, uint32_t aInstanceSize)
+ {
+   if (!gBloatView) {
+     RecreateBloatView();
+   }
+-  BloatEntry* entry = nullptr;
+-  if (gBloatView) {
+-    entry = (BloatEntry*)PL_HashTableLookup(gBloatView, aTypeName);
+-    if (!entry && aInstanceSize > 0) {
+-
+-      entry = new BloatEntry(aTypeName, aInstanceSize);
+-      PLHashEntry* e = PL_HashTableAdd(gBloatView, aTypeName, entry);
+-      if (!e) {
+-        delete entry;
+-        entry = nullptr;
+-      }
+-    } else {
+-      MOZ_ASSERT(aInstanceSize == 0 || entry->GetClassSize() == aInstanceSize,
+-                 "Mismatched sizes were recorded in the memory leak logging table. "
+-                 "The usual cause of this is having a templated class that uses "
+-                 "MOZ_COUNT_{C,D}TOR in the constructor or destructor, respectively. "
+-                 "As a workaround, the MOZ_COUNT_{C,D}TOR calls can be moved to a "
+-                 "non-templated base class. Another possible cause is a runnable with "
+-                 "an mName that matches another refcounted class.");
+-    }
++  BloatEntry* entry = gBloatView->Get(aTypeName);
++  if (!entry && aInstanceSize > 0) {
++    entry = new BloatEntry(aTypeName, aInstanceSize);
++    gBloatView->Put(aTypeName, entry);
++  } else {
++    MOZ_ASSERT(aInstanceSize == 0 || entry->GetClassSize() == aInstanceSize,
++	       "Mismatched sizes were recorded in the memory leak logging table. "
++	       "The usual cause of this is having a templated class that uses "
++	       "MOZ_COUNT_{C,D}TOR in the constructor or destructor, respectively. "
++	       "As a workaround, the MOZ_COUNT_{C,D}TOR calls can be moved to a "
++	       "non-templated base class. Another possible cause is a runnable with "
++	       "an mName that matches another refcounted class.");
+   }
+   return entry;
+ }
+ 
+-static int
+-DumpSerialNumbers(PLHashEntry* aHashEntry, int aIndex, void* aClosure)
++static void
++DumpSerialNumbers(const SerialHash::Iterator& aHashEntry, FILE* aFd)
+ {
+-  SerialNumberRecord* record =
+-    static_cast<SerialNumberRecord*>(aHashEntry->value);
+-  auto* outputFile = static_cast<FILE*>(aClosure);
++  SerialNumberRecord* record = aHashEntry.Data();
++  auto* outputFile = aFd;
+ #ifdef HAVE_CPP_DYNAMIC_CAST_TO_VOID_PTR
+   fprintf(outputFile, "%" PRIdPTR
+           " @%p (%d references; %d from COMPtrs)\n",
+           record->serialNumber,
+-          aHashEntry->key,
++          aHashEntry.Key(),
+           record->refCount,
+           record->COMPtrCount);
+ #else
+   fprintf(outputFile, "%" PRIdPTR
+           " @%p (%d references)\n",
+           record->serialNumber,
+-          aHashEntry->key,
++          aHashEntry.Key(),
+           record->refCount);
+ #endif
+   if (!record->allocationStack.empty()) {
+     static const size_t bufLen = 1024;
+     char buf[bufLen];
+     fprintf(outputFile, "allocation stack:\n");
+     for (size_t i = 0, length = record->allocationStack.size();
+          i < length;
+@@ -503,18 +416,16 @@ DumpSerialNumbers(PLHashEntry* aHashEntr
+ 
+   if (gLogJSStacks) {
+     if (record->jsStack) {
+       fprintf(outputFile, "JS allocation stack:\n%s\n", record->jsStack.get());
+     } else {
+       fprintf(outputFile, "There is no JS context on the stack.\n");
+     }
+   }
+-
+-  return HT_ENUMERATE_NEXT;
+ }
+ 
+ 
+ template<>
+ class nsDefaultComparator<BloatEntry*, BloatEntry*>
+ {
+ public:
+   bool Equals(BloatEntry* const& aEntry1, BloatEntry* const& aEntry2) const
+@@ -542,27 +453,36 @@ nsTraceRefcnt::DumpStatistics()
+              "bogus positive or negative leaks being reported");
+   gDumpedStatistics = true;
+ 
+   // Don't try to log while we hold the lock, we'd deadlock.
+   AutoRestore<LoggingType> saveLogging(gLogging);
+   gLogging = NoLogging;
+ 
+   BloatEntry total("TOTAL", 0);
+-  PL_HashTableEnumerateEntries(gBloatView, BloatEntry::TotalEntries, &total);
++  for (auto iter = gBloatView->Iter(); !iter.Done(); iter.Next()) {
++    BloatEntry* entry = iter.Data();
++    if (nsCRT::strcmp(entry->GetClassName(), "TOTAL") != 0) {
++      entry->Total(&total);
++    }
++  }
++
+   const char* msg;
+   if (gLogLeaksOnly) {
+     msg = "ALL (cumulative) LEAK STATISTICS";
+   } else {
+     msg = "ALL (cumulative) LEAK AND BLOAT STATISTICS";
+   }
+   const bool leaked = total.PrintDumpHeader(gBloatLog, msg);
+ 
+   nsTArray<BloatEntry*> entries;
+-  PL_HashTableEnumerateEntries(gBloatView, BloatEntry::DumpEntry, &entries);
++  for (auto iter = gBloatView->Iter(); !iter.Done(); iter.Next()) {
++    entries.AppendElement(iter.Data());
++  }
++
+   const uint32_t count = entries.Length();
+ 
+   if (!gLogLeaksOnly || leaked) {
+     // Sort the entries alphabetically by classname.
+     entries.Sort();
+ 
+     for (uint32_t i = 0; i < count; ++i) {
+       BloatEntry* entry = entries[i];
+@@ -571,107 +491,62 @@ nsTraceRefcnt::DumpStatistics()
+ 
+     fprintf(gBloatLog, "\n");
+   }
+ 
+   fprintf(gBloatLog, "nsTraceRefcnt::DumpStatistics: %d entries\n", count);
+ 
+   if (gSerialNumbers) {
+     fprintf(gBloatLog, "\nSerial Numbers of Leaked Objects:\n");
+-    PL_HashTableEnumerateEntries(gSerialNumbers, DumpSerialNumbers, gBloatLog);
++    for (auto iter = gSerialNumbers->Iter(); !iter.Done(); iter.Next()) {
++      DumpSerialNumbers(iter, gBloatLog);
++    }
+   }
+ 
+   return NS_OK;
+ }
+ 
+ void
+ nsTraceRefcnt::ResetStatistics()
+ {
+   AutoTraceLogLock lock;
+-  if (gBloatView) {
+-    PL_HashTableDestroy(gBloatView);
+-    gBloatView = nullptr;
+-  }
+-}
+-
+-static bool
+-LogThisType(const char* aTypeName)
+-{
+-  void* he = PL_HashTableLookup(gTypesToLog, aTypeName);
+-  return he != nullptr;
+-}
+-
+-static PLHashNumber
+-HashNumber(const void* aKey)
+-{
+-  return PLHashNumber(NS_PTR_TO_INT32(aKey));
++  gBloatView = nullptr;
+ }
+ 
+ static intptr_t
+ GetSerialNumber(void* aPtr, bool aCreate)
+ {
+-  PLHashEntry** hep = PL_HashTableRawLookup(gSerialNumbers,
+-                                            HashNumber(aPtr),
+-                                            aPtr);
+-  if (hep && *hep) {
+-    MOZ_RELEASE_ASSERT(!aCreate, "If an object already has a serial number, we should be destroying it.");
+-    return static_cast<SerialNumberRecord*>((*hep)->value)->serialNumber;
++  if (!aCreate) {
++    auto record = gSerialNumbers->Get(aPtr);
++    return record ? record->serialNumber : 0;
+   }
+ 
+-  if (!aCreate) {
+-    return 0;
++  auto entry = gSerialNumbers->LookupForAdd(aPtr);
++  if (entry) {
++    MOZ_CRASH("If an object already has a serial number, we should be destroying it.");
+   }
+ 
+-  SerialNumberRecord* record = new SerialNumberRecord();
++  auto record = entry.OrInsert([]() { return new SerialNumberRecord(); });
+   WalkTheStackSavingLocations(record->allocationStack);
+-  PL_HashTableRawAdd(gSerialNumbers, hep, HashNumber(aPtr),
+-                     aPtr, static_cast<void*>(record));
+   if (gLogJSStacks) {
+     record->SaveJSStack();
+   }
+   return gNextSerialNumber;
+ }
+ 
+-static int32_t*
+-GetRefCount(void* aPtr)
+-{
+-  PLHashEntry** hep = PL_HashTableRawLookup(gSerialNumbers,
+-                                            HashNumber(aPtr),
+-                                            aPtr);
+-  if (hep && *hep) {
+-    return &(static_cast<SerialNumberRecord*>((*hep)->value)->refCount);
+-  } else {
+-    return nullptr;
+-  }
+-}
+-
+-#ifdef HAVE_CPP_DYNAMIC_CAST_TO_VOID_PTR
+-static int32_t*
+-GetCOMPtrCount(void* aPtr)
+-{
+-  PLHashEntry** hep = PL_HashTableRawLookup(gSerialNumbers,
+-                                            HashNumber(aPtr),
+-                                            aPtr);
+-  if (hep && *hep) {
+-    return &(static_cast<SerialNumberRecord*>((*hep)->value)->COMPtrCount);
+-  }
+-  return nullptr;
+-}
+-#endif // HAVE_CPP_DYNAMIC_CAST_TO_VOID_PTR
+-
+ static void
+ RecycleSerialNumberPtr(void* aPtr)
+ {
+-  PL_HashTableRemove(gSerialNumbers, aPtr);
++  gSerialNumbers->Remove(aPtr);
+ }
+ 
+ static bool
+ LogThisObj(intptr_t aSerialNumber)
+ {
+-  return (bool)PL_HashTableLookup(gObjectsToLog, (const void*)aSerialNumber);
++  return gObjectsToLog->Contains(aSerialNumber);
+ }
+ 
+ #ifdef XP_WIN
+ #define FOPEN_NO_INHERIT "N"
+ #else
+ #define FOPEN_NO_INHERIT
+ #endif
+ 
+@@ -778,64 +653,43 @@ InitTraceLog()
+   if (comptr_log) {
+     fprintf(stdout, "### XPCOM_MEM_COMPTR_LOG defined -- but it will not work without dynamic_cast\n");
+   }
+ #endif // HAVE_CPP_DYNAMIC_CAST_TO_VOID_PTR
+ 
+   if (classes) {
+     // if XPCOM_MEM_LOG_CLASSES was set to some value, the value is interpreted
+     // as a list of class names to track
+-    gTypesToLog = PL_NewHashTable(256,
+-                                  PL_HashString,
+-                                  PL_CompareStrings,
+-                                  PL_CompareValues,
+-                                  &typesToLogHashAllocOps, nullptr);
+-    if (!gTypesToLog) {
+-      NS_WARNING("out of memory");
+-      fprintf(stdout, "### XPCOM_MEM_LOG_CLASSES defined -- unable to log specific classes\n");
+-    } else {
+-      fprintf(stdout, "### XPCOM_MEM_LOG_CLASSES defined -- only logging these classes: ");
+-      const char* cp = classes;
+-      for (;;) {
+-        char* cm = (char*)strchr(cp, ',');
+-        if (cm) {
+-          *cm = '\0';
+-        }
+-        PL_HashTableAdd(gTypesToLog, strdup(cp), (void*)1);
+-        fprintf(stdout, "%s ", cp);
+-        if (!cm) {
+-          break;
+-        }
+-        *cm = ',';
+-        cp = cm + 1;
++    gTypesToLog = new CharPtrSet(256);
++
++    fprintf(stdout, "### XPCOM_MEM_LOG_CLASSES defined -- only logging these classes: ");
++    const char* cp = classes;
++    for (;;) {
++      char* cm = (char*)strchr(cp, ',');
++      if (cm) {
++        *cm = '\0';
+       }
+-      fprintf(stdout, "\n");
++      gTypesToLog->PutEntry(cp);
++      fprintf(stdout, "%s ", cp);
++      if (!cm) {
++        break;
++      }
++      *cm = ',';
++      cp = cm + 1;
+     }
++    fprintf(stdout, "\n");
+ 
+-    gSerialNumbers = PL_NewHashTable(256,
+-                                     HashNumber,
+-                                     PL_CompareValues,
+-                                     PL_CompareValues,
+-                                     &serialNumberHashAllocOps, nullptr);
+-
+-
++    gSerialNumbers = new SerialHash(256);
+   }
+ 
+   const char* objects = getenv("XPCOM_MEM_LOG_OBJECTS");
+   if (objects) {
+-    gObjectsToLog = PL_NewHashTable(256,
+-                                    HashNumber,
+-                                    PL_CompareValues,
+-                                    PL_CompareValues,
+-                                    nullptr, nullptr);
++    gObjectsToLog = new IntPtrSet(256);
+ 
+-    if (!gObjectsToLog) {
+-      NS_WARNING("out of memory");
+-      fprintf(stdout, "### XPCOM_MEM_LOG_OBJECTS defined -- unable to log specific objects\n");
+-    } else if (!(gRefcntsLog || gAllocLog || gCOMPtrLog)) {
++    if (!(gRefcntsLog || gAllocLog || gCOMPtrLog)) {
+       fprintf(stdout, "### XPCOM_MEM_LOG_OBJECTS defined -- but none of XPCOM_MEM_(REFCNT|ALLOC|COMPTR)_LOG is defined\n");
+     } else {
+       fprintf(stdout, "### XPCOM_MEM_LOG_OBJECTS defined -- only logging these objects: ");
+       const char* cp = objects;
+       for (;;) {
+         char* cm = (char*)strchr(cp, ',');
+         if (cm) {
+           *cm = '\0';
+@@ -851,17 +705,17 @@ InitTraceLog()
+           top *= 10;
+           top += *cp - '0';
+           ++cp;
+         }
+         if (!bottom) {
+           bottom = top;
+         }
+         for (intptr_t serialno = bottom; serialno <= top; serialno++) {
+-          PL_HashTableAdd(gObjectsToLog, (const void*)serialno, (void*)1);
++          gObjectsToLog->PutEntry(serialno);
+           fprintf(stdout, "%" PRIdPTR " ", serialno);
+         }
+         if (!cm) {
+           break;
+         }
+         *cm = ',';
+         cp = cm + 1;
+       }
+@@ -1098,28 +952,27 @@ NS_LogAddRef(void* aPtr, nsrefcnt aRefcn
+       if (entry) {
+         entry->Ctor();
+       }
+     }
+ 
+     // Here's the case where MOZ_COUNT_CTOR was not used,
+     // yet we still want to see creation information:
+ 
+-    bool loggingThisType = (!gTypesToLog || LogThisType(aClass));
++    bool loggingThisType = (!gTypesToLog || gTypesToLog->Contains(aClass));
+     intptr_t serialno = 0;
+     if (gSerialNumbers && loggingThisType) {
+       serialno = GetSerialNumber(aPtr, aRefcnt == 1);
+       MOZ_ASSERT(serialno != 0,
+                  "Serial number requested for unrecognized pointer!  "
+                  "Are you memmoving a refcounted object?");
+-      int32_t* count = GetRefCount(aPtr);
+-      if (count) {
+-        (*count)++;
++      auto record = gSerialNumbers->Get(aPtr);
++      if (record) {
++        ++record->refCount;
+       }
+-
+     }
+ 
+     bool loggingThisObject = (!gObjectsToLog || LogThisObj(serialno));
+     if (aRefcnt == 1 && gAllocLog && loggingThisType && loggingThisObject) {
+       fprintf(gAllocLog, "\n<%s> %p %" PRIdPTR " Create [thread %p]\n", aClass, aPtr, serialno, PR_GetCurrentThread());
+       WalkTheStackCached(gAllocLog);
+     }
+ 
+@@ -1148,28 +1001,27 @@ NS_LogRelease(void* aPtr, nsrefcnt aRefc
+ 
+     if (aRefcnt == 0 && gBloatLog) {
+       BloatEntry* entry = GetBloatEntry(aClass, 0);
+       if (entry) {
+         entry->Dtor();
+       }
+     }
+ 
+-    bool loggingThisType = (!gTypesToLog || LogThisType(aClass));
++    bool loggingThisType = (!gTypesToLog || gTypesToLog->Contains(aClass));
+     intptr_t serialno = 0;
+     if (gSerialNumbers && loggingThisType) {
+       serialno = GetSerialNumber(aPtr, false);
+       MOZ_ASSERT(serialno != 0,
+                  "Serial number requested for unrecognized pointer!  "
+                  "Are you memmoving a refcounted object?");
+-      int32_t* count = GetRefCount(aPtr);
+-      if (count) {
+-        (*count)--;
++      auto record = gSerialNumbers->Get(aPtr);
++      if (record) {
++        --record->refCount;
+       }
+-
+     }
+ 
+     bool loggingThisObject = (!gObjectsToLog || LogThisObj(serialno));
+     if (gRefcntsLog && loggingThisType && loggingThisObject) {
+       // Can't use MOZ_LOG(), b/c it truncates the line
+       fprintf(gRefcntsLog,
+               "\n<%s> %p %" PRIuPTR " Release %" PRIuPTR " [thread %p]\n",
+               aClass, aPtr, serialno, aRefcnt, PR_GetCurrentThread());
+@@ -1207,17 +1059,17 @@ NS_LogCtor(void* aPtr, const char* aType
+ 
+   if (gBloatLog) {
+     BloatEntry* entry = GetBloatEntry(aType, aInstanceSize);
+     if (entry) {
+       entry->Ctor();
+     }
+   }
+ 
+-  bool loggingThisType = (!gTypesToLog || LogThisType(aType));
++  bool loggingThisType = (!gTypesToLog || gTypesToLog->Contains(aType));
+   intptr_t serialno = 0;
+   if (gSerialNumbers && loggingThisType) {
+     serialno = GetSerialNumber(aPtr, true);
+     MOZ_ASSERT(serialno != 0, "GetSerialNumber should never return 0 when passed true");
+   }
+ 
+   bool loggingThisObject = (!gObjectsToLog || LogThisObj(serialno));
+   if (gAllocLog && loggingThisType && loggingThisObject) {
+@@ -1244,17 +1096,17 @@ NS_LogDtor(void* aPtr, const char* aType
+ 
+   if (gBloatLog) {
+     BloatEntry* entry = GetBloatEntry(aType, aInstanceSize);
+     if (entry) {
+       entry->Dtor();
+     }
+   }
+ 
+-  bool loggingThisType = (!gTypesToLog || LogThisType(aType));
++  bool loggingThisType = (!gTypesToLog || gTypesToLog->Contains(aType));
+   intptr_t serialno = 0;
+   if (gSerialNumbers && loggingThisType) {
+     serialno = GetSerialNumber(aPtr, false);
+     MOZ_ASSERT(serialno != 0,
+                "Serial number requested for unrecognized pointer!  "
+                "Are you memmoving a MOZ_COUNT_CTOR-tracked object?");
+     RecycleSerialNumberPtr(aPtr);
+   }
+@@ -1290,26 +1142,23 @@ NS_LogCOMPtrAddRef(void* aCOMPtr, nsISup
+   if (gLogging == FullLogging) {
+     AutoTraceLogLock lock;
+ 
+     intptr_t serialno = GetSerialNumber(object, false);
+     if (serialno == 0) {
+       return;
+     }
+ 
+-    int32_t* count = GetCOMPtrCount(object);
+-    if (count) {
+-      (*count)++;
+-    }
+-
++    auto record = gSerialNumbers->Get(object);
++    int32_t count = record ? ++record->COMPtrCount : -1;
+     bool loggingThisObject = (!gObjectsToLog || LogThisObj(serialno));
+ 
+     if (gCOMPtrLog && loggingThisObject) {
+       fprintf(gCOMPtrLog, "\n<?> %p %" PRIdPTR " nsCOMPtrAddRef %d %p\n",
+-              object, serialno, count ? (*count) : -1, aCOMPtr);
++              object, serialno, count, aCOMPtr);
+       WalkTheStackCached(gCOMPtrLog);
+     }
+   }
+ #endif // HAVE_CPP_DYNAMIC_CAST_TO_VOID_PTR
+ }
+ 
+ 
+ EXPORT_XPCOM_API(void)
+@@ -1331,52 +1180,37 @@ NS_LogCOMPtrRelease(void* aCOMPtr, nsISu
+   if (gLogging == FullLogging) {
+     AutoTraceLogLock lock;
+ 
+     intptr_t serialno = GetSerialNumber(object, false);
+     if (serialno == 0) {
+       return;
+     }
+ 
+-    int32_t* count = GetCOMPtrCount(object);
+-    if (count) {
+-      (*count)--;
+-    }
+-
++    auto record = gSerialNumbers->Get(object);
++    int32_t count = record ? --record->COMPtrCount : -1;
+     bool loggingThisObject = (!gObjectsToLog || LogThisObj(serialno));
+ 
+     if (gCOMPtrLog && loggingThisObject) {
+       fprintf(gCOMPtrLog, "\n<?> %p %" PRIdPTR " nsCOMPtrRelease %d %p\n",
+-              object, serialno, count ? (*count) : -1, aCOMPtr);
++              object, serialno, count, aCOMPtr);
+       WalkTheStackCached(gCOMPtrLog);
+     }
+   }
+ #endif // HAVE_CPP_DYNAMIC_CAST_TO_VOID_PTR
+ }
+ 
+ void
+ nsTraceRefcnt::Shutdown()
+ {
+   gCodeAddressService = nullptr;
+-  if (gBloatView) {
+-    PL_HashTableDestroy(gBloatView);
+-    gBloatView = nullptr;
+-  }
+-  if (gTypesToLog) {
+-    PL_HashTableDestroy(gTypesToLog);
+-    gTypesToLog = nullptr;
+-  }
+-  if (gObjectsToLog) {
+-    PL_HashTableDestroy(gObjectsToLog);
+-    gObjectsToLog = nullptr;
+-  }
+-  if (gSerialNumbers) {
+-    PL_HashTableDestroy(gSerialNumbers);
+-    gSerialNumbers = nullptr;
+-  }
++  gBloatView = nullptr;
++  gTypesToLog = nullptr;
++  gObjectsToLog = nullptr;
++  gSerialNumbers = nullptr;
+   maybeUnregisterAndCloseFile(gBloatLog);
+   maybeUnregisterAndCloseFile(gRefcntsLog);
+   maybeUnregisterAndCloseFile(gAllocLog);
+   maybeUnregisterAndCloseFile(gCOMPtrLog);
+ }
+ 
+ void
+ nsTraceRefcnt::SetActivityIsLegal(bool aLegal)
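
The LookupForAdd/OrInsert pairing that GetSerialNumber() adopts above is the single-lookup idiom of the xpcom hashtables: one hash probe serves both the existence check and the insertion. A hedged standalone sketch, with SerialNumberRecord trimmed to one field for illustration:

    #include "mozilla/Assertions.h"
    #include "nsClassHashtable.h"
    #include "nsHashKeys.h"

    struct SerialNumberRecord { intptr_t serialNumber = 0; };
    using SerialHash = nsClassHashtable<nsVoidPtrHashKey, SerialNumberRecord>;

    static SerialNumberRecord* Register(SerialHash& aTable, void* aPtr) {
      auto entry = aTable.LookupForAdd(aPtr);  // one probe, reused below
      if (entry) {
        // Already registered; the patch treats this as a fatal logic error.
        MOZ_CRASH("object already has a serial number");
      }
      return entry.OrInsert([]() { return new SerialNumberRecord(); });
    }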

+ 28 - 0
frg/work-js/mozilla-release/patches/1452202-fix-61a1.patch

@@ -0,0 +1,28 @@
+# HG changeset patch
+# User Eric Rahm <erahm@mozilla.com>
+# Date 1523296919 25200
+# Node ID 9036c64b7a66ffe93e717ca97642a4400e396d9c
+# Parent  522cf89f6bcea4a4a6fe0d736434765673da3e54
+Bug 1452202 - Clean up PLDHashTable move operator. r=froydnj
+
+diff --git a/xpcom/ds/PLDHashTable.cpp b/xpcom/ds/PLDHashTable.cpp
+--- a/xpcom/ds/PLDHashTable.cpp
++++ b/xpcom/ds/PLDHashTable.cpp
+@@ -220,17 +220,16 @@ PLDHashTable::PLDHashTable(const PLDHash
+ PLDHashTable&
+ PLDHashTable::operator=(PLDHashTable&& aOther)
+ {
+   if (this == &aOther) {
+     return *this;
+   }
+ 
+   // |mOps| and |mEntrySize| are required to stay the same, they're
+-  // require that they are equal. The justification for this is that they're
+   // conceptually part of the type -- indeed, if PLDHashTable was a templated
+   // type like nsTHashtable, they *would* be part of the type -- so it only
+   // makes sense to assign in cases where they match.
+   MOZ_RELEASE_ASSERT(mOps == aOther.mOps);
+   MOZ_RELEASE_ASSERT(mEntrySize == aOther.mEntrySize);
+ 
+   // Reconstruct |this|.
+   this->~PLDHashTable();

+ 63 - 0
frg/work-js/mozilla-release/patches/1452288-61a1.patch

@@ -0,0 +1,63 @@
+# HG changeset patch
+# User Eric Rahm <erahm@mozilla.com>
+# Date 1523058718 25200
+# Node ID 4cd93e695965adeb76237e41d03c4a6d52ab27d1
+# Parent  f45d6ae3fdb194a07ef5eba9f9a760f28f564b64
+Bug 1452288 - Use calloc for allocating PLDHashTable entries. r=froydnj
+
+diff --git a/xpcom/ds/PLDHashTable.cpp b/xpcom/ds/PLDHashTable.cpp
+--- a/xpcom/ds/PLDHashTable.cpp
++++ b/xpcom/ds/PLDHashTable.cpp
+@@ -471,27 +471,26 @@ PLDHashTable::ChangeTable(int32_t aDelta
+     return false;
+   }
+ 
+   uint32_t nbytes;
+   if (!SizeOfEntryStore(newCapacity, mEntrySize, &nbytes)) {
+     return false;   // overflowed
+   }
+ 
+-  char* newEntryStore = (char*)malloc(nbytes);
++  char* newEntryStore = (char*)calloc(1, nbytes);
+   if (!newEntryStore) {
+     return false;
+   }
+ 
+   // We can't fail from here on, so update table parameters.
+   mHashShift = kHashBits - newLog2;
+   mRemovedCount = 0;
+ 
+   // Assign the new entry store to table.
+-  memset(newEntryStore, 0, nbytes);
+   char* oldEntryStore;
+   char* oldEntryAddr;
+   oldEntryAddr = oldEntryStore = mEntryStore.Get();
+   mEntryStore.Set(newEntryStore, &mGeneration);
+   PLDHashMoveEntry moveEntry = mOps->moveEntry;
+ 
+   // Copy only live entries, leaving removed ones behind.
+   uint32_t oldCapacity = 1u << oldLog2;
+@@ -550,21 +549,20 @@ PLDHashTable::Add(const void* aKey, cons
+ #endif
+ 
+   // Allocate the entry storage if it hasn't already been allocated.
+   if (!mEntryStore.Get()) {
+     uint32_t nbytes;
+     // We already checked this in the constructor, so it must still be true.
+     MOZ_RELEASE_ASSERT(SizeOfEntryStore(CapacityFromHashShift(), mEntrySize,
+                                         &nbytes));
+-    mEntryStore.Set((char*)malloc(nbytes), &mGeneration);
++    mEntryStore.Set((char*)calloc(1, nbytes), &mGeneration);
+     if (!mEntryStore.Get()) {
+       return nullptr;
+     }
+-    memset(mEntryStore.Get(), 0, nbytes);
+   }
+ 
+   // If alpha is >= .75, grow or compress the table. If aKey is already in the
+   // table, we may grow once more than necessary, but only if we are on the
+   // edge of being overloaded.
+   uint32_t capacity = Capacity();
+   if (mEntryCount + mRemovedCount >= MaxLoad(capacity)) {
+     // Compress if a quarter or more of all entries are removed.
+
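
Distilled from the two hunks above: malloc followed by a full memset becomes a single calloc. Besides being shorter, calloc can be cheaper for large tables because the allocator may satisfy it with pages the OS already zeroed, skipping the explicit pass over every byte. A minimal before/after sketch:

    #include <stdlib.h>
    #include <string.h>

    static char* AllocZeroedOld(size_t aBytes) {
      char* store = (char*)malloc(aBytes);
      if (store) {
        memset(store, 0, aBytes);  // touches every byte, even fresh pages
      }
      return store;
    }

    static char* AllocZeroedNew(size_t aBytes) {
      return (char*)calloc(1, aBytes);  // zeroing may come free from the OS
    }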

+ 44 - 0
frg/work-js/mozilla-release/patches/1468514-63a1.patch

@@ -0,0 +1,44 @@
+# HG changeset patch
+# User Nathan Froyd <froydnj@mozilla.com>
+# Date 1532359857 18000
+# Node ID ce374736cb001e60a6f09f4b219938eb401bf04c
+# Parent  599977a50d92cfa1493e4c2d76f53c8c7d40e3ae
+Bug 1468514 - make resizing PLDHashTable smarter; r=erahm
+
+The current code for resizing PLDHashTable modifies the cached hash for
+all entries in the old hash table.  This is unnecessary, because we're
+going to throw away the old hash table shortly, and inefficient, because
+writing to memory we're never going to use again just wastes time and
+memory bandwidth.  Instead, let's avoid the write by pulling out the
+cached key and doing the necessary manipulation on local variables,
+which is probably slightly faster.
+
+diff --git a/xpcom/ds/PLDHashTable.cpp b/xpcom/ds/PLDHashTable.cpp
+--- a/xpcom/ds/PLDHashTable.cpp
++++ b/xpcom/ds/PLDHashTable.cpp
+@@ -496,21 +496,21 @@ PLDHashTable::ChangeTable(int32_t aDelta
+   mEntryStore.Set(newEntryStore, &mGeneration);
+   PLDHashMoveEntry moveEntry = mOps->moveEntry;
+ 
+   // Copy only live entries, leaving removed ones behind.
+   uint32_t oldCapacity = 1u << oldLog2;
+   for (uint32_t i = 0; i < oldCapacity; ++i) {
+     PLDHashEntryHdr* oldEntry = (PLDHashEntryHdr*)oldEntryAddr;
+     if (EntryIsLive(oldEntry)) {
+-      oldEntry->mKeyHash &= ~kCollisionFlag;
+-      PLDHashEntryHdr* newEntry = FindFreeEntry(oldEntry->mKeyHash);
++      const PLDHashNumber key = oldEntry->mKeyHash & ~kCollisionFlag;
++      PLDHashEntryHdr* newEntry = FindFreeEntry(key);
+       NS_ASSERTION(EntryIsFree(newEntry), "EntryIsFree(newEntry)");
+       moveEntry(this, oldEntry, newEntry);
+-      newEntry->mKeyHash = oldEntry->mKeyHash;
++      newEntry->mKeyHash = key;
+     }
+     oldEntryAddr += mEntrySize;
+   }
+ 
+   free(oldEntryStore);
+   return true;
+ }
+ 
+
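
The shape of the fix in isolation, as a runnable sketch (Entry and kCollisionFlag here are simplified stand-ins for the PLDHashTable internals):

    #include <stdint.h>

    struct Entry { uint32_t mKeyHash; };
    static const uint32_t kCollisionFlag = 1;

    static uint32_t CachedKeyForRehash(const Entry* aOldEntry) {
      // The old code cleared the flag in place, dirtying memory that is freed
      // moments later; the new code strips it into a local instead.
      return aOldEntry->mKeyHash & ~kCollisionFlag;
    }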

+ 247 - 0
frg/work-js/mozilla-release/patches/1475461-1-63a1.patch

@@ -0,0 +1,247 @@
+# HG changeset patch
+# User Masayuki Nakano <masayuki@d-toybox.com>
+# Date 1531468589 -32400
+# Node ID 9f8a3c2f65288f91b94223b69c63da6612ebee55
+# Parent  7ce64da43ceb50ea7ea358126332cb86765eeb25
+Bug 1475461 - part 1: Mark PLDHashTable::Search() and methods called by it as const r=Ehsan
+
+PLDHashTable::Search() does not modify any members.  So, this method and
+methods called by it should be marked as const.
+
+MozReview-Commit-ID: 6g4jrYK1j9E
+
+diff --git a/xpcom/ds/PLDHashTable.cpp b/xpcom/ds/PLDHashTable.cpp
+--- a/xpcom/ds/PLDHashTable.cpp
++++ b/xpcom/ds/PLDHashTable.cpp
+@@ -251,24 +251,24 @@ PLDHashTable::operator=(PLDHashTable&& a
+ #endif
+     aOther.mEntryStore.Set(nullptr, &aOther.mGeneration);
+   }
+ 
+   return *this;
+ }
+ 
+ PLDHashNumber
+-PLDHashTable::Hash1(PLDHashNumber aHash0)
++PLDHashTable::Hash1(PLDHashNumber aHash0) const
+ {
+   return aHash0 >> mHashShift;
+ }
+ 
+ void
+ PLDHashTable::Hash2(PLDHashNumber aHash0,
+-                    uint32_t& aHash2Out, uint32_t& aSizeMaskOut)
++                    uint32_t& aHash2Out, uint32_t& aSizeMaskOut) const
+ {
+   uint32_t sizeLog2 = kHashBits - mHashShift;
+   uint32_t sizeMask = (PLDHashNumber(1) << sizeLog2) - 1;
+   aSizeMaskOut = sizeMask;
+ 
+   // The incoming aHash0 always has the low bit unset (since we leave it
+   // free for the collision flag), and should have reasonably random
+   // data in the other 31 bits.  We used the high bits of aHash0 for
+@@ -288,27 +288,29 @@ PLDHashTable::Hash2(PLDHashNumber aHash0
+ // that a removed-entry sentinel need be stored only if the removed entry had
+ // a colliding entry added after it. Therefore we can use 1 as the collision
+ // flag in addition to the removed-entry sentinel value. Multiplicative hash
+ // uses the high order bits of mKeyHash, so this least-significant reservation
+ // should not hurt the hash function's effectiveness much.
+ 
+ // Match an entry's mKeyHash against an unstored one computed from a key.
+ /* static */ bool
+-PLDHashTable::MatchEntryKeyhash(PLDHashEntryHdr* aEntry, PLDHashNumber aKeyHash)
++PLDHashTable::MatchEntryKeyhash(const PLDHashEntryHdr* aEntry,
++                                const PLDHashNumber aKeyHash)
+ {
+   return (aEntry->mKeyHash & ~kCollisionFlag) == aKeyHash;
+ }
+ 
+ // Compute the address of the indexed entry in table.
+ PLDHashEntryHdr*
+-PLDHashTable::AddressEntry(uint32_t aIndex)
++PLDHashTable::AddressEntry(uint32_t aIndex) const
+ {
+-  return reinterpret_cast<PLDHashEntryHdr*>(
+-    mEntryStore.Get() + aIndex * mEntrySize);
++  return const_cast<PLDHashEntryHdr*>(
++    reinterpret_cast<const PLDHashEntryHdr*>(
++      mEntryStore.Get() + aIndex * mEntrySize));
+ }
+ 
+ PLDHashTable::~PLDHashTable()
+ {
+ #ifdef DEBUG
+   AutoDestructorOp op(mChecker);
+ #endif
+ 
+@@ -349,17 +351,17 @@ PLDHashTable::Clear()
+ 
+ // If |Reason| is |ForAdd|, the return value is always non-null and it may be
+ // a previously-removed entry. If |Reason| is |ForSearchOrRemove|, the return
+ // value is null on a miss, and will never be a previously-removed entry on a
+ // hit. This distinction is a bit grotty but this function is hot enough that
+ // these differences are worthwhile.
+ template <PLDHashTable::SearchReason Reason>
+ PLDHashEntryHdr* NS_FASTCALL
+-PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash)
++PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash) const
+ {
+   MOZ_ASSERT(mEntryStore.Get());
+   NS_ASSERTION(!(aKeyHash & kCollisionFlag),
+                "!(aKeyHash & kCollisionFlag)");
+ 
+   // Compute the primary hash address.
+   PLDHashNumber hash1 = Hash1(aKeyHash);
+   PLDHashEntryHdr* entry = AddressEntry(hash1);
+@@ -416,17 +418,17 @@ PLDHashTable::SearchTable(const void* aK
+ // This is a copy of SearchTable(), used by ChangeTable(), hardcoded to
+ //   1. assume |Reason| is |ForAdd|,
+ //   2. assume that |aKey| will never match an existing entry, and
+ //   3. assume that no entries have been removed from the current table
+ //      structure.
+ // Avoiding the need for |aKey| means we can avoid needing a way to map entries
+ // to keys, which means callers can use complex key types more easily.
+ MOZ_ALWAYS_INLINE PLDHashEntryHdr*
+-PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash)
++PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash) const
+ {
+   MOZ_ASSERT(mEntryStore.Get());
+   NS_ASSERTION(!(aKeyHash & kCollisionFlag),
+                "!(aKeyHash & kCollisionFlag)");
+ 
+   // Compute the primary hash address.
+   PLDHashNumber hash1 = Hash1(aKeyHash);
+   PLDHashEntryHdr* entry = AddressEntry(hash1);
+@@ -506,34 +508,34 @@ PLDHashTable::ChangeTable(int32_t aDelta
+     oldEntryAddr += mEntrySize;
+   }
+ 
+   free(oldEntryStore);
+   return true;
+ }
+ 
+ MOZ_ALWAYS_INLINE PLDHashNumber
+-PLDHashTable::ComputeKeyHash(const void* aKey)
++PLDHashTable::ComputeKeyHash(const void* aKey) const
+ {
+   MOZ_ASSERT(mEntryStore.Get());
+ 
+   PLDHashNumber keyHash = mOps->hashKey(aKey);
+   keyHash *= kGoldenRatio;
+ 
+   // Avoid 0 and 1 hash codes, they indicate free and removed entries.
+   if (keyHash < 2) {
+     keyHash -= 2;
+   }
+   keyHash &= ~kCollisionFlag;
+ 
+   return keyHash;
+ }
+ 
+ PLDHashEntryHdr*
+-PLDHashTable::Search(const void* aKey)
++PLDHashTable::Search(const void* aKey) const
+ {
+ #ifdef DEBUG
+   AutoReadOp op(mChecker);
+ #endif
+ 
+   PLDHashEntryHdr* entry = mEntryStore.Get()
+                          ? SearchTable<ForSearchOrRemove>(aKey,
+                                                           ComputeKeyHash(aKey))
+diff --git a/xpcom/ds/PLDHashTable.h b/xpcom/ds/PLDHashTable.h
+--- a/xpcom/ds/PLDHashTable.h
++++ b/xpcom/ds/PLDHashTable.h
+@@ -320,17 +320,17 @@ public:
+   uint32_t Generation() const { return mGeneration; }
+ 
+   // To search for a |key| in |table|, call:
+   //
+   //   entry = table.Search(key);
+   //
+   // If |entry| is non-null, |key| was found. If |entry| is null, key was not
+   // found.
+-  PLDHashEntryHdr* Search(const void* aKey);
++  PLDHashEntryHdr* Search(const void* aKey) const;
+ 
+   // To add an entry identified by |key| to table, call:
+   //
+   //   entry = table.Add(key, mozilla::fallible);
+   //
+   // If |entry| is null upon return, then the table is severely overloaded and
+   // memory can't be allocated for entry storage.
+   //
+@@ -502,60 +502,62 @@ private:
+   // expressed as a fixed-point 32-bit fraction.
+   static const uint32_t kHashBits = 32;
+   static const uint32_t kGoldenRatio = 0x9E3779B9U;
+ 
+   static uint32_t HashShift(uint32_t aEntrySize, uint32_t aLength);
+ 
+   static const PLDHashNumber kCollisionFlag = 1;
+ 
+-  static bool EntryIsFree(PLDHashEntryHdr* aEntry)
++  static bool EntryIsFree(const PLDHashEntryHdr* aEntry)
+   {
+     return aEntry->mKeyHash == 0;
+   }
+-  static bool EntryIsRemoved(PLDHashEntryHdr* aEntry)
++  static bool EntryIsRemoved(const PLDHashEntryHdr* aEntry)
+   {
+     return aEntry->mKeyHash == 1;
+   }
+-  static bool EntryIsLive(PLDHashEntryHdr* aEntry)
++  static bool EntryIsLive(const PLDHashEntryHdr* aEntry)
+   {
+     return aEntry->mKeyHash >= 2;
+   }
+ 
+   static void MarkEntryFree(PLDHashEntryHdr* aEntry)
+   {
+     aEntry->mKeyHash = 0;
+   }
+   static void MarkEntryRemoved(PLDHashEntryHdr* aEntry)
+   {
+     aEntry->mKeyHash = 1;
+   }
+ 
+-  PLDHashNumber Hash1(PLDHashNumber aHash0);
+-  void Hash2(PLDHashNumber aHash, uint32_t& aHash2Out, uint32_t& aSizeMaskOut);
++  PLDHashNumber Hash1(PLDHashNumber aHash0) const;
++  void Hash2(PLDHashNumber aHash,
++             uint32_t& aHash2Out, uint32_t& aSizeMaskOut) const;
+ 
+-  static bool MatchEntryKeyhash(PLDHashEntryHdr* aEntry, PLDHashNumber aHash);
+-  PLDHashEntryHdr* AddressEntry(uint32_t aIndex);
++  static bool MatchEntryKeyhash(const PLDHashEntryHdr* aEntry,
++                                const PLDHashNumber aHash);
++  PLDHashEntryHdr* AddressEntry(uint32_t aIndex) const;
+ 
+   // We store mHashShift rather than sizeLog2 to optimize the collision-free
+   // case in SearchTable.
+   uint32_t CapacityFromHashShift() const
+   {
+     return ((uint32_t)1 << (kHashBits - mHashShift));
+   }
+ 
+-  PLDHashNumber ComputeKeyHash(const void* aKey);
++  PLDHashNumber ComputeKeyHash(const void* aKey) const;
+ 
+   enum SearchReason { ForSearchOrRemove, ForAdd };
+ 
+   template <SearchReason Reason>
+   PLDHashEntryHdr* NS_FASTCALL
+-    SearchTable(const void* aKey, PLDHashNumber aKeyHash);
++    SearchTable(const void* aKey, PLDHashNumber aKeyHash) const;
+ 
+-  PLDHashEntryHdr* FindFreeEntry(PLDHashNumber aKeyHash);
++  PLDHashEntryHdr* FindFreeEntry(PLDHashNumber aKeyHash) const;
+ 
+   bool ChangeTable(int aDeltaLog2);
+ 
+   void ShrinkIfAppropriate();
+ 
+   PLDHashTable(const PLDHashTable& aOther) = delete;
+   PLDHashTable& operator=(const PLDHashTable& aOther) = delete;
+ };
+
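
With Search() and its helpers now const, read-only queries compile against a const table without the const_cast that part 2 removes. A minimal sketch of such a caller:

    #include "PLDHashTable.h"

    // Legal only now that PLDHashTable::Search() is const.
    static bool Contains(const PLDHashTable& aTable, const void* aKey) {
      return aTable.Search(aKey) != nullptr;
    }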

+ 418 - 0
frg/work-js/mozilla-release/patches/1475461-2-63a1.patch

@@ -0,0 +1,418 @@
+# HG changeset patch
+# User Masayuki Nakano <masayuki@d-toybox.com>
+# Date 1531476113 -32400
+# Node ID de078de9ee987f4931f40ac8d5d9e8633a14748a
+# Parent  4b1c6ab0d8adbed6fca6aff172ea74363eaad11e
+Bug 1475461 - part 2: Make callers of PLDHashTable::Search() const methods if possible r=Ehsan
+
+Some callers of PLDHashTable::Search() use const_cast, and some others are
+not const methods because PLDHashTable::Search() was not const.
+
+This patch removes the const_cast from the former and marks some methods of
+the latter const.
+
+MozReview-Commit-ID: C8ayoi7mXc1
+
+diff --git a/dom/commandhandler/nsCommandParams.cpp b/dom/commandhandler/nsCommandParams.cpp
+--- a/dom/commandhandler/nsCommandParams.cpp
++++ b/dom/commandhandler/nsCommandParams.cpp
+@@ -202,17 +202,19 @@ nsCommandParams::RemoveValue(const char*
+ {
+   mValuesHash.Remove((void*)aName);
+   return NS_OK;
+ }
+ 
+ nsCommandParams::HashEntry*
+ nsCommandParams::GetNamedEntry(const char* aName)
+ {
++  // See Bug 1475461. Do not change when backporting bug 1450882.
+   return static_cast<HashEntry*>(mValuesHash.Search((void*)aName));
++
+ }
+ 
+ nsCommandParams::HashEntry*
+ nsCommandParams::GetOrMakeEntry(const char* aName, uint8_t aEntryType)
+ {
+   auto foundEntry = static_cast<HashEntry*>(mValuesHash.Search((void*)aName));
+   if (foundEntry) { // reuse existing entry
+     foundEntry->Reset(aEntryType);
+diff --git a/js/xpconnect/src/XPCMaps.h b/js/xpconnect/src/XPCMaps.h
+--- a/js/xpconnect/src/XPCMaps.h
++++ b/js/xpconnect/src/XPCMaps.h
+@@ -107,17 +107,17 @@ public:
+     struct Entry : public PLDHashEntryHdr
+     {
+         nsISupports*      key;
+         XPCWrappedNative* value;
+     };
+ 
+     static Native2WrappedNativeMap* newMap(int length);
+ 
+-    inline XPCWrappedNative* Find(nsISupports* Obj)
++    inline XPCWrappedNative* Find(nsISupports* Obj) const
+     {
+         NS_PRECONDITION(Obj,"bad param");
+         auto entry = static_cast<Entry*>(mTable.Search(Obj));
+         return entry ? entry->value : nullptr;
+     }
+ 
+     inline XPCWrappedNative* Add(XPCWrappedNative* wrapper)
+     {
+@@ -173,17 +173,17 @@ public:
+         const nsIID*         key;
+         nsXPCWrappedJSClass* value;
+ 
+         static const struct PLDHashTableOps sOps;
+     };
+ 
+     static IID2WrappedJSClassMap* newMap(int length);
+ 
+-    inline nsXPCWrappedJSClass* Find(REFNSIID iid)
++    inline nsXPCWrappedJSClass* Find(REFNSIID iid) const
+     {
+         auto entry = static_cast<Entry*>(mTable.Search(&iid));
+         return entry ? entry->value : nullptr;
+     }
+ 
+     inline nsXPCWrappedJSClass* Add(nsXPCWrappedJSClass* clazz)
+     {
+         NS_PRECONDITION(clazz,"bad param");
+@@ -227,17 +227,17 @@ public:
+         const nsIID*        key;
+         XPCNativeInterface* value;
+ 
+         static const struct PLDHashTableOps sOps;
+     };
+ 
+     static IID2NativeInterfaceMap* newMap(int length);
+ 
+-    inline XPCNativeInterface* Find(REFNSIID iid)
++    inline XPCNativeInterface* Find(REFNSIID iid) const
+     {
+         auto entry = static_cast<Entry*>(mTable.Search(&iid));
+         return entry ? entry->value : nullptr;
+     }
+ 
+     inline XPCNativeInterface* Add(XPCNativeInterface* iface)
+     {
+         NS_PRECONDITION(iface,"bad param");
+@@ -285,17 +285,17 @@ public:
+ 
+     private:
+         static bool Match(const PLDHashEntryHdr* aEntry, const void* aKey);
+         static void Clear(PLDHashTable* aTable, PLDHashEntryHdr* aEntry);
+     };
+ 
+     static ClassInfo2NativeSetMap* newMap(int length);
+ 
+-    inline XPCNativeSet* Find(nsIClassInfo* info)
++    inline XPCNativeSet* Find(nsIClassInfo* info) const
+     {
+         auto entry = static_cast<Entry*>(mTable.Search(info));
+         return entry ? entry->value : nullptr;
+     }
+ 
+     inline XPCNativeSet* Add(nsIClassInfo* info, XPCNativeSet* set)
+     {
+         NS_PRECONDITION(info,"bad param");
+@@ -338,17 +338,17 @@ public:
+     struct Entry : public PLDHashEntryHdr
+     {
+         nsIClassInfo*          key;
+         XPCWrappedNativeProto* value;
+     };
+ 
+     static ClassInfo2WrappedNativeProtoMap* newMap(int length);
+ 
+-    inline XPCWrappedNativeProto* Find(nsIClassInfo* info)
++    inline XPCWrappedNativeProto* Find(nsIClassInfo* info) const
+     {
+         auto entry = static_cast<Entry*>(mTable.Search(info));
+         return entry ? entry->value : nullptr;
+     }
+ 
+     inline XPCWrappedNativeProto* Add(nsIClassInfo* info, XPCWrappedNativeProto* proto)
+     {
+         NS_PRECONDITION(info,"bad param");
+@@ -396,17 +396,17 @@ public:
+         static bool
+         Match(const PLDHashEntryHdr* entry, const void* key);
+ 
+         static const struct PLDHashTableOps sOps;
+     };
+ 
+     static NativeSetMap* newMap(int length);
+ 
+-    inline XPCNativeSet* Find(XPCNativeSetKey* key)
++    inline XPCNativeSet* Find(XPCNativeSetKey* key) const
+     {
+         auto entry = static_cast<Entry*>(mTable.Search(key));
+         return entry ? entry->key_value : nullptr;
+     }
+ 
+     inline XPCNativeSet* Add(const XPCNativeSetKey* key, XPCNativeSet* set)
+     {
+         MOZ_ASSERT(key, "bad param");
+diff --git a/netwerk/cache/nsCacheEntry.cpp b/netwerk/cache/nsCacheEntry.cpp
+--- a/netwerk/cache/nsCacheEntry.cpp
++++ b/netwerk/cache/nsCacheEntry.cpp
+@@ -414,17 +414,17 @@ nsCacheEntryHashTable::Shutdown()
+     if (initialized) {
+         table.ClearAndPrepareForLength(kInitialTableLength);
+         initialized = false;
+     }
+ }
+ 
+ 
+ nsCacheEntry *
+-nsCacheEntryHashTable::GetEntry( const nsCString * key)
++nsCacheEntryHashTable::GetEntry( const nsCString * key) const
+ {
+     NS_ASSERTION(initialized, "nsCacheEntryHashTable not initialized");
+     if (!initialized)  return nullptr;
+ 
+     PLDHashEntryHdr *hashEntry = table.Search(key);
+     return hashEntry ? ((nsCacheEntryHashTableEntry *)hashEntry)->cacheEntry
+                      : nullptr;
+ }
+diff --git a/netwerk/cache/nsCacheEntry.h b/netwerk/cache/nsCacheEntry.h
+--- a/netwerk/cache/nsCacheEntry.h
++++ b/netwerk/cache/nsCacheEntry.h
+@@ -265,17 +265,17 @@ class nsCacheEntryHashTable
+ {
+ public:
+     nsCacheEntryHashTable();
+     ~nsCacheEntryHashTable();
+ 
+     void          Init();
+     void          Shutdown();
+ 
+-    nsCacheEntry *GetEntry( const nsCString * key);
++    nsCacheEntry *GetEntry( const nsCString * key) const;
+     nsresult      AddEntry( nsCacheEntry *entry);
+     void          RemoveEntry( nsCacheEntry *entry);
+ 
+     PLDHashTable::Iterator Iter();
+ 
+ private:
+     // PLDHashTable operation callbacks
+     static PLDHashNumber  HashKey(const void *key);
+diff --git a/netwerk/cache/nsDiskCacheBinding.cpp b/netwerk/cache/nsDiskCacheBinding.cpp
+--- a/netwerk/cache/nsDiskCacheBinding.cpp
++++ b/netwerk/cache/nsDiskCacheBinding.cpp
+@@ -187,17 +187,17 @@ nsDiskCacheBindery::CreateBinding(nsCach
+     return binding;
+ }
+ 
+ 
+ /**
+  *  FindActiveEntry :  to find active colliding entry so we can doom it
+  */
+ nsDiskCacheBinding *
+-nsDiskCacheBindery::FindActiveBinding(uint32_t  hashNumber)
++nsDiskCacheBindery::FindActiveBinding(uint32_t  hashNumber) const
+ {
+     NS_ASSERTION(initialized, "nsDiskCacheBindery not initialized");
+     // find hash entry for key
+     auto hashEntry = static_cast<HashTableEntry*>
+         (table.Search((void*)(uintptr_t)hashNumber));
+     if (!hashEntry) return nullptr;
+ 
+     // walk list looking for active entry
+diff --git a/netwerk/cache/nsDiskCacheBinding.h b/netwerk/cache/nsDiskCacheBinding.h
+--- a/netwerk/cache/nsDiskCacheBinding.h
++++ b/netwerk/cache/nsDiskCacheBinding.h
+@@ -98,17 +98,17 @@ public:
+     ~nsDiskCacheBindery();
+ 
+     void                    Init();
+     void                    Reset();
+ 
+     nsDiskCacheBinding *    CreateBinding(nsCacheEntry *       entry,
+                                           nsDiskCacheRecord *  record);
+ 
+-    nsDiskCacheBinding *    FindActiveBinding(uint32_t  hashNumber);
++    nsDiskCacheBinding *    FindActiveBinding(uint32_t  hashNumber) const;
+     void                    RemoveBinding(nsDiskCacheBinding * binding);
+     bool                    ActiveBindings();
+ 
+     size_t                 SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf);
+ 
+ private:
+     nsresult                AddBinding(nsDiskCacheBinding * binding);
+ 
+diff --git a/uriloader/base/nsDocLoader.cpp b/uriloader/base/nsDocLoader.cpp
+--- a/uriloader/base/nsDocLoader.cpp
++++ b/uriloader/base/nsDocLoader.cpp
+@@ -1402,17 +1402,18 @@ nsresult nsDocLoader::AddRequestInfo(nsI
+   return NS_OK;
+ }
+ 
+ void nsDocLoader::RemoveRequestInfo(nsIRequest *aRequest)
+ {
+   mRequestInfoHash.Remove(aRequest);
+ }
+ 
+-nsDocLoader::nsRequestInfo* nsDocLoader::GetRequestInfo(nsIRequest* aRequest)
++nsDocLoader::nsRequestInfo*
++nsDocLoader::GetRequestInfo(nsIRequest* aRequest) const
+ {
+   return static_cast<nsRequestInfo*>(mRequestInfoHash.Search(aRequest));
+ }
+ 
+ void nsDocLoader::ClearRequestInfoHash(void)
+ {
+   mRequestInfoHash.Clear();
+ }
+diff --git a/uriloader/base/nsDocLoader.h b/uriloader/base/nsDocLoader.h
+--- a/uriloader/base/nsDocLoader.h
++++ b/uriloader/base/nsDocLoader.h
+@@ -319,17 +319,17 @@ private:
+     // loadgroup has no active requests before checking for "real" emptiness if
+     // aFlushLayout is true.
+     void DocLoaderIsEmpty(bool aFlushLayout);
+ 
+     int64_t GetMaxTotalProgress();
+ 
+     nsresult AddRequestInfo(nsIRequest* aRequest);
+     void RemoveRequestInfo(nsIRequest* aRequest);
+-    nsRequestInfo *GetRequestInfo(nsIRequest* aRequest);
++    nsRequestInfo *GetRequestInfo(nsIRequest* aRequest) const;
+     void ClearRequestInfoHash();
+     int64_t CalculateMaxProgress();
+ ///    void DumpChannelInfo(void);
+ 
+     // used to clear our internal progress state between loads...
+     void ClearInternalProgress();
+ };
+ 
+diff --git a/xpcom/base/nsCycleCollector.cpp b/xpcom/base/nsCycleCollector.cpp
+--- a/xpcom/base/nsCycleCollector.cpp
++++ b/xpcom/base/nsCycleCollector.cpp
+@@ -941,17 +941,17 @@ public:
+     n += mWeakMaps.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ 
+     n += mPtrToNodeMap.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ 
+     return n;
+   }
+ 
+ private:
+-  PtrToNodeEntry* FindNodeEntry(void* aPtr)
++  PtrToNodeEntry* FindNodeEntry(void* aPtr) const
+   {
+     return static_cast<PtrToNodeEntry*>(mPtrToNodeMap.Search(aPtr));
+   }
+ };
+ 
+ PtrInfo*
+ CCGraph::FindNode(void* aPtr)
+ {
+diff --git a/xpcom/ds/nsAtomTable.cpp.1475461-2.later b/xpcom/ds/nsAtomTable.cpp.1475461-2.later
+new file mode 100644
+--- /dev/null
++++ b/xpcom/ds/nsAtomTable.cpp.1475461-2.later
+@@ -0,0 +1,21 @@
++--- nsAtomTable.cpp
+++++ nsAtomTable.cpp
++@@ -238,17 +238,17 @@ class nsAtomSubTable
++   friend class nsAtomTable;
++   Mutex mLock;
++   PLDHashTable mTable;
++   nsAtomSubTable();
++   void GCLocked(GCKind aKind);
++   void AddSizeOfExcludingThisLocked(MallocSizeOf aMallocSizeOf,
++                                     AtomsSizes& aSizes);
++ 
++-  AtomTableEntry* Search(AtomTableKey& aKey)
+++  AtomTableEntry* Search(AtomTableKey& aKey) const
++   {
++     mLock.AssertCurrentThreadOwns();
++     return static_cast<AtomTableEntry*>(mTable.Search(&aKey));
++   }
++ 
++   AtomTableEntry* Add(AtomTableKey& aKey)
++   {
++     mLock.AssertCurrentThreadOwns();
+diff --git a/xpcom/ds/nsStaticNameTable.cpp b/xpcom/ds/nsStaticNameTable.cpp
+--- a/xpcom/ds/nsStaticNameTable.cpp
++++ b/xpcom/ds/nsStaticNameTable.cpp
+@@ -158,30 +158,30 @@ nsStaticCaseInsensitiveNameTable::~nsSta
+   for (uint32_t index = 0; index < mNameTable.EntryCount(); index++) {
+     mNameArray[index].~nsDependentCString();
+   }
+   free((void*)mNameArray);
+   MOZ_COUNT_DTOR(nsStaticCaseInsensitiveNameTable);
+ }
+ 
+ int32_t
+-nsStaticCaseInsensitiveNameTable::Lookup(const nsACString& aName)
++nsStaticCaseInsensitiveNameTable::Lookup(const nsACString& aName) const
+ {
+   NS_ASSERTION(mNameArray, "not inited");
+ 
+   const nsCString& str = PromiseFlatCString(aName);
+ 
+   NameTableKey key(mNameArray, &str);
+   auto entry = static_cast<NameTableEntry*>(mNameTable.Search(&key));
+ 
+   return entry ? entry->mIndex : nsStaticCaseInsensitiveNameTable::NOT_FOUND;
+ }
+ 
+ int32_t
+-nsStaticCaseInsensitiveNameTable::Lookup(const nsAString& aName)
++nsStaticCaseInsensitiveNameTable::Lookup(const nsAString& aName) const
+ {
+   NS_ASSERTION(mNameArray, "not inited");
+ 
+   const nsString& str = PromiseFlatString(aName);
+ 
+   NameTableKey key(mNameArray, &str);
+   auto entry = static_cast<NameTableEntry*>(mNameTable.Search(&key));
+ 
+diff --git a/xpcom/ds/nsStaticNameTable.h b/xpcom/ds/nsStaticNameTable.h
+--- a/xpcom/ds/nsStaticNameTable.h
++++ b/xpcom/ds/nsStaticNameTable.h
+@@ -28,18 +28,18 @@
+  *    as long as this table object - typically a static string array.
+  */
+ 
+ class nsStaticCaseInsensitiveNameTable
+ {
+ public:
+   enum { NOT_FOUND = -1 };
+ 
+-  int32_t          Lookup(const nsACString& aName);
+-  int32_t          Lookup(const nsAString& aName);
++  int32_t          Lookup(const nsACString& aName) const;
++  int32_t          Lookup(const nsAString& aName) const;
+   const nsCString& GetStringValue(int32_t aIndex);
+ 
+   nsStaticCaseInsensitiveNameTable(const char* const aNames[], int32_t aLength);
+   ~nsStaticCaseInsensitiveNameTable();
+ 
+ private:
+   nsDependentCString*   mNameArray;
+   PLDHashTable          mNameTable;
+diff --git a/xpcom/ds/nsTHashtable.h b/xpcom/ds/nsTHashtable.h
+--- a/xpcom/ds/nsTHashtable.h
++++ b/xpcom/ds/nsTHashtable.h
+@@ -127,17 +127,17 @@ public:
+    * Get the entry associated with a key.
+    * @param     aKey the key to retrieve
+    * @return    pointer to the entry class, if the key exists; nullptr if the
+    *            key doesn't exist
+    */
+   EntryType* GetEntry(KeyType aKey) const
+   {
+     return static_cast<EntryType*>(
+-      const_cast<PLDHashTable*>(&mTable)->Search(EntryType::KeyToPointer(aKey)));
++      mTable.Search(EntryType::KeyToPointer(aKey)));
+   }
+ 
+   /**
+    * Return true if an entry for the given key exists, false otherwise.
+    * @param     aKey the key to retrieve
+    * @return    true if the key exists, false if the key doesn't exist
+    */
+   bool Contains(KeyType aKey) const { return !!GetEntry(aKey); }
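Aside: the common thread in the hunks above is const-correctness for read-only lookups. Every Find/Search/GetEntry that leaves the table untouched gains a `const` qualifier, and the payoff shows in the nsTHashtable::GetEntry hunk, where the `const_cast<PLDHashTable*>` workaround disappears. A minimal sketch of the pattern, using illustrative names and std::unordered_map rather than the Mozilla classes:

```cpp
#include <cstdint>
#include <unordered_map>

struct Wrapper {};

class WrapperMap {
public:
    // Lookup is logically read-only, so it is const-qualified: callers
    // holding only a `const WrapperMap&` can still search the table.
    Wrapper* Find(uint32_t aKey) const {
        auto it = mTable.find(aKey);
        return it == mTable.end() ? nullptr : it->second;
    }

    void Add(uint32_t aKey, Wrapper* aWrapper) { mTable[aKey] = aWrapper; }

private:
    std::unordered_map<uint32_t, Wrapper*> mTable;
};

int main() {
    Wrapper w;
    WrapperMap map;
    map.Add(42, &w);
    const WrapperMap& constRef = map;  // a read-only view is enough to search
    return constRef.Find(42) == &w ? 0 : 1;
}
```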

+ 74 - 0
frg/work-js/mozilla-release/patches/1475980-63a1.patch

@@ -0,0 +1,74 @@
+# HG changeset patch
+# User Emilio Cobos Alvarez <emilio@crisal.io>
+# Date 1531749810 -7200
+# Node ID 1689d9068611d8635c4b2adc8d692f57c6bb8fc5
+# Parent  ca44c68906d0395259fbaca282c6e662abdcf481
+Bug 1475980: A moved table should be empty. r=froydnj
+
+MozReview-Commit-ID: 7K9wNGhIhaD
+
+
+diff --git a/xpcom/ds/PLDHashTable.cpp b/xpcom/ds/PLDHashTable.cpp
+--- a/xpcom/ds/PLDHashTable.cpp
++++ b/xpcom/ds/PLDHashTable.cpp
+@@ -239,21 +239,23 @@ PLDHashTable::operator=(PLDHashTable&& a
+   mHashShift = std::move(aOther.mHashShift);
+   mEntryCount = std::move(aOther.mEntryCount);
+   mRemovedCount = std::move(aOther.mRemovedCount);
+   mEntryStore.Set(aOther.mEntryStore.Get(), &mGeneration);
+ #ifdef DEBUG
+   mChecker = std::move(aOther.mChecker);
+ #endif
+ 
+-  // Clear up |aOther| so its destruction will be a no-op.
++  // Clear up |aOther| so its destruction will be a no-op and it reports being
++  // empty.
+   {
+ #ifdef DEBUG
+     AutoDestructorOp op(mChecker);
+ #endif
++    aOther.mEntryCount = 0;
+     aOther.mEntryStore.Set(nullptr, &aOther.mGeneration);
+   }
+ 
+   return *this;
+ }
+ 
+ PLDHashNumber
+ PLDHashTable::Hash1(PLDHashNumber aHash0) const
+diff --git a/xpcom/tests/gtest/TestHashtables.cpp b/xpcom/tests/gtest/TestHashtables.cpp
+--- a/xpcom/tests/gtest/TestHashtables.cpp
++++ b/xpcom/tests/gtest/TestHashtables.cpp
+@@ -272,16 +272,31 @@ TEST(Hashtable, THashtable)
+   testTHashtable(EntityToUnicode, ENTITY_COUNT);
+ 
+   EntityToUnicode.Clear();
+ 
+   count = nsTIterPrint(EntityToUnicode);
+   ASSERT_EQ(count, uint32_t(0));
+ }
+ 
++TEST(Hashtable, Move)
++{
++  const void* kPtr = reinterpret_cast<void*>(static_cast<uintptr_t>(0xbadc0de));
++
++  nsTHashtable<nsPtrHashKey<const void>> table;
++  table.PutEntry(kPtr);
++
++  nsTHashtable<nsPtrHashKey<const void>> moved = std::move(table);
++  ASSERT_EQ(table.Count(), 0u);
++  ASSERT_EQ(moved.Count(), 1u);
++
++  EXPECT_TRUE(moved.Contains(kPtr));
++  EXPECT_FALSE(table.Contains(kPtr));
++}
++
+ TEST(Hashtables, DataHashtable)
+ {
+   // check a data-hashtable
+   nsDataHashtable<nsUint32HashKey,const char*> UniToEntity(ENTITY_COUNT);
+ 
+   for (auto& entity : gEntities) {
+     UniToEntity.Put(entity.mUnicode, entity.mStr);
+   }
+
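Aside: the one-line fix above is easy to miss. Move-assignment already nulled the source's entry store, so its destruction was a no-op, but `Count()` kept reading the stale `mEntryCount` and the moved-from table claimed to be non-empty. A stripped-down sketch of the invariant the new gtest checks, with illustrative members rather than the real PLDHashTable fields:

```cpp
#include <cassert>
#include <cstdint>
#include <utility>

struct Table {
    uint32_t mEntryCount = 0;
    char* mEntryStore = nullptr;

    Table& operator=(Table&& aOther) {
        mEntryCount = aOther.mEntryCount;
        mEntryStore = aOther.mEntryStore;
        aOther.mEntryCount = 0;        // the fix: moved-from table reports empty
        aOther.mEntryStore = nullptr;  // already present: destruction is a no-op
        return *this;
    }

    uint32_t Count() const { return mEntryCount; }
};

int main() {
    Table a, b;
    a.mEntryCount = 1;   // stand-in for PutEntry()
    b = std::move(a);
    assert(a.Count() == 0 && b.Count() == 1);
    return 0;
}
```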

+ 90 - 0
frg/work-js/mozilla-release/patches/1477626-1-63a1.patch

@@ -0,0 +1,90 @@
+# HG changeset patch
+# User Nicholas Nethercote <nnethercote@mozilla.com>
+# Date 1532595166 -36000
+#      Thu Jul 26 18:52:46 2018 +1000
+# Node ID 6ef5c1f956f9ac5fc8f0f5d39454188679ea4060
+# Parent  cd5678331e973d2ea13191578f3d974f354824b7
+Bug 1477626 - Replace some bespoke code with a call to CeilingLog2(). r=Waldo
+
+After all, bug 543034 was fixed 9 years ago.
+
+MozReview-Commit-ID: HDPO3gGuQMx
+
+diff --git a/js/public/HashTable.h b/js/public/HashTable.h
+--- a/js/public/HashTable.h
++++ b/js/public/HashTable.h
+@@ -6,16 +6,17 @@
+ 
+ #ifndef js_HashTable_h
+ #define js_HashTable_h
+ 
+ #include "mozilla/Assertions.h"
+ #include "mozilla/Attributes.h"
+ #include "mozilla/Casting.h"
+ #include "mozilla/HashFunctions.h"
++#include "mozilla/MathAlgorithms.h"
+ #include "mozilla/MemoryChecking.h"
+ #include "mozilla/MemoryReporting.h"
+ #include "mozilla/Move.h"
+ #include "mozilla/Opaque.h"
+ #include "mozilla/PodOperations.h"
+ #include "mozilla/ReentrancyGuard.h"
+ #include "mozilla/TemplateLib.h"
+ #include "mozilla/TypeTraits.h"
+@@ -1248,18 +1249,17 @@ class HashTable : private AllocPolicy
+     } stats;
+ #   define METER(x) x
+ #else
+ #   define METER(x)
+ #endif
+ 
+     // The default initial capacity is 32 (enough to hold 16 elements), but it
+     // can be as low as 4.
+-    static const unsigned sMinCapacityLog2 = 2;
+-    static const unsigned sMinCapacity  = 1 << sMinCapacityLog2;
++    static const unsigned sMinCapacity  = 4;
+     static const unsigned sMaxInit      = JS_BIT(CAP_BITS - 1);
+     static const unsigned sMaxCapacity  = JS_BIT(CAP_BITS);
+     static const unsigned sHashBits     = mozilla::tl::BitSize<HashNumber>::value;
+ 
+     // Hash-table alpha is conceptually a fraction, but to avoid floating-point
+     // math we implement it as a ratio of integers.
+     static const uint8_t sAlphaDenominator = 4;
+     static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
+@@ -1356,32 +1356,28 @@ class HashTable : private AllocPolicy
+         // Compute the smallest capacity allowing |length| elements to be
+         // inserted without rehashing: ceil(length / max-alpha).  (Ceiling
+         // integral division: <http://stackoverflow.com/a/2745086>.)
+         uint32_t newCapacity =
+             (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator;
+         if (newCapacity < sMinCapacity)
+             newCapacity = sMinCapacity;
+ 
+-        // FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034).
+-        uint32_t roundUp = sMinCapacity, roundUpLog2 = sMinCapacityLog2;
+-        while (roundUp < newCapacity) {
+-            roundUp <<= 1;
+-            ++roundUpLog2;
+-        }
++        // Round up capacity to next power-of-two.
++        uint32_t log2 = mozilla::CeilingLog2(newCapacity);
++        newCapacity = 1u << log2;
+ 
+-        newCapacity = roundUp;
+         MOZ_ASSERT(newCapacity >= length);
+         MOZ_ASSERT(newCapacity <= sMaxCapacity);
+ 
+         table = createTable(*this, newCapacity);
+         if (!table)
+             return false;
+ 
+-        setTableSizeLog2(roundUpLog2);
++        setTableSizeLog2(log2);
+         METER(memset(&stats, 0, sizeof(stats)));
+         return true;
+     }
+ 
+     bool initialized() const
+     {
+         return !!table;
+     }
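Aside: the deleted FIXME refers to bug 543034, a PGO crash that once forced the hand-rolled shift-and-count loop; with that fixed long ago, the loop collapses into a single ceiling-log2 call. A self-contained sketch of the equivalence, where CeilLog2 is a stand-in for mozilla::CeilingLog2 (reimplemented here only so the snippet builds without MFBT):

```cpp
#include <cassert>
#include <cstdint>

// Stand-in for mozilla::CeilingLog2 from mozilla/MathAlgorithms.h; the real
// one uses a count-leading-zeroes intrinsic rather than a loop.
static uint32_t CeilLog2(uint32_t aValue) {
    uint32_t log2 = 0;
    while ((1u << log2) < aValue)
        ++log2;
    return log2;
}

int main() {
    uint32_t newCapacity = 21;           // e.g. ceil(length / max-alpha)
    uint32_t log2 = CeilLog2(newCapacity);
    newCapacity = 1u << log2;            // round up to the next power of two
    assert(log2 == 5 && newCapacity == 32);
    return 0;
}
```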

+ 91 - 0
frg/work-js/mozilla-release/patches/1477626-2-63a1.patch

@@ -0,0 +1,91 @@
+# HG changeset patch
+# User Nicholas Nethercote <nnethercote@mozilla.com>
+# Date 1532595166 -36000
+#      Thu Jul 26 18:52:46 2018 +1000
+# Node ID cff709da0b230613b24baea53cf8e047fd449f6e
+# Parent  6ef5c1f956f9ac5fc8f0f5d39454188679ea4060
+Bug 1477626 - Use `uint32_t` instead of `unsigned` in HashTable.h. r=Waldo
+
+Because it's more precise, and gives us more consistency.
+
+MozReview-Commit-ID: BLYXYSHgZ7v
+
+diff --git a/js/public/HashTable.h b/js/public/HashTable.h
+--- a/js/public/HashTable.h
++++ b/js/public/HashTable.h
+@@ -1249,32 +1249,32 @@ class HashTable : private AllocPolicy
+     } stats;
+ #   define METER(x) x
+ #else
+ #   define METER(x)
+ #endif
+ 
+     // The default initial capacity is 32 (enough to hold 16 elements), but it
+     // can be as low as 4.
+-    static const unsigned sMinCapacity  = 4;
+-    static const unsigned sMaxInit      = JS_BIT(CAP_BITS - 1);
+-    static const unsigned sMaxCapacity  = JS_BIT(CAP_BITS);
+-    static const unsigned sHashBits     = mozilla::tl::BitSize<HashNumber>::value;
++    static const uint32_t sMinCapacity  = 4;
++    static const uint32_t sMaxInit      = JS_BIT(CAP_BITS - 1);
++    static const uint32_t sMaxCapacity  = JS_BIT(CAP_BITS);
++    static const uint32_t sHashBits     = mozilla::tl::BitSize<HashNumber>::value;
+ 
+     // Hash-table alpha is conceptually a fraction, but to avoid floating-point
+     // math we implement it as a ratio of integers.
+     static const uint8_t sAlphaDenominator = 4;
+     static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
+     static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
+ 
+     static const HashNumber sFreeKey = Entry::sFreeKey;
+     static const HashNumber sRemovedKey = Entry::sRemovedKey;
+     static const HashNumber sCollisionBit = Entry::sCollisionBit;
+ 
+-    void setTableSizeLog2(unsigned sizeLog2)
++    void setTableSizeLog2(uint32_t sizeLog2)
+     {
+         hashShift = sHashBits - sizeLog2;
+     }
+ 
+     static bool isLiveHash(HashNumber hash)
+     {
+         return Entry::isLiveHash(hash);
+     }
+@@ -1397,17 +1397,17 @@ class HashTable : private AllocPolicy
+     struct DoubleHash
+     {
+         HashNumber h2;
+         HashNumber sizeMask;
+     };
+ 
+     DoubleHash hash2(HashNumber curKeyHash) const
+     {
+-        unsigned sizeLog2 = sHashBits - hashShift;
++        uint32_t sizeLog2 = sHashBits - hashShift;
+         DoubleHash dh = {
+             ((curKeyHash << sizeLog2) >> hashShift) | 1,
+             (HashNumber(1) << sizeLog2) - 1
+         };
+         return dh;
+     }
+ 
+     static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh)
+@@ -1443,17 +1443,17 @@ class HashTable : private AllocPolicy
+     }
+ 
+     // Warning: in order for readonlyThreadsafeLookup() to be safe this
+     // function must not modify the table in any way when |collisionBit| is 0.
+     // (The use of the METER() macro to increment stats violates this
+     // restriction but we will live with that for now because it's enabled so
+     // rarely.)
+     MOZ_ALWAYS_INLINE Entry&
+-    lookup(const Lookup& l, HashNumber keyHash, unsigned collisionBit) const
++    lookup(const Lookup& l, HashNumber keyHash, uint32_t collisionBit) const
+     {
+         MOZ_ASSERT(isLiveHash(keyHash));
+         MOZ_ASSERT(!(keyHash & sCollisionBit));
+         MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
+         MOZ_ASSERT(table);
+         METER(stats.searches++);
+ 
+         // Compute the primary hash address.

+ 818 - 0
frg/work-js/mozilla-release/patches/1477626-3-63a1.patch

@@ -0,0 +1,818 @@
+# HG changeset patch
+# User Nicholas Nethercote <nnethercote@mozilla.com>
+# Date 1532595166 -36000
+#      Thu Jul 26 18:52:46 2018 +1000
+# Node ID 9cf98793e243bd1fa1413d70cf957b9a4f4d54f4
+# Parent  4881d9e1fd464d1d9c604e5f56a3e45f599d9731
+Bug 1477626 - Introduce mozilla::HashNumber and use it in various places. r=Waldo
+
+Currently we have three ways of representing hash values.
+
+- uint32_t: used in HashFunctions.h.
+
+- PLDHashNumber: defined in PLDHashTable.{h,cpp}.
+
+- js::HashNumber: defined in js/public/Utility.h.
+
+Functions that create hash values with functions from HashFunctions.h use a mix
+of these three types. It's a bit of a mess.
+
+This patch introduces mozilla::HashNumber, and redefines PLDHashNumber and
+js::HashNumber as synonyms. It also changes HashFunctions.h to use
+mozilla::HashNumber throughout instead of uint32_t.
+
+This leaves plenty of places that still use uint32_t that should use
+mozilla::HashNumber or one of its synonyms, but I didn't want to tackle that
+now.
+
+The patch also:
+
+- Does similar things for the constants defining the number of bits in each
+  hash number type.
+
+- Moves js::HashNumber from Utility.h to HashTable.h, which is a better spot
+  for it. (This required changing the signature of ScrambleHashCode(); that's
+  ok, it'll get moved by the next patch anyway.)
+
+MozReview-Commit-ID: EdoWlCm7OUC
+
+diff --git a/js/public/HashTable.h b/js/public/HashTable.h
+--- a/js/public/HashTable.h
++++ b/js/public/HashTable.h
+@@ -13,24 +13,26 @@
+ #include "mozilla/HashFunctions.h"
+ #include "mozilla/MathAlgorithms.h"
+ #include "mozilla/MemoryChecking.h"
+ #include "mozilla/MemoryReporting.h"
+ #include "mozilla/Move.h"
+ #include "mozilla/Opaque.h"
+ #include "mozilla/PodOperations.h"
+ #include "mozilla/ReentrancyGuard.h"
+-#include "mozilla/TemplateLib.h"
+ #include "mozilla/TypeTraits.h"
+ #include "mozilla/UniquePtr.h"
+ 
+ #include "js/Utility.h"
+ 
+ namespace js {
+ 
++using HashNumber = mozilla::HashNumber;
++static const uint32_t kHashNumberBits = mozilla::kHashNumberBits;
++
+ class TempAllocPolicy;
+ template <class> struct DefaultHasher;
+ template <class, class> class HashMapEntry;
+ namespace detail {
+     template <typename T> class HashTableEntry;
+     template <class T, class HashPolicy, class AllocPolicy> class HashTable;
+ } // namespace detail
+ 
+@@ -1252,31 +1254,30 @@ class HashTable : private AllocPolicy
+ #   define METER(x)
+ #endif
+ 
+     // The default initial capacity is 32 (enough to hold 16 elements), but it
+     // can be as low as 4.
+     static const uint32_t sMinCapacity  = 4;
+     static const uint32_t sMaxInit      = JS_BIT(CAP_BITS - 1);
+     static const uint32_t sMaxCapacity  = JS_BIT(CAP_BITS);
+-    static const uint32_t sHashBits     = mozilla::tl::BitSize<HashNumber>::value;
+ 
+     // Hash-table alpha is conceptually a fraction, but to avoid floating-point
+     // math we implement it as a ratio of integers.
+     static const uint8_t sAlphaDenominator = 4;
+     static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
+     static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
+ 
+     static const HashNumber sFreeKey = Entry::sFreeKey;
+     static const HashNumber sRemovedKey = Entry::sRemovedKey;
+     static const HashNumber sCollisionBit = Entry::sCollisionBit;
+ 
+     void setTableSizeLog2(uint32_t sizeLog2)
+     {
+-        hashShift = sHashBits - sizeLog2;
++        hashShift = js::kHashNumberBits - sizeLog2;
+     }
+ 
+     static bool isLiveHash(HashNumber hash)
+     {
+         return Entry::isLiveHash(hash);
+     }
+ 
+     static HashNumber prepareHash(const Lookup& l)
+@@ -1321,17 +1322,17 @@ class HashTable : private AllocPolicy
+             e->~Entry();
+         alloc.free_(oldTable, capacity);
+     }
+ 
+   public:
+     explicit HashTable(AllocPolicy ap)
+       : AllocPolicy(ap)
+       , gen(0)
+-      , hashShift(sHashBits)
++      , hashShift(js::kHashNumberBits)
+       , table(nullptr)
+       , entryCount(0)
+       , removedCount(0)
+ #ifdef JS_DEBUG
+       , mutationCount(0)
+       , mEntered(false)
+ #endif
+     {}
+@@ -1397,17 +1398,17 @@ class HashTable : private AllocPolicy
+     struct DoubleHash
+     {
+         HashNumber h2;
+         HashNumber sizeMask;
+     };
+ 
+     DoubleHash hash2(HashNumber curKeyHash) const
+     {
+-        uint32_t sizeLog2 = sHashBits - hashShift;
++        uint32_t sizeLog2 = js::kHashNumberBits - hashShift;
+         DoubleHash dh = {
+             ((curKeyHash << sizeLog2) >> hashShift) | 1,
+             (HashNumber(1) << sizeLog2) - 1
+         };
+         return dh;
+     }
+ 
+     static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh)
+@@ -1547,17 +1548,17 @@ class HashTable : private AllocPolicy
+ 
+     enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
+ 
+     RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure)
+     {
+         // Look, but don't touch, until we succeed in getting new entry store.
+         Entry* oldTable = table;
+         uint32_t oldCap = capacity();
+-        uint32_t newLog2 = sHashBits - hashShift + deltaLog2;
++        uint32_t newLog2 = js::kHashNumberBits - hashShift + deltaLog2;
+         uint32_t newCapacity = JS_BIT(newLog2);
+         if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
+             if (reportFailure)
+                 this->reportAllocOverflow();
+             return RehashFailed;
+         }
+ 
+         Entry* newTable = createTable(*this, newCapacity, reportFailure);
+@@ -1786,17 +1787,17 @@ class HashTable : private AllocPolicy
+     {
+         MOZ_ASSERT(table);
+         return entryCount;
+     }
+ 
+     uint32_t capacity() const
+     {
+         MOZ_ASSERT(table);
+-        return JS_BIT(sHashBits - hashShift);
++        return JS_BIT(js::kHashNumberBits - hashShift);
+     }
+ 
+     Generation generation() const
+     {
+         MOZ_ASSERT(table);
+         return Generation(gen);
+     }
+ 
+diff --git a/js/public/Utility.h b/js/public/Utility.h
+--- a/js/public/Utility.h
++++ b/js/public/Utility.h
+@@ -661,20 +661,16 @@ struct FreePolicy
+ 
+ typedef mozilla::UniquePtr<char[], JS::FreePolicy> UniqueChars;
+ typedef mozilla::UniquePtr<char16_t[], JS::FreePolicy> UniqueTwoByteChars;
+ 
+ } // namespace JS
+ 
+ namespace js {
+ 
+-/* Integral types for all hash functions. */
+-typedef uint32_t HashNumber;
+-const unsigned HashNumberSizeBits = 32;
+-
+ namespace detail {
+ 
+ /*
+  * Given a raw hash code, h, return a number that can be used to select a hash
+  * bucket.
+  *
+  * This function aims to produce as uniform an output distribution as possible,
+  * especially in the most significant (leftmost) bits, even though the input
+@@ -682,35 +678,35 @@ namespace detail {
+  * be deterministic and quick to compute.
+  *
+  * Since the leftmost bits of the result are best, the hash bucket index is
+  * computed by doing ScrambleHashCode(h) / (2^32/N) or the equivalent
+  * right-shift, not ScrambleHashCode(h) % N or the equivalent bit-mask.
+  *
+  * FIXME: OrderedHashTable uses a bit-mask; see bug 775896.
+  */
+-inline HashNumber
+-ScrambleHashCode(HashNumber h)
++inline uint32_t
++ScrambleHashCode(uint32_t h)
+ {
+     /*
+      * Simply returning h would not cause any hash tables to produce wrong
+      * answers. But it can produce pathologically bad performance: The caller
+      * right-shifts the result, keeping only the highest bits. The high bits of
+      * hash codes are very often completely entropy-free. (So are the lowest
+      * bits.)
+      *
+      * So we use Fibonacci hashing, as described in Knuth, The Art of Computer
+      * Programming, 6.4. This mixes all the bits of the input hash code h.
+      *
+      * The value of goldenRatio is taken from the hex
+      * expansion of the golden ratio, which starts 1.9E3779B9....
+      * This value is especially good if values with consecutive hash codes
+      * are stored in a hash table; see Knuth for details.
+      */
+-    static const HashNumber goldenRatio = 0x9E3779B9U;
++    static const uint32_t goldenRatio = 0x9E3779B9U;
+     return mozilla::WrappingMultiply(h, goldenRatio);
+ }
+ 
+ } /* namespace detail */
+ 
+ } /* namespace js */
+ 
+ /* sixgill annotation defines */
+diff --git a/js/src/ds/OrderedHashTable.h b/js/src/ds/OrderedHashTable.h
+--- a/js/src/ds/OrderedHashTable.h
++++ b/js/src/ds/OrderedHashTable.h
+@@ -130,17 +130,17 @@ class OrderedHashTable
+ 
+         // clear() requires that members are assigned only after all allocation
+         // has succeeded, and that this->ranges is left untouched.
+         hashTable = tableAlloc;
+         data = dataAlloc;
+         dataLength = 0;
+         dataCapacity = capacity;
+         liveCount = 0;
+-        hashShift = HashNumberSizeBits - initialBucketsLog2();
++        hashShift = js::kHashNumberBits - initialBucketsLog2();
+         MOZ_ASSERT(hashBuckets() == buckets);
+         return true;
+     }
+ 
+     ~OrderedHashTable() {
+         forEachRange<Range::onTableDestroyed>();
+         alloc.free_(hashTable, hashBuckets());
+         freeData(data, dataLength, dataCapacity);
+@@ -619,17 +619,17 @@ class OrderedHashTable
+   public:
+     HashNumber prepareHash(const Lookup& l) const {
+         return ScrambleHashCode(Ops::hash(l, hcs));
+     }
+ 
+   private:
+     /* The size of hashTable, in elements. Always a power of two. */
+     uint32_t hashBuckets() const {
+-        return 1 << (HashNumberSizeBits - hashShift);
++        return 1 << (js::kHashNumberBits - hashShift);
+     }
+ 
+     static void destroyData(Data* data, uint32_t length) {
+         for (Data* p = data + length; p != data; )
+             (--p)->~Data();
+     }
+ 
+     void freeData(Data* data, uint32_t length, uint32_t capacity) {
+@@ -691,17 +691,17 @@ class OrderedHashTable
+         // If the size of the table is not changing, rehash in place to avoid
+         // allocating memory.
+         if (newHashShift == hashShift) {
+             rehashInPlace();
+             return true;
+         }
+ 
+         size_t newHashBuckets =
+-            size_t(1) << (HashNumberSizeBits - newHashShift);
++            size_t(1) << (js::kHashNumberBits - newHashShift);
+         Data** newHashTable = alloc.template pod_malloc<Data*>(newHashBuckets);
+         if (!newHashTable)
+             return false;
+         for (uint32_t i = 0; i < newHashBuckets; i++)
+             newHashTable[i] = nullptr;
+ 
+         uint32_t newCapacity = uint32_t(newHashBuckets * fillFactor());
+         Data* newData = alloc.template pod_malloc<Data>(newCapacity);
+diff --git a/mfbt/HashFunctions.h b/mfbt/HashFunctions.h
+--- a/mfbt/HashFunctions.h
++++ b/mfbt/HashFunctions.h
+@@ -2,18 +2,18 @@
+ /* vim: set ts=8 sts=2 et sw=2 tw=80: */
+ /* This Source Code Form is subject to the terms of the Mozilla Public
+  * License, v. 2.0. If a copy of the MPL was not distributed with this
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+ /* Utilities for hashing. */
+ 
+ /*
+- * This file exports functions for hashing data down to a 32-bit value,
+- * including:
++ * This file exports functions for hashing data down to a uint32_t (a.k.a.
++ * mozilla::HashNumber), including:
+  *
+  *  - HashString    Hash a char* or char16_t/wchar_t* of known or unknown
+  *                  length.
+  *
+  *  - HashBytes     Hash a byte array of known length.
+  *
+  *  - HashGeneric   Hash one or more values.  Currently, we support uint32_t,
+  *                  types which can be implicitly cast to uint32_t, data
+@@ -27,19 +27,19 @@
+  *
+  *  class ComplexObject
+  *  {
+  *    char* mStr;
+  *    uint32_t mUint1, mUint2;
+  *    void (*mCallbackFn)();
+  *
+  *  public:
+- *    uint32_t hash()
++ *    HashNumber hash()
+  *    {
+- *      uint32_t hash = HashString(mStr);
++ *      HashNumber hash = HashString(mStr);
+  *      hash = AddToHash(hash, mUint1, mUint2);
+  *      return AddToHash(hash, mCallbackFn);
+  *    }
+  *  };
+  *
+  * If you want to hash an nsAString or nsACString, use the HashString functions
+  * in nsHashKeys.h.
+  */
+@@ -53,32 +53,35 @@
+ #include "mozilla/MathAlgorithms.h"
+ #include "mozilla/Types.h"
+ #include "mozilla/WrappingOperations.h"
+ 
+ #include <stdint.h>
+ 
+ namespace mozilla {
+ 
++using HashNumber = uint32_t;
++static const uint32_t kHashNumberBits = 32;
++
+ /**
+  * The golden ratio as a 32-bit fixed-point value.
+  */
+-static const uint32_t kGoldenRatioU32 = 0x9E3779B9U;
++static const HashNumber kGoldenRatioU32 = 0x9E3779B9U;
+ 
+ namespace detail {
+ 
+ MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW
+-constexpr uint32_t
+-RotateLeft5(uint32_t aValue)
++constexpr HashNumber
++RotateLeft5(HashNumber aValue)
+ {
+   return (aValue << 5) | (aValue >> 27);
+ }
+ 
+-constexpr uint32_t
+-AddU32ToHash(uint32_t aHash, uint32_t aValue)
++constexpr HashNumber
++AddU32ToHash(HashNumber aHash, uint32_t aValue)
+ {
+   /*
+    * This is the meat of all our hash routines.  This hash function is not
+    * particularly sophisticated, but it seems to work well for our mostly
+    * plain-text inputs.  Implementation notes follow.
+    *
+    * Our use of the golden ratio here is arbitrary; we could pick almost any
+    * number which:
+@@ -119,25 +122,25 @@ AddU32ToHash(uint32_t aHash, uint32_t aV
+   return mozilla::WrappingMultiply(kGoldenRatioU32,
+                                    RotateLeft5(aHash) ^ aValue);
+ }
+ 
+ /**
+  * AddUintptrToHash takes sizeof(uintptr_t) as a template parameter.
+  */
+ template<size_t PtrSize>
+-constexpr uint32_t
+-AddUintptrToHash(uint32_t aHash, uintptr_t aValue)
++constexpr HashNumber
++AddUintptrToHash(HashNumber aHash, uintptr_t aValue)
+ {
+   return AddU32ToHash(aHash, static_cast<uint32_t>(aValue));
+ }
+ 
+ template<>
+-inline uint32_t
+-AddUintptrToHash<8>(uint32_t aHash, uintptr_t aValue)
++inline HashNumber
++AddUintptrToHash<8>(HashNumber aHash, uintptr_t aValue)
+ {
+   uint32_t v1 = static_cast<uint32_t>(aValue);
+   uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
+   return AddU32ToHash(AddU32ToHash(aHash, v1), v2);
+ }
+ 
+ } /* namespace detail */
+ 
+@@ -146,188 +149,188 @@ AddUintptrToHash<8>(uint32_t aHash, uint
+  * inputs.
+  *
+  * Currently, we support hashing uint32_t's, values which we can implicitly
+  * convert to uint32_t, data pointers, and function pointers.
+  */
+ template<typename T,
+          bool TypeIsNotIntegral = !mozilla::IsIntegral<T>::value,
+          typename U = typename mozilla::EnableIf<TypeIsNotIntegral>::Type>
+-MOZ_MUST_USE inline uint32_t
+-AddToHash(uint32_t aHash, T aA)
++MOZ_MUST_USE inline HashNumber
++AddToHash(HashNumber aHash, T aA)
+ {
+   /*
+    * Try to convert |A| to uint32_t implicitly.  If this works, great.  If not,
+    * we'll error out.
+    */
+   return detail::AddU32ToHash(aHash, aA);
+ }
+ 
+ template<typename A>
+-MOZ_MUST_USE inline uint32_t
+-AddToHash(uint32_t aHash, A* aA)
++MOZ_MUST_USE inline HashNumber
++AddToHash(HashNumber aHash, A* aA)
+ {
+   /*
+    * You might think this function should just take a void*.  But then we'd only
+    * catch data pointers and couldn't handle function pointers.
+    */
+ 
+   static_assert(sizeof(aA) == sizeof(uintptr_t), "Strange pointer!");
+ 
+   return detail::AddUintptrToHash<sizeof(uintptr_t)>(aHash, uintptr_t(aA));
+ }
+ 
+ // We use AddUintptrToHash() for hashing all integral types.  8-byte integral types
+ // are treated the same as 64-bit pointers, and smaller integral types are first
+ // implicitly converted to 32 bits and then passed to AddUintptrToHash() to be hashed.
+ template<typename T,
+          typename U = typename mozilla::EnableIf<mozilla::IsIntegral<T>::value>::Type>
+-MOZ_MUST_USE constexpr uint32_t
+-AddToHash(uint32_t aHash, T aA)
++MOZ_MUST_USE constexpr HashNumber
++AddToHash(HashNumber aHash, T aA)
+ {
+   return detail::AddUintptrToHash<sizeof(T)>(aHash, aA);
+ }
+ 
+ template<typename A, typename... Args>
+-MOZ_MUST_USE uint32_t
+-AddToHash(uint32_t aHash, A aArg, Args... aArgs)
++MOZ_MUST_USE HashNumber
++AddToHash(HashNumber aHash, A aArg, Args... aArgs)
+ {
+   return AddToHash(AddToHash(aHash, aArg), aArgs...);
+ }
+ 
+ /**
+  * The HashGeneric class of functions let you hash one or more values.
+  *
+  * If you want to hash together two values x and y, calling HashGeneric(x, y) is
+  * much better than calling AddToHash(x, y), because AddToHash(x, y) assumes
+  * that x has already been hashed.
+  */
+ template<typename... Args>
+-MOZ_MUST_USE inline uint32_t
++MOZ_MUST_USE inline HashNumber
+ HashGeneric(Args... aArgs)
+ {
+   return AddToHash(0, aArgs...);
+ }
+ 
+ namespace detail {
+ 
+ template<typename T>
+-uint32_t
++HashNumber
+ HashUntilZero(const T* aStr)
+ {
+-  uint32_t hash = 0;
++  HashNumber hash = 0;
+   for (T c; (c = *aStr); aStr++) {
+     hash = AddToHash(hash, c);
+   }
+   return hash;
+ }
+ 
+ // This is a `constexpr` alternative to HashUntilZero(const T*). It should
+ // only be used for compile-time computation because it uses recursion.
+ // XXX: once support for GCC 4.9 is dropped, this function should be removed
+ // and HashUntilZero(const T*) should be made `constexpr`.
+ template<typename T>
+-constexpr uint32_t
+-ConstExprHashUntilZero(const T* aStr, uint32_t aHash)
++constexpr HashNumber
++ConstExprHashUntilZero(const T* aStr, HashNumber aHash)
+ {
+   return !*aStr
+        ? aHash
+        : ConstExprHashUntilZero(aStr + 1, AddToHash(aHash, *aStr));
+ }
+ 
+ template<typename T>
+-uint32_t
++HashNumber
+ HashKnownLength(const T* aStr, size_t aLength)
+ {
+-  uint32_t hash = 0;
++  HashNumber hash = 0;
+   for (size_t i = 0; i < aLength; i++) {
+     hash = AddToHash(hash, aStr[i]);
+   }
+   return hash;
+ }
+ 
+ } /* namespace detail */
+ 
+ /**
+  * The HashString overloads below do just what you'd expect.
+  *
+  * If you have the string's length, you might as well call the overload which
+  * includes the length.  It may be marginally faster.
+  */
+-MOZ_MUST_USE inline uint32_t
++MOZ_MUST_USE inline HashNumber
+ HashString(const char* aStr)
+ {
+   return detail::HashUntilZero(reinterpret_cast<const unsigned char*>(aStr));
+ }
+ 
+-MOZ_MUST_USE inline uint32_t
++MOZ_MUST_USE inline HashNumber
+ HashString(const char* aStr, size_t aLength)
+ {
+   return detail::HashKnownLength(reinterpret_cast<const unsigned char*>(aStr), aLength);
+ }
+ 
+ MOZ_MUST_USE
+-inline uint32_t
++inline HashNumber
+ HashString(const unsigned char* aStr, size_t aLength)
+ {
+   return detail::HashKnownLength(aStr, aLength);
+ }
+ 
+-MOZ_MUST_USE inline uint32_t
++MOZ_MUST_USE inline HashNumber
+ HashString(const char16_t* aStr)
+ {
+   return detail::HashUntilZero(aStr);
+ }
+ 
+ // This is a `constexpr` alternative to HashString(const char16_t*). It should
+ // only be used for compile-time computation because it uses recursion.
+ //
+ // You may need to use the
+ // MOZ_{PUSH,POP}_DISABLE_INTEGRAL_CONSTANT_OVERFLOW_WARNING macros if you use
+ // this function. See the comment on those macros' definitions for more detail.
+ //
+ // XXX: once support for GCC 4.9 is dropped, this function should be removed
+ // and HashString(const char16_t*) should be made `constexpr`.
+-MOZ_MUST_USE constexpr uint32_t
++MOZ_MUST_USE constexpr HashNumber
+ ConstExprHashString(const char16_t* aStr)
+ {
+   return detail::ConstExprHashUntilZero(aStr, 0);
+ }
+ 
+-MOZ_MUST_USE inline uint32_t
++MOZ_MUST_USE inline HashNumber
+ HashString(const char16_t* aStr, size_t aLength)
+ {
+   return detail::HashKnownLength(aStr, aLength);
+ }
+ 
+ /*
+  * On Windows, wchar_t is not the same as char16_t, even though it's
+  * the same width!
+  */
+ #ifdef WIN32
+-MOZ_MUST_USE inline uint32_t
++MOZ_MUST_USE inline HashNumber
+ HashString(const wchar_t* aStr)
+ {
+   return detail::HashUntilZero(aStr);
+ }
+ 
+-MOZ_MUST_USE inline uint32_t
++MOZ_MUST_USE inline HashNumber
+ HashString(const wchar_t* aStr, size_t aLength)
+ {
+   return detail::HashKnownLength(aStr, aLength);
+ }
+ #endif
+ 
+ /**
+  * Hash some number of bytes.
+  *
+  * This hash walks word-by-word, rather than byte-by-byte, so you won't get the
+  * same result out of HashBytes as you would out of HashString.
+  */
+-MOZ_MUST_USE extern MFBT_API uint32_t
++MOZ_MUST_USE extern MFBT_API HashNumber
+ HashBytes(const void* bytes, size_t aLength);
+ 
+ /**
+  * A pseudorandom function mapping 32-bit integers to 32-bit integers.
+  *
+  * This is for when you're feeding private data (like pointer values or credit
+  * card numbers) to a non-crypto hash function (like HashBytes) and then using
+  * the hash code for something that untrusted parties could observe (like a JS
+@@ -350,20 +353,20 @@ class HashCodeScrambler
+ public:
+   /** Creates a new scrambler with the given 128-bit key. */
+   constexpr HashCodeScrambler(uint64_t aK0, uint64_t aK1) : mK0(aK0), mK1(aK1) {}
+ 
+   /**
+    * Scramble a hash code. Always produces the same result for the same
+    * combination of key and hash code.
+    */
+-  uint32_t scramble(uint32_t aHashCode) const
++  HashNumber scramble(HashNumber aHashCode) const
+   {
+     SipHasher hasher(mK0, mK1);
+-    return uint32_t(hasher.sipHash(aHashCode));
++    return HashNumber(hasher.sipHash(aHashCode));
+   }
+ 
+ private:
+   struct SipHasher
+   {
+     SipHasher(uint64_t aK0, uint64_t aK1)
+     {
+       // 1. Initialization.
+diff --git a/xpcom/ds/PLDHashTable.cpp b/xpcom/ds/PLDHashTable.cpp
+--- a/xpcom/ds/PLDHashTable.cpp
++++ b/xpcom/ds/PLDHashTable.cpp
+@@ -188,17 +188,17 @@ PLDHashTable::HashShift(uint32_t aEntryS
+   BestCapacity(aLength, &capacity, &log2);
+ 
+   uint32_t nbytes;
+   if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
+     MOZ_CRASH("Initial entry store size is too large");
+   }
+ 
+   // Compute the hashShift value.
+-  return kHashBits - log2;
++  return kPLDHashNumberBits - log2;
+ }
+ 
+ PLDHashTable::PLDHashTable(const PLDHashTableOps* aOps, uint32_t aEntrySize,
+                            uint32_t aLength)
+   : mOps(aOps)
+   , mEntryStore()
+   , mGeneration(0)
+   , mHashShift(HashShift(aEntrySize, aLength))
+@@ -262,17 +262,17 @@ PLDHashTable::Hash1(PLDHashNumber aHash0
+ {
+   return aHash0 >> mHashShift;
+ }
+ 
+ void
+ PLDHashTable::Hash2(PLDHashNumber aHash0,
+                     uint32_t& aHash2Out, uint32_t& aSizeMaskOut) const
+ {
+-  uint32_t sizeLog2 = kHashBits - mHashShift;
++  uint32_t sizeLog2 = kPLDHashNumberBits - mHashShift;
+   uint32_t sizeMask = (PLDHashNumber(1) << sizeLog2) - 1;
+   aSizeMaskOut = sizeMask;
+ 
+   // The incoming aHash0 always has the low bit unset (since we leave it
+   // free for the collision flag), and should have reasonably random
+   // data in the other 31 bits.  We used the high bits of aHash0 for
+   // Hash1, so we use the low bits here.  If the table size is large,
+   // the bits we use may overlap, but that's still more random than
+@@ -463,17 +463,17 @@ PLDHashTable::FindFreeEntry(PLDHashNumbe
+ }
+ 
+ bool
+ PLDHashTable::ChangeTable(int32_t aDeltaLog2)
+ {
+   MOZ_ASSERT(mEntryStore.Get());
+ 
+   // Look, but don't touch, until we succeed in getting new entry store.
+-  int32_t oldLog2 = kHashBits - mHashShift;
++  int32_t oldLog2 = kPLDHashNumberBits - mHashShift;
+   int32_t newLog2 = oldLog2 + aDeltaLog2;
+   uint32_t newCapacity = 1u << newLog2;
+   if (newCapacity > kMaxCapacity) {
+     return false;
+   }
+ 
+   uint32_t nbytes;
+   if (!SizeOfEntryStore(newCapacity, mEntrySize, &nbytes)) {
+@@ -481,17 +481,17 @@ PLDHashTable::ChangeTable(int32_t aDelta
+   }
+ 
+   char* newEntryStore = (char*)calloc(1, nbytes);
+   if (!newEntryStore) {
+     return false;
+   }
+ 
+   // We can't fail from here on, so update table parameters.
+-  mHashShift = kHashBits - newLog2;
++  mHashShift = kPLDHashNumberBits - newLog2;
+   mRemovedCount = 0;
+ 
+   // Assign the new entry store to table.
+   char* oldEntryStore;
+   char* oldEntryAddr;
+   oldEntryAddr = oldEntryStore = mEntryStore.Get();
+   mEntryStore.Set(newEntryStore, &mGeneration);
+   PLDHashMoveEntry moveEntry = mOps->moveEntry;
+@@ -685,17 +685,17 @@ void
+ PLDHashTable::ShrinkIfAppropriate()
+ {
+   uint32_t capacity = Capacity();
+   if (mRemovedCount >= capacity >> 2 ||
+       (capacity > kMinCapacity && mEntryCount <= MinLoad(capacity))) {
+     uint32_t log2;
+     BestCapacity(mEntryCount, &capacity, &log2);
+ 
+-    int32_t deltaLog2 = log2 - (kHashBits - mHashShift);
++    int32_t deltaLog2 = log2 - (kPLDHashNumberBits - mHashShift);
+     MOZ_ASSERT(deltaLog2 <= 0);
+ 
+     (void) ChangeTable(deltaLog2);
+   }
+ }
+ 
+ size_t
+ PLDHashTable::ShallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
+diff --git a/xpcom/ds/PLDHashTable.h b/xpcom/ds/PLDHashTable.h
+--- a/xpcom/ds/PLDHashTable.h
++++ b/xpcom/ds/PLDHashTable.h
+@@ -5,22 +5,24 @@
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+ #ifndef PLDHashTable_h
+ #define PLDHashTable_h
+ 
+ #include "mozilla/Atomics.h"
+ #include "mozilla/Attributes.h" // for MOZ_ALWAYS_INLINE
+ #include "mozilla/fallible.h"
++#include "mozilla/HashFunctions.h"
+ #include "mozilla/MemoryReporting.h"
+ #include "mozilla/Move.h"
+ #include "mozilla/Types.h"
+ #include "nscore.h"
+ 
+-typedef uint32_t PLDHashNumber;
++using PLDHashNumber = mozilla::HashNumber;
++static const uint32_t kPLDHashNumberBits = mozilla::kHashNumberBits;
+ 
+ class PLDHashTable;
+ struct PLDHashTableOps;
+ 
+ // Table entry header structure.
+ //
+ // In order to allow in-line allocation of key and value, we do not declare
+ // either here. Instead, the API uses const void *key as a formal parameter.
+@@ -495,17 +497,16 @@ public:
+   Iterator ConstIter() const
+   {
+     return Iterator(const_cast<PLDHashTable*>(this));
+   }
+ 
+ private:
+   // Multiplicative hash uses an unsigned 32 bit integer and the golden ratio,
+   // expressed as a fixed-point 32-bit fraction.
+-  static const uint32_t kHashBits = 32;
+   static const uint32_t kGoldenRatio = 0x9E3779B9U;
+ 
+   static uint32_t HashShift(uint32_t aEntrySize, uint32_t aLength);
+ 
+   static const PLDHashNumber kCollisionFlag = 1;
+ 
+   static bool EntryIsFree(const PLDHashEntryHdr* aEntry)
+   {
+@@ -536,17 +537,17 @@ private:
+   static bool MatchEntryKeyhash(const PLDHashEntryHdr* aEntry,
+                                 const PLDHashNumber aHash);
+   PLDHashEntryHdr* AddressEntry(uint32_t aIndex) const;
+ 
+   // We store mHashShift rather than sizeLog2 to optimize the collision-free
+   // case in SearchTable.
+   uint32_t CapacityFromHashShift() const
+   {
+-    return ((uint32_t)1 << (kHashBits - mHashShift));
++    return ((uint32_t)1 << (kPLDHashNumberBits - mHashShift));
+   }
+ 
+   PLDHashNumber ComputeKeyHash(const void* aKey) const;
+ 
+   enum SearchReason { ForSearchOrRemove, ForAdd };
+ 
+   template <SearchReason Reason>
+   PLDHashEntryHdr* NS_FASTCALL
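Aside: after this patch the three spellings are aliases for one 32-bit type, so a value produced by HashFunctions.h can flow into PLDHashTable or js::HashTable without implicit conversions papering over mismatches. A sketch of the resulting relationship; the aliases mirror the patch, while the static_asserts are just illustration:

```cpp
#include <cstdint>
#include <type_traits>

namespace mozilla {
using HashNumber = uint32_t;
static const uint32_t kHashNumberBits = 32;
} // namespace mozilla

namespace js {
using HashNumber = mozilla::HashNumber;
} // namespace js

using PLDHashNumber = mozilla::HashNumber;

static_assert(std::is_same<js::HashNumber, PLDHashNumber>::value,
              "all three names now denote the same type");
static_assert(mozilla::kHashNumberBits == 32,
              "hash numbers are 32 bits everywhere");

int main() { return 0; }
```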

+ 263 - 0
frg/work-js/mozilla-release/patches/1477626-4-63a1.patch

@@ -0,0 +1,263 @@
+# HG changeset patch
+# User Nicholas Nethercote <nnethercote@mozilla.com>
+# Date 1532595167 -36000
+#      Thu Jul 26 18:52:47 2018 +1000
+# Node ID 2ce09953e25bfbcc4170ed989c028c469b6ea21d
+# Parent  9cf98793e243bd1fa1413d70cf957b9a4f4d54f4
+Bug 1477626 - Move ScrambleHashCode() from js/src/Utility.h to mfbt/HashFunctions.h. r=Waldo
+
+And use it in PLDHashTable.cpp.
+
+MozReview-Commit-ID: BqwEkE0p5AG
+
+diff --git a/js/public/HashTable.h b/js/public/HashTable.h
+--- a/js/public/HashTable.h
++++ b/js/public/HashTable.h
+@@ -1277,17 +1277,17 @@ class HashTable : private AllocPolicy
+ 
+     static bool isLiveHash(HashNumber hash)
+     {
+         return Entry::isLiveHash(hash);
+     }
+ 
+     static HashNumber prepareHash(const Lookup& l)
+     {
+-        HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
++        HashNumber keyHash = mozilla::ScrambleHashCode(HashPolicy::hash(l));
+ 
+         // Avoid reserved hash codes.
+         if (!isLiveHash(keyHash))
+             keyHash -= (sRemovedKey + 1);
+         return keyHash & ~sCollisionBit;
+     }
+ 
+     enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
+diff --git a/js/public/Utility.h b/js/public/Utility.h
+--- a/js/public/Utility.h
++++ b/js/public/Utility.h
+@@ -9,17 +9,16 @@
+ 
+ #include "mozilla/Assertions.h"
+ #include "mozilla/Atomics.h"
+ #include "mozilla/Attributes.h"
+ #include "mozilla/Compiler.h"
+ #include "mozilla/Move.h"
+ #include "mozilla/TemplateLib.h"
+ #include "mozilla/UniquePtr.h"
+-#include "mozilla/WrappingOperations.h"
+ 
+ #include <stdlib.h>
+ #include <string.h>
+ 
+ #ifdef JS_OOM_DO_BACKTRACES
+ #include <execinfo.h>
+ #include <stdio.h>
+ #endif
+@@ -659,61 +658,16 @@ struct FreePolicy
+     }
+ };
+ 
+ typedef mozilla::UniquePtr<char[], JS::FreePolicy> UniqueChars;
+ typedef mozilla::UniquePtr<char16_t[], JS::FreePolicy> UniqueTwoByteChars;
+ 
+ } // namespace JS
+ 
+-namespace js {
+-
+-namespace detail {
+-
+-/*
+- * Given a raw hash code, h, return a number that can be used to select a hash
+- * bucket.
+- *
+- * This function aims to produce as uniform an output distribution as possible,
+- * especially in the most significant (leftmost) bits, even though the input
+- * distribution may be highly nonrandom, given the constraints that this must
+- * be deterministic and quick to compute.
+- *
+- * Since the leftmost bits of the result are best, the hash bucket index is
+- * computed by doing ScrambleHashCode(h) / (2^32/N) or the equivalent
+- * right-shift, not ScrambleHashCode(h) % N or the equivalent bit-mask.
+- *
+- * FIXME: OrderedHashTable uses a bit-mask; see bug 775896.
+- */
+-inline uint32_t
+-ScrambleHashCode(uint32_t h)
+-{
+-    /*
+-     * Simply returning h would not cause any hash tables to produce wrong
+-     * answers. But it can produce pathologically bad performance: The caller
+-     * right-shifts the result, keeping only the highest bits. The high bits of
+-     * hash codes are very often completely entropy-free. (So are the lowest
+-     * bits.)
+-     *
+-     * So we use Fibonacci hashing, as described in Knuth, The Art of Computer
+-     * Programming, 6.4. This mixes all the bits of the input hash code h.
+-     *
+-     * The value of goldenRatio is taken from the hex
+-     * expansion of the golden ratio, which starts 1.9E3779B9....
+-     * This value is especially good if values with consecutive hash codes
+-     * are stored in a hash table; see Knuth for details.
+-     */
+-    static const uint32_t goldenRatio = 0x9E3779B9U;
+-    return mozilla::WrappingMultiply(h, goldenRatio);
+-}
+-
+-} /* namespace detail */
+-
+-} /* namespace js */
+-
+ /* sixgill annotation defines */
+ #ifndef HAVE_STATIC_ANNOTATIONS
+ # define HAVE_STATIC_ANNOTATIONS
+ # ifdef XGILL_PLUGIN
+ #  define STATIC_PRECONDITION(COND)         __attribute__((precondition(#COND)))
+ #  define STATIC_PRECONDITION_ASSUME(COND)  __attribute__((precondition_assume(#COND)))
+ #  define STATIC_POSTCONDITION(COND)        __attribute__((postcondition(#COND)))
+ #  define STATIC_POSTCONDITION_ASSUME(COND) __attribute__((postcondition_assume(#COND)))
+diff --git a/js/src/ds/OrderedHashTable.h b/js/src/ds/OrderedHashTable.h
+--- a/js/src/ds/OrderedHashTable.h
++++ b/js/src/ds/OrderedHashTable.h
+@@ -613,17 +613,17 @@ class OrderedHashTable
+     /*
+      * The minimum permitted value of (liveCount / dataLength).
+      * If that ratio drops below this value, we shrink the table.
+      */
+     static double minDataFill() { return 0.25; }
+ 
+   public:
+     HashNumber prepareHash(const Lookup& l) const {
+-        return ScrambleHashCode(Ops::hash(l, hcs));
++        return mozilla::ScrambleHashCode(Ops::hash(l, hcs));
+     }
+ 
+   private:
+     /* The size of hashTable, in elements. Always a power of two. */
+     uint32_t hashBuckets() const {
+         return 1 << (js::kHashNumberBits - hashShift);
+     }
+ 
+diff --git a/mfbt/HashFunctions.h b/mfbt/HashFunctions.h
+--- a/mfbt/HashFunctions.h
++++ b/mfbt/HashFunctions.h
+@@ -61,16 +61,52 @@ namespace mozilla {
+ using HashNumber = uint32_t;
+ static const uint32_t kHashNumberBits = 32;
+ 
+ /**
+  * The golden ratio as a 32-bit fixed-point value.
+  */
+ static const HashNumber kGoldenRatioU32 = 0x9E3779B9U;
+ 
++/*
++ * Given a raw hash code, h, return a number that can be used to select a hash
++ * bucket.
++ *
++ * This function aims to produce as uniform an output distribution as possible,
++ * especially in the most significant (leftmost) bits, even though the input
++ * distribution may be highly nonrandom, given the constraints that this must
++ * be deterministic and quick to compute.
++ *
++ * Since the leftmost bits of the result are best, the hash bucket index is
++ * computed by doing ScrambleHashCode(h) / (2^32/N) or the equivalent
++ * right-shift, not ScrambleHashCode(h) % N or the equivalent bit-mask.
++ *
++ * FIXME: OrderedHashTable uses a bit-mask; see bug 775896.
++ */
++constexpr HashNumber
++ScrambleHashCode(HashNumber h)
++{
++  /*
++   * Simply returning h would not cause any hash tables to produce wrong
++   * answers. But it can produce pathologically bad performance: The caller
++   * right-shifts the result, keeping only the highest bits. The high bits of
++   * hash codes are very often completely entropy-free. (So are the lowest
++   * bits.)
++   *
++   * So we use Fibonacci hashing, as described in Knuth, The Art of Computer
++   * Programming, 6.4. This mixes all the bits of the input hash code h.
++   *
++   * The value of goldenRatio is taken from the hex expansion of the golden
++   * ratio, which starts 1.9E3779B9.... This value is especially good if
++   * values with consecutive hash codes are stored in a hash table; see Knuth
++   * for details.
++   */
++  return mozilla::WrappingMultiply(h, kGoldenRatioU32);
++}
++
+ namespace detail {
+ 
+ MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW
+ constexpr HashNumber
+ RotateLeft5(HashNumber aValue)
+ {
+   return (aValue << 5) | (aValue >> 27);
+ }
+diff --git a/xpcom/ds/PLDHashTable.cpp b/xpcom/ds/PLDHashTable.cpp
+--- a/xpcom/ds/PLDHashTable.cpp
++++ b/xpcom/ds/PLDHashTable.cpp
+@@ -523,18 +523,17 @@ PLDHashTable::ChangeTable(int32_t aDelta
+   return true;
+ }
+ 
+ MOZ_ALWAYS_INLINE PLDHashNumber
+ PLDHashTable::ComputeKeyHash(const void* aKey) const
+ {
+   MOZ_ASSERT(mEntryStore.Get());
+ 
+-  PLDHashNumber keyHash = mOps->hashKey(aKey);
+-  keyHash *= kGoldenRatio;
++  PLDHashNumber keyHash = mozilla::ScrambleHashCode(mOps->hashKey(aKey));
+ 
+   // Avoid 0 and 1 hash codes, they indicate free and removed entries.
+   if (keyHash < 2) {
+     keyHash -= 2;
+   }
+   keyHash &= ~kCollisionFlag;
+ 
+   return keyHash;
+diff --git a/xpcom/ds/PLDHashTable.h b/xpcom/ds/PLDHashTable.h
+--- a/xpcom/ds/PLDHashTable.h
++++ b/xpcom/ds/PLDHashTable.h
+@@ -28,18 +28,18 @@ struct PLDHashTableOps;
+ // either here. Instead, the API uses const void *key as a formal parameter.
+ // The key need not be stored in the entry; it may be part of the value, but
+ // need not be stored at all.
+ //
+ // Callback types are defined below and grouped into the PLDHashTableOps
+ // structure, for single static initialization per hash table sub-type.
+ //
+ // Each hash table sub-type should make its entry type a subclass of
+-// PLDHashEntryHdr. The mKeyHash member contains the result of multiplying the
+-// hash code returned from the hashKey callback (see below) by kGoldenRatio,
++// PLDHashEntryHdr. The mKeyHash member contains the result of suitably
++// scrambling the hash code returned from the hashKey callback (see below),
+ // then constraining the result to avoid the magic 0 and 1 values. The stored
+ // mKeyHash value is table size invariant, and it is maintained automatically
+ // -- users need never access it.
+ struct PLDHashEntryHdr
+ {
+ private:
+   friend class PLDHashTable;
+ 
+@@ -498,20 +498,16 @@ public:
+   // Use this if you need to initialize an Iterator in a const method. If you
+   // use this case, you should not call Remove() on the iterator.
+   Iterator ConstIter() const
+   {
+     return Iterator(const_cast<PLDHashTable*>(this));
+   }
+ 
+ private:
+-  // Multiplicative hash uses an unsigned 32 bit integer and the golden ratio,
+-  // expressed as a fixed-point 32-bit fraction.
+-  static const uint32_t kGoldenRatio = 0x9E3779B9U;
+-
+   static uint32_t HashShift(uint32_t aEntrySize, uint32_t aLength);
+ 
+   static const PLDHashNumber kCollisionFlag = 1;
+ 
+   static bool EntryIsFree(const PLDHashEntryHdr* aEntry)
+   {
+     return aEntry->mKeyHash == 0;
+   }
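Aside: with ScrambleHashCode() hoisted into MFBT, PLDHashTable drops its private kGoldenRatio multiply and shares the one Fibonacci-hashing implementation. The scramble matters because bucket selection keeps only the high bits (the `h >> hashShift` pattern above), and raw hash codes are often entropy-free there. A runnable sketch; the constant matches the patch, while the 32-bucket table size is an arbitrary example:

```cpp
#include <cstdint>
#include <cstdio>

using HashNumber = uint32_t;
static const HashNumber kGoldenRatioU32 = 0x9E3779B9U;

// Fibonacci hashing: a wrapping 32-bit multiply by the golden ratio mixes
// all input bits into the high bits of the result.
constexpr HashNumber ScrambleHashCode(HashNumber h) {
    return h * kGoldenRatioU32;
}

int main() {
    const uint32_t kSizeLog2 = 5;                 // 2^5 = 32 buckets
    const uint32_t kHashShift = 32 - kSizeLog2;
    for (HashNumber h = 0; h < 4; h++) {
        // Consecutive raw codes scatter across buckets once scrambled.
        printf("h=%u -> bucket %u\n", (unsigned)h,
               (unsigned)(ScrambleHashCode(h) >> kHashShift));
    }
    return 0;
}
```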

+ 72 - 0
frg/work-js/mozilla-release/patches/1477626-5-63a1.patch

@@ -0,0 +1,72 @@
+# HG changeset patch
+# User Nicholas Nethercote <nnethercote@mozilla.com>
+# Date 1532599975 -36000
+#      Thu Jul 26 20:12:55 2018 +1000
+# Node ID f953e6b321c522d50fe137601694805dea19a9cf
+# Parent  2ce09953e25bfbcc4170ed989c028c469b6ea21d
+Bug 1477626 - Remove use of JS_BIT in js/src/HashTable.h. r=Waldo
+
+MozReview-Commit-ID: DRba0Z0Olo0
+
+diff --git a/js/public/HashTable.h b/js/public/HashTable.h
+--- a/js/public/HashTable.h
++++ b/js/public/HashTable.h
+@@ -1252,18 +1252,18 @@ class HashTable : private AllocPolicy
+ #   define METER(x) x
+ #else
+ #   define METER(x)
+ #endif
+ 
+     // The default initial capacity is 32 (enough to hold 16 elements), but it
+     // can be as low as 4.
+     static const uint32_t sMinCapacity  = 4;
+-    static const uint32_t sMaxInit      = JS_BIT(CAP_BITS - 1);
+-    static const uint32_t sMaxCapacity  = JS_BIT(CAP_BITS);
++    static const uint32_t sMaxInit      = 1u << (CAP_BITS - 1);
++    static const uint32_t sMaxCapacity  = 1u << CAP_BITS;
+ 
+     // Hash-table alpha is conceptually a fraction, but to avoid floating-point
+     // math we implement it as a ratio of integers.
+     static const uint8_t sAlphaDenominator = 4;
+     static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
+     static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
+ 
+     static const HashNumber sFreeKey = Entry::sFreeKey;
+@@ -1549,17 +1549,17 @@ class HashTable : private AllocPolicy
+     enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
+ 
+     RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure)
+     {
+         // Look, but don't touch, until we succeed in getting new entry store.
+         Entry* oldTable = table;
+         uint32_t oldCap = capacity();
+         uint32_t newLog2 = js::kHashNumberBits - hashShift + deltaLog2;
+-        uint32_t newCapacity = JS_BIT(newLog2);
++        uint32_t newCapacity = 1u << newLog2;
+         if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
+             if (reportFailure)
+                 this->reportAllocOverflow();
+             return RehashFailed;
+         }
+ 
+         Entry* newTable = createTable(*this, newCapacity, reportFailure);
+         if (!newTable)
+@@ -1787,17 +1787,17 @@ class HashTable : private AllocPolicy
+     {
+         MOZ_ASSERT(table);
+         return entryCount;
+     }
+ 
+     uint32_t capacity() const
+     {
+         MOZ_ASSERT(table);
+-        return JS_BIT(js::kHashNumberBits - hashShift);
++        return 1u << (js::kHashNumberBits - hashShift);
+     }
+ 
+     Generation generation() const
+     {
+         MOZ_ASSERT(table);
+         return Generation(gen);
+     }
+ 

+ 3506 - 0
frg/work-js/mozilla-release/patches/1477626-6-63a1.patch

@@ -0,0 +1,3506 @@
+# HG changeset patch
+# User Nicholas Nethercote <nnethercote@mozilla.com>
+# Date 1532600149 -36000
+#      Thu Jul 26 20:15:49 2018 +1000
+# Node ID e3a286413269f7c023fa55bfa1775be47d415547
+# Parent  e632f01e26801f83cf689c4b76e591a743e8194a
+Bug 1477626 - Move js::Hash{Set,Map} into MFBT. r=Waldo
+
+The main change is that the patch copies js/public/HashTable.h to
+mfbt/HashTable.h, and then changes it as follows.
+
+- Changes `js` namespaces to `mozilla` (and removes some now-unnecessary
+  `mozilla::` qualifiers).
+
+- Changes the default AllocPolicy from the SpiderMonkey-specific
+  `TempAllocPolicy` to the generic `MallocAllocPolicy`.
+
+- Adds `#include "AllocPolicy.h"` (like mfbt/Vector.h).
+
+- Changes `JS_DEBUG` use to `DEBUG`.
+
+- Minor comment updates, as necessary.
+
+js/public/HashTable.h is now tiny, holding just a few renamings of things from
+the `mozilla` namespace into the `js` namespace to minimize churn elsewhere.
+(Those renamings keep `TempAllocPolicy` as the default AllocPolicy for
+js::Hash{Set,Map}.)
+
+Also, various template specializations had to be moved from the `js` namespace
+to the `mozilla` namespace to avoid compile errors.
+
+MozReview-Commit-ID: GS9Qn9YeYDA
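+
+(Illustrative aside, not part of the original commit message: after this
+move, non-JS Gecko code can use the table with the new MallocAllocPolicy
+default. A hedged sketch against the API as it stands in this patch -- init()
+is still fallible at this point in the tree:
+
+    #include <cstdio>
+    #include "mozilla/HashTable.h"
+
+    bool Demo() {
+      mozilla::HashMap<uint32_t, const char*> names;
+      if (!names.init() || !names.putNew(1u, "one"))
+        return false;                    // OOM is reported by return value
+      if (auto p = names.lookup(1u))
+        printf("%s\n", p->value());      // Ptr acts like a pointer to Entry
+      return true;
+    }
+
+js::HashMap and js::HashSet keep TempAllocPolicy as their default via the
+aliases below, so existing SpiderMonkey callers compile unchanged.)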
+
+diff --git a/js/public/HashTable.h b/js/public/HashTable.h
+--- a/js/public/HashTable.h
++++ b/js/public/HashTable.h
+@@ -2,1972 +2,37 @@
+  * vim: set ts=8 sts=4 et sw=4 tw=99:
+  * This Source Code Form is subject to the terms of the Mozilla Public
+  * License, v. 2.0. If a copy of the MPL was not distributed with this
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+ #ifndef js_HashTable_h
+ #define js_HashTable_h
+ 
+-#include "mozilla/Assertions.h"
+-#include "mozilla/Attributes.h"
+-#include "mozilla/Casting.h"
+-#include "mozilla/HashFunctions.h"
+-#include "mozilla/MathAlgorithms.h"
+-#include "mozilla/MemoryChecking.h"
+-#include "mozilla/MemoryReporting.h"
+-#include "mozilla/Move.h"
+-#include "mozilla/Opaque.h"
+-#include "mozilla/PodOperations.h"
+-#include "mozilla/ReentrancyGuard.h"
+-#include "mozilla/TypeTraits.h"
+-#include "mozilla/UniquePtr.h"
+-
+-#include "js/Utility.h"
++#include "mozilla/HashTable.h"
+ 
+ namespace js {
+ 
+ using HashNumber = mozilla::HashNumber;
+ static const uint32_t kHashNumberBits = mozilla::kHashNumberBits;
+ 
+ class TempAllocPolicy;
+-template <class> struct DefaultHasher;
+-template <class, class> class HashMapEntry;
+-namespace detail {
+-    template <typename T> class HashTableEntry;
+-    template <class T, class HashPolicy, class AllocPolicy> class HashTable;
+-} // namespace detail
+-
+-/*****************************************************************************/
+-
+-// The "generation" of a hash table is an opaque value indicating the state of
+-// modification of the hash table through its lifetime.  If the generation of
+-// a hash table compares equal at times T1 and T2, then lookups in the hash
+-// table, pointers to (or into) hash table entries, etc. at time T1 are valid
+-// at time T2.  If the generation compares unequal, these computations are all
+-// invalid and must be performed again to be used.
+-//
+-// Generations are meaningfully comparable only with respect to a single hash
+-// table.  It's always nonsensical to compare the generation of distinct hash
+-// tables H1 and H2.
+-using Generation = mozilla::Opaque<uint64_t>;
+-
+-// A JS-friendly, STL-like container providing a hash-based map from keys to
+-// values. In particular, HashMap calls constructors and destructors of all
+-// objects added so non-PODs may be used safely.
+-//
+-// Key/Value requirements:
+-//  - movable, destructible, assignable
+-// HashPolicy requirements:
+-//  - see Hash Policy section below
+-// AllocPolicy:
+-//  - see AllocPolicy.h
+-//
+-// Note:
+-// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
+-//   called by HashMap must not call back into the same HashMap object.
+-// - Due to the lack of exception handling, the user must call |init()|.
+-template <class Key,
+-          class Value,
+-          class HashPolicy = DefaultHasher<Key>,
+-          class AllocPolicy = TempAllocPolicy>
+-class HashMap
+-{
+-    typedef HashMapEntry<Key, Value> TableEntry;
+-
+-    struct MapHashPolicy : HashPolicy
+-    {
+-        using Base = HashPolicy;
+-        typedef Key KeyType;
+-        static const Key& getKey(TableEntry& e) { return e.key(); }
+-        static void setKey(TableEntry& e, Key& k) { HashPolicy::rekey(e.mutableKey(), k); }
+-    };
+-
+-    typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
+-    Impl impl;
+-
+-  public:
+-    typedef typename HashPolicy::Lookup Lookup;
+-    typedef TableEntry Entry;
+-
+-    // HashMap construction is fallible (due to OOM); thus the user must call
+-    // init after constructing a HashMap and check the return value.
+-    explicit HashMap(AllocPolicy a = AllocPolicy()) : impl(a)  {}
+-    MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
+-    bool initialized() const                  { return impl.initialized(); }
+-
+-    // Return whether the given lookup value is present in the map. E.g.:
+-    //
+-    //   typedef HashMap<int,char> HM;
+-    //   HM h;
+-    //   if (HM::Ptr p = h.lookup(3)) {
+-    //     const HM::Entry& e = *p; // p acts like a pointer to Entry
+-    //     assert(p->key == 3);     // Entry contains the key
+-    //     char val = p->value;     // and value
+-    //   }
+-    //
+-    // Also see the definition of Ptr in HashTable above (with T = Entry).
+-    typedef typename Impl::Ptr Ptr;
+-    MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
+-
+-    // Like lookup, but does not assert if two threads call lookup at the same
+-    // time. Only use this method when none of the threads will modify the map.
+-    MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const {
+-        return impl.readonlyThreadsafeLookup(l);
+-    }
+-
+-    // Assuming |p.found()|, remove |*p|.
+-    void remove(Ptr p)                                { impl.remove(p); }
+-
+-    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+-    // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
+-    // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
+-    //
+-    //   typedef HashMap<int,char> HM;
+-    //   HM h;
+-    //   HM::AddPtr p = h.lookupForAdd(3);
+-    //   if (!p) {
+-    //     if (!h.add(p, 3, 'a'))
+-    //       return false;
+-    //   }
+-    //   const HM::Entry& e = *p;   // p acts like a pointer to Entry
+-    //   assert(p->key == 3);       // Entry contains the key
+-    //   char val = p->value;       // and value
+-    //
+-    // Also see the definition of AddPtr in HashTable above (with T = Entry).
+-    //
+-    // N.B. The caller must ensure that no mutating hash table operations
+-    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
+-    // looking up the key a second time, the caller may use the more efficient
+-    // relookupOrAdd method. This method reuses part of the hashing computation
+-    // to more efficiently insert the key if it has not been added. For
+-    // example, a mutation-handling version of the previous example:
+-    //
+-    //    HM::AddPtr p = h.lookupForAdd(3);
+-    //    if (!p) {
+-    //      call_that_may_mutate_h();
+-    //      if (!h.relookupOrAdd(p, 3, 'a'))
+-    //        return false;
+-    //    }
+-    //    const HM::Entry& e = *p;
+-    //    assert(p->key == 3);
+-    //    char val = p->value;
+-    typedef typename Impl::AddPtr AddPtr;
+-    MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const {
+-        return impl.lookupForAdd(l);
+-    }
+-
+-    template<typename KeyInput, typename ValueInput>
+-    MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+-        return impl.add(p,
+-                        std::forward<KeyInput>(k),
+-                        std::forward<ValueInput>(v));
+-    }
+-
+-    template<typename KeyInput>
+-    MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k) {
+-        return impl.add(p, std::forward<KeyInput>(k), Value());
+-    }
+-
+-    template<typename KeyInput, typename ValueInput>
+-    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+-        return impl.relookupOrAdd(p, k,
+-                                  std::forward<KeyInput>(k),
+-                                  std::forward<ValueInput>(v));
+-    }
+-
+-    // |all()| returns a Range containing |count()| elements. E.g.:
+-    //
+-    //   typedef HashMap<int,char> HM;
+-    //   HM h;
+-    //   for (HM::Range r = h.all(); !r.empty(); r.popFront())
+-    //     char c = r.front().value();
+-    //
+-    // Also see the definition of Range in HashTable above (with T = Entry).
+-    typedef typename Impl::Range Range;
+-    Range all() const                                 { return impl.all(); }
+-
+-    // Typedef for the enumeration class. An Enum may be used to examine and
+-    // remove table entries:
+-    //
+-    //   typedef HashMap<int,char> HM;
+-    //   HM s;
+-    //   for (HM::Enum e(s); !e.empty(); e.popFront())
+-    //     if (e.front().value() == 'l')
+-    //       e.removeFront();
+-    //
+-    // Table resize may occur in Enum's destructor. Also see the definition of
+-    // Enum in HashTable above (with T = Entry).
+-    typedef typename Impl::Enum Enum;
+ 
+-    // Remove all entries. This does not shrink the table. For that consider
+-    // using the finish() method.
+-    void clear()                                      { impl.clear(); }
+-
+-    // Remove all entries. Unlike clear() this method tries to shrink the table.
+-    // Unlike finish() it does not require the map to be initialized again.
+-    void clearAndShrink()                             { impl.clearAndShrink(); }
+-
+-    // Remove all the entries and release all internal buffers. The map must
+-    // be initialized again before any use.
+-    void finish()                                     { impl.finish(); }
+-
+-    // Does the table contain any entries?
+-    bool empty() const                                { return impl.empty(); }
+-
+-    // Number of live elements in the map.
+-    uint32_t count() const                            { return impl.count(); }
+-
+-    // Total number of allocation in the dynamic table. Note: resize will
+-    // happen well before count() == capacity().
+-    size_t capacity() const                           { return impl.capacity(); }
+-
+-    // Don't just call |impl.sizeOfExcludingThis()| because there's no
+-    // guarantee that |impl| is the first field in HashMap.
+-    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+-        return impl.sizeOfExcludingThis(mallocSizeOf);
+-    }
+-    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+-        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+-    }
+-
+-    Generation generation() const {
+-        return impl.generation();
+-    }
+-
+-    /************************************************** Shorthand operations */
+-
+-    bool has(const Lookup& l) const {
+-        return impl.lookup(l).found();
+-    }
+-
+-    // Overwrite existing value with v. Return false on oom.
+-    template<typename KeyInput, typename ValueInput>
+-    MOZ_MUST_USE bool put(KeyInput&& k, ValueInput&& v) {
+-        AddPtr p = lookupForAdd(k);
+-        if (p) {
+-            p->value() = std::forward<ValueInput>(v);
+-            return true;
+-        }
+-        return add(p, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+-    }
+-
+-    // Like put, but assert that the given key is not already present.
+-    template<typename KeyInput, typename ValueInput>
+-    MOZ_MUST_USE bool putNew(KeyInput&& k, ValueInput&& v) {
+-        return impl.putNew(k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+-    }
+-
+-    // Only call this to populate an empty map after reserving space with init().
+-    template<typename KeyInput, typename ValueInput>
+-    void putNewInfallible(KeyInput&& k, ValueInput&& v) {
+-        impl.putNewInfallible(k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+-    }
+-
+-    // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
+-    Ptr lookupWithDefault(const Key& k, const Value& defaultValue) {
+-        AddPtr p = lookupForAdd(k);
+-        if (p)
+-            return p;
+-        bool ok = add(p, k, defaultValue);
+-        MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom.
+-        (void)ok;
+-        return p;
+-    }
+-
+-    // Remove if present.
+-    void remove(const Lookup& l) {
+-        if (Ptr p = lookup(l))
+-            remove(p);
+-    }
+-
+-    // Infallibly rekey one entry, if necessary.
+-    // Requires template parameters Key and HashPolicy::Lookup to be the same type.
+-    void rekeyIfMoved(const Key& old_key, const Key& new_key) {
+-        if (old_key != new_key)
+-            rekeyAs(old_key, new_key, new_key);
+-    }
++template <class T>
++using DefaultHasher = mozilla::DefaultHasher<T>;
+ 
+-    // Infallibly rekey one entry if present, and return whether that happened.
+-    bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const Key& new_key) {
+-        if (Ptr p = lookup(old_lookup)) {
+-            impl.rekeyAndMaybeRehash(p, new_lookup, new_key);
+-            return true;
+-        }
+-        return false;
+-    }
+-
+-    // HashMap is movable
+-    HashMap(HashMap&& rhs) : impl(std::move(rhs.impl)) {}
+-    void operator=(HashMap&& rhs) {
+-        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+-        impl = std::move(rhs.impl);
+-    }
+-
+-  private:
+-    // HashMap is not copyable or assignable
+-    HashMap(const HashMap& hm) = delete;
+-    HashMap& operator=(const HashMap& hm) = delete;
+-
+-    friend class Impl::Enum;
+-};
+-
+-/*****************************************************************************/
+-
+-// A JS-friendly, STL-like container providing a hash-based set of values. In
+-// particular, HashSet calls constructors and destructors of all objects added
+-// so non-PODs may be used safely.
+-//
+-// T requirements:
+-//  - movable, destructible, assignable
+-// HashPolicy requirements:
+-//  - see Hash Policy section below
+-// AllocPolicy:
+-//  - see AllocPolicy.h
+-//
+-// Note:
+-// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
+-//   HashSet must not call back into the same HashSet object.
+-// - Due to the lack of exception handling, the user must call |init()|.
+-template <class T,
+-          class HashPolicy = DefaultHasher<T>,
+-          class AllocPolicy = TempAllocPolicy>
+-class HashSet
+-{
+-    struct SetOps : HashPolicy
+-    {
+-        using Base = HashPolicy;
+-        typedef T KeyType;
+-        static const KeyType& getKey(const T& t) { return t; }
+-        static void setKey(T& t, KeyType& k) { HashPolicy::rekey(t, k); }
+-    };
+-
+-    typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
+-    Impl impl;
+-
+-  public:
+-    typedef typename HashPolicy::Lookup Lookup;
+-    typedef T Entry;
+-
+-    // HashSet construction is fallible (due to OOM); thus the user must call
+-    // init after constructing a HashSet and check the return value.
+-    explicit HashSet(AllocPolicy a = AllocPolicy()) : impl(a)  {}
+-    MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
+-    bool initialized() const                  { return impl.initialized(); }
+-
+-    // Return whether the given lookup value is present in the map. E.g.:
+-    //
+-    //   typedef HashSet<int> HS;
+-    //   HS h;
+-    //   if (HS::Ptr p = h.lookup(3)) {
+-    //     assert(*p == 3);   // p acts like a pointer to int
+-    //   }
+-    //
+-    // Also see the definition of Ptr in HashTable above.
+-    typedef typename Impl::Ptr Ptr;
+-    MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
+-
+-    // Like lookup, but does not assert if two threads call lookup at the same
+-    // time. Only use this method when none of the threads will modify the map.
+-    MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const {
+-        return impl.readonlyThreadsafeLookup(l);
+-    }
+-
+-    // Assuming |p.found()|, remove |*p|.
+-    void remove(Ptr p)                                { impl.remove(p); }
++template <typename Key>
++using PointerHasher = mozilla::PointerHasher<Key>;
+ 
+-    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+-    // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
+-    // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
+-    //
+-    //   typedef HashSet<int> HS;
+-    //   HS h;
+-    //   HS::AddPtr p = h.lookupForAdd(3);
+-    //   if (!p) {
+-    //     if (!h.add(p, 3))
+-    //       return false;
+-    //   }
+-    //   assert(*p == 3);   // p acts like a pointer to int
+-    //
+-    // Also see the definition of AddPtr in HashTable above.
+-    //
+-    // N.B. The caller must ensure that no mutating hash table operations
+-    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
+-    // looking up the key a second time, the caller may use the more efficient
+-    // relookupOrAdd method. This method reuses part of the hashing computation
+-    // to more efficiently insert the key if it has not been added. For
+-    // example, a mutation-handling version of the previous example:
+-    //
+-    //    HS::AddPtr p = h.lookupForAdd(3);
+-    //    if (!p) {
+-    //      call_that_may_mutate_h();
+-    //      if (!h.relookupOrAdd(p, 3, 3))
+-    //        return false;
+-    //    }
+-    //    assert(*p == 3);
+-    //
+-    // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
+-    // entry |t|, where the caller ensures match(l,t).
+-    typedef typename Impl::AddPtr AddPtr;
+-    MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const {
+-        return impl.lookupForAdd(l);
+-    }
+-
+-    template <typename U>
+-    MOZ_MUST_USE bool add(AddPtr& p, U&& u) {
+-        return impl.add(p, std::forward<U>(u));
+-    }
+-
+-    template <typename U>
+-    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u) {
+-        return impl.relookupOrAdd(p, l, std::forward<U>(u));
+-    }
+-
+-    // |all()| returns a Range containing |count()| elements:
+-    //
+-    //   typedef HashSet<int> HS;
+-    //   HS h;
+-    //   for (HS::Range r = h.all(); !r.empty(); r.popFront())
+-    //     int i = r.front();
+-    //
+-    // Also see the definition of Range in HashTable above.
+-    typedef typename Impl::Range Range;
+-    Range all() const                                 { return impl.all(); }
+-
+-    // Typedef for the enumeration class. An Enum may be used to examine and
+-    // remove table entries:
+-    //
+-    //   typedef HashSet<int> HS;
+-    //   HS s;
+-    //   for (HS::Enum e(s); !e.empty(); e.popFront())
+-    //     if (e.front() == 42)
+-    //       e.removeFront();
+-    //
+-    // Table resize may occur in Enum's destructor. Also see the definition of
+-    // Enum in HashTable above.
+-    typedef typename Impl::Enum Enum;
+-
+-    // Remove all entries. This does not shrink the table. For that consider
+-    // using the finish() method.
+-    void clear()                                      { impl.clear(); }
+-
+-    // Remove all entries. Unlike clear() this method tries to shrink the table.
+-    // Unlike finish() it does not require the set to be initialized again.
+-    void clearAndShrink()                             { impl.clearAndShrink(); }
+-
+-    // Remove all the entries and release all internal buffers. The set must
+-    // be initialized again before any use.
+-    void finish()                                     { impl.finish(); }
+-
+-    // Does the table contain any entries?
+-    bool empty() const                                { return impl.empty(); }
+-
+-    // Number of live elements in the map.
+-    uint32_t count() const                            { return impl.count(); }
+-
+-    // Total number of allocation in the dynamic table. Note: resize will
+-    // happen well before count() == capacity().
+-    size_t capacity() const                           { return impl.capacity(); }
+-
+-    // Don't just call |impl.sizeOfExcludingThis()| because there's no
+-    // guarantee that |impl| is the first field in HashSet.
+-    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+-        return impl.sizeOfExcludingThis(mallocSizeOf);
+-    }
+-    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+-        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+-    }
+-
+-    Generation generation() const {
+-        return impl.generation();
+-    }
+-
+-    /************************************************** Shorthand operations */
+-
+-    bool has(const Lookup& l) const {
+-        return impl.lookup(l).found();
+-    }
+-
+-    // Add |u| if it is not present already. Return false on oom.
+-    template <typename U>
+-    MOZ_MUST_USE bool put(U&& u) {
+-        AddPtr p = lookupForAdd(u);
+-        return p ? true : add(p, std::forward<U>(u));
+-    }
+-
+-    // Like put, but assert that the given key is not already present.
+-    template <typename U>
+-    MOZ_MUST_USE bool putNew(U&& u) {
+-        return impl.putNew(u, std::forward<U>(u));
+-    }
+-
+-    template <typename U>
+-    MOZ_MUST_USE bool putNew(const Lookup& l, U&& u) {
+-        return impl.putNew(l, std::forward<U>(u));
+-    }
+-
+-    // Only call this to populate an empty set after reserving space with init().
+-    template <typename U>
+-    void putNewInfallible(const Lookup& l, U&& u) {
+-        impl.putNewInfallible(l, std::forward<U>(u));
+-    }
+-
+-    void remove(const Lookup& l) {
+-        if (Ptr p = lookup(l))
+-            remove(p);
+-    }
+-
+-    // Infallibly rekey one entry, if present.
+-    // Requires template parameters T and HashPolicy::Lookup to be the same type.
+-    void rekeyIfMoved(const Lookup& old_value, const T& new_value) {
+-        if (old_value != new_value)
+-            rekeyAs(old_value, new_value, new_value);
+-    }
+-
+-    // Infallibly rekey one entry if present, and return whether that happened.
+-    bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const T& new_value) {
+-        if (Ptr p = lookup(old_lookup)) {
+-            impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
+-            return true;
+-        }
+-        return false;
+-    }
+-
+-    // Infallibly replace the current key at |p| with an equivalent key.
+-    // Specifically, both HashPolicy::hash and HashPolicy::match must return
+-    // identical results for the new and old key when applied against all
+-    // possible matching values.
+-    void replaceKey(Ptr p, const T& new_value) {
+-        MOZ_ASSERT(p.found());
+-        MOZ_ASSERT(*p != new_value);
+-        MOZ_ASSERT(HashPolicy::hash(*p) == HashPolicy::hash(new_value));
+-        MOZ_ASSERT(HashPolicy::match(*p, new_value));
+-        const_cast<T&>(*p) = new_value;
+-    }
++template <typename T,
++          class HashPolicy = mozilla::DefaultHasher<T>,
++          class AllocPolicy = TempAllocPolicy>
++using HashSet = mozilla::HashSet<T, HashPolicy, AllocPolicy>;
+ 
+-    // HashSet is movable
+-    HashSet(HashSet&& rhs) : impl(std::move(rhs.impl)) {}
+-    void operator=(HashSet&& rhs) {
+-        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+-        impl = std::move(rhs.impl);
+-    }
+-
+-  private:
+-    // HashSet is not copyable or assignable
+-    HashSet(const HashSet& hs) = delete;
+-    HashSet& operator=(const HashSet& hs) = delete;
+-
+-    friend class Impl::Enum;
+-};
+-
+-/*****************************************************************************/
+-
+-// Hash Policy
+-//
+-// A hash policy P for a hash table with key-type Key must provide:
+-//  - a type |P::Lookup| to use to lookup table entries;
+-//  - a static member function |P::hash| with signature
+-//
+-//      static js::HashNumber hash(Lookup)
+-//
+-//    to use to hash the lookup type; and
+-//  - a static member function |P::match| with signature
+-//
+-//      static bool match(Key, Lookup)
+-//
+-//    to use to test equality of key and lookup values.
+-//
+-// Normally, Lookup = Key. In general, though, different values and types of
+-// values can be used to lookup and store. If a Lookup value |l| is != to the
+-// added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
+-//
+-//   js::HashSet<Key, P>::AddPtr p = h.lookup(l);
+-//   if (!p) {
+-//     assert(P::match(k, l));  // must hold
+-//     h.add(p, k);
+-//   }
+-
+-// Pointer hashing policy that uses HashGeneric() to create good hashes for
+-// pointers.  Note that we don't shift out the lowest k bits to generate a
+-// good distribution for arena allocated pointers.
+-template <typename Key>
+-struct PointerHasher
+-{
+-    typedef Key Lookup;
+-    static HashNumber hash(const Lookup& l) {
+-        size_t word = reinterpret_cast<size_t>(l);
+-        return mozilla::HashGeneric(word);
+-    }
+-    static bool match(const Key& k, const Lookup& l) {
+-        return k == l;
+-    }
+-    static void rekey(Key& k, const Key& newKey) {
+-        k = newKey;
+-    }
+-};
+-
+-// Default hash policy: just use the 'lookup' value. This of course only
+-// works if the lookup value is integral. HashTable applies ScrambleHashCode to
+-// the result of the 'hash' which means that it is 'ok' if the lookup value is
+-// not well distributed over the HashNumber domain.
+-template <class Key>
+-struct DefaultHasher
+-{
+-    typedef Key Lookup;
+-    static HashNumber hash(const Lookup& l) {
+-        // Hash if can implicitly cast to hash number type.
+-        return l;
+-    }
+-    static bool match(const Key& k, const Lookup& l) {
+-        // Use builtin or overloaded operator==.
+-        return k == l;
+-    }
+-    static void rekey(Key& k, const Key& newKey) {
+-        k = newKey;
+-    }
+-};
+-
+-// Specialize hashing policy for pointer types. It assumes that the type is
+-// at least word-aligned. For types with smaller size use PointerHasher.
+-template <class T>
+-struct DefaultHasher<T*> : PointerHasher<T*>
+-{};
++template <typename Key,
++          typename Value,
++          class HashPolicy = mozilla::DefaultHasher<Key>,
++          class AllocPolicy = TempAllocPolicy>
++using HashMap = mozilla::HashMap<Key, Value, HashPolicy, AllocPolicy>;
+ 
+-// Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's
+-// raw pointer to PointerHasher.
+-template <class T, class D>
+-struct DefaultHasher<mozilla::UniquePtr<T, D>>
+-{
+-    using Lookup = mozilla::UniquePtr<T, D>;
+-    using PtrHasher = PointerHasher<T*>;
+-
+-    static HashNumber hash(const Lookup& l) {
+-        return PtrHasher::hash(l.get());
+-    }
+-    static bool match(const mozilla::UniquePtr<T, D>& k, const Lookup& l) {
+-        return PtrHasher::match(k.get(), l.get());
+-    }
+-    static void rekey(mozilla::UniquePtr<T, D>& k, mozilla::UniquePtr<T, D>&& newKey) {
+-        k = std::move(newKey);
+-    }
+-};
+-
+-// For doubles, we can xor the two uint32s.
+-template <>
+-struct DefaultHasher<double>
+-{
+-    typedef double Lookup;
+-    static HashNumber hash(double d) {
+-        static_assert(sizeof(HashNumber) == 4,
+-                      "subsequent code assumes a four-byte hash");
+-        uint64_t u = mozilla::BitwiseCast<uint64_t>(d);
+-        return HashNumber(u ^ (u >> 32));
+-    }
+-    static bool match(double lhs, double rhs) {
+-        return mozilla::BitwiseCast<uint64_t>(lhs) == mozilla::BitwiseCast<uint64_t>(rhs);
+-    }
+-};
+-
+-template <>
+-struct DefaultHasher<float>
+-{
+-    typedef float Lookup;
+-    static HashNumber hash(float f) {
+-        static_assert(sizeof(HashNumber) == 4,
+-                      "subsequent code assumes a four-byte hash");
+-        return HashNumber(mozilla::BitwiseCast<uint32_t>(f));
+-    }
+-    static bool match(float lhs, float rhs) {
+-        return mozilla::BitwiseCast<uint32_t>(lhs) == mozilla::BitwiseCast<uint32_t>(rhs);
+-    }
+-};
+-
+-// A hash policy that compares C strings.
+-struct CStringHasher
+-{
+-    typedef const char* Lookup;
+-    static js::HashNumber hash(Lookup l) {
+-        return mozilla::HashString(l);
+-    }
+-    static bool match(const char* key, Lookup lookup) {
+-        return strcmp(key, lookup) == 0;
+-    }
+-};
+-
+-// Fallible hashing interface.
+-//
+-// Most of the time generating a hash code is infallible so this class provides
+-// default methods that always succeed.  Specialize this class for your own hash
+-// policy to provide fallible hashing.
+-//
+-// This is used by MovableCellHasher to handle the fact that generating a unique
+-// ID for cell pointer may fail due to OOM.
+-template <typename HashPolicy>
+-struct FallibleHashMethods
+-{
+-    // Return true if a hashcode is already available for its argument.  Once
+-    // this returns true for a specific argument it must continue to do so.
+-    template <typename Lookup> static bool hasHash(Lookup&& l) { return true; }
+-
+-    // Fallible method to ensure a hashcode exists for its argument and create
+-    // one if not.  Returns false on error, e.g. out of memory.
+-    template <typename Lookup> static bool ensureHash(Lookup&& l) { return true; }
+-};
+-
+-template <typename HashPolicy, typename Lookup>
+-static bool
+-HasHash(Lookup&& l) {
+-    return FallibleHashMethods<typename HashPolicy::Base>::hasHash(std::forward<Lookup>(l));
+-}
+-
+-template <typename HashPolicy, typename Lookup>
+-static bool
+-EnsureHash(Lookup&& l) {
+-    return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(std::forward<Lookup>(l));
+ }
+ 
+-/*****************************************************************************/
+-
+-// Both HashMap and HashSet are implemented by a single HashTable that is even
+-// more heavily parameterized than the other two. This leaves HashTable gnarly
+-// and extremely coupled to HashMap and HashSet; thus code should not use
+-// HashTable directly.
+-
+-template <class Key, class Value>
+-class HashMapEntry
+-{
+-    Key key_;
+-    Value value_;
+-
+-    template <class, class, class> friend class detail::HashTable;
+-    template <class> friend class detail::HashTableEntry;
+-    template <class, class, class, class> friend class HashMap;
+-
+-  public:
+-    template<typename KeyInput, typename ValueInput>
+-    HashMapEntry(KeyInput&& k, ValueInput&& v)
+-      : key_(std::forward<KeyInput>(k)),
+-        value_(std::forward<ValueInput>(v))
+-    {}
+-
+-    HashMapEntry(HashMapEntry&& rhs)
+-      : key_(std::move(rhs.key_)),
+-        value_(std::move(rhs.value_))
+-    {}
+-
+-    void operator=(HashMapEntry&& rhs) {
+-        key_ = std::move(rhs.key_);
+-        value_ = std::move(rhs.value_);
+-    }
+-
+-    typedef Key KeyType;
+-    typedef Value ValueType;
+-
+-    const Key& key() const { return key_; }
+-    Key& mutableKey() { return key_; }
+-    const Value& value() const { return value_; }
+-    Value& value() { return value_; }
+-
+-  private:
+-    HashMapEntry(const HashMapEntry&) = delete;
+-    void operator=(const HashMapEntry&) = delete;
+-};
+-
+-} // namespace js
+-
+-namespace mozilla {
+-
+-template <typename K, typename V>
+-struct IsPod<js::HashMapEntry<K, V> >
+-  : IntegralConstant<bool, IsPod<K>::value && IsPod<V>::value>
+-{};
+-
+-} // namespace mozilla
+-
+-namespace js {
+-
+-namespace detail {
+-
+-template <class T, class HashPolicy, class AllocPolicy>
+-class HashTable;
+-
+-template <typename T>
+-class HashTableEntry
+-{
+-  private:
+-    using NonConstT = typename mozilla::RemoveConst<T>::Type;
+-
+-    static const HashNumber sFreeKey = 0;
+-    static const HashNumber sRemovedKey = 1;
+-    static const HashNumber sCollisionBit = 1;
+-
+-    HashNumber keyHash = sFreeKey;
+-    alignas(NonConstT) unsigned char valueData_[sizeof(NonConstT)];
+-
+-  private:
+-    template <class, class, class> friend class HashTable;
+-
+-    // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
+-    // -Werror compile error) to reinterpret_cast<> |valueData_| to |T*|, even
+-    // through |void*|.  Placing the latter cast in these separate functions
+-    // breaks the chain such that affected GCC versions no longer warn/error.
+-    void* rawValuePtr() { return valueData_; }
+-
+-    static bool isLiveHash(HashNumber hash)
+-    {
+-        return hash > sRemovedKey;
+-    }
+-
+-    HashTableEntry(const HashTableEntry&) = delete;
+-    void operator=(const HashTableEntry&) = delete;
+-
+-    NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); }
+-
+-    void destroyStoredT() {
+-        NonConstT* ptr = valuePtr();
+-        ptr->~T();
+-        MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr));
+-    }
+-
+-  public:
+-    HashTableEntry() = default;
+-
+-    ~HashTableEntry() {
+-        if (isLive())
+-            destroyStoredT();
+-
+-        MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
+-    }
+-
+-    void destroy() {
+-        MOZ_ASSERT(isLive());
+-        destroyStoredT();
+-    }
+-
+-    void swap(HashTableEntry* other) {
+-        if (this == other)
+-            return;
+-        MOZ_ASSERT(isLive());
+-        if (other->isLive()) {
+-            mozilla::Swap(*valuePtr(), *other->valuePtr());
+-        } else {
+-            *other->valuePtr() = std::move(*valuePtr());
+-            destroy();
+-        }
+-        mozilla::Swap(keyHash, other->keyHash);
+-    }
+-
+-    T& get() {
+-        MOZ_ASSERT(isLive());
+-        return *valuePtr();
+-    }
+-
+-    NonConstT& getMutable() {
+-        MOZ_ASSERT(isLive());
+-        return *valuePtr();
+-    }
+-
+-    bool isFree() const {
+-        return keyHash == sFreeKey;
+-    }
+-
+-    void clearLive() {
+-        MOZ_ASSERT(isLive());
+-        keyHash = sFreeKey;
+-        destroyStoredT();
+-    }
+-
+-    void clear() {
+-        if (isLive())
+-            destroyStoredT();
+-
+-        MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
+-        keyHash = sFreeKey;
+-    }
+-
+-    bool isRemoved() const {
+-        return keyHash == sRemovedKey;
+-    }
+-
+-    void removeLive() {
+-        MOZ_ASSERT(isLive());
+-        keyHash = sRemovedKey;
+-        destroyStoredT();
+-    }
+-
+-    bool isLive() const {
+-        return isLiveHash(keyHash);
+-    }
+-
+-    void setCollision() {
+-        MOZ_ASSERT(isLive());
+-        keyHash |= sCollisionBit;
+-    }
+-
+-    void unsetCollision() {
+-        keyHash &= ~sCollisionBit;
+-    }
+-
+-    bool hasCollision() const {
+-        return keyHash & sCollisionBit;
+-    }
+-
+-    bool matchHash(HashNumber hn) {
+-        return (keyHash & ~sCollisionBit) == hn;
+-    }
+-
+-    HashNumber getKeyHash() const {
+-        return keyHash & ~sCollisionBit;
+-    }
+-
+-    template <typename... Args>
+-    void setLive(HashNumber hn, Args&&... args)
+-    {
+-        MOZ_ASSERT(!isLive());
+-        keyHash = hn;
+-        new (valuePtr()) T(std::forward<Args>(args)...);
+-        MOZ_ASSERT(isLive());
+-    }
+-};
+-
+-template <class T, class HashPolicy, class AllocPolicy>
+-class HashTable : private AllocPolicy
+-{
+-    friend class mozilla::ReentrancyGuard;
+-
+-    typedef typename mozilla::RemoveConst<T>::Type NonConstT;
+-    typedef typename HashPolicy::KeyType Key;
+-    typedef typename HashPolicy::Lookup Lookup;
+-
+-  public:
+-    using Entry = HashTableEntry<T>;
+-
+-    // A nullable pointer to a hash table element. A Ptr |p| can be tested
+-    // either explicitly |if (p.found()) p->...| or using boolean conversion
+-    // |if (p) p->...|. Ptr objects must not be used after any mutating hash
+-    // table operations unless |generation()| is tested.
+-    class Ptr
+-    {
+-        friend class HashTable;
+-
+-        Entry* entry_;
+-#ifdef JS_DEBUG
+-        const HashTable* table_;
+-        Generation generation;
+-#endif
+-
+-      protected:
+-        Ptr(Entry& entry, const HashTable& tableArg)
+-          : entry_(&entry)
+-#ifdef JS_DEBUG
+-          , table_(&tableArg)
+-          , generation(tableArg.generation())
+-#endif
+-        {}
+-
+-      public:
+-        Ptr()
+-          : entry_(nullptr)
+-#ifdef JS_DEBUG
+-          , table_(nullptr)
+-          , generation(0)
+-#endif
+-        {}
+-
+-        bool isValid() const {
+-            return !!entry_;
+-        }
+-
+-        bool found() const {
+-            if (!isValid())
+-                return false;
+-#ifdef JS_DEBUG
+-            MOZ_ASSERT(generation == table_->generation());
+-#endif
+-            return entry_->isLive();
+-        }
+-
+-        explicit operator bool() const {
+-            return found();
+-        }
+-
+-        bool operator==(const Ptr& rhs) const {
+-            MOZ_ASSERT(found() && rhs.found());
+-            return entry_ == rhs.entry_;
+-        }
+-
+-        bool operator!=(const Ptr& rhs) const {
+-#ifdef JS_DEBUG
+-            MOZ_ASSERT(generation == table_->generation());
+-#endif
+-            return !(*this == rhs);
+-        }
+-
+-        T& operator*() const {
+-#ifdef JS_DEBUG
+-            MOZ_ASSERT(found());
+-            MOZ_ASSERT(generation == table_->generation());
+-#endif
+-            return entry_->get();
+-        }
+-
+-        T* operator->() const {
+-#ifdef JS_DEBUG
+-            MOZ_ASSERT(found());
+-            MOZ_ASSERT(generation == table_->generation());
+-#endif
+-            return &entry_->get();
+-        }
+-    };
+-
+-    // A Ptr that can be used to add a key after a failed lookup.
+-    class AddPtr : public Ptr
+-    {
+-        friend class HashTable;
+-        HashNumber keyHash;
+-#ifdef JS_DEBUG
+-        uint64_t mutationCount;
+-#endif
+-
+-        AddPtr(Entry& entry, const HashTable& tableArg, HashNumber hn)
+-          : Ptr(entry, tableArg)
+-          , keyHash(hn)
+-#ifdef JS_DEBUG
+-          , mutationCount(tableArg.mutationCount)
+-#endif
+-        {}
+-
+-      public:
+-        AddPtr() : keyHash(0) {}
+-    };
+-
+-    // A collection of hash table entries. The collection is enumerated by
+-    // calling |front()| followed by |popFront()| as long as |!empty()|. As
+-    // with Ptr/AddPtr, Range objects must not be used after any mutating hash
+-    // table operation unless the |generation()| is tested.
+-    class Range
+-    {
+-      protected:
+-        friend class HashTable;
+-
+-        Range(const HashTable& tableArg, Entry* c, Entry* e)
+-          : cur(c)
+-          , end(e)
+-#ifdef JS_DEBUG
+-          , table_(&tableArg)
+-          , mutationCount(tableArg.mutationCount)
+-          , generation(tableArg.generation())
+-          , validEntry(true)
+-#endif
+-        {
+-            while (cur < end && !cur->isLive())
+-                ++cur;
+-        }
+-
+-        Entry* cur;
+-        Entry* end;
+-#ifdef JS_DEBUG
+-        const HashTable* table_;
+-        uint64_t mutationCount;
+-        Generation generation;
+-        bool validEntry;
+-#endif
+-
+-      public:
+-        Range()
+-          : cur(nullptr)
+-          , end(nullptr)
+-#ifdef JS_DEBUG
+-          , table_(nullptr)
+-          , mutationCount(0)
+-          , generation(0)
+-          , validEntry(false)
+-#endif
+-        {}
+-
+-        bool empty() const {
+-#ifdef JS_DEBUG
+-            MOZ_ASSERT(generation == table_->generation());
+-            MOZ_ASSERT(mutationCount == table_->mutationCount);
+-#endif
+-            return cur == end;
+-        }
+-
+-        T& front() const {
+-            MOZ_ASSERT(!empty());
+-#ifdef JS_DEBUG
+-            MOZ_ASSERT(validEntry);
+-            MOZ_ASSERT(generation == table_->generation());
+-            MOZ_ASSERT(mutationCount == table_->mutationCount);
+-#endif
+-            return cur->get();
+-        }
+-
+-        void popFront() {
+-            MOZ_ASSERT(!empty());
+-#ifdef JS_DEBUG
+-            MOZ_ASSERT(generation == table_->generation());
+-            MOZ_ASSERT(mutationCount == table_->mutationCount);
+-#endif
+-            while (++cur < end && !cur->isLive())
+-                continue;
+-#ifdef JS_DEBUG
+-            validEntry = true;
+-#endif
+-        }
+-    };
+-
+-    // A Range whose lifetime delimits a mutating enumeration of a hash table.
+-    // Since rehashing when elements were removed during enumeration would be
+-    // bad, it is postponed until the Enum is destructed.  Since the Enum's
+-    // destructor touches the hash table, the user must ensure that the hash
+-    // table is still alive when the destructor runs.
+-    class Enum : public Range
+-    {
+-        friend class HashTable;
+-
+-        HashTable& table_;
+-        bool rekeyed;
+-        bool removed;
+-
+-        // Enum is movable but not copyable.
+-        Enum(const Enum&) = delete;
+-        void operator=(const Enum&) = delete;
+-
+-      public:
+-        template<class Map>
+-        explicit Enum(Map& map)
+-          : Range(map.all()), table_(map.impl), rekeyed(false), removed(false) {}
+-
+-        MOZ_IMPLICIT Enum(Enum&& other)
+-          : Range(other), table_(other.table_), rekeyed(other.rekeyed), removed(other.removed)
+-        {
+-            other.rekeyed = false;
+-            other.removed = false;
+-        }
+-
+-        // Removes the |front()| element from the table, leaving |front()|
+-        // invalid until the next call to |popFront()|. For example:
+-        //
+-        //   HashSet<int> s;
+-        //   for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
+-        //     if (e.front() == 42)
+-        //       e.removeFront();
+-        void removeFront() {
+-            table_.remove(*this->cur);
+-            removed = true;
+-#ifdef JS_DEBUG
+-            this->validEntry = false;
+-            this->mutationCount = table_.mutationCount;
+-#endif
+-        }
+-
+-        NonConstT& mutableFront() {
+-            MOZ_ASSERT(!this->empty());
+-#ifdef JS_DEBUG
+-            MOZ_ASSERT(this->validEntry);
+-            MOZ_ASSERT(this->generation == this->Range::table_->generation());
+-            MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount);
+-#endif
+-            return this->cur->getMutable();
+-        }
+-
+-        // Removes the |front()| element and re-inserts it into the table with
+-        // a new key at the new Lookup position.  |front()| is invalid after
+-        // this operation until the next call to |popFront()|.
+-        void rekeyFront(const Lookup& l, const Key& k) {
+-            MOZ_ASSERT(&k != &HashPolicy::getKey(this->cur->get()));
+-            Ptr p(*this->cur, table_);
+-            table_.rekeyWithoutRehash(p, l, k);
+-            rekeyed = true;
+-#ifdef JS_DEBUG
+-            this->validEntry = false;
+-            this->mutationCount = table_.mutationCount;
+-#endif
+-        }
+-
+-        void rekeyFront(const Key& k) {
+-            rekeyFront(k, k);
+-        }
+-
+-        // Potentially rehashes the table.
+-        ~Enum() {
+-            if (rekeyed) {
+-                table_.gen++;
+-                table_.checkOverRemoved();
+-            }
+-
+-            if (removed)
+-                table_.compactIfUnderloaded();
+-        }
+-    };
+-
+-    // HashTable is movable
+-    HashTable(HashTable&& rhs)
+-      : AllocPolicy(rhs)
+-    {
+-        mozilla::PodAssign(this, &rhs);
+-        rhs.table = nullptr;
+-    }
+-    void operator=(HashTable&& rhs) {
+-        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+-        if (table)
+-            destroyTable(*this, table, capacity());
+-        mozilla::PodAssign(this, &rhs);
+-        rhs.table = nullptr;
+-    }
+-
+-  private:
+-    // HashTable is not copyable or assignable
+-    HashTable(const HashTable&) = delete;
+-    void operator=(const HashTable&) = delete;
+-
+-  private:
+-    static const size_t CAP_BITS = 30;
+-
+-  public:
+-    uint64_t    gen:56;                 // entry storage generation number
+-    uint64_t    hashShift:8;            // multiplicative hash shift
+-    Entry*      table;                  // entry storage
+-    uint32_t    entryCount;             // number of entries in table
+-    uint32_t    removedCount;           // removed entry sentinels in table
+-
+-#ifdef JS_DEBUG
+-    uint64_t     mutationCount;
+-    mutable bool mEntered;
+-    // Note that some updates to these stats are not thread-safe. See the
+-    // comment on the three-argument overloading of HashTable::lookup().
+-    mutable struct Stats
+-    {
+-        uint32_t        searches;       // total number of table searches
+-        uint32_t        steps;          // hash chain links traversed
+-        uint32_t        hits;           // searches that found key
+-        uint32_t        misses;         // searches that didn't find key
+-        uint32_t        addOverRemoved; // adds that recycled a removed entry
+-        uint32_t        removes;        // calls to remove
+-        uint32_t        removeFrees;    // calls to remove that freed the entry
+-        uint32_t        grows;          // table expansions
+-        uint32_t        shrinks;        // table contractions
+-        uint32_t        compresses;     // table compressions
+-        uint32_t        rehashes;       // tombstone decontaminations
+-    } stats;
+-#   define METER(x) x
+-#else
+-#   define METER(x)
+-#endif
+-
+-    // The default initial capacity is 32 (enough to hold 16 elements), but it
+-    // can be as low as 4.
+-    static const uint32_t sMinCapacity  = 4;
+-    static const uint32_t sMaxInit      = 1u << (CAP_BITS - 1);
+-    static const uint32_t sMaxCapacity  = 1u << CAP_BITS;
+-
+-    // Hash-table alpha is conceptually a fraction, but to avoid floating-point
+-    // math we implement it as a ratio of integers.
+-    static const uint8_t sAlphaDenominator = 4;
+-    static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
+-    static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
+-
+-    static const HashNumber sFreeKey = Entry::sFreeKey;
+-    static const HashNumber sRemovedKey = Entry::sRemovedKey;
+-    static const HashNumber sCollisionBit = Entry::sCollisionBit;
+-
+-    void setTableSizeLog2(uint32_t sizeLog2)
+-    {
+-        hashShift = js::kHashNumberBits - sizeLog2;
+-    }
+-
+-    static bool isLiveHash(HashNumber hash)
+-    {
+-        return Entry::isLiveHash(hash);
+-    }
+-
+-    static HashNumber prepareHash(const Lookup& l)
+-    {
+-        HashNumber keyHash = mozilla::ScrambleHashCode(HashPolicy::hash(l));
+-
+-        // Avoid reserved hash codes.
+-        if (!isLiveHash(keyHash))
+-            keyHash -= (sRemovedKey + 1);
+-        return keyHash & ~sCollisionBit;
+-    }
+-
+-    enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
+-
+-    static Entry* createTable(AllocPolicy& alloc, uint32_t capacity,
+-                              FailureBehavior reportFailure = ReportFailure)
+-    {
+-        Entry* table = reportFailure
+-                       ? alloc.template pod_malloc<Entry>(capacity)
+-                       : alloc.template maybe_pod_malloc<Entry>(capacity);
+-        if (table) {
+-            for (uint32_t i = 0; i < capacity; i++)
+-                new (&table[i]) Entry();
+-        }
+-        return table;
+-    }
+-
+-    static Entry* maybeCreateTable(AllocPolicy& alloc, uint32_t capacity)
+-    {
+-        Entry* table = alloc.template maybe_pod_malloc<Entry>(capacity);
+-        if (table) {
+-            for (uint32_t i = 0; i < capacity; i++)
+-                new (&table[i]) Entry();
+-        }
+-        return table;
+-    }
+-
+-    static void destroyTable(AllocPolicy& alloc, Entry* oldTable, uint32_t capacity)
+-    {
+-        Entry* end = oldTable + capacity;
+-        for (Entry* e = oldTable; e < end; ++e)
+-            e->~Entry();
+-        alloc.free_(oldTable, capacity);
+-    }
+-
+-  public:
+-    explicit HashTable(AllocPolicy ap)
+-      : AllocPolicy(ap)
+-      , gen(0)
+-      , hashShift(js::kHashNumberBits)
+-      , table(nullptr)
+-      , entryCount(0)
+-      , removedCount(0)
+-#ifdef JS_DEBUG
+-      , mutationCount(0)
+-      , mEntered(false)
+-#endif
+-    {}
+-
+-    MOZ_MUST_USE bool init(uint32_t length)
+-    {
+-        MOZ_ASSERT(!initialized());
+-
+-        // Reject all lengths whose initial computed capacity would exceed
+-        // sMaxCapacity.  Round that maximum length down to the nearest power
+-        // of two for speedier code.
+-        if (MOZ_UNLIKELY(length > sMaxInit)) {
+-            this->reportAllocOverflow();
+-            return false;
+-        }
+-
+-        static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
+-                      "multiplication in numerator below could overflow");
+-        static_assert(sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
+-                      "numerator calculation below could potentially overflow");
+-
+-        // Compute the smallest capacity allowing |length| elements to be
+-        // inserted without rehashing: ceil(length / max-alpha).  (Ceiling
+-        // integral division: <http://stackoverflow.com/a/2745086>.)
+-        uint32_t newCapacity =
+-            (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator;
+-        if (newCapacity < sMinCapacity)
+-            newCapacity = sMinCapacity;
+-
+-        // Round up capacity to next power-of-two.
+-        uint32_t log2 = mozilla::CeilingLog2(newCapacity);
+-        newCapacity = 1u << log2;
+-
+-        MOZ_ASSERT(newCapacity >= length);
+-        MOZ_ASSERT(newCapacity <= sMaxCapacity);
+-
+-        table = createTable(*this, newCapacity);
+-        if (!table)
+-            return false;
+-
+-        setTableSizeLog2(log2);
+-        METER(memset(&stats, 0, sizeof(stats)));
+-        return true;
+-    }
+-
+-    bool initialized() const
+-    {
+-        return !!table;
+-    }
+-
+-    ~HashTable()
+-    {
+-        if (table)
+-            destroyTable(*this, table, capacity());
+-    }
+-
+-  private:
+-    HashNumber hash1(HashNumber hash0) const
+-    {
+-        return hash0 >> hashShift;
+-    }
+-
+-    struct DoubleHash
+-    {
+-        HashNumber h2;
+-        HashNumber sizeMask;
+-    };
+-
+-    DoubleHash hash2(HashNumber curKeyHash) const
+-    {
+-        uint32_t sizeLog2 = js::kHashNumberBits - hashShift;
+-        DoubleHash dh = {
+-            ((curKeyHash << sizeLog2) >> hashShift) | 1,
+-            (HashNumber(1) << sizeLog2) - 1
+-        };
+-        return dh;
+-    }
+-
+-    static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh)
+-    {
+-        return (h1 - dh.h2) & dh.sizeMask;
+-    }
+-
+-    bool overloaded()
+-    {
+-        static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
+-                      "multiplication below could overflow");
+-        return entryCount + removedCount >=
+-               capacity() * sMaxAlphaNumerator / sAlphaDenominator;
+-    }
+-
+-    // Would the table be underloaded if it had the given capacity and entryCount?
+-    static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
+-    {
+-        static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
+-                      "multiplication below could overflow");
+-        return capacity > sMinCapacity &&
+-               entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
+-    }
+-
+-    bool underloaded()
+-    {
+-        return wouldBeUnderloaded(capacity(), entryCount);
+-    }
+-
+-    static MOZ_ALWAYS_INLINE bool match(Entry& e, const Lookup& l)
+-    {
+-        return HashPolicy::match(HashPolicy::getKey(e.get()), l);
+-    }
+-
+-    // Warning: in order for readonlyThreadsafeLookup() to be safe this
+-    // function must not modify the table in any way when |collisionBit| is 0.
+-    // (The use of the METER() macro to increment stats violates this
+-    // restriction but we will live with that for now because it's enabled so
+-    // rarely.)
+-    MOZ_ALWAYS_INLINE Entry&
+-    lookup(const Lookup& l, HashNumber keyHash, uint32_t collisionBit) const
+-    {
+-        MOZ_ASSERT(isLiveHash(keyHash));
+-        MOZ_ASSERT(!(keyHash & sCollisionBit));
+-        MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
+-        MOZ_ASSERT(table);
+-        METER(stats.searches++);
+-
+-        // Compute the primary hash address.
+-        HashNumber h1 = hash1(keyHash);
+-        Entry* entry = &table[h1];
+-
+-        // Miss: return space for a new entry.
+-        if (entry->isFree()) {
+-            METER(stats.misses++);
+-            return *entry;
+-        }
+-
+-        // Hit: return entry.
+-        if (entry->matchHash(keyHash) && match(*entry, l)) {
+-            METER(stats.hits++);
+-            return *entry;
+-        }
+-
+-        // Collision: double hash.
+-        DoubleHash dh = hash2(keyHash);
+-
+-        // Save the first removed entry pointer so we can recycle later.
+-        Entry* firstRemoved = nullptr;
+-
+-        while (true) {
+-            if (MOZ_UNLIKELY(entry->isRemoved())) {
+-                if (!firstRemoved)
+-                    firstRemoved = entry;
+-            } else {
+-                if (collisionBit == sCollisionBit)
+-                    entry->setCollision();
+-            }
+-
+-            METER(stats.steps++);
+-            h1 = applyDoubleHash(h1, dh);
+-
+-            entry = &table[h1];
+-            if (entry->isFree()) {
+-                METER(stats.misses++);
+-                return firstRemoved ? *firstRemoved : *entry;
+-            }
+-
+-            if (entry->matchHash(keyHash) && match(*entry, l)) {
+-                METER(stats.hits++);
+-                return *entry;
+-            }
+-        }
+-    }
+-
+-    // This is a copy of lookup hardcoded to the assumptions:
+-    //   1. the lookup is a lookupForAdd
+-    //   2. the key, whose |keyHash| has been passed is not in the table,
+-    //   3. no entries have been removed from the table.
+-    // This specialized search avoids the need for recovering lookup values
+-    // from entries, which allows more flexible Lookup/Key types.
+-    Entry& findFreeEntry(HashNumber keyHash)
+-    {
+-        MOZ_ASSERT(!(keyHash & sCollisionBit));
+-        MOZ_ASSERT(table);
+-        METER(stats.searches++);
+-
+-        // We assume 'keyHash' has already been distributed.
+-
+-        // Compute the primary hash address.
+-        HashNumber h1 = hash1(keyHash);
+-        Entry* entry = &table[h1];
+-
+-        // Miss: return space for a new entry.
+-        if (!entry->isLive()) {
+-            METER(stats.misses++);
+-            return *entry;
+-        }
+-
+-        // Collision: double hash.
+-        DoubleHash dh = hash2(keyHash);
+-
+-        while (true) {
+-            MOZ_ASSERT(!entry->isRemoved());
+-            entry->setCollision();
+-
+-            METER(stats.steps++);
+-            h1 = applyDoubleHash(h1, dh);
+-
+-            entry = &table[h1];
+-            if (!entry->isLive()) {
+-                METER(stats.misses++);
+-                return *entry;
+-            }
+-        }
+-    }
+-
+-    enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
+-
+-    RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure)
+-    {
+-        // Look, but don't touch, until we succeed in getting new entry store.
+-        Entry* oldTable = table;
+-        uint32_t oldCap = capacity();
+-        uint32_t newLog2 = js::kHashNumberBits - hashShift + deltaLog2;
+-        uint32_t newCapacity = 1u << newLog2;
+-        if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
+-            if (reportFailure)
+-                this->reportAllocOverflow();
+-            return RehashFailed;
+-        }
+-
+-        Entry* newTable = createTable(*this, newCapacity, reportFailure);
+-        if (!newTable)
+-            return RehashFailed;
+-
+-        // We can't fail from here on, so update table parameters.
+-        setTableSizeLog2(newLog2);
+-        removedCount = 0;
+-        gen++;
+-        table = newTable;
+-
+-        // Copy only live entries, leaving removed ones behind.
+-        Entry* end = oldTable + oldCap;
+-        for (Entry* src = oldTable; src < end; ++src) {
+-            if (src->isLive()) {
+-                HashNumber hn = src->getKeyHash();
+-                findFreeEntry(hn).setLive(
+-                    hn, std::move(const_cast<typename Entry::NonConstT&>(src->get())));
+-            }
+-
+-            src->~Entry();
+-        }
+-
+-        // All entries have been destroyed, no need to destroyTable.
+-        this->free_(oldTable, oldCap);
+-        return Rehashed;
+-    }
+-
+-    bool shouldCompressTable()
+-    {
+-        // Compress if a quarter or more of all entries are removed.
+-        return removedCount >= (capacity() >> 2);
+-    }
+-
+-    RebuildStatus checkOverloaded(FailureBehavior reportFailure = ReportFailure)
+-    {
+-        if (!overloaded())
+-            return NotOverloaded;
+-
+-        int deltaLog2;
+-        if (shouldCompressTable()) {
+-            METER(stats.compresses++);
+-            deltaLog2 = 0;
+-        } else {
+-            METER(stats.grows++);
+-            deltaLog2 = 1;
+-        }
+-
+-        return changeTableSize(deltaLog2, reportFailure);
+-    }
+-
+-    // Infallibly rehash the table if we are overloaded with removals.
+-    void checkOverRemoved()
+-    {
+-        if (overloaded()) {
+-            if (checkOverloaded(DontReportFailure) == RehashFailed)
+-                rehashTableInPlace();
+-        }
+-    }
+-
+-    void remove(Entry& e)
+-    {
+-        MOZ_ASSERT(table);
+-        METER(stats.removes++);
+-
+-        if (e.hasCollision()) {
+-            e.removeLive();
+-            removedCount++;
+-        } else {
+-            METER(stats.removeFrees++);
+-            e.clearLive();
+-        }
+-        entryCount--;
+-#ifdef JS_DEBUG
+-        mutationCount++;
+-#endif
+-    }
+-
+-    void checkUnderloaded()
+-    {
+-        if (underloaded()) {
+-            METER(stats.shrinks++);
+-            (void) changeTableSize(-1, DontReportFailure);
+-        }
+-    }
+-
+-    // Resize the table down to the largest capacity which doesn't underload the
+-    // table.  Since we call checkUnderloaded() on every remove, you only need
+-    // to call this after a bulk removal of items done without calling remove().
+-    void compactIfUnderloaded()
+-    {
+-        int32_t resizeLog2 = 0;
+-        uint32_t newCapacity = capacity();
+-        while (wouldBeUnderloaded(newCapacity, entryCount)) {
+-            newCapacity = newCapacity >> 1;
+-            resizeLog2--;
+-        }
+-
+-        if (resizeLog2 != 0)
+-            (void) changeTableSize(resizeLog2, DontReportFailure);
+-    }
+-
+-    // This is identical to changeTableSize(currentSize), but without requiring
+-    // a second table.  We do this by recycling the collision bits to tell us if
+-    // the element is already inserted or still waiting to be inserted.  Since
+-    // already-inserted elements win any conflicts, we get the same table as we
+-    // would have gotten through random insertion order.
+-    void rehashTableInPlace()
+-    {
+-        METER(stats.rehashes++);
+-        removedCount = 0;
+-        gen++;
+-        for (size_t i = 0; i < capacity(); ++i)
+-            table[i].unsetCollision();
+-
+-        for (size_t i = 0; i < capacity();) {
+-            Entry* src = &table[i];
+-
+-            if (!src->isLive() || src->hasCollision()) {
+-                ++i;
+-                continue;
+-            }
+-
+-            HashNumber keyHash = src->getKeyHash();
+-            HashNumber h1 = hash1(keyHash);
+-            DoubleHash dh = hash2(keyHash);
+-            Entry* tgt = &table[h1];
+-            while (true) {
+-                if (!tgt->hasCollision()) {
+-                    src->swap(tgt);
+-                    tgt->setCollision();
+-                    break;
+-                }
+-
+-                h1 = applyDoubleHash(h1, dh);
+-                tgt = &table[h1];
+-            }
+-        }
+-
+-        // TODO: this algorithm leaves collision bits on *all* elements, even if
+-        // they are on no collision path. We have the option of setting the
+-        // collision bits correctly on a subsequent pass or skipping the rehash
+-        // unless we are totally filled with tombstones: benchmark to find out
+-        // which approach is best.
+-    }
+-
+-    // Note: |l| may be a reference to a piece of |u|, so this function
+-    // must take care not to use |l| after moving |u|.
+-    //
+-    // Prefer to use putNewInfallible; this function does not check
+-    // invariants.
+-    template <typename... Args>
+-    void putNewInfallibleInternal(const Lookup& l, Args&&... args)
+-    {
+-        MOZ_ASSERT(table);
+-
+-        HashNumber keyHash = prepareHash(l);
+-        Entry* entry = &findFreeEntry(keyHash);
+-        MOZ_ASSERT(entry);
+-
+-        if (entry->isRemoved()) {
+-            METER(stats.addOverRemoved++);
+-            removedCount--;
+-            keyHash |= sCollisionBit;
+-        }
+-
+-        entry->setLive(keyHash, std::forward<Args>(args)...);
+-        entryCount++;
+-#ifdef JS_DEBUG
+-        mutationCount++;
+-#endif
+-    }
+-
+-  public:
+-    void clear()
+-    {
+-        Entry* end = table + capacity();
+-        for (Entry* e = table; e < end; ++e)
+-            e->clear();
+-
+-        removedCount = 0;
+-        entryCount = 0;
+-#ifdef JS_DEBUG
+-        mutationCount++;
+-#endif
+-    }
+-
+-    void clearAndShrink()
+-    {
+-        clear();
+-        compactIfUnderloaded();
+-    }
+-
+-    void finish()
+-    {
+-#ifdef JS_DEBUG
+-        MOZ_ASSERT(!mEntered);
+-#endif
+-        if (!table)
+-            return;
+-
+-        destroyTable(*this, table, capacity());
+-        table = nullptr;
+-        gen++;
+-        entryCount = 0;
+-        removedCount = 0;
+-#ifdef JS_DEBUG
+-        mutationCount++;
+-#endif
+-    }
+-
+-    Range all() const
+-    {
+-        MOZ_ASSERT(table);
+-        return Range(*this, table, table + capacity());
+-    }
+-
+-    bool empty() const
+-    {
+-        MOZ_ASSERT(table);
+-        return !entryCount;
+-    }
+-
+-    uint32_t count() const
+-    {
+-        MOZ_ASSERT(table);
+-        return entryCount;
+-    }
+-
+-    uint32_t capacity() const
+-    {
+-        MOZ_ASSERT(table);
+-        return 1u << (js::kHashNumberBits - hashShift);
+-    }
+-
+-    Generation generation() const
+-    {
+-        MOZ_ASSERT(table);
+-        return Generation(gen);
+-    }
+-
+-    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
+-    {
+-        return mallocSizeOf(table);
+-    }
+-
+-    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
+-    {
+-        return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+-    }
+-
+-    MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const
+-    {
+-        mozilla::ReentrancyGuard g(*this);
+-        if (!HasHash<HashPolicy>(l))
+-            return Ptr();
+-        HashNumber keyHash = prepareHash(l);
+-        return Ptr(lookup(l, keyHash, 0), *this);
+-    }
+-
+-    MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
+-    {
+-        if (!HasHash<HashPolicy>(l))
+-            return Ptr();
+-        HashNumber keyHash = prepareHash(l);
+-        return Ptr(lookup(l, keyHash, 0), *this);
+-    }
+-
+-    MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
+-    {
+-        mozilla::ReentrancyGuard g(*this);
+-        if (!EnsureHash<HashPolicy>(l))
+-            return AddPtr();
+-        HashNumber keyHash = prepareHash(l);
+-        // Directly call the constructor in the return statement to avoid
+-        // excess copying when building with Visual Studio 2017.
+-        // See bug 1385181.
+-        return AddPtr(lookup(l, keyHash, sCollisionBit), *this, keyHash);
+-    }
+-
+-    template <typename... Args>
+-    MOZ_MUST_USE bool add(AddPtr& p, Args&&... args)
+-    {
+-        mozilla::ReentrancyGuard g(*this);
+-        MOZ_ASSERT(table);
+-        MOZ_ASSERT_IF(p.isValid(), p.table_ == this);
+-        MOZ_ASSERT(!p.found());
+-        MOZ_ASSERT(!(p.keyHash & sCollisionBit));
+-
+-        // Check for error from ensureHash() here.
+-        if (!p.isValid())
+-            return false;
+-
+-        MOZ_ASSERT(p.generation == generation());
+-#ifdef JS_DEBUG
+-        MOZ_ASSERT(p.mutationCount == mutationCount);
+-#endif
+-
+-        // Changing an entry from removed to live does not affect whether we
+-        // are overloaded and can be handled separately.
+-        if (p.entry_->isRemoved()) {
+-            if (!this->checkSimulatedOOM())
+-                return false;
+-            METER(stats.addOverRemoved++);
+-            removedCount--;
+-            p.keyHash |= sCollisionBit;
+-        } else {
+-            // Preserve the validity of |p.entry_|.
+-            RebuildStatus status = checkOverloaded();
+-            if (status == RehashFailed)
+-                return false;
+-            if (status == NotOverloaded && !this->checkSimulatedOOM())
+-                return false;
+-            if (status == Rehashed)
+-                p.entry_ = &findFreeEntry(p.keyHash);
+-        }
+-
+-        p.entry_->setLive(p.keyHash, std::forward<Args>(args)...);
+-        entryCount++;
+-#ifdef JS_DEBUG
+-        mutationCount++;
+-        p.generation = generation();
+-        p.mutationCount = mutationCount;
+-#endif
+-        return true;
+-    }
+-
+-    // Note: |l| may be a reference to a piece of |u|, so this function
+-    // must take care not to use |l| after moving |u|.
+-    template <typename... Args>
+-    void putNewInfallible(const Lookup& l, Args&&... args)
+-    {
+-        MOZ_ASSERT(!lookup(l).found());
+-        mozilla::ReentrancyGuard g(*this);
+-        putNewInfallibleInternal(l, std::forward<Args>(args)...);
+-    }
+-
+-    // Note: |l| may be alias arguments in |args|, so this function must take
+-    // care not to use |l| after moving |args|.
+-    template <typename... Args>
+-    MOZ_MUST_USE bool putNew(const Lookup& l, Args&&... args)
+-    {
+-        if (!this->checkSimulatedOOM())
+-            return false;
+-
+-        if (!EnsureHash<HashPolicy>(l))
+-            return false;
+-
+-        if (checkOverloaded() == RehashFailed)
+-            return false;
+-
+-        putNewInfallible(l, std::forward<Args>(args)...);
+-        return true;
+-    }
+-
+-    // Note: |l| may be a reference to a piece of |u|, so this function
+-    // must take care not to use |l| after moving |u|.
+-    template <typename... Args>
+-    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args)
+-    {
+-        // Check for error from ensureHash() here.
+-        if (!p.isValid())
+-            return false;
+-
+-#ifdef JS_DEBUG
+-        p.generation = generation();
+-        p.mutationCount = mutationCount;
+-#endif
+-        {
+-            mozilla::ReentrancyGuard g(*this);
+-            MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
+-            p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
+-        }
+-        return p.found() || add(p, std::forward<Args>(args)...);
+-    }
+-
+-    void remove(Ptr p)
+-    {
+-        MOZ_ASSERT(table);
+-        mozilla::ReentrancyGuard g(*this);
+-        MOZ_ASSERT(p.found());
+-        MOZ_ASSERT(p.generation == generation());
+-        remove(*p.entry_);
+-        checkUnderloaded();
+-    }
+-
+-    void rekeyWithoutRehash(Ptr p, const Lookup& l, const Key& k)
+-    {
+-        MOZ_ASSERT(table);
+-        mozilla::ReentrancyGuard g(*this);
+-        MOZ_ASSERT(p.found());
+-        MOZ_ASSERT(p.generation == generation());
+-        typename HashTableEntry<T>::NonConstT t(std::move(*p));
+-        HashPolicy::setKey(t, const_cast<Key&>(k));
+-        remove(*p.entry_);
+-        putNewInfallibleInternal(l, std::move(t));
+-    }
+-
+-    void rekeyAndMaybeRehash(Ptr p, const Lookup& l, const Key& k)
+-    {
+-        rekeyWithoutRehash(p, l, k);
+-        checkOverRemoved();
+-    }
+-
+-#undef METER
+-};
+-
+-} // namespace detail
+-} // namespace js
+-
+ #endif  /* js_HashTable_h */
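+ The block removed above is the old js::detail::HashTable probe machinery:
+ hash1() picks the primary slot, hash2() derives an odd step so the probe walks
+ every slot of the power-of-two table, and tombstones left by remove() are
+ recycled on add. A minimal self-contained sketch of that double-hash loop,
+ assuming a (h1 - h2) & sizeMask step and a zero hash as the free sentinel, as
+ the entries above use; Slot and toyLookup are illustrative names only:
+ 
+     #include <cstdint>
+     #include <vector>
+ 
+     using HashNumber = uint32_t;
+ 
+     struct Slot { HashNumber keyHash = 0; int value = 0; };  // keyHash 0 == free
+ 
+     // Probe with double hashing until a free slot (miss) or a matching hash.
+     // Terminates because a real table is never full (max load factor 3/4).
+     int* toyLookup(std::vector<Slot>& table, HashNumber keyHash) {
+         const HashNumber sizeMask = HashNumber(table.size()) - 1;  // size == 2^k
+         HashNumber h1 = keyHash & sizeMask;     // primary address
+         HashNumber h2 = (keyHash >> 16) | 1;    // odd step => visits every slot
+         while (true) {
+             Slot& s = table[h1];
+             if (s.keyHash == 0)
+                 return nullptr;                 // free slot: key is absent
+             if (s.keyHash == keyHash)
+                 return &s.value;                // hit (toy: hash equality only)
+             h1 = (h1 - h2) & sizeMask;          // double-hash step
+         }
+     }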
+diff --git a/js/public/RootingAPI.h b/js/public/RootingAPI.h
+--- a/js/public/RootingAPI.h
++++ b/js/public/RootingAPI.h
+@@ -761,28 +761,32 @@ struct JS_PUBLIC_API(MovableCellHasher<J
+     static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
+     static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
+     static bool match(const Key& k, const Lookup& l) {
+         return MovableCellHasher<T>::match(k.unbarrieredGet(), l);
+     }
+     static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
+ };
+ 
++} // namespace js
++
++namespace mozilla {
++
+ template <typename T>
+-struct FallibleHashMethods<MovableCellHasher<T>>
++struct FallibleHashMethods<js::MovableCellHasher<T>>
+ {
+     template <typename Lookup> static bool hasHash(Lookup&& l) {
+-        return MovableCellHasher<T>::hasHash(std::forward<Lookup>(l));
++        return js::MovableCellHasher<T>::hasHash(std::forward<Lookup>(l));
+     }
+     template <typename Lookup> static bool ensureHash(Lookup&& l) {
+-        return MovableCellHasher<T>::ensureHash(std::forward<Lookup>(l));
++        return js::MovableCellHasher<T>::ensureHash(std::forward<Lookup>(l));
+     }
+ };
+ 
+-} /* namespace js */
++} // namespace mozilla
+ 
+ namespace js {
+ 
+ // The alignment must be set because the Rooted and PersistentRooted ptr fields
+ // may be accessed through reinterpret_cast<Rooted<ConcreteTraceable>*>, and
+ // the compiler may choose a different alignment for the ptr field when it
+ // knows the actual type stored in DispatchWrapper<T>.
+ //
+diff --git a/js/public/UbiNode.h b/js/public/UbiNode.h
+--- a/js/public/UbiNode.h
++++ b/js/public/UbiNode.h
+@@ -1162,17 +1162,17 @@ class JS_PUBLIC_API(Concrete<void>) : pu
+   public:
+     static void construct(void* storage, void* ptr) { new (storage) Concrete(ptr); }
+ };
+ 
+ 
+ } // namespace ubi
+ } // namespace JS
+ 
+-namespace js {
++namespace mozilla {
+ 
+ // Make ubi::Node::HashPolicy the default hash policy for ubi::Node.
+ template<> struct DefaultHasher<JS::ubi::Node> : JS::ubi::Node::HashPolicy { };
+ template<> struct DefaultHasher<JS::ubi::StackFrame> : JS::ubi::StackFrame::HashPolicy { };
+ 
+-} // namespace js
++} // namespace mozilla
+ 
+ #endif // js_UbiNode_h
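+ The UbiNode.h hunk above is the mechanical pattern this patch applies all over
+ the tree: DefaultHasher's primary template now lives in namespace mozilla, so
+ every specialization must be reopened there. A sketch for one's own key type,
+ assuming the moved header is exported as "mozilla/HashTable.h"; MyKey is a
+ hypothetical stand-in:
+ 
+     #include "mozilla/HashFunctions.h"
+     #include "mozilla/HashTable.h"
+ 
+     struct MyKey { uint32_t id; };
+ 
+     namespace mozilla {
+ 
+     template <>
+     struct DefaultHasher<MyKey>
+     {
+         using Lookup = MyKey;
+         static HashNumber hash(const Lookup& l) { return HashGeneric(l.id); }
+         static bool match(const MyKey& k, const Lookup& l) { return k.id == l.id; }
+     };
+ 
+     } // namespace mozilla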
+diff --git a/js/src/ds/OrderedHashTable.h b/js/src/ds/OrderedHashTable.h
+--- a/js/src/ds/OrderedHashTable.h
++++ b/js/src/ds/OrderedHashTable.h
+@@ -35,16 +35,18 @@
+  * following static member functions:
+  *     bool isEmpty(const Key&);
+  *     void makeEmpty(Key*);
+  */
+ 
+ #include "mozilla/HashFunctions.h"
+ #include "mozilla/Move.h"
+ 
++#include "js/HashTable.h"
++
+ namespace js {
+ 
+ namespace detail {
+ 
+ /*
+  * detail::OrderedHashTable is the underlying data structure used to implement both
+  * OrderedHashMap and OrderedHashSet. Programs should use one of those two
+  * templates rather than OrderedHashTable.
+diff --git a/js/src/gc/Barrier.h b/js/src/gc/Barrier.h
+--- a/js/src/gc/Barrier.h
++++ b/js/src/gc/Barrier.h
+@@ -866,49 +866,57 @@ struct GCPtrHasher
+     typedef GCPtr<T> Key;
+     typedef T Lookup;
+ 
+     static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+     static bool match(const Key& k, Lookup l) { return k.get() == l; }
+     static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
+ };
+ 
+-/* Specialized hashing policy for GCPtrs. */
+-template <class T>
+-struct DefaultHasher<GCPtr<T>> : GCPtrHasher<T> {};
+-
+ template <class T>
+ struct PreBarrieredHasher
+ {
+     typedef PreBarriered<T> Key;
+     typedef T Lookup;
+ 
+     static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+     static bool match(const Key& k, Lookup l) { return k.get() == l; }
+     static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
+ };
+ 
+-template <class T>
+-struct DefaultHasher<PreBarriered<T>> : PreBarrieredHasher<T> { };
+-
+ /* Useful for hashtables with a ReadBarriered as key. */
+ template <class T>
+ struct ReadBarrieredHasher
+ {
+     typedef ReadBarriered<T> Key;
+     typedef T Lookup;
+ 
+     static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+     static bool match(const Key& k, Lookup l) { return k.unbarrieredGet() == l; }
+     static void rekey(Key& k, const Key& newKey) { k.set(newKey.unbarrieredGet()); }
+ };
+ 
++} // namespace js
++
++namespace mozilla {
++
++/* Specialized hashing policy for GCPtrs. */
++template <class T>
++struct DefaultHasher<js::GCPtr<T>> : js::GCPtrHasher<T> {};
++
++template <class T>
++struct DefaultHasher<js::PreBarriered<T>> : js::PreBarrieredHasher<T> { };
++
+ /* Specialized hashing policy for ReadBarriereds. */
+ template <class T>
+-struct DefaultHasher<ReadBarriered<T>> : ReadBarrieredHasher<T> { };
++struct DefaultHasher<js::ReadBarriered<T>> : js::ReadBarrieredHasher<T> { };
++
++} // namespace mozilla
++
++namespace js {
+ 
+ class ArrayObject;
+ class ArrayBufferObject;
+ class GlobalObject;
+ class Scope;
+ class ScriptSourceObject;
+ class Shape;
+ class BaseShape;
+diff --git a/js/src/jsapi-tests/testUbiNode.cpp b/js/src/jsapi-tests/testUbiNode.cpp
+--- a/js/src/jsapi-tests/testUbiNode.cpp
++++ b/js/src/jsapi-tests/testUbiNode.cpp
+@@ -304,33 +304,33 @@ struct ExpectedEdge
+     char to;
+ 
+     ExpectedEdge(FakeNode& fromNode, FakeNode& toNode)
+         : from(fromNode.name)
+         , to(toNode.name)
+     { }
+ };
+ 
+-namespace js {
++namespace mozilla {
+ 
+ template <>
+ struct DefaultHasher<ExpectedEdge>
+ {
+     using Lookup = ExpectedEdge;
+ 
+     static HashNumber hash(const Lookup& l) {
+         return mozilla::AddToHash(l.from, l.to);
+     }
+ 
+     static bool match(const ExpectedEdge& k, const Lookup& l) {
+         return k.from == l.from && k.to == l.to;
+     }
+ };
+ 
+-} // namespace js
++} // namespace mozilla
+ 
+ BEGIN_TEST(test_ubiPostOrder)
+ {
+     // Construct the following graph:
+     //
+     //                          .-----.
+     //                          |     |
+     //                  .-------|  r  |---------------.
+diff --git a/js/src/vm/ObjectGroup.cpp b/js/src/vm/ObjectGroup.cpp
+--- a/js/src/vm/ObjectGroup.cpp
++++ b/js/src/vm/ObjectGroup.cpp
+@@ -426,28 +426,28 @@ struct ObjectGroupRealm::NewEntry
+                (associated && IsAboutToBeFinalizedUnbarriered(&associated));
+     }
+ 
+     bool operator==(const NewEntry& other) const {
+         return group == other.group && associated == other.associated;
+     }
+ };
+ 
+-namespace js {
++namespace mozilla {
+ template <>
+ struct FallibleHashMethods<ObjectGroupRealm::NewEntry>
+ {
+     template <typename Lookup> static bool hasHash(Lookup&& l) {
+         return ObjectGroupRealm::NewEntry::hasHash(std::forward<Lookup>(l));
+     }
+     template <typename Lookup> static bool ensureHash(Lookup&& l) {
+         return ObjectGroupRealm::NewEntry::ensureHash(std::forward<Lookup>(l));
+     }
+ };
+-} // namespace js
++} // namespace mozilla
+ 
+ class ObjectGroupRealm::NewTable : public JS::WeakCache<js::GCHashSet<NewEntry, NewEntry,
+                                                                             SystemAllocPolicy>>
+ {
+     using Table = js::GCHashSet<NewEntry, NewEntry, SystemAllocPolicy>;
+     using Base = JS::WeakCache<Table>;
+ 
+   public:
+diff --git a/js/src/vm/SavedFrame.h b/js/src/vm/SavedFrame.h
+--- a/js/src/vm/SavedFrame.h
++++ b/js/src/vm/SavedFrame.h
+@@ -167,27 +167,35 @@ struct SavedFrame::HashPolicy
+     static bool       ensureHash(const Lookup& l);
+     static HashNumber hash(const Lookup& lookup);
+     static bool       match(SavedFrame* existing, const Lookup& lookup);
+ 
+     typedef ReadBarriered<SavedFrame*> Key;
+     static void rekey(Key& key, const Key& newKey);
+ };
+ 
++} // namespace js
++
++namespace mozilla {
++
+ template <>
+-struct FallibleHashMethods<SavedFrame::HashPolicy>
++struct FallibleHashMethods<js::SavedFrame::HashPolicy>
+ {
+     template <typename Lookup> static bool hasHash(Lookup&& l) {
+-        return SavedFrame::HashPolicy::hasHash(std::forward<Lookup>(l));
++        return js::SavedFrame::HashPolicy::hasHash(std::forward<Lookup>(l));
+     }
+     template <typename Lookup> static bool ensureHash(Lookup&& l) {
+-        return SavedFrame::HashPolicy::ensureHash(std::forward<Lookup>(l));
++        return js::SavedFrame::HashPolicy::ensureHash(std::forward<Lookup>(l));
+     }
+ };
+ 
++} // namespace mozilla
++
++namespace js {
++
+ // Assert that if the given object is not null, that it must be either a
+ // SavedFrame object or wrapper (Xray or CCW) around a SavedFrame object.
+ inline void AssertObjectIsSavedFrameOrWrapper(JSContext* cx, HandleObject stack);
+ 
+ // When we reconstruct a SavedFrame stack from a JS::ubi::StackFrame, we may not
+ // have access to the principals that the original stack was captured
+ // with. Instead, we use these two singleton principals based on whether
+ // JS::ubi::StackFrame::isSystem or not. These singletons should never be passed
+diff --git a/js/src/vm/Shape.h b/js/src/vm/Shape.h
+--- a/js/src/vm/Shape.h
++++ b/js/src/vm/Shape.h
+@@ -668,28 +668,36 @@ HashId(jsid id)
+     // could then be recovered from the hash code. See bug 1330769.
+     if (MOZ_LIKELY(JSID_IS_ATOM(id)))
+         return JSID_TO_ATOM(id)->hash();
+     if (JSID_IS_SYMBOL(id))
+         return JSID_TO_SYMBOL(id)->hash();
+     return mozilla::HashGeneric(JSID_BITS(id));
+ }
+ 
++} // namespace js
++
++namespace mozilla {
++
+ template <>
+ struct DefaultHasher<jsid>
+ {
+     typedef jsid Lookup;
+     static HashNumber hash(jsid id) {
+-        return HashId(id);
++        return js::HashId(id);
+     }
+     static bool match(jsid id1, jsid id2) {
+         return id1 == id2;
+     }
+ };
+ 
++} // namespace mozilla
++
++namespace js {
++
+ using BaseShapeSet = JS::WeakCache<JS::GCHashSet<ReadBarriered<UnownedBaseShape*>,
+                                                  StackBaseShape,
+                                                  SystemAllocPolicy>>;
+ 
+ class Shape : public gc::TenuredCell
+ {
+     friend class ::JSObject;
+     friend class ::JSFunction;
+diff --git a/js/src/vm/Stack.h b/js/src/vm/Stack.h
+--- a/js/src/vm/Stack.h
++++ b/js/src/vm/Stack.h
+@@ -1047,29 +1047,37 @@ FillArgumentsFromArraylike(JSContext* cx
+         return false;
+ 
+     for (uint32_t i = 0; i < len; i++)
+         args[i].set(arraylike[i]);
+ 
+     return true;
+ }
+ 
++} // namespace js
++
++namespace mozilla {
++
+ template <>
+-struct DefaultHasher<AbstractFramePtr> {
+-    typedef AbstractFramePtr Lookup;
++struct DefaultHasher<js::AbstractFramePtr> {
++    typedef js::AbstractFramePtr Lookup;
+ 
+     static js::HashNumber hash(const Lookup& key) {
+         return mozilla::HashGeneric(key.raw());
+     }
+ 
+-    static bool match(const AbstractFramePtr& k, const Lookup& l) {
++    static bool match(const js::AbstractFramePtr& k, const Lookup& l) {
+         return k == l;
+     }
+ };
+ 
++} // namespace mozilla
++
++namespace js {
++
+ /*****************************************************************************/
+ 
+ // SavedFrame caching to minimize stack walking.
+ //
+ // Since each SavedFrame object includes a 'parent' pointer to the SavedFrame
+ // for its caller, if we could easily find the right SavedFrame for a given
+ // stack frame, we wouldn't need to walk the rest of the stack. Traversing deep
+ // stacks can be expensive, and when we're profiling or instrumenting code, we
+diff --git a/js/src/vm/UbiNodeCensus.cpp b/js/src/vm/UbiNodeCensus.cpp
+--- a/js/src/vm/UbiNodeCensus.cpp
++++ b/js/src/vm/UbiNodeCensus.cpp
+@@ -326,17 +326,18 @@ static int compareEntries(const void* lh
+     if (lhs < rhs)
+         return 1;
+     if (lhs > rhs)
+         return -1;
+     return 0;
+ }
+ 
+ // A hash map mapping from C strings to counts.
+-using CStringCountMap = HashMap<const char*, CountBasePtr, CStringHasher, SystemAllocPolicy>;
++using CStringCountMap =
++    HashMap<const char*, CountBasePtr, mozilla::CStringHasher, SystemAllocPolicy>;
+ 
+ // Convert a HashMap into an object with each key one of the entries from the
+ // map and each value the associated count's report. For use during census
+ // reporting.
+ //
+ // `Map` must be a `HashMap` from some key type to a `CountBasePtr`.
+ //
+ // `GetName` must be a callable type which takes `const Map::Key&` and returns
+@@ -727,17 +728,17 @@ ByAllocationStack::count(CountBase& coun
+ 
+ bool
+ ByAllocationStack::report(JSContext* cx, CountBase& countBase, MutableHandleValue report)
+ {
+     Count& count = static_cast<Count&>(countBase);
+ 
+ #ifdef DEBUG
+     // Check that nothing rehashes our table while we hold pointers into it.
+-    Generation generation = count.table.generation();
++    mozilla::Generation generation = count.table.generation();
+ #endif
+ 
+     // Build a vector of pointers to entries; sort by total; and then use
+     // that to build the result object. This makes the ordering of entries
+     // more interesting, and a little less non-deterministic.
+     JS::ubi::Vector<Entry*> entries;
+     if (!entries.reserve(count.table.count()))
+         return false;
+@@ -789,21 +790,21 @@ ByAllocationStack::report(JSContext* cx,
+ // A count type that categorizes nodes by their script's filename.
+ class ByFilename : public CountType {
+     using UniqueCString = JS::UniqueChars;
+ 
+     struct UniqueCStringHasher {
+         using Lookup = UniqueCString;
+ 
+         static js::HashNumber hash(const Lookup& lookup) {
+-            return CStringHasher::hash(lookup.get());
++            return mozilla::CStringHasher::hash(lookup.get());
+         }
+ 
+         static bool match(const UniqueCString& key, const Lookup& lookup) {
+-            return CStringHasher::match(key.get(), lookup.get());
++            return mozilla::CStringHasher::match(key.get(), lookup.get());
+         }
+     };
+ 
+     // A table mapping filenames to their counts. Note that we treat scripts
+     // with the same filename as equivalent. If you have several sources with
+     // the same filename, then all their scripts will get bucketed together.
+     using Table = HashMap<UniqueCString, CountBasePtr, UniqueCStringHasher,
+                           SystemAllocPolicy>;
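+ With the move, UbiNodeCensus.cpp has to spell the policy mozilla::CStringHasher.
+ A small usage sketch of a count map keyed by C strings under the post-move
+ names (bump is an illustrative name; the table stores the raw pointers, so the
+ strings must outlive the map, and init() must have succeeded first):
+ 
+     using CountMap = mozilla::HashMap<const char*, uint32_t,
+                                       mozilla::CStringHasher>;  // MallocAllocPolicy
+ 
+     bool bump(CountMap& map, const char* name) {
+         auto p = map.lookupForAdd(name);    // one hash/probe for find-or-insert
+         if (p) {
+             p->value()++;                   // HashMapEntry exposes value()
+             return true;
+         }
+         return map.add(p, name, 1);         // false on OOM
+     }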
+diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
+--- a/js/src/wasm/WasmValidate.cpp
++++ b/js/src/wasm/WasmValidate.cpp
+@@ -1735,17 +1735,17 @@ DecodeGlobalSection(Decoder& d, ModuleEn
+             return false;
+ 
+         env->globals.infallibleAppend(GlobalDesc(initializer, isMutable));
+     }
+ 
+     return d.finishSection(*range, "global");
+ }
+ 
+-typedef HashSet<const char*, CStringHasher, SystemAllocPolicy> CStringSet;
++typedef HashSet<const char*, mozilla::CStringHasher, SystemAllocPolicy> CStringSet;
+ 
+ static UniqueChars
+ DecodeExportName(Decoder& d, CStringSet* dupSet)
+ {
+     UniqueChars exportName = DecodeName(d);
+     if (!exportName) {
+         d.fail("expected valid export name");
+         return nullptr;
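+ DecodeExportName uses the CStringSet above to reject duplicate export names.
+ A sketch of that duplicate check with the post-move types (noteExportName is
+ an illustrative name; as above, the set holds raw pointers it does not own):
+ 
+     using CStringSet = mozilla::HashSet<const char*, mozilla::CStringHasher>;
+ 
+     // Returns false on a duplicate name or OOM; assumes set.init() succeeded.
+     bool noteExportName(CStringSet& set, const char* name) {
+         auto p = set.lookupForAdd(name);
+         if (p)
+             return false;                   // duplicate
+         return set.add(p, name);            // false on OOM
+     }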
+diff --git a/js/public/HashTable.h b/mfbt/HashTable.h
+copy from js/public/HashTable.h
+copy to mfbt/HashTable.h
+--- a/js/public/HashTable.h
++++ b/mfbt/HashTable.h
+@@ -1,39 +1,34 @@
+ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+  * vim: set ts=8 sts=4 et sw=4 tw=99:
+  * This Source Code Form is subject to the terms of the Mozilla Public
+  * License, v. 2.0. If a copy of the MPL was not distributed with this
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+-#ifndef js_HashTable_h
+-#define js_HashTable_h
++#ifndef mozilla_HashTable_h
++#define mozilla_HashTable_h
+ 
++#include "mozilla/AllocPolicy.h"
+ #include "mozilla/Assertions.h"
+ #include "mozilla/Attributes.h"
+ #include "mozilla/Casting.h"
+ #include "mozilla/HashFunctions.h"
+ #include "mozilla/MathAlgorithms.h"
+ #include "mozilla/MemoryChecking.h"
+ #include "mozilla/MemoryReporting.h"
+ #include "mozilla/Move.h"
+ #include "mozilla/Opaque.h"
+ #include "mozilla/PodOperations.h"
+ #include "mozilla/ReentrancyGuard.h"
+ #include "mozilla/TypeTraits.h"
+ #include "mozilla/UniquePtr.h"
+ 
+-#include "js/Utility.h"
+-
+-namespace js {
++namespace mozilla {
+ 
+-using HashNumber = mozilla::HashNumber;
+-static const uint32_t kHashNumberBits = mozilla::kHashNumberBits;
+-
+-class TempAllocPolicy;
+ template <class> struct DefaultHasher;
+ template <class, class> class HashMapEntry;
+ namespace detail {
+     template <typename T> class HashTableEntry;
+     template <class T, class HashPolicy, class AllocPolicy> class HashTable;
+ } // namespace detail
+ 
+ /*****************************************************************************/
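+ This file copy is the heart of the patch: js/public/HashTable.h becomes
+ mfbt/HashTable.h, js:: names become mozilla::, the JS-only TempAllocPolicy
+ default gives way to MallocAllocPolicy, and JS_DEBUG guards become plain DEBUG.
+ After the move, Gecko code outside SpiderMonkey can use the table roughly like
+ this (a sketch, assuming the header is exported as "mozilla/HashTable.h"):
+ 
+     #include <cstdint>
+     #include "mozilla/HashTable.h"
+ 
+     bool example() {
+         mozilla::HashSet<uint32_t> seen;  // DefaultHasher + MallocAllocPolicy
+         if (!seen.init(32))               // fallible; must precede any use
+             return false;
+         if (!seen.put(42))                // false on OOM
+             return false;
+         return seen.has(42);              // true
+     }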
+@@ -43,19 +38,19 @@ namespace detail {
+ // a hash table compares equal at times T1 and T2, then lookups in the hash
+ // table, pointers to (or into) hash table entries, etc. at time T1 are valid
+ // at time T2.  If the generation compares unequal, these computations are all
+ // invalid and must be performed again to be used.
+ //
+ // Generations are meaningfully comparable only with respect to a single hash
+ // table.  It's always nonsensical to compare the generation of distinct hash
+ // tables H1 and H2.
+-using Generation = mozilla::Opaque<uint64_t>;
++using Generation = Opaque<uint64_t>;
+ 
+-// A JS-friendly, STL-like container providing a hash-based map from keys to
++// A performant, STL-like container providing a hash-based map from keys to
+ // values. In particular, HashMap calls constructors and destructors of all
+ // objects added so non-PODs may be used safely.
+ //
+ // Key/Value requirements:
+ //  - movable, destructible, assignable
+ // HashPolicy requirements:
+ //  - see Hash Policy section below
+ // AllocPolicy:
+@@ -63,17 +58,17 @@ using Generation = mozilla::Opaque<uint6
+ //
+ // Note:
+ // - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
+ //   called by HashMap must not call back into the same HashMap object.
+ // - Due to the lack of exception handling, the user must call |init()|.
+ template <class Key,
+           class Value,
+           class HashPolicy = DefaultHasher<Key>,
+-          class AllocPolicy = TempAllocPolicy>
++          class AllocPolicy = MallocAllocPolicy>
+ class HashMap
+ {
+     typedef HashMapEntry<Key, Value> TableEntry;
+ 
+     struct MapHashPolicy : HashPolicy
+     {
+         using Base = HashPolicy;
+         typedef Key KeyType;
+@@ -217,20 +212,20 @@ class HashMap
+     uint32_t count() const                            { return impl.count(); }
+ 
+     // Total number of allocations in the dynamic table. Note: resize will
+     // happen well before count() == capacity().
+     size_t capacity() const                           { return impl.capacity(); }
+ 
+     // Don't just call |impl.sizeOfExcludingThis()| because there's no
+     // guarantee that |impl| is the first field in HashMap.
+-    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
++    size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+         return impl.sizeOfExcludingThis(mallocSizeOf);
+     }
+-    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
++    size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const {
+         return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+     }
+ 
+     Generation generation() const {
+         return impl.generation();
+     }
+ 
+     /************************************************** Shorthand operations */
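+ The generation() accessor above is what makes cached lookups safe to reuse:
+ equal generations guarantee that a Ptr taken earlier is still valid. A sketch
+ of the defensive pattern; maybeMutateElsewhere() is a hypothetical call that
+ may add, remove, or trigger a rehash:
+ 
+     void touch(mozilla::HashMap<uint32_t, uint32_t>& map, uint32_t key) {
+         mozilla::Generation before = map.generation();
+         auto p = map.lookup(key);
+         maybeMutateElsewhere(map);
+         if (map.generation() != before)
+             p = map.lookup(key);          // stale Ptr: redo the lookup
+         if (p)
+             p->value()++;
+     }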
+@@ -307,34 +302,34 @@ class HashMap
+     HashMap(const HashMap& hm) = delete;
+     HashMap& operator=(const HashMap& hm) = delete;
+ 
+     friend class Impl::Enum;
+ };
+ 
+ /*****************************************************************************/
+ 
+-// A JS-friendly, STL-like container providing a hash-based set of values. In
++// A performant, STL-like container providing a hash-based set of values. In
+ // particular, HashSet calls constructors and destructors of all objects added
+ // so non-PODs may be used safely.
+ //
+ // T requirements:
+ //  - movable, destructible, assignable
+ // HashPolicy requirements:
+ //  - see Hash Policy section below
+ // AllocPolicy:
+ //  - see AllocPolicy.h
+ //
+ // Note:
+ // - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
+ //   HashSet must not call back into the same HashSet object.
+ // - Due to the lack of exception handling, the user must call |init()|.
+ template <class T,
+           class HashPolicy = DefaultHasher<T>,
+-          class AllocPolicy = TempAllocPolicy>
++          class AllocPolicy = MallocAllocPolicy>
+ class HashSet
+ {
+     struct SetOps : HashPolicy
+     {
+         using Base = HashPolicy;
+         typedef T KeyType;
+         static const KeyType& getKey(const T& t) { return t; }
+         static void setKey(T& t, KeyType& k) { HashPolicy::rekey(t, k); }
+@@ -464,20 +459,20 @@ class HashSet
+     uint32_t count() const                            { return impl.count(); }
+ 
+     // Total number of allocations in the dynamic table. Note: resize will
+     // happen well before count() == capacity().
+     size_t capacity() const                           { return impl.capacity(); }
+ 
+     // Don't just call |impl.sizeOfExcludingThis()| because there's no
+     // guarantee that |impl| is the first field in HashSet.
+-    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
++    size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+         return impl.sizeOfExcludingThis(mallocSizeOf);
+     }
+-    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
++    size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const {
+         return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+     }
+ 
+     Generation generation() const {
+         return impl.generation();
+     }
+ 
+     /************************************************** Shorthand operations */
+@@ -561,45 +556,45 @@ class HashSet
+ /*****************************************************************************/
+ 
+ // Hash Policy
+ //
+ // A hash policy P for a hash table with key-type Key must provide:
+ //  - a type |P::Lookup| to use to lookup table entries;
+ //  - a static member function |P::hash| with signature
+ //
+-//      static js::HashNumber hash(Lookup)
++//      static mozilla::HashNumber hash(Lookup)
+ //
+ //    to use to hash the lookup type; and
+ //  - a static member function |P::match| with signature
+ //
+ //      static bool match(Key, Lookup)
+ //
+ //    to use to test equality of key and lookup values.
+ //
+ // Normally, Lookup = Key. In general, though, different values and types of
+ // values can be used to look up and store. If a Lookup value |l| is not equal
+ // to the added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
+ //
+-//   js::HashSet<Key, P>::AddPtr p = h.lookup(l);
++//   mozilla::HashSet<Key, P>::AddPtr p = h.lookupForAdd(l);
+ //   if (!p) {
+ //     assert(P::match(k, l));  // must hold
+ //     h.add(p, k);
+ //   }
+ 
+ // Pointer hashing policy that uses HashGeneric() to create good hashes for
+ // pointers.  Note that we don't shift out the lowest k bits to generate a
+ // good distribution for arena allocated pointers.
+ template <typename Key>
+ struct PointerHasher
+ {
+     typedef Key Lookup;
+     static HashNumber hash(const Lookup& l) {
+         size_t word = reinterpret_cast<size_t>(l);
+-        return mozilla::HashGeneric(word);
++        return HashGeneric(word);
+     }
+     static bool match(const Key& k, const Lookup& l) {
+         return k == l;
+     }
+     static void rekey(Key& k, const Key& newKey) {
+         k = newKey;
+     }
+ };
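+ The Hash Policy contract above deliberately allows Lookup and Key to differ,
+ as long as hash() and match() agree across the two types. A sketch of a policy
+ that stores owned strings but looks them up by raw const char*, so lookups
+ never allocate (OwnedName and OwnedNamePolicy are illustrative names):
+ 
+     #include <cstring>
+     #include <string>
+     #include "mozilla/HashFunctions.h"
+ 
+     struct OwnedName { std::string chars; };
+ 
+     struct OwnedNamePolicy {
+         using Lookup = const char*;
+         static mozilla::HashNumber hash(Lookup l) { return mozilla::HashString(l); }
+         static bool match(const OwnedName& k, Lookup l) {
+             return std::strcmp(k.chars.c_str(), l) == 0;
+         }
+     };
+     // Usable as mozilla::HashSet<OwnedName, OwnedNamePolicy>: the same char*
+     // hashes identically whether it names a stored entry or a candidate key.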
+@@ -629,68 +624,68 @@ struct DefaultHasher
+ // at least word-aligned. For types with smaller size use PointerHasher.
+ template <class T>
+ struct DefaultHasher<T*> : PointerHasher<T*>
+ {};
+ 
+ // Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's
+ // raw pointer to PointerHasher.
+ template <class T, class D>
+-struct DefaultHasher<mozilla::UniquePtr<T, D>>
++struct DefaultHasher<UniquePtr<T, D>>
+ {
+-    using Lookup = mozilla::UniquePtr<T, D>;
++    using Lookup = UniquePtr<T, D>;
+     using PtrHasher = PointerHasher<T*>;
+ 
+     static HashNumber hash(const Lookup& l) {
+         return PtrHasher::hash(l.get());
+     }
+-    static bool match(const mozilla::UniquePtr<T, D>& k, const Lookup& l) {
++    static bool match(const UniquePtr<T, D>& k, const Lookup& l) {
+         return PtrHasher::match(k.get(), l.get());
+     }
+-    static void rekey(mozilla::UniquePtr<T, D>& k, mozilla::UniquePtr<T, D>&& newKey) {
++    static void rekey(UniquePtr<T, D>& k, UniquePtr<T, D>&& newKey) {
+         k = std::move(newKey);
+     }
+ };
+ 
+ // For doubles, we can xor the two uint32s.
+ template <>
+ struct DefaultHasher<double>
+ {
+     typedef double Lookup;
+     static HashNumber hash(double d) {
+         static_assert(sizeof(HashNumber) == 4,
+                       "subsequent code assumes a four-byte hash");
+-        uint64_t u = mozilla::BitwiseCast<uint64_t>(d);
++        uint64_t u = BitwiseCast<uint64_t>(d);
+         return HashNumber(u ^ (u >> 32));
+     }
+     static bool match(double lhs, double rhs) {
+-        return mozilla::BitwiseCast<uint64_t>(lhs) == mozilla::BitwiseCast<uint64_t>(rhs);
++        return BitwiseCast<uint64_t>(lhs) == BitwiseCast<uint64_t>(rhs);
+     }
+ };
+ 
+ template <>
+ struct DefaultHasher<float>
+ {
+     typedef float Lookup;
+     static HashNumber hash(float f) {
+         static_assert(sizeof(HashNumber) == 4,
+                       "subsequent code assumes a four-byte hash");
+-        return HashNumber(mozilla::BitwiseCast<uint32_t>(f));
++        return HashNumber(BitwiseCast<uint32_t>(f));
+     }
+     static bool match(float lhs, float rhs) {
+-        return mozilla::BitwiseCast<uint32_t>(lhs) == mozilla::BitwiseCast<uint32_t>(rhs);
++        return BitwiseCast<uint32_t>(lhs) == BitwiseCast<uint32_t>(rhs);
+     }
+ };
+ 
+ // A hash policy that compares C strings.
+ struct CStringHasher
+ {
+     typedef const char* Lookup;
+-    static js::HashNumber hash(Lookup l) {
+-        return mozilla::HashString(l);
++    static HashNumber hash(Lookup l) {
++        return HashString(l);
+     }
+     static bool match(const char* key, Lookup lookup) {
+         return strcmp(key, lookup) == 0;
+     }
+ };
+ 
+ // Fallible hashing interface.
+ //
+@@ -766,39 +761,31 @@ class HashMapEntry
+     const Value& value() const { return value_; }
+     Value& value() { return value_; }
+ 
+   private:
+     HashMapEntry(const HashMapEntry&) = delete;
+     void operator=(const HashMapEntry&) = delete;
+ };
+ 
+-} // namespace js
+-
+-namespace mozilla {
+-
+ template <typename K, typename V>
+-struct IsPod<js::HashMapEntry<K, V> >
++struct IsPod<HashMapEntry<K, V> >
+   : IntegralConstant<bool, IsPod<K>::value && IsPod<V>::value>
+ {};
+ 
+-} // namespace mozilla
+-
+-namespace js {
+-
+ namespace detail {
+ 
+ template <class T, class HashPolicy, class AllocPolicy>
+ class HashTable;
+ 
+ template <typename T>
+ class HashTableEntry
+ {
+   private:
+-    using NonConstT = typename mozilla::RemoveConst<T>::Type;
++    using NonConstT = typename RemoveConst<T>::Type;
+ 
+     static const HashNumber sFreeKey = 0;
+     static const HashNumber sRemovedKey = 1;
+     static const HashNumber sCollisionBit = 1;
+ 
+     HashNumber keyHash = sFreeKey;
+     alignas(NonConstT) unsigned char valueData_[sizeof(NonConstT)];
+ 
+@@ -842,22 +829,22 @@ class HashTableEntry
+         destroyStoredT();
+     }
+ 
+     void swap(HashTableEntry* other) {
+         if (this == other)
+             return;
+         MOZ_ASSERT(isLive());
+         if (other->isLive()) {
+-            mozilla::Swap(*valuePtr(), *other->valuePtr());
++            Swap(*valuePtr(), *other->valuePtr());
+         } else {
+             *other->valuePtr() = std::move(*valuePtr());
+             destroy();
+         }
+-        mozilla::Swap(keyHash, other->keyHash);
++        Swap(keyHash, other->keyHash);
+     }
+ 
+     T& get() {
+         MOZ_ASSERT(isLive());
+         return *valuePtr();
+     }
+ 
+     NonConstT& getMutable() {
+@@ -928,114 +915,114 @@ class HashTableEntry
+     }
+ };
+ 
+ template <class T, class HashPolicy, class AllocPolicy>
+ class HashTable : private AllocPolicy
+ {
+     friend class mozilla::ReentrancyGuard;
+ 
+-    typedef typename mozilla::RemoveConst<T>::Type NonConstT;
++    typedef typename RemoveConst<T>::Type NonConstT;
+     typedef typename HashPolicy::KeyType Key;
+     typedef typename HashPolicy::Lookup Lookup;
+ 
+   public:
+     using Entry = HashTableEntry<T>;
+ 
+     // A nullable pointer to a hash table element. A Ptr |p| can be tested
+     // either explicitly |if (p.found()) p->...| or using boolean conversion
+     // |if (p) p->...|. Ptr objects must not be used after any mutating hash
+     // table operations unless |generation()| is tested.
+     class Ptr
+     {
+         friend class HashTable;
+ 
+         Entry* entry_;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         const HashTable* table_;
+         Generation generation;
+ #endif
+ 
+       protected:
+         Ptr(Entry& entry, const HashTable& tableArg)
+           : entry_(&entry)
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+           , table_(&tableArg)
+           , generation(tableArg.generation())
+ #endif
+         {}
+ 
+       public:
+         Ptr()
+           : entry_(nullptr)
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+           , table_(nullptr)
+           , generation(0)
+ #endif
+         {}
+ 
+         bool isValid() const {
+             return !!entry_;
+         }
+ 
+         bool found() const {
+             if (!isValid())
+                 return false;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             MOZ_ASSERT(generation == table_->generation());
+ #endif
+             return entry_->isLive();
+         }
+ 
+         explicit operator bool() const {
+             return found();
+         }
+ 
+         bool operator==(const Ptr& rhs) const {
+             MOZ_ASSERT(found() && rhs.found());
+             return entry_ == rhs.entry_;
+         }
+ 
+         bool operator!=(const Ptr& rhs) const {
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             MOZ_ASSERT(generation == table_->generation());
+ #endif
+             return !(*this == rhs);
+         }
+ 
+         T& operator*() const {
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             MOZ_ASSERT(found());
+             MOZ_ASSERT(generation == table_->generation());
+ #endif
+             return entry_->get();
+         }
+ 
+         T* operator->() const {
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             MOZ_ASSERT(found());
+             MOZ_ASSERT(generation == table_->generation());
+ #endif
+             return &entry_->get();
+         }
+     };
+ 
+     // A Ptr that can be used to add a key after a failed lookup.
+     class AddPtr : public Ptr
+     {
+         friend class HashTable;
+         HashNumber keyHash;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         uint64_t mutationCount;
+ #endif
+ 
+         AddPtr(Entry& entry, const HashTable& tableArg, HashNumber hn)
+           : Ptr(entry, tableArg)
+           , keyHash(hn)
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+           , mutationCount(tableArg.mutationCount)
+ #endif
+         {}
+ 
+       public:
+         AddPtr() : keyHash(0) {}
+     };
+ 
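+ AddPtr above carries keyHash (and, in debug builds, mutationCount) precisely
+ so that relookupOrAdd() further down can revalidate it after fallible work
+ that might have mutated the table between lookupForAdd() and add(). A sketch
+ of that idiom; copyIntoArena() is a hypothetical fallible step that may
+ indirectly touch the table:
+ 
+     bool internName(mozilla::HashSet<const char*, mozilla::CStringHasher>& set,
+                     const char* name) {
+         auto p = set.lookupForAdd(name);
+         if (p)
+             return true;                      // already present
+         if (!copyIntoArena(name))
+             return false;
+         return set.relookupOrAdd(p, name, name);  // revalidates p, then adds
+     }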
+@@ -1046,75 +1033,75 @@ class HashTable : private AllocPolicy
+     class Range
+     {
+       protected:
+         friend class HashTable;
+ 
+         Range(const HashTable& tableArg, Entry* c, Entry* e)
+           : cur(c)
+           , end(e)
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+           , table_(&tableArg)
+           , mutationCount(tableArg.mutationCount)
+           , generation(tableArg.generation())
+           , validEntry(true)
+ #endif
+         {
+             while (cur < end && !cur->isLive())
+                 ++cur;
+         }
+ 
+         Entry* cur;
+         Entry* end;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         const HashTable* table_;
+         uint64_t mutationCount;
+         Generation generation;
+         bool validEntry;
+ #endif
+ 
+       public:
+         Range()
+           : cur(nullptr)
+           , end(nullptr)
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+           , table_(nullptr)
+           , mutationCount(0)
+           , generation(0)
+           , validEntry(false)
+ #endif
+         {}
+ 
+         bool empty() const {
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             MOZ_ASSERT(generation == table_->generation());
+             MOZ_ASSERT(mutationCount == table_->mutationCount);
+ #endif
+             return cur == end;
+         }
+ 
+         T& front() const {
+             MOZ_ASSERT(!empty());
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             MOZ_ASSERT(validEntry);
+             MOZ_ASSERT(generation == table_->generation());
+             MOZ_ASSERT(mutationCount == table_->mutationCount);
+ #endif
+             return cur->get();
+         }
+ 
+         void popFront() {
+             MOZ_ASSERT(!empty());
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             MOZ_ASSERT(generation == table_->generation());
+             MOZ_ASSERT(mutationCount == table_->mutationCount);
+ #endif
+             while (++cur < end && !cur->isLive())
+                 continue;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             validEntry = true;
+ #endif
+         }
+     };
+ 
+     // A Range whose lifetime delimits a mutating enumeration of a hash table.
+     // Since rehashing when elements were removed during enumeration would be
+     // bad, it is postponed until the Enum is destructed.  Since the Enum's
+@@ -1149,41 +1136,41 @@ class HashTable : private AllocPolicy
+         //
+         //   HashSet<int> s;
+         //   for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
+         //     if (e.front() == 42)
+         //       e.removeFront();
+         void removeFront() {
+             table_.remove(*this->cur);
+             removed = true;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             this->validEntry = false;
+             this->mutationCount = table_.mutationCount;
+ #endif
+         }
+ 
+         NonConstT& mutableFront() {
+             MOZ_ASSERT(!this->empty());
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             MOZ_ASSERT(this->validEntry);
+             MOZ_ASSERT(this->generation == this->Range::table_->generation());
+             MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount);
+ #endif
+             return this->cur->getMutable();
+         }
+ 
+         // Removes the |front()| element and re-inserts it into the table with
+         // a new key at the new Lookup position.  |front()| is invalid after
+         // this operation until the next call to |popFront()|.
+         void rekeyFront(const Lookup& l, const Key& k) {
+             MOZ_ASSERT(&k != &HashPolicy::getKey(this->cur->get()));
+             Ptr p(*this->cur, table_);
+             table_.rekeyWithoutRehash(p, l, k);
+             rekeyed = true;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+             this->validEntry = false;
+             this->mutationCount = table_.mutationCount;
+ #endif
+         }
+ 
+         void rekeyFront(const Key& k) {
+             rekeyFront(k, k);
+         }
+@@ -1199,24 +1186,24 @@ class HashTable : private AllocPolicy
+                 table_.compactIfUnderloaded();
+         }
+     };
+ 
+     // HashTable is movable
+     HashTable(HashTable&& rhs)
+       : AllocPolicy(rhs)
+     {
+-        mozilla::PodAssign(this, &rhs);
++        PodAssign(this, &rhs);
+         rhs.table = nullptr;
+     }
+     void operator=(HashTable&& rhs) {
+         MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+         if (table)
+             destroyTable(*this, table, capacity());
+-        mozilla::PodAssign(this, &rhs);
++        PodAssign(this, &rhs);
+         rhs.table = nullptr;
+     }
+ 
+   private:
+     // HashTable is not copyable or assignable
+     HashTable(const HashTable&) = delete;
+     void operator=(const HashTable&) = delete;
+ 
+@@ -1225,17 +1212,17 @@ class HashTable : private AllocPolicy
+ 
+   public:
+     uint64_t    gen:56;                 // entry storage generation number
+     uint64_t    hashShift:8;            // multiplicative hash shift
+     Entry*      table;                  // entry storage
+     uint32_t    entryCount;             // number of entries in table
+     uint32_t    removedCount;           // removed entry sentinels in table
+ 
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+     uint64_t     mutationCount;
+     mutable bool mEntered;
+     // Note that some updates to these stats are not thread-safe. See the
+     // comment on the three-argument overloading of HashTable::lookup().
+     mutable struct Stats
+     {
+         uint32_t        searches;       // total number of table searches
+         uint32_t        steps;          // hash chain links traversed
+@@ -1267,27 +1254,27 @@ class HashTable : private AllocPolicy
+     static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
+ 
+     static const HashNumber sFreeKey = Entry::sFreeKey;
+     static const HashNumber sRemovedKey = Entry::sRemovedKey;
+     static const HashNumber sCollisionBit = Entry::sCollisionBit;
+ 
+     void setTableSizeLog2(uint32_t sizeLog2)
+     {
+-        hashShift = js::kHashNumberBits - sizeLog2;
++        hashShift = kHashNumberBits - sizeLog2;
+     }
+ 
+     static bool isLiveHash(HashNumber hash)
+     {
+         return Entry::isLiveHash(hash);
+     }
+ 
+     static HashNumber prepareHash(const Lookup& l)
+     {
+-        HashNumber keyHash = mozilla::ScrambleHashCode(HashPolicy::hash(l));
++        HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
+ 
+         // Avoid reserved hash codes.
+         if (!isLiveHash(keyHash))
+             keyHash -= (sRemovedKey + 1);
+         return keyHash & ~sCollisionBit;
+     }
+ 
+     enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
+@@ -1322,21 +1309,21 @@ class HashTable : private AllocPolicy
+             e->~Entry();
+         alloc.free_(oldTable, capacity);
+     }
+ 
+   public:
+     explicit HashTable(AllocPolicy ap)
+       : AllocPolicy(ap)
+       , gen(0)
+-      , hashShift(js::kHashNumberBits)
++      , hashShift(kHashNumberBits)
+       , table(nullptr)
+       , entryCount(0)
+       , removedCount(0)
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+       , mutationCount(0)
+       , mEntered(false)
+ #endif
+     {}
+ 
+     MOZ_MUST_USE bool init(uint32_t length)
+     {
+         MOZ_ASSERT(!initialized());
+@@ -1398,17 +1385,17 @@ class HashTable : private AllocPolicy
+     struct DoubleHash
+     {
+         HashNumber h2;
+         HashNumber sizeMask;
+     };
+ 
+     DoubleHash hash2(HashNumber curKeyHash) const
+     {
+-        uint32_t sizeLog2 = js::kHashNumberBits - hashShift;
++        uint32_t sizeLog2 = kHashNumberBits - hashShift;
+         DoubleHash dh = {
+             ((curKeyHash << sizeLog2) >> hashShift) | 1,
+             (HashNumber(1) << sizeLog2) - 1
+         };
+         return dh;
+     }
+ 
+     static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh)
+@@ -1548,17 +1535,17 @@ class HashTable : private AllocPolicy
+ 
+     enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
+ 
+     RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure)
+     {
+         // Look, but don't touch, until we succeed in getting new entry store.
+         Entry* oldTable = table;
+         uint32_t oldCap = capacity();
+-        uint32_t newLog2 = js::kHashNumberBits - hashShift + deltaLog2;
++        uint32_t newLog2 = kHashNumberBits - hashShift + deltaLog2;
+         uint32_t newCapacity = 1u << newLog2;
+         if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
+             if (reportFailure)
+                 this->reportAllocOverflow();
+             return RehashFailed;
+         }
+ 
+         Entry* newTable = createTable(*this, newCapacity, reportFailure);
+@@ -1628,17 +1615,17 @@ class HashTable : private AllocPolicy
+         if (e.hasCollision()) {
+             e.removeLive();
+             removedCount++;
+         } else {
+             METER(stats.removeFrees++);
+             e.clearLive();
+         }
+         entryCount--;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         mutationCount++;
+ #endif
+     }
+ 
+     void checkUnderloaded()
+     {
+         if (underloaded()) {
+             METER(stats.shrinks++);
+@@ -1723,55 +1710,55 @@ class HashTable : private AllocPolicy
+         if (entry->isRemoved()) {
+             METER(stats.addOverRemoved++);
+             removedCount--;
+             keyHash |= sCollisionBit;
+         }
+ 
+         entry->setLive(keyHash, std::forward<Args>(args)...);
+         entryCount++;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         mutationCount++;
+ #endif
+     }
+ 
+   public:
+     void clear()
+     {
+         Entry* end = table + capacity();
+         for (Entry* e = table; e < end; ++e)
+             e->clear();
+ 
+         removedCount = 0;
+         entryCount = 0;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         mutationCount++;
+ #endif
+     }
+ 
+     void clearAndShrink()
+     {
+         clear();
+         compactIfUnderloaded();
+     }
+ 
+     void finish()
+     {
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         MOZ_ASSERT(!mEntered);
+ #endif
+         if (!table)
+             return;
+ 
+         destroyTable(*this, table, capacity());
+         table = nullptr;
+         gen++;
+         entryCount = 0;
+         removedCount = 0;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         mutationCount++;
+ #endif
+     }
+ 
+     Range all() const
+     {
+         MOZ_ASSERT(table);
+         return Range(*this, table, table + capacity());
+@@ -1787,79 +1774,79 @@ class HashTable : private AllocPolicy
+     {
+         MOZ_ASSERT(table);
+         return entryCount;
+     }
+ 
+     uint32_t capacity() const
+     {
+         MOZ_ASSERT(table);
+-        return 1u << (js::kHashNumberBits - hashShift);
++        return 1u << (kHashNumberBits - hashShift);
+     }
+ 
+     Generation generation() const
+     {
+         MOZ_ASSERT(table);
+         return Generation(gen);
+     }
+ 
+-    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
++    size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+     {
+         return mallocSizeOf(table);
+     }
+ 
+-    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
++    size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
+     {
+         return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+     }
+ 
+     MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const
+     {
+-        mozilla::ReentrancyGuard g(*this);
++        ReentrancyGuard g(*this);
+         if (!HasHash<HashPolicy>(l))
+             return Ptr();
+         HashNumber keyHash = prepareHash(l);
+         return Ptr(lookup(l, keyHash, 0), *this);
+     }
+ 
+     MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
+     {
+         if (!HasHash<HashPolicy>(l))
+             return Ptr();
+         HashNumber keyHash = prepareHash(l);
+         return Ptr(lookup(l, keyHash, 0), *this);
+     }
+ 
+     MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
+     {
+-        mozilla::ReentrancyGuard g(*this);
++        ReentrancyGuard g(*this);
+         if (!EnsureHash<HashPolicy>(l))
+             return AddPtr();
+         HashNumber keyHash = prepareHash(l);
+         // Directly call the constructor in the return statement to avoid
+         // excess copying when building with Visual Studio 2017.
+         // See bug 1385181.
+         return AddPtr(lookup(l, keyHash, sCollisionBit), *this, keyHash);
+     }
+ 
+     template <typename... Args>
+     MOZ_MUST_USE bool add(AddPtr& p, Args&&... args)
+     {
+-        mozilla::ReentrancyGuard g(*this);
++        ReentrancyGuard g(*this);
+         MOZ_ASSERT(table);
+         MOZ_ASSERT_IF(p.isValid(), p.table_ == this);
+         MOZ_ASSERT(!p.found());
+         MOZ_ASSERT(!(p.keyHash & sCollisionBit));
+ 
+         // Check for error from ensureHash() here.
+         if (!p.isValid())
+             return false;
+ 
+         MOZ_ASSERT(p.generation == generation());
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         MOZ_ASSERT(p.mutationCount == mutationCount);
+ #endif
+ 
+         // Changing an entry from removed to live does not affect whether we
+         // are overloaded and can be handled separately.
+         if (p.entry_->isRemoved()) {
+             if (!this->checkSimulatedOOM())
+                 return false;
+@@ -1874,31 +1861,31 @@ class HashTable : private AllocPolicy
+             if (status == NotOverloaded && !this->checkSimulatedOOM())
+                 return false;
+             if (status == Rehashed)
+                 p.entry_ = &findFreeEntry(p.keyHash);
+         }
+ 
+         p.entry_->setLive(p.keyHash, std::forward<Args>(args)...);
+         entryCount++;
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         mutationCount++;
+         p.generation = generation();
+         p.mutationCount = mutationCount;
+ #endif
+         return true;
+     }
+ 
+     // Note: |l| may be a reference to a piece of |u|, so this function
+     // must take care not to use |l| after moving |u|.
+     template <typename... Args>
+     void putNewInfallible(const Lookup& l, Args&&... args)
+     {
+         MOZ_ASSERT(!lookup(l).found());
+-        mozilla::ReentrancyGuard g(*this);
++        ReentrancyGuard g(*this);
+         putNewInfallibleInternal(l, std::forward<Args>(args)...);
+     }
+ 
+     // Note: |l| may be alias arguments in |args|, so this function must take
+     // care not to use |l| after moving |args|.
+     template <typename... Args>
+     MOZ_MUST_USE bool putNew(const Lookup& l, Args&&... args)
+     {
+@@ -1919,42 +1906,42 @@ class HashTable : private AllocPolicy
+     // must take care not to use |l| after moving |u|.
+     template <typename... Args>
+     MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args)
+     {
+         // Check for error from ensureHash() here.
+         if (!p.isValid())
+             return false;
+ 
+-#ifdef JS_DEBUG
++#ifdef DEBUG
+         p.generation = generation();
+         p.mutationCount = mutationCount;
+ #endif
+         {
+-            mozilla::ReentrancyGuard g(*this);
++            ReentrancyGuard g(*this);
+             MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
+             p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
+         }
+         return p.found() || add(p, std::forward<Args>(args)...);
+     }
+ 
+     void remove(Ptr p)
+     {
+         MOZ_ASSERT(table);
+-        mozilla::ReentrancyGuard g(*this);
++        ReentrancyGuard g(*this);
+         MOZ_ASSERT(p.found());
+         MOZ_ASSERT(p.generation == generation());
+         remove(*p.entry_);
+         checkUnderloaded();
+     }
+ 
+     void rekeyWithoutRehash(Ptr p, const Lookup& l, const Key& k)
+     {
+         MOZ_ASSERT(table);
+-        mozilla::ReentrancyGuard g(*this);
++        ReentrancyGuard g(*this);
+         MOZ_ASSERT(p.found());
+         MOZ_ASSERT(p.generation == generation());
+         typename HashTableEntry<T>::NonConstT t(std::move(*p));
+         HashPolicy::setKey(t, const_cast<Key&>(k));
+         remove(*p.entry_);
+         putNewInfallibleInternal(l, std::move(t));
+     }
+ 
+@@ -1963,11 +1950,11 @@ class HashTable : private AllocPolicy
+         rekeyWithoutRehash(p, l, k);
+         checkOverRemoved();
+     }
+ 
+ #undef METER
+ };
+ 
+ } // namespace detail
+-} // namespace js
++} // namespace mozilla
+ 
+-#endif  /* js_HashTable_h */
++#endif  /* mozilla_HashTable_h */
+diff --git a/mfbt/moz.build b/mfbt/moz.build
+--- a/mfbt/moz.build
++++ b/mfbt/moz.build
+@@ -39,16 +39,17 @@ EXPORTS.mozilla = [
+     'EnumeratedRange.h',
+     'EnumSet.h',
+     'EnumTypeTraits.h',
+     'FastBernoulliTrial.h',
+     'FloatingPoint.h',
+     'FStream.h',
+     'GuardObjects.h',
+     'HashFunctions.h',
++    'HashTable.h',
+     'IntegerPrintfMacros.h',
+     'IntegerRange.h',
+     'IntegerTypeTraits.h',
+     'JSONWriter.h',
+     'Likely.h',
+     'LinkedList.h',
+     'MacroArgs.h',
+     'MacroForEach.h',
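
For callers, the net effect of this part of the series is a header and namespace change: the table moves from js/public/HashTable.h to mfbt/HashTable.h and is exported as mozilla/HashTable.h. A minimal consumer-side sketch (hypothetical code; it assumes the era's fallible lookupForAdd()/add() API, the same one the Bench.cpp patch further down uses):

```cpp
#include "mozilla/HashTable.h"  // formerly "js/HashTable.h"

// Hypothetical helper: the API is unchanged by the move, only the
// namespace switches from js:: to mozilla::.
static bool
RememberPointer(mozilla::HashSet<void*>& aSeen, void* aPtr)
{
  // Two-step insert: a failed lookupForAdd() hands back the slot to add into.
  auto p = aSeen.lookupForAdd(aPtr);
  if (p) {
    return true;               // already present
  }
  return aSeen.add(p, aPtr);   // fallible; returns false on OOM
}
```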

+ 97 - 0
frg/work-js/mozilla-release/patches/1477626-7-63a1.patch

@@ -0,0 +1,97 @@
+# HG changeset patch
+# User Nicholas Nethercote <nnethercote@mozilla.com>
+# Date 1532600155 -36000
+#      Thu Jul 26 20:15:55 2018 +1000
+# Node ID a75c5a2a6bea6dfb924c04a350af97c7690e08b9
+# Parent  f856cee67cd205c0b096e8d1f3b5c6fa30dd3bfd
+Bug 1477626 - Document some differences between mozilla::HashTable and PLDHashTable. r=Waldo
+
+MozReview-Commit-ID: DB0KUy99DDM
+
+diff --git a/mfbt/HashTable.h b/mfbt/HashTable.h
+--- a/mfbt/HashTable.h
++++ b/mfbt/HashTable.h
+@@ -1,14 +1,41 @@
+ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+  * vim: set ts=8 sts=4 et sw=4 tw=99:
+  * This Source Code Form is subject to the terms of the Mozilla Public
+  * License, v. 2.0. If a copy of the MPL was not distributed with this
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
++// A note on the differences between mozilla::HashTable and PLDHashTable (and
++// its subclasses, such as nsTHashtable).
++//
++// - mozilla::HashTable is a lot faster, largely because it uses templates
++//   throughout *and* inlines everything. PLDHashTable inlines operations much
++//   less aggressively, and also uses "virtual ops" for operations like hashing
++//   and matching entries that require function calls.
++//
++// - Correspondingly, mozilla::HashTable use is likely to increase executable
++//   size much more than PLDHashTable.
++//
++// - mozilla::HashTable has a nicer API, with a proper HashSet vs. HashMap
++//   distinction.
++//
++// - mozilla::HashTable requires more explicit OOM checking. Use
++//   mozilla::InfallibleAllocPolicy to make allocations infallible; note that
++//   return values of possibly-allocating methods such as add() will still need
++//   checking in some fashion -- e.g. with MOZ_ALWAYS_TRUE() -- due to the use
++//   of MOZ_MUST_USE.
++//
++// - mozilla::HashTable has a default capacity on creation of 32 and a minimum
++//   capacity of 4. PLDHashTable has a default capacity on creation of 8 and a
++//   minimum capacity of 8.
++//
++// - mozilla::HashTable allocates memory eagerly. PLDHashTable delays
++//   allocating until the first element is inserted.
++
+ #ifndef mozilla_HashTable_h
+ #define mozilla_HashTable_h
+ 
+ #include "mozilla/AllocPolicy.h"
+ #include "mozilla/Assertions.h"
+ #include "mozilla/Attributes.h"
+ #include "mozilla/Casting.h"
+ #include "mozilla/HashFunctions.h"
+diff --git a/xpcom/ds/PLDHashTable.h b/xpcom/ds/PLDHashTable.h
+--- a/xpcom/ds/PLDHashTable.h
++++ b/xpcom/ds/PLDHashTable.h
+@@ -1,14 +1,17 @@
+ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+ /* vim: set ts=8 sts=2 et sw=2 tw=80: */
+ /* This Source Code Form is subject to the terms of the Mozilla Public
+  * License, v. 2.0. If a copy of the MPL was not distributed with this
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
++// See the comment at the top of mfbt/HashTable.h for a comparison between
++// PLDHashTable and mozilla::HashTable.
++
+ #ifndef PLDHashTable_h
+ #define PLDHashTable_h
+ 
+ #include "mozilla/Atomics.h"
+ #include "mozilla/Attributes.h" // for MOZ_ALWAYS_INLINE
+ #include "mozilla/fallible.h"
+ #include "mozilla/HashFunctions.h"
+ #include "mozilla/MemoryReporting.h"
+diff --git a/xpcom/ds/nsTHashtable.h b/xpcom/ds/nsTHashtable.h
+--- a/xpcom/ds/nsTHashtable.h
++++ b/xpcom/ds/nsTHashtable.h
+@@ -1,14 +1,17 @@
+ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+ /* vim: set ts=8 sts=2 et sw=2 tw=80: */
+ /* This Source Code Form is subject to the terms of the Mozilla Public
+  * License, v. 2.0. If a copy of the MPL was not distributed with this
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
++// See the comment at the top of mfbt/HashTable.h for a comparison between
++// PLDHashTable and mozilla::HashTable.
++
+ #ifndef nsTHashtable_h__
+ #define nsTHashtable_h__
+ 
+ #include "PLDHashTable.h"
+ #include "nsPointerHashKeys.h"
+ #include "mozilla/Assertions.h"
+ #include "mozilla/Attributes.h"
+ #include "mozilla/fallible.h"
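
The OOM-checking bullet in the new comment above is easiest to see in code. A sketch under stated assumptions (the era's fallible init(), and mozilla::InfallibleAllocPolicy as named in the comment):

```cpp
#include "mozilla/Assertions.h"
#include "mozilla/HashTable.h"

static void
OomCheckingSketch()
{
  // Default (fallible) policy: every allocating call must be checked.
  mozilla::HashSet<int> fallible;
  if (!fallible.init() || !fallible.putNew(42)) {
    return;  // OOM
  }

  // Infallible policy: allocations abort on OOM rather than fail, but
  // add()/putNew() stay MOZ_MUST_USE, hence MOZ_ALWAYS_TRUE().
  mozilla::HashSet<int, mozilla::DefaultHasher<int>,
                   mozilla::InfallibleAllocPolicy> infallible;
  MOZ_ALWAYS_TRUE(infallible.init());
  MOZ_ALWAYS_TRUE(infallible.putNew(42));
}
```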

+ 284 - 0
frg/work-js/mozilla-release/patches/1477626-8-63a1.patch

@@ -0,0 +1,284 @@
+# HG changeset patch
+# User Nicholas Nethercote <nnethercote@mozilla.com>
+# Date 1532600158 -36000
+#      Thu Jul 26 20:15:58 2018 +1000
+# Node ID 8d22622b5264e15e0f429c226b9f413f413ba298
+# Parent  a75c5a2a6bea6dfb924c04a350af97c7690e08b9
+Bug 1477626 - Use mozilla::HashTable instead of JS::HashTable in DMD. r=erahm
+
+Also use mozilla::HashNumber where appropriate.
+
+MozReview-Commit-ID: BTq0XDS5UfQ
+
+diff --git a/memory/replace/dmd/DMD.cpp b/memory/replace/dmd/DMD.cpp
+--- a/memory/replace/dmd/DMD.cpp
++++ b/memory/replace/dmd/DMD.cpp
+@@ -25,28 +25,27 @@
+ #include <unistd.h>
+ #endif
+ 
+ #ifdef ANDROID
+ #include <android/log.h>
+ #endif
+ 
+ #include "nscore.h"
+-#include "mozilla/StackWalk.h"
+-
+-#include "js/HashTable.h"
+-#include "js/Vector.h"
+ 
+ #include "mozilla/Assertions.h"
+ #include "mozilla/FastBernoulliTrial.h"
+ #include "mozilla/HashFunctions.h"
++#include "mozilla/HashTable.h"
+ #include "mozilla/IntegerPrintfMacros.h"
+ #include "mozilla/JSONWriter.h"
+ #include "mozilla/Likely.h"
+ #include "mozilla/MemoryReporting.h"
++#include "mozilla/StackWalk.h"
++#include "mozilla/Vector.h"
+ 
+ // CodeAddressService is defined entirely in the header, so this does not make
+ // DMD depend on XPCOM's object file.
+ #include "CodeAddressService.h"
+ 
+ // replace_malloc.h needs to be included before replace_malloc_bridge.h,
+ // which DMD.h includes, so DMD.h needs to be included after replace_malloc.h.
+ #include "replace_malloc.h"
+@@ -91,18 +90,18 @@ StatusMsg(const char* aFmt, ...)
+ 
+ static malloc_table_t gMallocTable;
+ 
+ // This provides infallible allocations (they abort on OOM).  We use it for all
+ // of DMD's own allocations, which fall into the following three cases.
+ //
+ // - Direct allocations (the easy case).
+ //
+-// - Indirect allocations in js::{Vector,HashSet,HashMap} -- this class serves
+-//   as their AllocPolicy.
++// - Indirect allocations in mozilla::{Vector,HashSet,HashMap} -- this class
++//   serves as their AllocPolicy.
+ //
+ // - Other indirect allocations (e.g. MozStackWalk) -- see the comments on
+ //   Thread::mBlockIntercepts and in replace_malloc for how these work.
+ //
+ // It would be nice if we could use the InfallibleAllocPolicy from mozalloc,
+ // but DMD cannot use mozalloc.
+ //
+ class InfallibleAllocPolicy
+@@ -157,25 +156,23 @@ public:
+   template <typename T>
+   static T* pod_calloc(size_t aNumElems)
+   {
+     T* p = maybe_pod_calloc<T>(aNumElems);
+     ExitOnFailure(p);
+     return p;
+   }
+ 
+-  // This realloc_ is the one we use for direct reallocs within DMD.
+   static void* realloc_(void* aPtr, size_t aNewSize)
+   {
+     void* p = gMallocTable.realloc(aPtr, aNewSize);
+     ExitOnFailure(p);
+     return p;
+   }
+ 
+-  // This realloc_ is required for this to be a JS container AllocPolicy.
+   template <typename T>
+   static T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize)
+   {
+     T* p = maybe_pod_realloc(aPtr, aOldSize, aNewSize);
+     ExitOnFailure(p);
+     return p;
+   }
+ 
+@@ -616,28 +613,29 @@ public:
+     return n;
+   }
+ 
+ private:
+   struct StringHasher
+   {
+       typedef const char* Lookup;
+ 
+-      static uint32_t hash(const char* const& aS)
++      static mozilla::HashNumber hash(const char* const& aS)
+       {
+           return HashString(aS);
+       }
+ 
+       static bool match(const char* const& aA, const char* const& aB)
+       {
+           return strcmp(aA, aB) == 0;
+       }
+   };
+ 
+-  typedef js::HashSet<const char*, StringHasher, InfallibleAllocPolicy> StringHashSet;
++  typedef mozilla::HashSet<const char*, StringHasher, InfallibleAllocPolicy>
++          StringHashSet;
+ 
+   StringHashSet mSet;
+ };
+ 
+ class StringAlloc
+ {
+ public:
+   static char* copy(const char* aString)
+@@ -688,17 +686,17 @@ public:
+   // The stack trace returned by this function is interned in gStackTraceTable,
+   // and so is immortal and unmovable.
+   static const StackTrace* Get(Thread* aT);
+ 
+   // Hash policy.
+ 
+   typedef StackTrace* Lookup;
+ 
+-  static uint32_t hash(const StackTrace* const& aSt)
++  static mozilla::HashNumber hash(const StackTrace* const& aSt)
+   {
+     return mozilla::HashBytes(aSt->mPcs, aSt->Size());
+   }
+ 
+   static bool match(const StackTrace* const& aA,
+                     const StackTrace* const& aB)
+   {
+     return aA->mLength == aB->mLength &&
+@@ -712,29 +710,31 @@ private:
+     StackTrace* st = (StackTrace*) aClosure;
+     MOZ_ASSERT(st->mLength < MaxFrames);
+     st->mPcs[st->mLength] = aPc;
+     st->mLength++;
+     MOZ_ASSERT(st->mLength == aFrameNumber);
+   }
+ };
+ 
+-typedef js::HashSet<StackTrace*, StackTrace, InfallibleAllocPolicy>
++typedef mozilla::HashSet<StackTrace*, StackTrace, InfallibleAllocPolicy>
+         StackTraceTable;
+ static StackTraceTable* gStackTraceTable = nullptr;
+ 
+-typedef js::HashSet<const StackTrace*, js::DefaultHasher<const StackTrace*>,
+-                    InfallibleAllocPolicy>
++typedef mozilla::HashSet<const StackTrace*,
++                         mozilla::DefaultHasher<const StackTrace*>,
++                         InfallibleAllocPolicy>
+         StackTraceSet;
+ 
+-typedef js::HashSet<const void*, js::DefaultHasher<const void*>,
+-                    InfallibleAllocPolicy>
++typedef mozilla::HashSet<const void*, mozilla::DefaultHasher<const void*>,
++                         InfallibleAllocPolicy>
+         PointerSet;
+-typedef js::HashMap<const void*, uint32_t, js::DefaultHasher<const void*>,
+-                    InfallibleAllocPolicy>
++typedef mozilla::HashMap<const void*, uint32_t,
++                         mozilla::DefaultHasher<const void*>,
++                         InfallibleAllocPolicy>
+         PointerIdMap;
+ 
+ // We won't GC the stack trace table until it this many elements.
+ static uint32_t gGCStackTraceTableWhenSizeExceeds = 4 * 1024;
+ 
+ /* static */ const StackTrace*
+ StackTrace::Get(Thread* aT)
+ {
+@@ -987,37 +987,38 @@ public:
+       mReportStackTrace_mReportedOnAlloc[1].Set(nullptr, 0);
+     }
+   }
+ 
+   // Hash policy.
+ 
+   typedef const void* Lookup;
+ 
+-  static uint32_t hash(const void* const& aPtr)
++  static mozilla::HashNumber hash(const void* const& aPtr)
+   {
+     return mozilla::HashGeneric(aPtr);
+   }
+ 
+   static bool match(const LiveBlock& aB, const void* const& aPtr)
+   {
+     return aB.mPtr == aPtr;
+   }
+ };
+ 
+ // A table of live blocks where the lookup key is the block address.
+-typedef js::HashSet<LiveBlock, LiveBlock, InfallibleAllocPolicy> LiveBlockTable;
++typedef mozilla::HashSet<LiveBlock, LiveBlock, InfallibleAllocPolicy>
++        LiveBlockTable;
+ static LiveBlockTable* gLiveBlockTable = nullptr;
+ 
+ class AggregatedLiveBlockHashPolicy
+ {
+ public:
+   typedef const LiveBlock* const Lookup;
+ 
+-  static uint32_t hash(const LiveBlock* const& aB)
++  static mozilla::HashNumber hash(const LiveBlock* const& aB)
+   {
+     return gOptions->IsDarkMatterMode()
+          ? mozilla::HashGeneric(aB->ReqSize(),
+                                 aB->SlopSize(),
+                                 aB->AllocStackTrace(),
+                                 aB->ReportedOnAlloc1(),
+                                 aB->ReportedOnAlloc2())
+          : mozilla::HashGeneric(aB->ReqSize(),
+@@ -1036,18 +1037,18 @@ public:
+          : aA->ReqSize() == aB->ReqSize() &&
+            aA->SlopSize() == aB->SlopSize() &&
+            aA->AllocStackTrace() == aB->AllocStackTrace();
+   }
+ };
+ 
+ // A table of live blocks where the lookup key is everything but the block
+ // address. For aggregating similar live blocks at output time.
+-typedef js::HashMap<const LiveBlock*, size_t, AggregatedLiveBlockHashPolicy,
+-                    InfallibleAllocPolicy>
++typedef mozilla::HashMap<const LiveBlock*, size_t,
++                         AggregatedLiveBlockHashPolicy, InfallibleAllocPolicy>
+         AggregatedLiveBlockTable;
+ 
+ // A freed heap block.
+ class DeadBlock
+ {
+   const size_t mReqSize;    // size requested
+   const size_t mSlopSize;   // slop above size requested
+ 
+@@ -1083,34 +1084,34 @@ public:
+       MOZ_ALWAYS_TRUE(aStackTraces.put(AllocStackTrace()));
+     }
+   }
+ 
+   // Hash policy.
+ 
+   typedef DeadBlock Lookup;
+ 
+-  static uint32_t hash(const DeadBlock& aB)
++  static mozilla::HashNumber hash(const DeadBlock& aB)
+   {
+     return mozilla::HashGeneric(aB.ReqSize(),
+                                 aB.SlopSize(),
+                                 aB.AllocStackTrace());
+   }
+ 
+   static bool match(const DeadBlock& aA, const DeadBlock& aB)
+   {
+     return aA.ReqSize() == aB.ReqSize() &&
+            aA.SlopSize() == aB.SlopSize() &&
+            aA.AllocStackTrace() == aB.AllocStackTrace();
+   }
+ };
+ 
+ // For each unique DeadBlock value we store a count of how many actual dead
+ // blocks have that value.
+-typedef js::HashMap<DeadBlock, size_t, DeadBlock, InfallibleAllocPolicy>
++typedef mozilla::HashMap<DeadBlock, size_t, DeadBlock, InfallibleAllocPolicy>
+         DeadBlockTable;
+ static DeadBlockTable* gDeadBlockTable = nullptr;
+ 
+ // Add the dead block to the dead block table, if that's appropriate.
+ void MaybeAddToDeadBlockTable(const DeadBlock& aDb)
+ {
+   if (gOptions->IsCumulativeMode() && aDb.AllocStackTrace()) {
+     AutoLockState lock;
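
The classes in this patch (StringHasher, StackTrace, LiveBlock, DeadBlock) all follow the same hash-policy shape: a Lookup typedef plus static hash() and match(). A trimmed-down sketch with a hypothetical key type:

```cpp
#include <stdint.h>

#include "mozilla/HashFunctions.h"
#include "mozilla/HashTable.h"

struct Point { int32_t x; int32_t y; };

// A hash policy needs only a Lookup typedef and static hash()/match().
struct PointHasher
{
  typedef Point Lookup;

  static mozilla::HashNumber hash(const Point& aP)
  {
    return mozilla::HashGeneric(aP.x, aP.y);
  }

  static bool match(const Point& aA, const Point& aB)
  {
    return aA.x == aB.x && aA.y == aB.y;
  }
};

typedef mozilla::HashSet<Point, PointHasher, mozilla::MallocAllocPolicy>
        PointSet;
```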

+ 58 - 0
frg/work-js/mozilla-release/patches/1477632-63a1.patch

@@ -0,0 +1,58 @@
+# HG changeset patch
+# User Nicholas Nethercote <nnethercote@mozilla.com>
+# Date 1532394575 -36000
+# Node ID 5da2166fd301cdfe782fa8a778454def0dc03c17
+# Parent  a50f2ebade398292084120cec1d6cec00f9c2dd3
+Bug 1477632 - Always inline PLDHashTable::SearchTable(). r=froydnj
+
+This speeds up BenchCollections.PLDHash as follows. Before:
+
+>     succ_lookups      fail_lookups     insert_remove           iterate
+>          42.8 ms           51.6 ms           21.0 ms           34.9 ms
+>          41.8 ms           51.9 ms           20.0 ms           34.6 ms
+>          41.6 ms           51.3 ms           19.3 ms           34.5 ms
+>          41.6 ms           51.3 ms           19.8 ms           35.0 ms
+>          41.7 ms           50.8 ms           20.5 ms           35.2 ms
+
+After:
+
+>     succ_lookups      fail_lookups     insert_remove           iterate
+>          37.7 ms           33.1 ms           19.7 ms           35.0 ms
+>          37.0 ms           32.5 ms           19.1 ms           35.4 ms
+>          37.6 ms           33.6 ms           19.2 ms           36.2 ms
+>          36.7 ms           33.3 ms           19.1 ms           35.3 ms
+>          37.1 ms           33.1 ms           19.1 ms           35.0 ms
+
+Successful lookups are about 1.13x faster, failing lookups are about 1.54x
+faster, and insertions/removals are about 1.05x faster.
+
+On Linux64, this increases the size of libxul (as measured by `size`) by a mere
+16 bytes.
+
+diff --git a/xpcom/ds/PLDHashTable.cpp b/xpcom/ds/PLDHashTable.cpp
+--- a/xpcom/ds/PLDHashTable.cpp
++++ b/xpcom/ds/PLDHashTable.cpp
+@@ -358,19 +358,20 @@ PLDHashTable::Clear()
+ {
+   ClearAndPrepareForLength(kDefaultInitialLength);
+ }
+ 
+ // If |Reason| is |ForAdd|, the return value is always non-null and it may be
+ // a previously-removed entry. If |Reason| is |ForSearchOrRemove|, the return
+ // value is null on a miss, and will never be a previously-removed entry on a
+ // hit. This distinction is a bit grotty but this function is hot enough that
+-// these differences are worthwhile.
++// these differences are worthwhile. (It's also hot enough that
++// MOZ_ALWAYS_INLINE makes a significant difference.)
+ template <PLDHashTable::SearchReason Reason>
+-PLDHashEntryHdr* NS_FASTCALL
++MOZ_ALWAYS_INLINE PLDHashEntryHdr*
+ PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash) const
+ {
+   MOZ_ASSERT(mEntryStore.Get());
+   NS_ASSERTION(!(aKeyHash & kCollisionFlag),
+                "!(aKeyHash & kCollisionFlag)");
+ 
+   // Compute the primary hash address.
+   PLDHashNumber hash1 = Hash1(aKeyHash);
+
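
Reduced to a toy (not the real SearchTable), the pattern is a hot function templated on a compile-time reason and forced inline, so each instantiation folds the reason-dependent branches away:

```cpp
#include "mozilla/Attributes.h"

enum SlotState { Free, Live, Removed };
enum SearchReason { ForAdd, ForSearchOrRemove };

// Illustrative only; the commit message above measured the code-size cost
// of always-inlining the real SearchTable() at 16 bytes of libxul.
template <SearchReason Reason>
MOZ_ALWAYS_INLINE bool
SlotUsable(SlotState aState)
{
  if (Reason == ForAdd) {
    return aState != Live;   // free or previously-removed slots both work
  }
  return aState == Live;     // searches and removals want live hits only
}
```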

+ 87 - 0
frg/work-js/mozilla-release/patches/L-1477626-9-63a1.patch

@@ -0,0 +1,87 @@
+# HG changeset patch
+# User Nicholas Nethercote <nnethercote@mozilla.com>
+# Date 1532600160 -36000
+#      Thu Jul 26 20:16:00 2018 +1000
+# Node ID e33429d58f2356bfda4eac23a60822a037d4f192
+# Parent  8d22622b5264e15e0f429c226b9f413f413ba298
+Bug 1477626 - Use mozilla::HashTable instead of js::HashTable in Bench.cpp. r=froydnj
+
+MozReview-Commit-ID: 4P5L9Kdkiuu
+
+diff --git a/xpcom/rust/gtest/bench-collections/Bench.cpp b/xpcom/rust/gtest/bench-collections/Bench.cpp
+--- a/xpcom/rust/gtest/bench-collections/Bench.cpp
++++ b/xpcom/rust/gtest/bench-collections/Bench.cpp
+@@ -33,28 +33,28 @@
+ // Callgrind, do something like this:
+ //
+ //   MOZ_RUN_GTEST=1 GTEST_FILTER='*BenchCollections*$IMPL*'
+ //       valgrind --tool=callgrind --callgrind-out-file=clgout
+ //       $OBJDIR/dist/bin/firefox -unittest
+ //   callgrind_annotate --auto=yes clgout > clgann
+ //
+ // where $IMPL is part of an implementation name in a test (e.g. "PLDHash",
+-// "JSHash") and $OBJDIR is an objdir containing a --enable-release build.
++// "MozHash") and $OBJDIR is an objdir containing a --enable-release build.
+ //
+ // Note that multiple processes are spawned, so `clgout` gets overwritten
+ // multiple times, but the last process to write its profiling data to file is
+ // the one of interest. (Alternatively, use --callgrind-out-file=clgout.%p to
+ // get separate output files for each process, with a PID suffix.)
+ 
+ #include "gtest/gtest.h"
+ #include "gtest/MozGTestBench.h" // For MOZ_GTEST_BENCH
+-#include "js/HashTable.h"
+ #include "mozilla/AllocPolicy.h"
+ #include "mozilla/HashFunctions.h"
++#include "mozilla/HashTable.h"
+ #include "mozilla/StaticMutex.h"
+ #include "mozilla/TimeStamp.h"
+ #include "PLDHashTable.h"
+ #include <unordered_set>
+ 
+ using namespace mozilla;
+ 
+ // This function gives a pseudo-random sequence with the following properties:
+@@ -166,19 +166,19 @@ Bench_Cpp_PLDHashTable(const Params* aPa
+     MOZ_RELEASE_ASSERT(hs.EntryCount() == 0);
+   } else {
+     MOZ_RELEASE_ASSERT(hs.EntryCount() == aParams->mNumInserts);
+   }
+ }
+ 
+ // Keep this in sync with all the other Bench_*() functions.
+ void
+-Bench_Cpp_JSHashSet(const Params* aParams, void** aVals, size_t aLen)
++Bench_Cpp_MozHashSet(const Params* aParams, void** aVals, size_t aLen)
+ {
+-  js::HashSet<void*, js::DefaultHasher<void*>, MallocAllocPolicy> hs;
++  mozilla::HashSet<void*, mozilla::DefaultHasher<void*>, MallocAllocPolicy> hs;
+   MOZ_RELEASE_ASSERT(hs.init());
+ 
+   for (size_t j = 0; j < aParams->mNumInserts; j++) {
+     auto p = hs.lookupForAdd(aVals[j]);
+     MOZ_RELEASE_ASSERT(!p);
+     MOZ_RELEASE_ASSERT(hs.add(p, aVals[j]));
+   }
+ 
+@@ -290,18 +290,18 @@ StaticMutex BenchCollections::sValsMutex
+ MOZ_GTEST_BENCH_F(BenchCollections, unordered_set, [this] {
+   BenchImpl(Bench_Cpp_unordered_set);
+ });
+ 
+ MOZ_GTEST_BENCH_F(BenchCollections, PLDHash, [this] {
+   BenchImpl(Bench_Cpp_PLDHashTable);
+ });
+ 
+-MOZ_GTEST_BENCH_F(BenchCollections, JSHash, [this] {
+-  BenchImpl(Bench_Cpp_JSHashSet);
++MOZ_GTEST_BENCH_F(BenchCollections, MozHash, [this] {
++  BenchImpl(Bench_Cpp_MozHashSet);
+ });
+ 
+ MOZ_GTEST_BENCH_F(BenchCollections, RustHash, [this] {
+   BenchImpl(Bench_Rust_HashSet);
+ });
+ 
+ MOZ_GTEST_BENCH_F(BenchCollections, RustFnvHash, [this] {
+   BenchImpl(Bench_Rust_FnvHashSet);

+ 259 - 0
frg/work-js/mozilla-release/patches/mozilla-esr68-push_449668.patch

@@ -0,0 +1,259 @@
+# HG changeset patch
+# User Jed Davis <jld@mozilla.com>
+# Date 1519152564 25200
+#      Tue Feb 20 11:49:24 2018 -0700
+# Node ID 17c89b6a5972403c2ab95d3d9f988d75211c8761
+# Parent  b65210a45f7afe6b4e2556233513560feb11ed1f
+Bug 1440199 - Part 1: Remove Chromium shared memory locks. r=froydnj
+
+This deletes some dead code and removes a dependency on the shared
+memory object's name, which will be removed in the next patch (and is
+always empty in our usage).
+
+MozReview-Commit-ID: 1ub0nLCBucO
+
+diff --git a/ipc/chromium/src/base/shared_memory.h b/ipc/chromium/src/base/shared_memory.h
+--- a/ipc/chromium/src/base/shared_memory.h
++++ b/ipc/chromium/src/base/shared_memory.h
+@@ -20,25 +20,21 @@
+ #include "base/process.h"
+ 
+ namespace base {
+ 
+ // SharedMemoryHandle is a platform specific type which represents
+ // the underlying OS handle to a shared memory segment.
+ #if defined(OS_WIN)
+ typedef HANDLE SharedMemoryHandle;
+-typedef HANDLE SharedMemoryLock;
+ #elif defined(OS_POSIX)
+ // A SharedMemoryId is sufficient to identify a given shared memory segment on a
+ // system, but insufficient to map it.
+ typedef FileDescriptor SharedMemoryHandle;
+ typedef ino_t SharedMemoryId;
+-// On POSIX, the lock is implemented as a lockf() on the mapped file,
+-// so no additional member (or definition of SharedMemoryLock) is
+-// needed.
+ #endif
+ 
+ // Platform abstraction for shared memory.  Provides a C++ wrapper
+ // around the OS primitive for a memory mapped file.
+ class SharedMemory {
+  public:
+   // Create a new SharedMemory object.
+   SharedMemory();
+@@ -148,37 +144,21 @@ class SharedMemory {
+   //   return ok;
+   // Note that the memory is unmapped by calling this method, regardless of the
+   // return value.
+   bool GiveToProcess(ProcessId target_pid,
+                      SharedMemoryHandle* new_handle) {
+     return ShareToProcessCommon(target_pid, new_handle, true);
+   }
+ 
+-  // Lock the shared memory.
+-  // This is a cross-process lock which may be recursively
+-  // locked by the same thread.
+-  // TODO(port):
+-  // WARNING: on POSIX the lock only works across processes, not
+-  // across threads.  2 threads in the same process can both grab the
+-  // lock at the same time.  There are several solutions for this
+-  // (futex, lockf+anon_semaphore) but none are both clean and common
+-  // across Mac and Linux.
+-  void Lock();
+-
+-  // Release the shared memory lock.
+-  void Unlock();
+-
+  private:
+ #if defined(OS_POSIX)
+   bool CreateOrOpen(const std::wstring &name, int posix_flags, size_t size);
+   bool FilenameForMemoryName(const std::wstring &memname,
+                              std::wstring *filename);
+-  void LockOrUnlockCommon(int function);
+-
+ #endif
+   bool ShareToProcessCommon(ProcessId target_pid,
+                             SharedMemoryHandle* new_handle,
+                             bool close_self);
+ 
+ #if defined(OS_WIN)
+   // If true indicates this came from an external source so needs extra checks
+   // before being mapped.
+@@ -187,36 +167,15 @@ class SharedMemory {
+   HANDLE             mapped_file_;
+ #elif defined(OS_POSIX)
+   int                mapped_file_;
+   ino_t              inode_;
+ #endif
+   void*              memory_;
+   bool               read_only_;
+   size_t             max_size_;
+-#if !defined(OS_POSIX)
+-  SharedMemoryLock   lock_;
+-#endif
+ 
+   DISALLOW_EVIL_CONSTRUCTORS(SharedMemory);
+ };
+ 
+-// A helper class that acquires the shared memory lock while
+-// the SharedMemoryAutoLock is in scope.
+-class SharedMemoryAutoLock {
+- public:
+-  explicit SharedMemoryAutoLock(SharedMemory* shared_memory)
+-      : shared_memory_(shared_memory) {
+-    shared_memory_->Lock();
+-  }
+-
+-  ~SharedMemoryAutoLock() {
+-    shared_memory_->Unlock();
+-  }
+-
+- private:
+-  SharedMemory* shared_memory_;
+-  DISALLOW_EVIL_CONSTRUCTORS(SharedMemoryAutoLock);
+-};
+-
+ }  // namespace base
+ 
+ #endif  // BASE_SHARED_MEMORY_H_
+diff --git a/ipc/chromium/src/base/shared_memory_posix.cc b/ipc/chromium/src/base/shared_memory_posix.cc
+--- a/ipc/chromium/src/base/shared_memory_posix.cc
++++ b/ipc/chromium/src/base/shared_memory_posix.cc
+@@ -265,74 +265,13 @@ void SharedMemory::Close(bool unmap_view
+   }
+ 
+   if (mapped_file_ >= 0) {
+     close(mapped_file_);
+     mapped_file_ = -1;
+   }
+ }
+ 
+-#ifdef ANDROID
+-void SharedMemory::LockOrUnlockCommon(int function) {
+-  DCHECK(mapped_file_ >= 0);
+-  struct flock lockreq;
+-  lockreq.l_type = function;
+-  lockreq.l_whence = SEEK_SET;
+-  lockreq.l_start = 0;
+-  lockreq.l_len = 0;
+-  while (fcntl(mapped_file_, F_SETLKW, &lockreq) < 0) {
+-    if (errno == EINTR) {
+-      continue;
+-    } else if (errno == ENOLCK) {
+-      // temporary kernel resource exaustion
+-      PlatformThread::Sleep(500);
+-      continue;
+-    } else {
+-      NOTREACHED() << "lockf() failed."
+-                   << " function:" << function
+-                   << " fd:" << mapped_file_
+-                   << " errno:" << errno
+-                   << " msg:" << strerror(errno);
+-    }
+-  }
+-}
+-
+-void SharedMemory::Lock() {
+-  LockOrUnlockCommon(F_WRLCK);
+-}
+-
+-void SharedMemory::Unlock() {
+-  LockOrUnlockCommon(F_UNLCK);
+-}
+-#else
+-void SharedMemory::LockOrUnlockCommon(int function) {
+-  DCHECK(mapped_file_ >= 0);
+-  while (lockf(mapped_file_, function, 0) < 0) {
+-    if (errno == EINTR) {
+-      continue;
+-    } else if (errno == ENOLCK) {
+-      // temporary kernel resource exaustion
+-      PlatformThread::Sleep(500);
+-      continue;
+-    } else {
+-      NOTREACHED() << "lockf() failed."
+-                   << " function:" << function
+-                   << " fd:" << mapped_file_
+-                   << " errno:" << errno
+-                   << " msg:" << strerror(errno);
+-    }
+-  }
+-}
+-
+-void SharedMemory::Lock() {
+-  LockOrUnlockCommon(F_LOCK);
+-}
+-
+-void SharedMemory::Unlock() {
+-  LockOrUnlockCommon(F_ULOCK);
+-}
+-#endif
+-
+ SharedMemoryHandle SharedMemory::handle() const {
+   return FileDescriptor(mapped_file_, false);
+ }
+ 
+ }  // namespace base
+diff --git a/ipc/chromium/src/base/shared_memory_win.cc b/ipc/chromium/src/base/shared_memory_win.cc
+--- a/ipc/chromium/src/base/shared_memory_win.cc
++++ b/ipc/chromium/src/base/shared_memory_win.cc
+@@ -56,25 +56,22 @@ bool IsSectionSafeToMap(HANDLE handle) {
+ 
+ namespace base {
+ 
+ SharedMemory::SharedMemory()
+     : external_section_(false),
+       mapped_file_(NULL),
+       memory_(NULL),
+       read_only_(false),
+-      max_size_(0),
+-      lock_(NULL) {
++      max_size_(0) {
+ }
+ 
+ SharedMemory::~SharedMemory() {
+   external_section_ = true;
+   Close();
+-  if (lock_ != NULL)
+-    CloseHandle(lock_);
+ }
+ 
+ bool SharedMemory::SetHandle(SharedMemoryHandle handle, bool read_only) {
+   DCHECK(mapped_file_ == NULL);
+ 
+   external_section_ = true;
+   mapped_file_ = handle;
+   read_only_ = read_only;
+@@ -195,32 +192,13 @@ void SharedMemory::Close(bool unmap_view
+   }
+ 
+   if (mapped_file_ != NULL) {
+     CloseHandle(mapped_file_);
+     mapped_file_ = NULL;
+   }
+ }
+ 
+-void SharedMemory::Lock() {
+-  if (lock_ == NULL) {
+-    std::wstring name = name_;
+-    name.append(L"lock");
+-    lock_ = CreateMutex(NULL, FALSE, name.c_str());
+-    DCHECK(lock_ != NULL);
+-    if (lock_ == NULL) {
+-      DLOG(ERROR) << "Could not create mutex" << GetLastError();
+-      return;  // there is nothing good we can do here.
+-    }
+-  }
+-  WaitForSingleObject(lock_, INFINITE);
+-}
+-
+-void SharedMemory::Unlock() {
+-  DCHECK(lock_ != NULL);
+-  ReleaseMutex(lock_);
+-}
+-
+ SharedMemoryHandle SharedMemory::handle() const {
+   return mapped_file_;
+ }
+ 
+ }  // namespace base

+ 424 - 0
frg/work-js/mozilla-release/patches/mozilla-esr68-push_449669.patch

@@ -0,0 +1,424 @@
+# HG changeset patch
+# User Jed Davis <jld@mozilla.com>
+# Date 1519157252 25200
+#      Tue Feb 20 13:07:32 2018 -0700
+# Node ID 9561bdb8c479ff4b882a6936716ba9a8cc8bdae3
+# Parent  17c89b6a5972403c2ab95d3d9f988d75211c8761
+Bug 1440199 - Part 2: Remove named mode from IPC shared memory. r=froydnj
+
+We're not using named shared memory, and supporting only anonymous
+shared memory allows using other backends that are more compatible
+with preventing a process from accessing any shared memory it wasn't
+explicitly granted (i.e., sandboxing).
+
+Specifically: SharedMemory::Open is removed; SharedMemory::Create no
+longer takes a name, no longer has the open_existing option which doesn't
+apply to anonymous memory, and no longer supports read-only memory
+(anonymous memory which can never have been written isn't very useful).
+
+This patch also fixes some comments in what remains of SharedMemory::Create.
+
+MozReview-Commit-ID: 4kBrURtxq20
+
+diff --git a/dom/ipc/ContentParent.cpp b/dom/ipc/ContentParent.cpp
+--- a/dom/ipc/ContentParent.cpp
++++ b/dom/ipc/ContentParent.cpp
+@@ -2008,18 +2008,17 @@ ContentParent::LaunchSubprocess(ProcessP
+   // the command line.
+ 
+   // Serialize the early prefs.
+   nsAutoCStringN<1024> prefs;
+   Preferences::SerializePreferences(prefs);
+ 
+   // Set up the shared memory.
+   base::SharedMemory shm;
+-  if (!shm.Create("", /* read_only */ false, /* open_existing */ false,
+-                  prefs.Length())) {
++  if (!shm.Create(prefs.Length())) {
+     NS_ERROR("failed to create shared memory in the parent");
+     MarkAsDead();
+     return false;
+   }
+   if (!shm.Map(prefs.Length())) {
+     NS_ERROR("failed to map shared memory in the parent");
+     MarkAsDead();
+     return false;
+diff --git a/gfx/ipc/SharedDIB.cpp b/gfx/ipc/SharedDIB.cpp
+--- a/gfx/ipc/SharedDIB.cpp
++++ b/gfx/ipc/SharedDIB.cpp
+@@ -20,17 +20,17 @@ SharedDIB::~SharedDIB()
+ }
+ 
+ nsresult
+ SharedDIB::Create(uint32_t aSize)
+ {
+   Close();
+ 
+   mShMem = new base::SharedMemory();
+-  if (!mShMem || !mShMem->Create("", false, false, aSize))
++  if (!mShMem || !mShMem->Create(aSize))
+     return NS_ERROR_OUT_OF_MEMORY;
+ 
+   return NS_OK;
+ }
+ 
+ bool
+ SharedDIB::IsValid()
+ {
+diff --git a/ipc/chromium/src/base/shared_memory.h b/ipc/chromium/src/base/shared_memory.h
+--- a/ipc/chromium/src/base/shared_memory.h
++++ b/ipc/chromium/src/base/shared_memory.h
+@@ -55,34 +55,19 @@ class SharedMemory {
+ 
+   // Return true iff the given handle is valid (i.e. not the distingished
+   // invalid value; NULL for a HANDLE and -1 for a file descriptor)
+   static bool IsHandleValid(const SharedMemoryHandle& handle);
+ 
+   // Return invalid handle (see comment above for exact definition).
+   static SharedMemoryHandle NULLHandle();
+ 
+-  // Creates or opens a shared memory segment based on a name.
+-  // If read_only is true, opens the memory as read-only.
+-  // If open_existing is true, and the shared memory already exists,
+-  // opens the existing shared memory and ignores the size parameter.
+-  // If name is the empty string, use a unique name.
++  // Creates a shared memory segment.
+   // Returns true on success, false on failure.
+-  bool Create(const std::string& name, bool read_only, bool open_existing,
+-              size_t size);
+-
+-  // Deletes resources associated with a shared memory segment based on name.
+-  // Not all platforms require this call.
+-  bool Delete(const std::wstring& name);
+-
+-  // Opens a shared memory segment based on a name.
+-  // If read_only is true, opens for read-only access.
+-  // If name is the empty string, use a unique name.
+-  // Returns true on success, false on failure.
+-  bool Open(const std::wstring& name, bool read_only);
++  bool Create(size_t size);
+ 
+   // Maps the shared memory into the caller's address space.
+   // Returns true on success, false otherwise.  The memory address
+   // is accessed via the memory() accessor.
+   bool Map(size_t bytes);
+ 
+   // Unmaps the shared memory from the caller's address space.
+   // Returns true if successful; returns false on error or if the
+@@ -135,27 +120,21 @@ class SharedMemory {
+   // Note that the memory is unmapped by calling this method, regardless of the
+   // return value.
+   bool GiveToProcess(ProcessId target_pid,
+                      SharedMemoryHandle* new_handle) {
+     return ShareToProcessCommon(target_pid, new_handle, true);
+   }
+ 
+  private:
+-#if defined(OS_POSIX)
+-  bool CreateOrOpen(const std::wstring &name, int posix_flags, size_t size);
+-  bool FilenameForMemoryName(const std::wstring &memname,
+-                             std::wstring *filename);
+-#endif
+   bool ShareToProcessCommon(ProcessId target_pid,
+                             SharedMemoryHandle* new_handle,
+                             bool close_self);
+ 
+ #if defined(OS_WIN)
+-  std::wstring       name_;
+   HANDLE             mapped_file_;
+ #elif defined(OS_POSIX)
+   int                mapped_file_;
+   ino_t              inode_;
+ #endif
+   void*              memory_;
+   bool               read_only_;
+   size_t             max_size_;
+diff --git a/ipc/chromium/src/base/shared_memory_posix.cc b/ipc/chromium/src/base/shared_memory_posix.cc
+--- a/ipc/chromium/src/base/shared_memory_posix.cc
++++ b/ipc/chromium/src/base/shared_memory_posix.cc
+@@ -51,176 +51,75 @@ bool SharedMemory::IsHandleValid(const S
+   return handle.fd >= 0;
+ }
+ 
+ // static
+ SharedMemoryHandle SharedMemory::NULLHandle() {
+   return SharedMemoryHandle();
+ }
+ 
+-bool SharedMemory::Create(const std::string &cname, bool read_only,
+-                          bool open_existing, size_t size) {
+-  read_only_ = read_only;
+-
+-  std::wstring name = UTF8ToWide(cname);
+-
+-  int posix_flags = 0;
+-  posix_flags |= read_only ? O_RDONLY : O_RDWR;
+-  if (!open_existing || mapped_file_ <= 0)
+-    posix_flags |= O_CREAT;
+-
+-  if (!CreateOrOpen(name, posix_flags, size))
+-    return false;
+-
+-  max_size_ = size;
+-  return true;
+-}
+-
+-// Our current implementation of shmem is with mmap()ing of files.
+-// These files need to be deleted explicitly.
+-// In practice this call is only needed for unit tests.
+-bool SharedMemory::Delete(const std::wstring& name) {
+-  std::wstring mem_filename;
+-  if (FilenameForMemoryName(name, &mem_filename) == false)
+-    return false;
+-
+-  FilePath path(WideToUTF8(mem_filename));
+-  if (file_util::PathExists(path)) {
+-    return file_util::Delete(path);
+-  }
+-
+-  // Doesn't exist, so success.
+-  return true;
+-}
+-
+-bool SharedMemory::Open(const std::wstring &name, bool read_only) {
+-  read_only_ = read_only;
+-
+-  int posix_flags = 0;
+-  posix_flags |= read_only ? O_RDONLY : O_RDWR;
+-
+-  return CreateOrOpen(name, posix_flags, 0);
+-}
+-
+-// For the given shmem named |memname|, return a filename to mmap()
+-// (and possibly create).  Modifies |filename|.  Return false on
+-// error, or true of we are happy.
+-bool SharedMemory::FilenameForMemoryName(const std::wstring &memname,
+-                                         std::wstring *filename) {
+-  std::wstring mem_filename;
+-
+-  // mem_name will be used for a filename; make sure it doesn't
+-  // contain anything which will confuse us.
+-  DCHECK(memname.find_first_of(L"/") == std::string::npos);
+-  DCHECK(memname.find_first_of(L"\0") == std::string::npos);
+-
+-  FilePath temp_dir;
+-  if (file_util::GetShmemTempDir(&temp_dir) == false)
+-    return false;
+-
+-  mem_filename = UTF8ToWide(temp_dir.value());
+-  file_util::AppendToPath(&mem_filename, L"com.google.chrome.shmem." + memname);
+-  *filename = mem_filename;
+-  return true;
+-}
+-
+ namespace {
+ 
+ // A class to handle auto-closing of FILE*'s.
+ class ScopedFILEClose {
+  public:
+   inline void operator()(FILE* x) const {
+     if (x) {
+       fclose(x);
+     }
+   }
+ };
+ 
+ typedef mozilla::UniquePtr<FILE, ScopedFILEClose> ScopedFILE;
+ 
+ }
+ 
+-// Chromium mostly only use the unique/private shmem as specified by
+-// "name == L"". The exception is in the StatsTable.
+-// TODO(jrg): there is no way to "clean up" all unused named shmem if
+-// we restart from a crash.  (That isn't a new problem, but it is a problem.)
+-// In case we want to delete it later, it may be useful to save the value
+-// of mem_filename after FilenameForMemoryName().
+-bool SharedMemory::CreateOrOpen(const std::wstring &name,
+-                                int posix_flags, size_t size) {
++bool SharedMemory::Create(size_t size) {
++  read_only_ = false;
++
++  DCHECK(size > 0);
+   DCHECK(mapped_file_ == -1);
+ 
+   ScopedFILE file_closer;
+   FILE *fp;
+ 
+-  if (name == L"") {
+-    // It doesn't make sense to have a read-only private piece of shmem
+-    DCHECK(posix_flags & (O_RDWR | O_WRONLY));
+-
+-    FilePath path;
+-    fp = file_util::CreateAndOpenTemporaryShmemFile(&path);
+-
+-    // Deleting the file prevents anyone else from mapping it in
+-    // (making it private), and prevents the need for cleanup (once
+-    // the last fd is closed, it is truly freed).
+-    file_util::Delete(path);
+-  } else {
+-    std::wstring mem_filename;
+-    if (FilenameForMemoryName(name, &mem_filename) == false)
+-      return false;
++  FilePath path;
++  fp = file_util::CreateAndOpenTemporaryShmemFile(&path);
+ 
+-    std::string mode;
+-    switch (posix_flags) {
+-      case (O_RDWR | O_CREAT):
+-        // Careful: "w+" will truncate if it already exists.
+-        mode = "a+";
+-        break;
+-      case O_RDWR:
+-        mode = "r+";
+-        break;
+-      case O_RDONLY:
+-        mode = "r";
+-        break;
+-      default:
+-        NOTIMPLEMENTED();
+-        break;
+-    }
+-
+-    fp = file_util::OpenFile(mem_filename, mode.c_str());
+-  }
++  // Deleting the file prevents anyone else from mapping it in
++  // (making it private), and prevents the need for cleanup (once
++  // the last fd is closed, it is truly freed).
++  file_util::Delete(path);
+ 
+   if (fp == NULL)
+     return false;
+   file_closer.reset(fp);  // close when we go out of scope
+ 
+-  // Make sure the (new) file is the right size.
+-  // According to the man page, "Use of truncate() to extend a file is
+-  // not portable."
+-  if (size && (posix_flags & (O_RDWR | O_CREAT))) {
+-    // Get current size.
+-    struct stat stat;
+-    if (fstat(fileno(fp), &stat) != 0)
+-      return false;
+-    size_t current_size = stat.st_size;
+-    if (current_size != size) {
+-      if (ftruncate(fileno(fp), size) != 0)
+-        return false;
+-      if (fseeko(fp, size, SEEK_SET) != 0)
+-        return false;
+-    }
+-  }
++  // Set the file size.
++  //
++  // According to POSIX, (f)truncate can be used to extend files;
++  // previously this required the XSI option but as of the 2008
++  // edition it's required for everything.  (Linux documents that this
++  // may fail on non-"native" filesystems like FAT, but /dev/shm
++  // should always be tmpfs.)
++  if (ftruncate(fileno(fp), size) != 0)
++    return false;
++  // This probably isn't needed.
++  if (fseeko(fp, size, SEEK_SET) != 0)
++    return false;
+ 
+   mapped_file_ = dup(fileno(fp));
+   DCHECK(mapped_file_ >= 0);
+ 
+   struct stat st;
+   if (fstat(mapped_file_, &st))
+     NOTREACHED();
+   inode_ = st.st_ino;
+ 
++  max_size_ = size;
+   return true;
+ }
+ 
+ bool SharedMemory::Map(size_t bytes) {
+   if (mapped_file_ == -1)
+     return false;
+ 
+   memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
+diff --git a/ipc/chromium/src/base/shared_memory_win.cc b/ipc/chromium/src/base/shared_memory_win.cc
+--- a/ipc/chromium/src/base/shared_memory_win.cc
++++ b/ipc/chromium/src/base/shared_memory_win.cc
+@@ -37,57 +37,28 @@ bool SharedMemory::IsHandleValid(const S
+   return handle != NULL;
+ }
+ 
+ // static
+ SharedMemoryHandle SharedMemory::NULLHandle() {
+   return NULL;
+ }
+ 
+-bool SharedMemory::Create(const std::string &cname, bool read_only,
+-                          bool open_existing, size_t size) {
++bool SharedMemory::Create(size_t size) {
+   DCHECK(mapped_file_ == NULL);
+-  std::wstring name = UTF8ToWide(cname);
+-  name_ = name;
+-  read_only_ = read_only;
++  read_only_ = false;
+   mapped_file_ = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
+-      read_only_ ? PAGE_READONLY : PAGE_READWRITE, 0, static_cast<DWORD>(size),
+-      name.empty() ? NULL : name.c_str());
++      PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+   if (!mapped_file_)
+     return false;
+ 
+-  // Check if the shared memory pre-exists.
+-  if (GetLastError() == ERROR_ALREADY_EXISTS && !open_existing) {
+-    Close();
+-    return false;
+-  }
+   max_size_ = size;
+   return true;
+ }
+ 
+-bool SharedMemory::Delete(const std::wstring& name) {
+-  // intentionally empty -- there is nothing for us to do on Windows.
+-  return true;
+-}
+-
+-bool SharedMemory::Open(const std::wstring &name, bool read_only) {
+-  DCHECK(mapped_file_ == NULL);
+-
+-  name_ = name;
+-  read_only_ = read_only;
+-  mapped_file_ = OpenFileMapping(
+-      read_only_ ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS, false,
+-      name.empty() ? NULL : name.c_str());
+-  if (mapped_file_ != NULL) {
+-    // Note: size_ is not set in this case.
+-    return true;
+-  }
+-  return false;
+-}
+-
+ bool SharedMemory::Map(size_t bytes) {
+   if (mapped_file_ == NULL)
+     return false;
+ 
+   memory_ = MapViewOfFile(mapped_file_,
+       read_only_ ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS, 0, 0, bytes);
+   if (memory_ != NULL) {
+     return true;
+diff --git a/ipc/glue/SharedMemoryBasic_chromium.h b/ipc/glue/SharedMemoryBasic_chromium.h
+--- a/ipc/glue/SharedMemoryBasic_chromium.h
++++ b/ipc/glue/SharedMemoryBasic_chromium.h
+@@ -32,17 +32,17 @@ public:
+   }
+ 
+   virtual bool SetHandle(const Handle& aHandle, OpenRights aRights) override {
+     return mSharedMemory.SetHandle(aHandle, aRights == RightsReadOnly);
+   }
+ 
+   virtual bool Create(size_t aNbytes) override
+   {
+-    bool ok = mSharedMemory.Create("", false, false, aNbytes);
++    bool ok = mSharedMemory.Create(aNbytes);
+     if (ok) {
+       Created(aNbytes);
+     }
+     return ok;
+   }
+ 
+   virtual bool Map(size_t nBytes) override
+   {
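
With named mode gone, the surviving API is small. A condensed sketch of the create-map-write sequence from the ContentParent.cpp hunk above (error paths trimmed; the payload is hypothetical):

```cpp
#include <string.h>

#include "base/shared_memory.h"

static bool
ShareBytes(const char* aData, size_t aLen)
{
  base::SharedMemory shm;
  if (!shm.Create(aLen) || !shm.Map(aLen)) {
    return false;  // creation or mapping failed
  }
  memcpy(shm.memory(), aData, aLen);
  // The handle would then go to the child via ShareToProcess() or
  // GiveToProcess().
  return true;
}
```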

+ 142 - 0
frg/work-js/mozilla-release/patches/mozilla-esr68-push_449670.patch

@@ -0,0 +1,142 @@
+# HG changeset patch
+# User Jed Davis <jld@mozilla.com>
+# Date 1519164552 25200
+#      Tue Feb 20 15:09:12 2018 -0700
+# Node ID 5b55e19c6ce16c80648d7a786682c6a5cd833f6a
+# Parent  9561bdb8c479ff4b882a6936716ba9a8cc8bdae3
+Bug 1440199 - Part 3: Remove IPC shared memory IDs. r=froydnj
+
+This code isn't blocking anything, but it's dead and I don't think we
+have any plans to use it.
+
+MozReview-Commit-ID: KBoEfLceDns
+
+diff --git a/ipc/chromium/src/base/shared_memory.h b/ipc/chromium/src/base/shared_memory.h
+--- a/ipc/chromium/src/base/shared_memory.h
++++ b/ipc/chromium/src/base/shared_memory.h
+@@ -21,20 +21,17 @@
+ 
+ namespace base {
+ 
+ // SharedMemoryHandle is a platform specific type which represents
+ // the underlying OS handle to a shared memory segment.
+ #if defined(OS_WIN)
+ typedef HANDLE SharedMemoryHandle;
+ #elif defined(OS_POSIX)
+-// A SharedMemoryId is sufficient to identify a given shared memory segment on a
+-// system, but insufficient to map it.
+ typedef FileDescriptor SharedMemoryHandle;
+-typedef ino_t SharedMemoryId;
+ #endif
+ 
+ // Platform abstraction for shared memory.  Provides a C++ wrapper
+ // around the OS primitive for a memory mapped file.
+ class SharedMemory {
+  public:
+   // Create a new SharedMemory object.
+   SharedMemory();
+@@ -85,24 +82,16 @@ class SharedMemory {
+   // Mapped via Map().  Returns NULL if it is not mapped.
+   void *memory() const { return memory_; }
+ 
+   // Get access to the underlying OS handle for this segment.
+   // Use of this handle for anything other than an opaque
+   // identifier is not portable.
+   SharedMemoryHandle handle() const;
+ 
+-#if defined(OS_POSIX)
+-  // Return a unique identifier for this shared memory segment. Inode numbers
+-  // are technically only unique to a single filesystem. However, we always
+-  // allocate shared memory backing files from the same directory, so will end
+-  // up on the same filesystem.
+-  SharedMemoryId id() const { return inode_; }
+-#endif
+-
+   // Closes the open shared memory segment.
+   // It is safe to call Close repeatedly.
+   void Close(bool unmap_view = true);
+ 
+   // Share the shared memory to another process.  Attempts
+   // to create a platform-specific new_handle which can be
+   // used in a remote process to access the shared memory
+   // file.  new_handle is an ouput parameter to receive
+@@ -128,17 +117,16 @@ class SharedMemory {
+   bool ShareToProcessCommon(ProcessId target_pid,
+                             SharedMemoryHandle* new_handle,
+                             bool close_self);
+ 
+ #if defined(OS_WIN)
+   HANDLE             mapped_file_;
+ #elif defined(OS_POSIX)
+   int                mapped_file_;
+-  ino_t              inode_;
+ #endif
+   void*              memory_;
+   bool               read_only_;
+   size_t             max_size_;
+ 
+   DISALLOW_EVIL_CONSTRUCTORS(SharedMemory);
+ };
+ 
+diff --git a/ipc/chromium/src/base/shared_memory_posix.cc b/ipc/chromium/src/base/shared_memory_posix.cc
+--- a/ipc/chromium/src/base/shared_memory_posix.cc
++++ b/ipc/chromium/src/base/shared_memory_posix.cc
+@@ -17,36 +17,29 @@
+ #include "base/platform_thread.h"
+ #include "base/string_util.h"
+ #include "mozilla/UniquePtr.h"
+ 
+ namespace base {
+ 
+ SharedMemory::SharedMemory()
+     : mapped_file_(-1),
+-      inode_(0),
+       memory_(NULL),
+       read_only_(false),
+       max_size_(0) {
+ }
+ 
+ SharedMemory::~SharedMemory() {
+   Close();
+ }
+ 
+ bool SharedMemory::SetHandle(SharedMemoryHandle handle, bool read_only) {
+   DCHECK(mapped_file_ == -1);
+ 
+-  struct stat st;
+-  if (fstat(handle.fd, &st) < 0) {
+-    return false;
+-  }
+-
+   mapped_file_ = handle.fd;
+-  inode_ = st.st_ino;
+   read_only_ = read_only;
+   return true;
+ }
+ 
+ // static
+ bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+   return handle.fd >= 0;
+ }
+@@ -104,21 +97,16 @@ bool SharedMemory::Create(size_t size) {
+     return false;
+   // This probably isn't needed.
+   if (fseeko(fp, size, SEEK_SET) != 0)
+     return false;
+ 
+   mapped_file_ = dup(fileno(fp));
+   DCHECK(mapped_file_ >= 0);
+ 
+-  struct stat st;
+-  if (fstat(mapped_file_, &st))
+-    NOTREACHED();
+-  inode_ = st.st_ino;
+-
+   max_size_ = size;
+   return true;
+ }
+ 
+ bool SharedMemory::Map(size_t bytes) {
+   if (mapped_file_ == -1)
+     return false;
+ 

+ 23 - 3
frg/work-js/mozilla-release/patches/series

@@ -3793,6 +3793,8 @@ servo-20592-61a1.patch
 1452864-1-61a1.patch
 1452864-2-61a1.patch
 1452791-61a1.patch
+1452288-61a1.patch
+1452202-fix-61a1.patch
 1449982-01-61a1.patch
 1449982-02-61a1.patch
 1449982-03-61a1.patch
@@ -5176,6 +5178,8 @@ NOBUG-20180712-typetraits-63a1.patch
 1472291-2-63a1.patch
 1052582-1-63a1.patch
 1052582-2-63a1.patch
+1475461-1-63a1.patch
+1475461-2-63a1.patch
 1475504-PARTIAL-63a1.patch
 1472716-1-63a1.patch
 1472490-63a1.patch
@@ -5194,6 +5198,7 @@ NOBUG-20180712-typetraits-63a1.patch
 1474024-63a1.patch
 1475335-63a1.patch
 1475660-63a1.patch
+1475980-63a1.patch
 1290972-63a1.patch
 1473217-2-63a1.patch
 1473217-3-63a1.patch
@@ -5279,6 +5284,7 @@ NOBUG-20180720-tokenstream-63a1.patch
 1477579-2-63a1.patch
 1477579-3-63a1.patch
 1477579-4-63a1.patch
+1468514-63a1.patch
 1477979-63a1.patch
 1447932-63a1.patch
 1473667-63a1.patch
@@ -5343,6 +5349,7 @@ NOBUG-20180720-tokenstream-63a1.patch
 1476953-63a1.patch
 1416723-1-63a1.patch
 1416723-2-63a1.patch
+1477632-63a1.patch
 1478587-01-63a1.patch
 1478587-02-63a1.patch
 1478587-03-63a1.patch
@@ -5368,6 +5375,14 @@ NOBUG-20180720-tokenstream-63a1.patch
 1475228-5-63a1.patch
 1475228-6-63a1.patch
 1475228-7-63a1.patch
+1477626-1-63a1.patch
+1477626-2-63a1.patch
+1477626-3-63a1.patch
+1477626-4-63a1.patch
+1477626-5-63a1.patch
+1477626-6-63a1.patch
+1477626-7-63a1.patch
+1477626-8-63a1.patch
 1478499-4-63a1.patch
 1478499-5no6-63a1.patch
 1479851-63a1.patch
@@ -8053,8 +8068,14 @@ TOP-NOBUG-REGEXP-47-fixes-25319.patch
 TOP-1880562-NSS3902-11509.patch
 NOBUG-JSFIXUPS-25319.patch
 1462261-3-25319.patch
+1420975-60a1.patch
+1442765-1-63a1.patch
+1442765-2-63a1.patch
 
-
+ipc-start
+mozilla-esr68-push_449668.patch
+mozilla-esr68-push_449669.patch
+mozilla-esr68-push_449670.patch
 
 
 dynamic-imports-ioptionsl-start
@@ -10744,7 +10765,6 @@ L-1407858-xx-59a1.patch
 L-1421358-59a1.patch
 L-1429945-59a1.patch
 L-1432808-60a1.patch
-L-1420975-60a1.patch
 L-1428072-2-60a1.patch
 L-1428072-3-60a1.patch
 L-1435764-xx-60a1.patch
@@ -10826,7 +10846,7 @@ mozilla-central-push_421054.patch
 mozilla-central-push_421066.patch
 mozilla-central-push_421533.patch
 L-1478942-63a1.patch
-
+L-1477626-9-63a1.patch
 the-taken-out-stuff-start.patch
 
 I-am-here.patch

+ 18 - 0
frg/work-js/mozilla-release/patches/series-test

@@ -3793,6 +3793,8 @@ servo-20592-61a1.patch
 1452864-1-61a1.patch
 1452864-2-61a1.patch
 1452791-61a1.patch
+1452288-61a1.patch
+1452202-fix-61a1.patch
 1449982-01-61a1.patch
 1449982-02-61a1.patch
 1449982-03-61a1.patch
@@ -5175,6 +5177,8 @@ NOBUG-20180712-typetraits-63a1.patch
 1472291-2-63a1.patch
 1052582-1-63a1.patch
 1052582-2-63a1.patch
+1475461-1-63a1.patch
+1475461-2-63a1.patch
 1475504-PARTIAL-63a1.patch
 1472716-1-63a1.patch
 1472490-63a1.patch
@@ -5193,6 +5197,7 @@ NOBUG-20180712-typetraits-63a1.patch
 1474024-63a1.patch
 1475335-63a1.patch
 1475660-63a1.patch
+1475980-63a1.patch
 1290972-63a1.patch
 1473217-2-63a1.patch
 1473217-3-63a1.patch
@@ -5273,6 +5278,7 @@ NOBUG-20180720-tokenstream-63a1.patch
 1477579-2-63a1.patch
 1477579-3-63a1.patch
 1477579-4-63a1.patch
+1468514-63a1.patch
 1477979-63a1.patch
 1447932-63a1.patch
 1473667-63a1.patch
@@ -5337,6 +5343,7 @@ NOBUG-20180720-tokenstream-63a1.patch
 1476953-63a1.patch
 1416723-1-63a1.patch
 1416723-2-63a1.patch
+1477632-63a1.patch
 1478587-01-63a1.patch
 1478587-02-63a1.patch
 1478587-03-63a1.patch
@@ -5362,6 +5369,14 @@ NOBUG-20180720-tokenstream-63a1.patch
 1475228-5-63a1.patch
 1475228-6-63a1.patch
 1475228-7-63a1.patch
+1477626-1-63a1.patch
+1477626-2-63a1.patch
+1477626-3-63a1.patch
+1477626-4-63a1.patch
+1477626-5-63a1.patch
+1477626-6-63a1.patch
+1477626-7-63a1.patch
+1477626-8-63a1.patch
 1478499-4-63a1.patch
 1478499-5no6-63a1.patch
 1479851-63a1.patch
@@ -7981,3 +7996,6 @@ TOP-NOBUG-REGEXP-47-fixes-25319.patch
 TOP-1880562-NSS3902-11509.patch
 NOBUG-JSFIXUPS-25319.patch
 1462261-3-25319.patch
+1420975-60a1.patch
+1442765-1-63a1.patch
+1442765-2-63a1.patch