
more stuff

Frank-Rainer Grahl, 7 months ago
commit b12d37f1ac
34 changed files with 50518 additions and 3113 deletions
  1. +12 -12     frg/work-js/mozilla-release/patches/1033916-1-63a1.patch
  2. +10 -10     frg/work-js/mozilla-release/patches/1040316-63a1.patch
  3. +29 -29     frg/work-js/mozilla-release/patches/1378808-1-63a1.patch
  4. +3447 -0    frg/work-js/mozilla-release/patches/1416723-1-63a1.patch
  5. +36709 -0   frg/work-js/mozilla-release/patches/1416723-2-63a1.patch
  6. +11 -9      frg/work-js/mozilla-release/patches/1447591-1-63a1.patch
  7. +6 -5       frg/work-js/mozilla-release/patches/1447591-2-63a1.patch
  8. +166 -0     frg/work-js/mozilla-release/patches/1450261-1-62a1.patch
  9. +1564 -0    frg/work-js/mozilla-release/patches/1450261-2-63a1.patch
  10. +1384 -0   frg/work-js/mozilla-release/patches/1450261-3-63a1.patch
  11. +1430 -0   frg/work-js/mozilla-release/patches/1450261-4-63a1.patch
  12. +4292 -0   frg/work-js/mozilla-release/patches/1459900-4-63a1.patch
  13. +332 -0    frg/work-js/mozilla-release/patches/1459900-5-63a1.patch
  14. +456 -0    frg/work-js/mozilla-release/patches/1459900-6-63a1.patch
  15. +0 -2938   frg/work-js/mozilla-release/patches/1465060-1-std-62a1.patch.bak
  16. +139 -0    frg/work-js/mozilla-release/patches/1471289-63a1.patch
  17. +183 -0    frg/work-js/mozilla-release/patches/1472974-63a1.patch
  18. +76 -0     frg/work-js/mozilla-release/patches/1473956-63a1.patch
  19. +65 -0     frg/work-js/mozilla-release/patches/1475943-63a1.patch
  20. +2 -2      frg/work-js/mozilla-release/patches/1476012-2-63a1.patch
  21. +63 -0     frg/work-js/mozilla-release/patches/1476953-63a1.patch
  22. +4 -6      frg/work-js/mozilla-release/patches/1489698-2-65a1.patch
  23. +6 -6      frg/work-js/mozilla-release/patches/1489698-5-65a1.patch
  24. +3 -3      frg/work-js/mozilla-release/patches/1502159-2-65a1.patch
  25. +3 -3      frg/work-js/mozilla-release/patches/1502886-1-65a1.patch
  26. +3 -3      frg/work-js/mozilla-release/patches/1528028-67a1.patch
  27. +9 -9      frg/work-js/mozilla-release/patches/1590907-5-72a1.patch
  28. +4 -4      frg/work-js/mozilla-release/patches/1727374-914.patch
  29. +7 -7      frg/work-js/mozilla-release/patches/TOP-NOBUG-REGEXP-03-1537978-68a1-25318.patch
  30. +21 -21    frg/work-js/mozilla-release/patches/TOP-NOBUG-REGEXP-07-1626713-76a1-25318.patch
  31. +28 -28    frg/work-js/mozilla-release/patches/TOP-NOBUG-REGEXP-35-1435829-66a1-25318.patch
  32. +22 -16    frg/work-js/mozilla-release/patches/TOP-NOBUG-REGEXP-37-1642493-79a1-25318.patch
  33. +15 -1     frg/work-js/mozilla-release/patches/series
  34. +17 -1     frg/work-js/mozilla-release/patches/series-test

+ 12 - 12
frg/work-js/mozilla-release/patches/1033916-1-63a1.patch

@@ -2,7 +2,7 @@
 # User Jeff Walden <jwalden@mit.edu>
 # Date 1534776368 25200
 # Node ID 67d5039dcbc2522e187bcf3dbec1c6e92bd32167
-# Parent  c0a1fdc3845951de8f5e66901b3f498608a32e2f
+# Parent  81bc6d1242a341dfe9a236395077b926de987c23
 Bug 1033916 - Move JSAutoByteString out of jsapi.h into js/public/AutoByteString.h, incidentally breaking the jsfriendapi.h -> jsapi.h dependency.  r=jandem
 
 diff --git a/dom/base/ChromeUtils.cpp b/dom/base/ChromeUtils.cpp
@@ -447,7 +447,7 @@ diff --git a/js/src/frontend/EmitterScope.cpp b/js/src/frontend/EmitterScope.cpp
 diff --git a/js/src/frontend/Parser.cpp b/js/src/frontend/Parser.cpp
 --- a/js/src/frontend/Parser.cpp
 +++ b/js/src/frontend/Parser.cpp
-@@ -30,16 +30,17 @@
+@@ -32,16 +32,17 @@
  #include "jstypes.h"
  #include "jstypes.h"
  
  
  #include "builtin/ModuleObject.h"
  #include "builtin/ModuleObject.h"
@@ -489,7 +489,7 @@ diff --git a/js/src/jsapi-tests/tests.h b/js/src/jsapi-tests/tests.h
 diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp
 --- a/js/src/jsapi.cpp
 +++ b/js/src/jsapi.cpp
-@@ -53,16 +53,17 @@
+@@ -50,16 +50,17 @@
  #include "frontend/Parser.h" // for JS_BufferIsCompileableUnit
  #include "frontend/Parser.h" // for JS_BufferIsCompileableUnit
  #include "gc/FreeOp.h"
  #include "gc/FreeOp.h"
  #include "gc/Marking.h"
  #include "gc/Marking.h"
@@ -939,12 +939,12 @@ diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
 +#include "js/AutoByteString.h"
 +#include "js/AutoByteString.h"
  #include "js/CharacterEncoding.h"
  #include "js/CharacterEncoding.h"
  #include "js/Printf.h"
  #include "js/Printf.h"
- #include "util/DoubleToString.h"
- #include "util/NativeStack.h"
- #include "util/Windows.h"
- #include "vm/BytecodeUtil.h"
- #include "vm/ErrorReporting.h"
- #include "vm/HelperThreads.h"
+ #ifdef JS_SIMULATOR_ARM64
+ # include "jit/arm64/vixl/Simulator-vixl.h"
+ #endif
+ #ifdef JS_SIMULATOR_ARM
+ # include "jit/arm/Simulator-arm.h"
+ #endif
 diff --git a/js/src/vm/JSFunction-inl.h b/js/src/vm/JSFunction-inl.h
 --- a/js/src/vm/JSFunction-inl.h
 +++ b/js/src/vm/JSFunction-inl.h
@@ -1075,7 +1075,7 @@ diff --git a/js/src/vm/Scope.cpp b/js/src/vm/Scope.cpp
 diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
 --- a/js/src/vm/SelfHosting.cpp
 +++ b/js/src/vm/SelfHosting.cpp
-@@ -34,16 +34,17 @@
+@@ -33,16 +33,17 @@
  #include "builtin/String.h"
  #include "builtin/String.h"
  #include "builtin/TypedObject.h"
  #include "builtin/TypedObject.h"
  #include "builtin/WeakMapObject.h"
  #include "builtin/WeakMapObject.h"
@@ -1180,11 +1180,11 @@ diff --git a/js/src/vm/Xdr.h b/js/src/vm/Xdr.h
 diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
 --- a/js/src/wasm/AsmJS.cpp
 +++ b/js/src/wasm/AsmJS.cpp
-@@ -28,16 +28,17 @@
+@@ -27,16 +27,17 @@
+ 
  #include "jsmath.h"
  #include "jsmath.h"
  #include "jsutil.h"
  #include "jsutil.h"
  
  
- #include "builtin/SIMD.h"
  #include "builtin/String.h"
  #include "builtin/String.h"
  #include "frontend/Parser.h"
  #include "frontend/Parser.h"
  #include "gc/Policy.h"
  #include "gc/Policy.h"

+ 10 - 10
frg/work-js/mozilla-release/patches/1040316-63a1.patch

@@ -2,7 +2,7 @@
 # User Jeff Walden <jwalden@mit.edu>
 # Date 1534776284 25200
 # Node ID 6d10eda7f12de64044246e544d581537f30f8998
-# Parent  e158e3d76ab16f5d234397ba088038cc21b1e95a
+# Parent  57808f6b27e05dfdb40f8248d8833f7eee3f7109
 Bug 1040316 - Move AutoStableStringChars out of friendapi into public API.  r=jandem
 
 diff --git a/dom/base/nsJSUtils.h b/dom/base/nsJSUtils.h
@@ -421,7 +421,7 @@ diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctio
  #include "js/Vector.h"
  #include "js/Vector.h"
  #include "js/Wrapper.h"
  #include "js/Wrapper.h"
  #include "util/StringBuffer.h"
  #include "util/StringBuffer.h"
-@@ -80,16 +81,18 @@
+@@ -79,16 +80,18 @@
  #include "vm/NativeObject-inl.h"
  #include "vm/NativeObject-inl.h"
  #include "vm/StringType-inl.h"
  #include "vm/StringType-inl.h"
  
  
@@ -642,7 +642,7 @@ diff --git a/js/src/ctypes/Library.cpp b/js/src/ctypes/Library.cpp
 diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp
 --- a/js/src/jsapi.cpp
 +++ b/js/src/jsapi.cpp
-@@ -59,16 +59,17 @@
+@@ -56,16 +56,17 @@
  #include "jit/JitCommon.h"
  #include "jit/JitCommon.h"
  #include "jit/JitSpewer.h"
  #include "jit/JitSpewer.h"
  #include "js/CharacterEncoding.h"
  #include "js/CharacterEncoding.h"
@@ -660,7 +660,7 @@ diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp
  #include "vm/AsyncFunction.h"
  #include "vm/AsyncFunction.h"
  #include "vm/AsyncIteration.h"
  #include "vm/AsyncIteration.h"
  #include "vm/DateObject.h"
  #include "vm/DateObject.h"
-@@ -105,16 +106,18 @@
+@@ -102,16 +103,18 @@
  
  using namespace js;
  using namespace js::gc;
@@ -1066,7 +1066,7 @@ diff --git a/js/src/vm/Debugger.cpp b/js/src/vm/Debugger.cpp
  using mozilla::Maybe;
  using mozilla::Some;
  using mozilla::Nothing;
-@@ -5067,17 +5069,17 @@ Debugger::isCompilableUnit(JSContext* cx
+@@ -5064,17 +5066,17 @@ Debugger::isCompilableUnit(JSContext* cx
      if (!args[0].isString()) {
          JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_NOT_EXPECTED_TYPE,
                                    "Debugger.isCompilableUnit", "string",
@@ -1246,7 +1246,7 @@ diff --git a/js/src/vm/Runtime.cpp b/js/src/vm/Runtime.cpp
 diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
 --- a/js/src/vm/SelfHosting.cpp
 +++ b/js/src/vm/SelfHosting.cpp
-@@ -36,16 +36,17 @@
+@@ -35,16 +35,17 @@
  #include "builtin/WeakMapObject.h"
  #include "builtin/WeakMapObject.h"
  #include "gc/HashUtil.h"
  #include "gc/HashUtil.h"
  #include "gc/Marking.h"
  #include "gc/Marking.h"
@@ -1264,7 +1264,7 @@ diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
  #include "vm/Interpreter.h"
  #include "vm/Interpreter.h"
  #include "vm/Iteration.h"
  #include "vm/Iteration.h"
  #include "vm/JSCompartment.h"
  #include "vm/JSCompartment.h"
-@@ -66,16 +67,17 @@
+@@ -65,16 +66,17 @@
  #include "vm/NativeObject-inl.h"
  #include "vm/NativeObject-inl.h"
  #include "vm/NumberObject-inl.h"
  #include "vm/NumberObject-inl.h"
  #include "vm/StringObject-inl.h"
  #include "vm/StringObject-inl.h"
@@ -1393,9 +1393,9 @@ diff --git a/js/src/vm/UbiNodeCensus.cpp b/js/src/vm/UbiNodeCensus.cpp
 diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
 --- a/js/src/wasm/AsmJS.cpp
 +++ b/js/src/wasm/AsmJS.cpp
-@@ -30,16 +30,17 @@
+@@ -29,16 +29,17 @@
+ #include "jsutil.h"
  
  
- #include "builtin/SIMD.h"
  #include "builtin/String.h"
  #include "builtin/String.h"
  #include "frontend/Parser.h"
  #include "frontend/Parser.h"
  #include "gc/Policy.h"
  #include "gc/Policy.h"
@@ -1411,7 +1411,7 @@ diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
  #include "vm/Time.h"
  #include "vm/Time.h"
  #include "vm/TypedArrayObject.h"
  #include "vm/TypedArrayObject.h"
  #include "wasm/WasmCompile.h"
  #include "wasm/WasmCompile.h"
-@@ -66,16 +67,17 @@ using mozilla::IsNaN;
+@@ -65,16 +66,17 @@ using mozilla::IsNaN;
  using mozilla::IsNegativeZero;
  using mozilla::IsPositiveZero;
  using mozilla::IsPowerOfTwo;

+ 29 - 29
frg/work-js/mozilla-release/patches/1378808-1-63a1.patch

@@ -2,7 +2,7 @@
 # User Logan F Smyth <loganfsmyth@gmail.com>
 # Date 1531419899 25200
 # Node ID fb38cfb1031dcd9ca730d7aeac46c8dacf24c0e7
-# Parent  fa889b1181c60606f81c2eca2f9d9f8a39aad0bc
+# Parent  8ef6bdc48f85bb11fa18228b3d9ddedb248fd9bf
 Bug 1378808 - Add a new ParseNodeKind::Arguments node type for call argument lists. r=jorendorff
 
 MozReview-Commit-ID: 7L4nNHjVoZo
@@ -10,7 +10,7 @@ MozReview-Commit-ID: 7L4nNHjVoZo
 diff --git a/js/src/builtin/ReflectParse.cpp b/js/src/builtin/ReflectParse.cpp
 --- a/js/src/builtin/ReflectParse.cpp
 +++ b/js/src/builtin/ReflectParse.cpp
-@@ -2698,34 +2698,35 @@ ASTSerializer::expression(ParseNode* pn,
+@@ -2697,34 +2697,35 @@ ASTSerializer::expression(ParseNode* pn,
                 builder.unaryExpression(op, expr, &pn->pn_pos, dst);
                 builder.unaryExpression(op, expr, &pn->pn_pos, dst);
        }
        }
  
  
@@ -81,7 +81,7 @@ diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitt
          MOZ_ASSERT(pn->isArity(PN_LIST));
          MOZ_ASSERT(pn->isArity(PN_LIST));
          MOZ_ASSERT(pn->pn_count >= 2);
          MOZ_ASSERT(pn->pn_count >= 2);
          *answer = true;
          *answer = true;
-@@ -4817,17 +4825,17 @@ BytecodeEmitter::emitForOf(ParseNode* fo
+@@ -4820,17 +4828,17 @@ BytecodeEmitter::emitForOf(ParseNode* fo
  
      ParseNode* forHeadExpr = forOfHead->pn_kid3;
  
@@ -100,7 +100,7 @@ diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitt
  
      if (!forOf.emitIterated())                            //
          return false;
-@@ -6122,45 +6130,47 @@ BytecodeEmitter::emitSelfHostedCallFunct
+@@ -6125,45 +6133,47 @@ BytecodeEmitter::emitSelfHostedCallFunct
      // invokes the callee with the correct |this| object and arguments.
      // callFunction(fun, thisArg, arg0, arg1) thus becomes:
      // - emit lookup for fun
@@ -155,7 +155,7 @@ diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitt
  
      ParseNode* thisOrNewTarget = funNode->pn_next;
      if (constructing) {
-@@ -6179,36 +6189,36 @@ BytecodeEmitter::emitSelfHostedCallFunct
+@@ -6182,36 +6192,36 @@ BytecodeEmitter::emitSelfHostedCallFunct
              return false;
      }
  
@@ -197,7 +197,7 @@ diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitt
          return false;
  
      ParseNode* kindNode = valNode->pn_next;
-@@ -6230,34 +6240,36 @@ BytecodeEmitter::emitSelfHostedForceInte
+@@ -6233,34 +6243,36 @@ BytecodeEmitter::emitSelfHostedForceInte
      if (!emit1(JSOP_UNDEFINED))
          return false;
      return true;
@@ -242,7 +242,7 @@ diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitt
          return false;
  
      ParseNode* valNode = idNode->pn_next;
-@@ -6268,45 +6280,45 @@ BytecodeEmitter::emitSelfHostedDefineDat
+@@ -6271,45 +6283,45 @@ BytecodeEmitter::emitSelfHostedDefineDat
      // but that's fine because the self-hosted code doesn't use the return
      // value.
      return emit1(JSOP_INITELEM);
@@ -296,7 +296,7 @@ diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitt
  
      if (!emitTree(idNode))
          return false;
-@@ -6325,21 +6337,21 @@ BytecodeEmitter::isRestParameter(ParseNo
+@@ -6328,21 +6340,21 @@ BytecodeEmitter::isRestParameter(ParseNo
  
      FunctionBox* funbox = sc->asFunctionBox();
      RootedFunction fun(cx, funbox->function());
@@ -322,7 +322,7 @@ diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitt
      JSAtom* name = pn->name();
      JSAtom* name = pn->name();
      Maybe<NameLocation> paramLoc = locationOfNameBoundInFunctionScope(name);
      Maybe<NameLocation> paramLoc = locationOfNameBoundInFunctionScope(name);
      if (paramLoc && lookupName(name) == *paramLoc) {
      if (paramLoc && lookupName(name) == *paramLoc) {
-@@ -6459,111 +6471,32 @@ BytecodeEmitter::emitPipeline(ParseNode*
+@@ -6462,111 +6474,32 @@ BytecodeEmitter::emitPipeline(ParseNode*
  
          checkTypeSet(JSOP_CALL);
      } while ((callee = callee->pn_next));
@@ -439,7 +439,7 @@ diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitt
              // parameter:
              //
              //   function f(...args) {
-@@ -6593,25 +6526,110 @@ BytecodeEmitter::emitCallOrNew(ParseNode
+@@ -6596,25 +6529,110 @@ BytecodeEmitter::emitCallOrNew(ParseNode
  
          if (!emitArray(args, argc))
              return false;
@@ -558,7 +558,7 @@ diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitt
              if (!emitCall(JSOP_CALL_IGNORES_RV, argc, pn))
                  return false;
              checkTypeSet(JSOP_CALL_IGNORES_RV);
-@@ -7184,17 +7202,17 @@ BytecodeEmitter::emitArray(ParseNode* pn
+@@ -7187,17 +7205,17 @@ BytecodeEmitter::emitArray(ParseNode* pn
                  return false;
          } else {
              ParseNode* expr;
@@ -580,7 +580,7 @@ diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitt
 diff --git a/js/src/frontend/BytecodeEmitter.h b/js/src/frontend/BytecodeEmitter.h
 --- a/js/src/frontend/BytecodeEmitter.h
 +++ b/js/src/frontend/BytecodeEmitter.h
-@@ -801,16 +801,17 @@ struct MOZ_STACK_CLASS BytecodeEmitter
+@@ -806,16 +806,17 @@ struct MOZ_STACK_CLASS BytecodeEmitter
  
      MOZ_NEVER_INLINE MOZ_MUST_USE bool emitIncOrDec(ParseNode* pn);
  
@@ -762,7 +762,7 @@ diff --git a/js/src/frontend/FullParseHandler.h b/js/src/frontend/FullParseHandl
  
      ParseNode* newClass(ParseNode* name, ParseNode* heritage, ParseNode* methodBlock,
                          const TokenPos& pos)
-@@ -729,23 +732,18 @@ class FullParseHandler
+@@ -730,23 +733,18 @@ class FullParseHandler
      ParseNode* newModule(const TokenPos& pos) {
      ParseNode* newModule(const TokenPos& pos) {
          return new_<CodeNode>(ParseNodeKind::Module, JSOP_NOP, pos);
          return new_<CodeNode>(ParseNodeKind::Module, JSOP_NOP, pos);
      }
      }
@@ -909,7 +909,7 @@ diff --git a/js/src/frontend/ParseNode.h b/js/src/frontend/ParseNode.h
      F(TemplateStringList) \
      F(TemplateString) \
      F(TaggedTemplate) \
-@@ -365,34 +366,34 @@ IsTypeofKind(ParseNodeKind kind)
+@@ -366,34 +367,34 @@ IsTypeofKind(ParseNodeKind kind)
   * Not,
   * BitNot
   * TypeOfName, unary    pn_kid: UNARY expr
@@ -950,7 +950,7 @@ diff --git a/js/src/frontend/ParseNode.h b/js/src/frontend/ParseNode.h
   *                          destructuring lhs
   *                          pn_left: property id, pn_right: value
   * Shorthand binary     Same fields as Colon. This is used for object
-@@ -402,30 +403,31 @@ IsTypeofKind(ParseNodeKind kind)
+@@ -403,30 +404,31 @@ IsTypeofKind(ParseNodeKind kind)
   * Name,    name        pn_atom: name, string, or object atom
   * String               pn_op: JSOP_GETNAME, JSOP_STRING, or JSOP_OBJECT
   *                          If JSOP_GETNAME, pn_op may be JSOP_*ARG or JSOP_*VAR
@@ -988,7 +988,7 @@ diff --git a/js/src/frontend/ParseNode.h b/js/src/frontend/ParseNode.h
 diff --git a/js/src/frontend/Parser.cpp b/js/src/frontend/Parser.cpp
 --- a/js/src/frontend/Parser.cpp
 +++ b/js/src/frontend/Parser.cpp
-@@ -3364,34 +3364,34 @@ GeneralParser<ParseHandler, CharT>::addE
+@@ -3392,34 +3392,34 @@ GeneralParser<ParseHandler, CharT>::addE
          return false;
      }
  
@@ -1027,7 +1027,7 @@ diff --git a/js/src/frontend/Parser.cpp b/js/src/frontend/Parser.cpp
  GeneralParser<ParseHandler, CharT>::templateLiteral(YieldHandling yieldHandling)
  GeneralParser<ParseHandler, CharT>::templateLiteral(YieldHandling yieldHandling)
  {
  {
      Node pn = noSubstitutionUntaggedTemplate();
      Node pn = noSubstitutionUntaggedTemplate();
-@@ -8599,68 +8599,71 @@ GeneralParser<ParseHandler, CharT>::assi
+@@ -8637,68 +8637,71 @@ GeneralParser<ParseHandler, CharT>::assi
              errorAt(pc->lastAwaitOffset, JSMSG_AWAIT_IN_DEFAULT);
              errorAt(pc->lastAwaitOffset, JSMSG_AWAIT_IN_DEFAULT);
              return null();
              return null();
          }
          }
@@ -1114,7 +1114,7 @@ diff --git a/js/src/frontend/Parser.cpp b/js/src/frontend/Parser.cpp
      if (!pc->sc()->allowSuperProperty())
      if (!pc->sc()->allowSuperProperty())
          return false;
          return false;
  
  
-@@ -8696,30 +8699,37 @@ GeneralParser<ParseHandler, CharT>::memb
+@@ -8734,30 +8737,37 @@ GeneralParser<ParseHandler, CharT>::memb
              // Gotten by tryNewTarget
              tt = anyChars.currentToken().type;
              Node ctorExpr = memberExpr(yieldHandling, TripledotProhibited, tt,
@@ -1161,7 +1161,7 @@ diff --git a/js/src/frontend/Parser.cpp b/js/src/frontend/Parser.cpp
          lhs = handler.newSuperBase(thisName, pos());
          if (!lhs)
              return null();
-@@ -8782,25 +8792,26 @@ GeneralParser<ParseHandler, CharT>::memb
+@@ -8820,25 +8830,26 @@ GeneralParser<ParseHandler, CharT>::memb
                      return null();
                  }
  
@@ -1193,7 +1193,7 @@ diff --git a/js/src/frontend/Parser.cpp b/js/src/frontend/Parser.cpp
                  Node thisName = newThisName();
                  if (!thisName)
                      return null();
-@@ -8809,23 +8820,16 @@ GeneralParser<ParseHandler, CharT>::memb
+@@ -8847,23 +8858,16 @@ GeneralParser<ParseHandler, CharT>::memb
                  if (!nextMember)
                      return null();
              } else {
@@ -1217,7 +1217,7 @@ diff --git a/js/src/frontend/Parser.cpp b/js/src/frontend/Parser.cpp
                      if (prop == context->names().apply) {
                      if (prop == context->names().apply) {
                          op = JSOP_FUNAPPLY;
                          op = JSOP_FUNAPPLY;
                          if (pc->isFunctionBox())
                          if (pc->isFunctionBox())
-@@ -8857,34 +8861,44 @@ GeneralParser<ParseHandler, CharT>::memb
+@@ -8895,34 +8899,44 @@ GeneralParser<ParseHandler, CharT>::memb
                          // If we're in a method, mark the method as requiring
                          // support for 'super', since direct eval code can use
                          // it. (If we're not in a method, that's fine, so
@@ -1270,7 +1270,7 @@ diff --git a/js/src/frontend/Parser.cpp b/js/src/frontend/Parser.cpp
 diff --git a/js/src/frontend/Parser.h b/js/src/frontend/Parser.h
 --- a/js/src/frontend/Parser.h
 +++ b/js/src/frontend/Parser.h
-@@ -1148,17 +1148,17 @@ class MOZ_STACK_CLASS GeneralParser
+@@ -1150,17 +1150,17 @@ class MOZ_STACK_CLASS GeneralParser
      enum FunctionBodyType { StatementListBody, ExpressionBody };
      Node functionBody(InHandling inHandling, YieldHandling yieldHandling, FunctionSyntaxKind kind,
                        FunctionBodyType type);
@@ -1317,7 +1317,7 @@ diff --git a/js/src/frontend/SyntaxParseHandler.h b/js/src/frontend/SyntaxParseH
  
      Node newNewTarget(Node newHolder, Node targetHolder) { return NodeGeneric; }
      Node newPosHolder(const TokenPos& pos) { return NodeGeneric; }
-@@ -419,17 +421,17 @@ class SyntaxParseHandler
+@@ -423,17 +425,17 @@ class SyntaxParseHandler
          MOZ_ASSERT(list == NodeGeneric ||
                     list == NodeUnparenthesizedArray ||
                     list == NodeUnparenthesizedObject ||
@@ -1339,7 +1339,7 @@ diff --git a/js/src/frontend/SyntaxParseHandler.h b/js/src/frontend/SyntaxParseH
 diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
 --- a/js/src/wasm/AsmJS.cpp
 +++ b/js/src/wasm/AsmJS.cpp
-@@ -452,32 +452,31 @@ ListLength(ParseNode* pn)
+@@ -433,32 +433,31 @@ ListLength(ParseNode* pn)
      MOZ_ASSERT(pn->isArity(PN_LIST));
      MOZ_ASSERT(pn->isArity(PN_LIST));
      return pn->pn_count;
      return pn->pn_count;
  }
  }
@@ -1375,7 +1375,7 @@ diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
      MOZ_ASSERT(pn->isKind(ParseNodeKind::Var) || pn->isKind(ParseNodeKind::Const));
      MOZ_ASSERT(pn->isKind(ParseNodeKind::Var) || pn->isKind(ParseNodeKind::Const));
      return ListHead(pn);
      return ListHead(pn);
  }
  }
-@@ -3557,19 +3556,21 @@ IsArrayViewCtorName(ModuleValidator& m, 
+@@ -2898,19 +2897,21 @@ IsArrayViewCtorName(ModuleValidator& m, 
          *type = Scalar::Float64;
      } else {
          return false;
@@ -1400,7 +1400,7 @@ diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
  
      return true;
  }
-@@ -3580,17 +3581,17 @@ CheckNewArrayView(ModuleValidator& m, Pr
+@@ -2921,17 +2922,17 @@ CheckNewArrayView(ModuleValidator& m, Pr
      PropertyName* globalName = m.globalArgumentName();
      if (!globalName)
          return m.fail(newExpr, "cannot create array view without an asm.js global parameter");
@@ -1419,7 +1419,7 @@ diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
  
          if (!IsUseOfName(base, globalName))
              return m.failName(base, "expecting '%s.*Array", globalName);
-@@ -3609,17 +3610,17 @@ CheckNewArrayView(ModuleValidator& m, Pr
+@@ -2950,17 +2951,17 @@ CheckNewArrayView(ModuleValidator& m, Pr
  
          if (global->which() != ModuleValidator::Global::ArrayViewCtor)
              return m.failName(ctorExpr, "%s must be an imported array view constructor", globalName);
@@ -1436,5 +1436,5 @@ diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
  }
  
  static bool
- IsSimdValidOperationType(SimdType type, SimdOperation op)
- {
+ CheckGlobalMathImport(ModuleValidator& m, ParseNode* initNode, PropertyName* varName,
+                       PropertyName* field)

+ 3447 - 0
frg/work-js/mozilla-release/patches/1416723-1-63a1.patch

@@ -0,0 +1,3447 @@
+# HG changeset patch
+# User Benjamin Bouvier <benj@benj.me>
+# Date 1532453646 -7200
+# Node ID 3d7a2ff6821ff741268ba2f4fb27d682002bc788
+# Parent  88f0905f8d8c4fde409cb78fc054e72b39a62f20
+Bug 1416723: Move SIMD code generation to masm methods; r=lth
+
+diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
++++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+@@ -24,16 +24,17 @@
+ using namespace js;
+ using namespace js::jit;
+ 
+ using mozilla::Abs;
+ using mozilla::BitwiseCast;
+ using mozilla::DebugOnly;
+ using mozilla::FloatingPoint;
+ using mozilla::FloorLog2;
++using mozilla::Maybe;
+ using mozilla::NegativeInfinity;
+ using mozilla::SpecificNaN;
+ 
+ using JS::GenericNaN;
+ 
+ namespace js {
+ namespace jit {
+ 
+@@ -2510,524 +2511,217 @@ CodeGenerator::visitInt32x4ToFloat32x4(L
+ }
+ 
+ void
+ CodeGenerator::visitFloat32x4ToInt32x4(LFloat32x4ToInt32x4* ins)
+ {
+     FloatRegister in = ToFloatRegister(ins->input());
+     FloatRegister out = ToFloatRegister(ins->output());
+     Register temp = ToRegister(ins->temp());
+-
+-    masm.convertFloat32x4ToInt32x4(in, out);
+-
+     auto* ool = new(alloc()) OutOfLineSimdFloatToIntCheck(temp, in, ins,
+                                                           ins->mir()->bytecodeOffset());
+     addOutOfLineCode(ool, ins->mir());
+-
+-    static const SimdConstant InvalidResult = SimdConstant::SplatX4(int32_t(-2147483648));
+-
+-    ScratchSimd128Scope scratch(masm);
+-    masm.loadConstantSimd128Int(InvalidResult, scratch);
+-    masm.packedEqualInt32x4(Operand(out), scratch);
+-    // TODO (bug 1156228): If we have SSE4.1, we can use PTEST here instead of
+-    // the two following instructions.
+-    masm.vmovmskps(scratch, temp);
+-    masm.cmp32(temp, Imm32(0));
+-    masm.j(Assembler::NotEqual, ool->entry());
+-
+-    masm.bind(ool->rejoin());
++    masm.checkedConvertFloat32x4ToInt32x4(in, out, temp, ool->entry(), ool->rejoin());
+ }
+ 
+ void
+-CodeGeneratorX86Shared::visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck *ool)
++CodeGeneratorX86Shared::visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck* ool)
+ {
+-    static const SimdConstant Int32MaxX4 = SimdConstant::SplatX4(2147483647.f);
+-    static const SimdConstant Int32MinX4 = SimdConstant::SplatX4(-2147483648.f);
+-
+     Label onConversionError;
+-
+-    FloatRegister input = ool->input();
+-    Register temp = ool->temp();
+-
+-    ScratchSimd128Scope scratch(masm);
+-    masm.loadConstantSimd128Float(Int32MinX4, scratch);
+-    masm.vcmpleps(Operand(input), scratch, scratch);
+-    masm.vmovmskps(scratch, temp);
+-    masm.cmp32(temp, Imm32(15));
+-    masm.j(Assembler::NotEqual, &onConversionError);
+-
+-    masm.loadConstantSimd128Float(Int32MaxX4, scratch);
+-    masm.vcmpleps(Operand(input), scratch, scratch);
+-    masm.vmovmskps(scratch, temp);
+-    masm.cmp32(temp, Imm32(0));
+-    masm.j(Assembler::NotEqual, &onConversionError);
+-
+-    masm.jump(ool->rejoin());
+-
++    masm.oolConvertFloat32x4ToInt32x4(ool->input(), ool->temp(), ool->rejoin(), &onConversionError);
+     masm.bind(&onConversionError);
+     if (gen->compilingWasm())
+         masm.wasmTrap(wasm::Trap::ImpreciseSimdConversion, ool->bytecodeOffset());
+     else
+         bailout(ool->ins()->snapshot());
+ }
+ 
+ // Convert Float32x4 to Uint32x4.
+-//
+ // If any input lane value is out of range or NaN, bail out.
+ void
+ CodeGenerator::visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4* ins)
+ {
+-    const MSimdConvert* mir = ins->mir();
+     FloatRegister in = ToFloatRegister(ins->input());
+     FloatRegister out = ToFloatRegister(ins->output());
+     Register temp = ToRegister(ins->tempR());
+     FloatRegister tempF = ToFloatRegister(ins->tempF());
+ 
+-    // Classify lane values into 4 disjoint classes:
+-    //
+-    //   N-lanes:             in <= -1.0
+-    //   A-lanes:      -1.0 < in <= 0x0.ffffffp31
+-    //   B-lanes: 0x1.0p31 <= in <= 0x0.ffffffp32
+-    //   V-lanes: 0x1.0p32 <= in, or isnan(in)
+-    //
+-    // We need to bail out to throw a RangeError if we see any N-lanes or
+-    // V-lanes.
+-    //
+-    // For A-lanes and B-lanes, we make two float -> int32 conversions:
+-    //
+-    //   A = cvttps2dq(in)
+-    //   B = cvttps2dq(in - 0x1.0p31f)
+-    //
+-    // Note that the subtraction for the B computation is exact for B-lanes.
+-    // There is no rounding, so B is the low 31 bits of the correctly converted
+-    // result.
+-    //
+-    // The cvttps2dq instruction produces 0x80000000 when the input is NaN or
+-    // out of range for a signed int32_t. This conveniently provides the missing
+-    // high bit for B, so the desired result is A for A-lanes and A|B for
+-    // B-lanes.
+-
+-    ScratchSimd128Scope scratch(masm);
+-
+-    // TODO: If the majority of lanes are A-lanes, it could be faster to compute
+-    // A first, use vmovmskps to check for any non-A-lanes and handle them in
+-    // ool code. OTOH, we we're wrong about the lane distribution, that would be
+-    // slower.
+-
+-    // Compute B in |scratch|.
+-    static const float Adjust = 0x80000000; // 0x1.0p31f for the benefit of MSVC.
+-    static const SimdConstant Bias = SimdConstant::SplatX4(-Adjust);
+-    masm.loadConstantSimd128Float(Bias, scratch);
+-    masm.packedAddFloat32(Operand(in), scratch);
+-    masm.convertFloat32x4ToInt32x4(scratch, scratch);
+-
+-    // Compute A in |out|. This is the last time we use |in| and the first time
+-    // we use |out|, so we can tolerate if they are the same register.
+-    masm.convertFloat32x4ToInt32x4(in, out);
+-
+-    // We can identify A-lanes by the sign bits in A: Any A-lanes will be
+-    // positive in A, and N, B, and V-lanes will be 0x80000000 in A. Compute a
+-    // mask of non-A-lanes into |tempF|.
+-    masm.zeroSimd128Float(tempF);
+-    masm.packedGreaterThanInt32x4(Operand(out), tempF);
+-
+-    // Clear the A-lanes in B.
+-    masm.bitwiseAndSimd128(Operand(tempF), scratch);
+-
+-    // Compute the final result: A for A-lanes, A|B for B-lanes.
+-    masm.bitwiseOrSimd128(Operand(scratch), out);
+-
+-    // We still need to filter out the V-lanes. They would show up as 0x80000000
+-    // in both A and B. Since we cleared the valid A-lanes in B, the V-lanes are
+-    // the remaining negative lanes in B.
+-    masm.vmovmskps(scratch, temp);
+-    masm.cmp32(temp, Imm32(0));
+-
+-    if (gen->compilingWasm()) {
+-        Label ok;
+-        masm.j(Assembler::Equal, &ok);
+-        masm.wasmTrap(wasm::Trap::ImpreciseSimdConversion, mir->bytecodeOffset());
+-        masm.bind(&ok);
+-    } else {
+-        bailoutIf(Assembler::NotEqual, ins->snapshot());
+-    }
++    Label failed;
++    masm.checkedConvertFloat32x4ToUint32x4(in, out, temp, tempF, &failed);
++
++    Label ok;
++    masm.jump(&ok);
++    masm.bind(&failed);
++    if (gen->compilingWasm())
++        masm.wasmTrap(wasm::Trap::ImpreciseSimdConversion, ins->mir()->bytecodeOffset());
++    else
++        bailout(ins->snapshot());
++    masm.bind(&ok);
+ }
+ 
+ void
+ CodeGenerator::visitSimdValueInt32x4(LSimdValueInt32x4* ins)
+ {
+     MOZ_ASSERT(ins->mir()->type() == MIRType::Int32x4 || ins->mir()->type() == MIRType::Bool32x4);
+-
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    if (AssemblerX86Shared::HasSSE41()) {
+-        masm.vmovd(ToRegister(ins->getOperand(0)), output);
+-        for (size_t i = 1; i < 4; ++i) {
+-            Register r = ToRegister(ins->getOperand(i));
+-            masm.vpinsrd(i, r, output, output);
+-        }
+-        return;
+-    }
+-
+-    masm.reserveStack(Simd128DataSize);
+-    for (size_t i = 0; i < 4; ++i) {
+-        Register r = ToRegister(ins->getOperand(i));
+-        masm.store32(r, Address(StackPointer, i * sizeof(int32_t)));
+-    }
+-    masm.loadAlignedSimd128Int(Address(StackPointer, 0), output);
+-    masm.freeStack(Simd128DataSize);
++    masm.createInt32x4(ToRegister(ins->getOperand(0)),
++                       ToRegister(ins->getOperand(1)),
++                       ToRegister(ins->getOperand(2)),
++                       ToRegister(ins->getOperand(3)),
++                       ToFloatRegister(ins->output())
++                      );
+ }
+ 
+ void
+ CodeGenerator::visitSimdValueFloat32x4(LSimdValueFloat32x4* ins)
+ {
+     MOZ_ASSERT(ins->mir()->type() == MIRType::Float32x4);
+ 
+     FloatRegister r0 = ToFloatRegister(ins->getOperand(0));
+     FloatRegister r1 = ToFloatRegister(ins->getOperand(1));
+     FloatRegister r2 = ToFloatRegister(ins->getOperand(2));
+     FloatRegister r3 = ToFloatRegister(ins->getOperand(3));
+     FloatRegister tmp = ToFloatRegister(ins->getTemp(0));
+     FloatRegister output = ToFloatRegister(ins->output());
+ 
+-    FloatRegister r0Copy = masm.reusedInputFloat32x4(r0, output);
+-    FloatRegister r1Copy = masm.reusedInputFloat32x4(r1, tmp);
+-
+-    masm.vunpcklps(r3, r1Copy, tmp);
+-    masm.vunpcklps(r2, r0Copy, output);
+-    masm.vunpcklps(tmp, output, output);
++    masm.createFloat32x4(r0, r1, r2, r3, tmp, output);
+ }
+ 
+ void
+ CodeGenerator::visitSimdSplatX16(LSimdSplatX16* ins)
+ {
+     MOZ_ASSERT(SimdTypeToLength(ins->mir()->type()) == 16);
+     Register input = ToRegister(ins->getOperand(0));
+     FloatRegister output = ToFloatRegister(ins->output());
+-    masm.vmovd(input, output);
+-    if (AssemblerX86Shared::HasSSSE3()) {
+-        masm.zeroSimd128Int(ScratchSimd128Reg);
+-        masm.vpshufb(ScratchSimd128Reg, output, output);
+-    } else {
+-        // Use two shifts to duplicate the low 8 bits into the low 16 bits.
+-        masm.vpsllw(Imm32(8), output, output);
+-        masm.vmovdqa(output, ScratchSimd128Reg);
+-        masm.vpsrlw(Imm32(8), ScratchSimd128Reg, ScratchSimd128Reg);
+-        masm.vpor(ScratchSimd128Reg, output, output);
+-        // Then do an X8 splat.
+-        masm.vpshuflw(0, output, output);
+-        masm.vpshufd(0, output, output);
+-    }
++    masm.splatX16(input, output);
+ }
+ 
+ void
+ CodeGenerator::visitSimdSplatX8(LSimdSplatX8* ins)
+ {
+     MOZ_ASSERT(SimdTypeToLength(ins->mir()->type()) == 8);
+     Register input = ToRegister(ins->getOperand(0));
+     FloatRegister output = ToFloatRegister(ins->output());
+-    masm.vmovd(input, output);
+-    masm.vpshuflw(0, output, output);
+-    masm.vpshufd(0, output, output);
++    masm.splatX8(input, output);
+ }
+ 
+ void
+ CodeGenerator::visitSimdSplatX4(LSimdSplatX4* ins)
+ {
+     FloatRegister output = ToFloatRegister(ins->output());
+-
+     MSimdSplat* mir = ins->mir();
+     MOZ_ASSERT(IsSimdType(mir->type()));
+     JS_STATIC_ASSERT(sizeof(float) == sizeof(int32_t));
+-
+-    if (mir->type() == MIRType::Float32x4) {
+-        FloatRegister r = ToFloatRegister(ins->getOperand(0));
+-        FloatRegister rCopy = masm.reusedInputFloat32x4(r, output);
+-        masm.vshufps(0, rCopy, rCopy, output);
+-    } else {
+-        Register r = ToRegister(ins->getOperand(0));
+-        masm.vmovd(r, output);
+-        masm.vpshufd(0, output, output);
+-    }
++    if (mir->type() == MIRType::Float32x4)
++        masm.splatX4(ToFloatRegister(ins->getOperand(0)), output);
++    else
++        masm.splatX4(ToRegister(ins->getOperand(0)), output);
+ }
+ 
+ void
+ CodeGenerator::visitSimdReinterpretCast(LSimdReinterpretCast* ins)
+ {
+     FloatRegister input = ToFloatRegister(ins->input());
+     FloatRegister output = ToFloatRegister(ins->output());
+-
+-    if (input.aliases(output))
+-        return;
+-
+-    if (IsIntegerSimdType(ins->mir()->type()))
+-        masm.vmovdqa(input, output);
+-    else
+-        masm.vmovaps(input, output);
+-}
+-
+-// Extract an integer lane from the 32x4 vector register |input| and place it in
+-// |output|.
+-void
+-CodeGeneratorX86Shared::emitSimdExtractLane32x4(FloatRegister input, Register output, unsigned lane)
+-{
+-    if (lane == 0) {
+-        // The value we want to extract is in the low double-word
+-        masm.moveLowInt32(input, output);
+-    } else if (AssemblerX86Shared::HasSSE41()) {
+-        masm.vpextrd(lane, input, output);
+-    } else {
+-        uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
+-        masm.shuffleInt32(mask, input, ScratchSimd128Reg);
+-        masm.moveLowInt32(ScratchSimd128Reg, output);
+-    }
+-}
+-
+-// Extract an integer lane from the 16x8 vector register |input|, sign- or
+-// zero-extend to 32 bits and place the result in |output|.
+-void
+-CodeGeneratorX86Shared::emitSimdExtractLane16x8(FloatRegister input, Register output,
+-                                                unsigned lane, SimdSign signedness)
+-{
+-    // Unlike pextrd and pextrb, this is available in SSE2.
+-    masm.vpextrw(lane, input, output);
+-
+-    if (signedness == SimdSign::Signed)
+-        masm.movswl(output, output);
+-}
+-
+-// Extract an integer lane from the 8x16 vector register |input|, sign- or
+-// zero-extend to 32 bits and place the result in |output|.
+-void
+-CodeGeneratorX86Shared::emitSimdExtractLane8x16(FloatRegister input, Register output,
+-                                                unsigned lane, SimdSign signedness)
+-{
+-    if (AssemblerX86Shared::HasSSE41()) {
+-        masm.vpextrb(lane, input, output);
+-        // vpextrb clears the high bits, so no further extension required.
+-        if (signedness == SimdSign::Unsigned)
+-            signedness = SimdSign::NotApplicable;
+-    } else {
+-        // Extract the relevant 16 bits containing our lane, then shift the
+-        // right 8 bits into place.
+-        emitSimdExtractLane16x8(input, output, lane / 2, SimdSign::Unsigned);
+-        if (lane % 2) {
+-            masm.shrl(Imm32(8), output);
+-            // The shrl handles the zero-extension. Don't repeat it.
+-            if (signedness == SimdSign::Unsigned)
+-                signedness = SimdSign::NotApplicable;
+-        }
+-    }
+-
+-    // We have the right low 8 bits in |output|, but we may need to fix the high
+-    // bits. Note that this requires |output| to be one of the %eax-%edx
+-    // registers.
+-    switch (signedness) {
+-      case SimdSign::Signed:
+-        masm.movsbl(output, output);
+-        break;
+-      case SimdSign::Unsigned:
+-        masm.movzbl(output, output);
+-        break;
+-      case SimdSign::NotApplicable:
+-        // No adjustment needed.
+-        break;
+-    }
++    bool isIntLaneType = IsIntegerSimdType(ins->mir()->type());
++    masm.reinterpretSimd(isIntLaneType, input, output);
+ }
+ 
+ void
+ CodeGenerator::visitSimdExtractElementB(LSimdExtractElementB* ins)
+ {
+     FloatRegister input = ToFloatRegister(ins->input());
+     Register output = ToRegister(ins->output());
+     MSimdExtractElement* mir = ins->mir();
+-    unsigned length = SimdTypeToLength(mir->specialization());
+-
+-    switch (length) {
+-      case 4:
+-        emitSimdExtractLane32x4(input, output, mir->lane());
+-        break;
+-      case 8:
+-        // Get a lane, don't bother fixing the high bits since we'll mask below.
+-        emitSimdExtractLane16x8(input, output, mir->lane(), SimdSign::NotApplicable);
+-        break;
+-      case 16:
+-        emitSimdExtractLane8x16(input, output, mir->lane(), SimdSign::NotApplicable);
+-        break;
+-      default:
+-        MOZ_CRASH("Unhandled SIMD length");
+-    }
+-
+-    // We need to generate a 0/1 value. We have 0/-1 and possibly dirty high bits.
+-    masm.and32(Imm32(1), output);
++    unsigned numLanes = SimdTypeToLength(mir->specialization());
++    masm.extractLaneSimdBool(input, output, numLanes, mir->lane());
+ }
+ 
+ void
+ CodeGenerator::visitSimdExtractElementI(LSimdExtractElementI* ins)
+ {
+     FloatRegister input = ToFloatRegister(ins->input());
+     Register output = ToRegister(ins->output());
+     MSimdExtractElement* mir = ins->mir();
+-    unsigned length = SimdTypeToLength(mir->specialization());
+-
+-    switch (length) {
++    unsigned numLanes = SimdTypeToLength(mir->specialization());
++    switch (numLanes) {
+       case 4:
+-        emitSimdExtractLane32x4(input, output, mir->lane());
++        masm.extractLaneInt32x4(input, output, mir->lane());
+         break;
+       case 8:
+-        emitSimdExtractLane16x8(input, output, mir->lane(), mir->signedness());
++        masm.extractLaneInt16x8(input, output, mir->lane(), mir->signedness());
+         break;
+       case 16:
+-        emitSimdExtractLane8x16(input, output, mir->lane(), mir->signedness());
++        masm.extractLaneInt8x16(input, output, mir->lane(), mir->signedness());
+         break;
+       default:
+         MOZ_CRASH("Unhandled SIMD length");
+     }
+ }
+ 
+ void
+ CodeGenerator::visitSimdExtractElementU2D(LSimdExtractElementU2D* ins)
+ {
+     FloatRegister input = ToFloatRegister(ins->input());
+     FloatRegister output = ToFloatRegister(ins->output());
+     Register temp = ToRegister(ins->temp());
+     MSimdExtractElement* mir = ins->mir();
+     MOZ_ASSERT(mir->specialization() == MIRType::Int32x4);
+-    emitSimdExtractLane32x4(input, temp, mir->lane());
++    masm.extractLaneInt32x4(input, temp, mir->lane());
+     masm.convertUInt32ToDouble(temp, output);
+ }
+ 
+ void
+ CodeGenerator::visitSimdExtractElementF(LSimdExtractElementF* ins)
+ {
+     FloatRegister input = ToFloatRegister(ins->input());
+     FloatRegister output = ToFloatRegister(ins->output());
+-
+     unsigned lane = ins->mir()->lane();
+-    if (lane == 0) {
+-        // The value we want to extract is in the low double-word
+-        if (input != output)
+-            masm.moveFloat32(input, output);
+-    } else if (lane == 2) {
+-        masm.moveHighPairToLowPairFloat32(input, output);
+-    } else {
+-        uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
+-        masm.shuffleFloat32(mask, input, output);
+-    }
+-    // NaNs contained within SIMD values are not enforced to be canonical, so
+-    // when we extract an element into a "regular" scalar JS value, we have to
+-    // canonicalize. In wasm code, we can skip this, as wasm only has to
+-    // canonicalize NaNs at FFI boundaries.
+-    if (!gen->compilingWasm())
+-        masm.canonicalizeFloat(output);
++    bool canonicalize = !gen->compilingWasm();
++    masm.extractLaneFloat32x4(input, output, lane, canonicalize);
+ }
+ 
+ void
+ CodeGenerator::visitSimdInsertElementI(LSimdInsertElementI* ins)
+ {
+-    FloatRegister vector = ToFloatRegister(ins->vector());
++    FloatRegister input = ToFloatRegister(ins->vector());
+     Register value = ToRegister(ins->value());
+     FloatRegister output = ToFloatRegister(ins->output());
+-    MOZ_ASSERT(vector == output); // defineReuseInput(0)
+-
++    MOZ_ASSERT(input == output); // defineReuseInput(0)
+     unsigned lane = ins->lane();
+     unsigned length = ins->length();
+-
+-    if (length == 8) {
+-        // Available in SSE 2.
+-        masm.vpinsrw(lane, value, vector, output);
+-        return;
+-    }
+-
+-    // Note that, contrarily to float32x4, we cannot use vmovd if the inserted
+-    // value goes into the first component, as vmovd clears out the higher lanes
+-    // of the output.
+-    if (AssemblerX86Shared::HasSSE41()) {
+-        // TODO: Teach Lowering that we don't need defineReuseInput if we have AVX.
+-        switch (length) {
+-          case 4:
+-            masm.vpinsrd(lane, value, vector, output);
+-            return;
+-          case 16:
+-            masm.vpinsrb(lane, value, vector, output);
+-            return;
+-        }
+-    }
+-
+-    masm.reserveStack(Simd128DataSize);
+-    masm.storeAlignedSimd128Int(vector, Address(StackPointer, 0));
+-    switch (length) {
+-      case 4:
+-        masm.store32(value, Address(StackPointer, lane * sizeof(int32_t)));
+-        break;
+-      case 16:
+-        // Note that this requires `value` to be in one the registers where the
+-        // low 8 bits are addressible (%eax - %edx on x86, all of them on x86-64).
+-        masm.store8(value, Address(StackPointer, lane * sizeof(int8_t)));
+-        break;
+-      default:
+-        MOZ_CRASH("Unsupported SIMD length");
+-    }
+-    masm.loadAlignedSimd128Int(Address(StackPointer, 0), output);
+-    masm.freeStack(Simd128DataSize);
++    masm.insertLaneSimdInt(input, value, output, lane, length);
+ }
+ 
+ void
+ CodeGenerator::visitSimdInsertElementF(LSimdInsertElementF* ins)
+ {
+-    FloatRegister vector = ToFloatRegister(ins->vector());
++    FloatRegister input = ToFloatRegister(ins->vector());
+     FloatRegister value = ToFloatRegister(ins->value());
+     FloatRegister output = ToFloatRegister(ins->output());
+-    MOZ_ASSERT(vector == output); // defineReuseInput(0)
+-
+-    if (ins->lane() == 0) {
+-        // As both operands are registers, vmovss doesn't modify the upper bits
+-        // of the destination operand.
+-        if (value != output)
+-            masm.vmovss(value, vector, output);
+-        return;
+-    }
+-
+-    if (AssemblerX86Shared::HasSSE41()) {
+-        // The input value is in the low float32 of the 'value' FloatRegister.
+-        masm.vinsertps(masm.vinsertpsMask(0, ins->lane()), value, output, output);
+-        return;
+-    }
+-
+-    unsigned component = unsigned(ins->lane());
+-    masm.reserveStack(Simd128DataSize);
+-    masm.storeAlignedSimd128Float(vector, Address(StackPointer, 0));
+-    masm.storeFloat32(value, Address(StackPointer, component * sizeof(int32_t)));
+-    masm.loadAlignedSimd128Float(Address(StackPointer, 0), output);
+-    masm.freeStack(Simd128DataSize);
++    MOZ_ASSERT(input == output); // defineReuseInput(0)
++    masm.insertLaneFloat32x4(input, value, output, ins->lane());
+ }
+ 
+ void
+ CodeGenerator::visitSimdAllTrue(LSimdAllTrue* ins)
+ {
+     FloatRegister input = ToFloatRegister(ins->input());
+     Register output = ToRegister(ins->output());
+-
+-    // We know that the input lanes are boolean, so they are either 0 or -1.
+-    // The all-true vector has all 128 bits set, no matter the lane geometry.
+-    masm.vpmovmskb(input, output);
+-    masm.cmp32(output, Imm32(0xffff));
+-    masm.emitSet(Assembler::Zero, output);
++    masm.allTrueSimdBool(input, output);
+ }
+ 
+ void
+ CodeGenerator::visitSimdAnyTrue(LSimdAnyTrue* ins)
+ {
+     FloatRegister input = ToFloatRegister(ins->input());
+     Register output = ToRegister(ins->output());
+-
+-    masm.vpmovmskb(input, output);
+-    masm.cmp32(output, Imm32(0x0));
+-    masm.emitSet(Assembler::NonZero, output);
++    masm.anyTrueSimdBool(input, output);
+ }
+ 
++// XXX note for reviewer: this is SIMD.js only, no need to keep it for wasm.
+ template <class T, class Reg> void
+ CodeGeneratorX86Shared::visitSimdGeneralShuffle(LSimdGeneralShuffleBase* ins, Reg tempRegister)
+ {
+     MSimdGeneralShuffle* mir = ins->mir();
+     unsigned numVectors = mir->numVectors();
+ 
+     Register laneTemp = ToRegister(ins->temp());
+ 
+@@ -3076,16 +2770,17 @@ CodeGeneratorX86Shared::visitSimdGeneral
+         bailout(ins->snapshot());
+     }
+ 
+     masm.bind(&join);
+     masm.setFramePushed(masm.framePushed() + stackSpace);
+     masm.freeStack(stackSpace);
+ }
+ 
++// XXX SIMD.js only
+ void
+ CodeGenerator::visitSimdGeneralShuffleI(LSimdGeneralShuffleI* ins)
+ {
+     switch (ins->mir()->type()) {
+       case MIRType::Int8x16:
+         return visitSimdGeneralShuffle<int8_t, Register>(ins, ToRegister(ins->temp()));
+       case MIRType::Int16x8:
+         return visitSimdGeneralShuffle<int16_t, Register>(ins, ToRegister(ins->temp()));
+@@ -3106,628 +2801,160 @@ void
+ CodeGenerator::visitSimdSwizzleI(LSimdSwizzleI* ins)
+ {
+     FloatRegister input = ToFloatRegister(ins->input());
+     FloatRegister output = ToFloatRegister(ins->output());
+     const unsigned numLanes = ins->numLanes();
+ 
+     switch (numLanes) {
+         case 4: {
+-            uint32_t x = ins->lane(0);
+-            uint32_t y = ins->lane(1);
+-            uint32_t z = ins->lane(2);
+-            uint32_t w = ins->lane(3);
+-
+-            uint32_t mask = MacroAssembler::ComputeShuffleMask(x, y, z, w);
+-            masm.shuffleInt32(mask, input, output);
++            unsigned lanes[4];
++            for (unsigned i = 0; i < 4; i++)
++                lanes[i] = ins->lane(i);
++            masm.swizzleInt32x4(input, output, lanes);
+             return;
+         }
+     }
+ 
+     // In the general case, use pshufb if it is available. Convert to a
+     // byte-wise swizzle.
+     const unsigned bytesPerLane = 16 / numLanes;
+-    int8_t bLane[16];
++    int8_t lanes[16];
+     for (unsigned i = 0; i < numLanes; i++) {
+-        for (unsigned b = 0; b < bytesPerLane; b++) {
+-            bLane[i * bytesPerLane + b] = ins->lane(i) * bytesPerLane + b;
+-        }
++        for (unsigned b = 0; b < bytesPerLane; b++)
++            lanes[i * bytesPerLane + b] = ins->lane(i) * bytesPerLane + b;
+     }
+ 
+-    if (AssemblerX86Shared::HasSSSE3()) {
+-        ScratchSimd128Scope scratch(masm);
+-        masm.loadConstantSimd128Int(SimdConstant::CreateX16(bLane), scratch);
+-        FloatRegister inputCopy = masm.reusedInputInt32x4(input, output);
+-        masm.vpshufb(scratch, inputCopy, output);
+-        return;
+-    }
+-
+-    // Worst-case fallback for pre-SSSE3 machines. Bounce through memory.
+-    Register temp = ToRegister(ins->getTemp(0));
+-    masm.reserveStack(2 * Simd128DataSize);
+-    masm.storeAlignedSimd128Int(input, Address(StackPointer, Simd128DataSize));
+-    for (unsigned i = 0; i < 16; i++) {
+-        masm.load8ZeroExtend(Address(StackPointer, Simd128DataSize + bLane[i]), temp);
+-        masm.store8(temp, Address(StackPointer, i));
+-    }
+-    masm.loadAlignedSimd128Int(Address(StackPointer, 0), output);
+-    masm.freeStack(2 * Simd128DataSize);
++    Maybe<Register> maybeTemp;
++    if (!ins->getTemp(0)->isBogusTemp())
++        maybeTemp.emplace(ToRegister(ins->getTemp(0)));
++
++    masm.swizzleInt8x16(input, output, maybeTemp, lanes);
+ }
+ 
+ void
+ CodeGenerator::visitSimdSwizzleF(LSimdSwizzleF* ins)
+ {
+     FloatRegister input = ToFloatRegister(ins->input());
+     FloatRegister output = ToFloatRegister(ins->output());
+     MOZ_ASSERT(ins->numLanes() == 4);
+-
+-    uint32_t x = ins->lane(0);
+-    uint32_t y = ins->lane(1);
+-    uint32_t z = ins->lane(2);
+-    uint32_t w = ins->lane(3);
+-
+-    if (AssemblerX86Shared::HasSSE3()) {
+-        if (ins->lanesMatch(0, 0, 2, 2)) {
+-            masm.vmovsldup(input, output);
+-            return;
+-        }
+-        if (ins->lanesMatch(1, 1, 3, 3)) {
+-            masm.vmovshdup(input, output);
+-            return;
+-        }
+-    }
+-
+-    // TODO Here and below, arch specific lowering could identify this pattern
+-    // and use defineReuseInput to avoid this move (bug 1084404)
+-    if (ins->lanesMatch(2, 3, 2, 3)) {
+-        FloatRegister inputCopy = masm.reusedInputFloat32x4(input, output);
+-        masm.vmovhlps(input, inputCopy, output);
+-        return;
+-    }
+-
+-    if (ins->lanesMatch(0, 1, 0, 1)) {
+-        if (AssemblerX86Shared::HasSSE3() && !AssemblerX86Shared::HasAVX()) {
+-            masm.vmovddup(input, output);
+-            return;
+-        }
+-        FloatRegister inputCopy = masm.reusedInputFloat32x4(input, output);
+-        masm.vmovlhps(input, inputCopy, output);
+-        return;
+-    }
+-
+-    if (ins->lanesMatch(0, 0, 1, 1)) {
+-        FloatRegister inputCopy = masm.reusedInputFloat32x4(input, output);
+-        masm.vunpcklps(input, inputCopy, output);
+-        return;
+-    }
+-
+-    if (ins->lanesMatch(2, 2, 3, 3)) {
+-        FloatRegister inputCopy = masm.reusedInputFloat32x4(input, output);
+-        masm.vunpckhps(input, inputCopy, output);
+-        return;
+-    }
+-
+-    uint32_t mask = MacroAssembler::ComputeShuffleMask(x, y, z, w);
+-    masm.shuffleFloat32(mask, input, output);
++    unsigned lanes[4];
++    for (unsigned i = 0; i < 4; i++)
++        lanes[i] = ins->lane(i);
++    masm.swizzleFloat32x4(input, output, lanes);
+ }
+ 
+ void
+ CodeGenerator::visitSimdShuffle(LSimdShuffle* ins)
+ {
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     FloatRegister rhs = ToFloatRegister(ins->rhs());
+     FloatRegister output = ToFloatRegister(ins->output());
+     const unsigned numLanes = ins->numLanes();
+     const unsigned bytesPerLane = 16 / numLanes;
+ 
+     // Convert the shuffle to a byte-wise shuffle.
+-    uint8_t bLane[16];
++    uint8_t lanes[16];
+     for (unsigned i = 0; i < numLanes; i++) {
+         for (unsigned b = 0; b < bytesPerLane; b++) {
+-            bLane[i * bytesPerLane + b] = ins->lane(i) * bytesPerLane + b;
++            lanes[i * bytesPerLane + b] = ins->lane(i) * bytesPerLane + b;
+         }
+     }
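++    // After the same byte expansion, indices 0-15 select bytes from lhs
++    // and 16-31 select bytes from rhs.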
+ 
+-    // Use pshufb if it is available.
+-    if (AssemblerX86Shared::HasSSSE3()) {
+-        FloatRegister scratch1 = ToFloatRegister(ins->temp());
+-        ScratchSimd128Scope scratch2(masm);
+-
+-        // Use pshufb instructions to gather the lanes from each source vector.
+-        // A negative index creates a zero lane, so the two vectors can be combined.
+-
+-        // Set scratch2 = lanes from lhs.
+-        int8_t idx[16];
+-        for (unsigned i = 0; i < 16; i++)
+-            idx[i] = bLane[i] < 16 ? bLane[i] : -1;
+-        masm.loadConstantSimd128Int(SimdConstant::CreateX16(idx), scratch1);
+-        FloatRegister lhsCopy = masm.reusedInputInt32x4(lhs, scratch2);
+-        masm.vpshufb(scratch1, lhsCopy, scratch2);
+-
+-        // Set output = lanes from rhs.
+-        for (unsigned i = 0; i < 16; i++)
+-            idx[i] = bLane[i] >= 16 ? bLane[i] - 16 : -1;
+-        masm.loadConstantSimd128Int(SimdConstant::CreateX16(idx), scratch1);
+-        FloatRegister rhsCopy = masm.reusedInputInt32x4(rhs, output);
+-        masm.vpshufb(scratch1, rhsCopy, output);
+-
+-        // Combine.
+-        masm.vpor(scratch2, output, output);
+-        return;
+-    }
+-
+-    // Worst-case fallback for pre-SSE3 machines. Bounce through memory.
+-    Register temp = ToRegister(ins->getTemp(0));
+-    masm.reserveStack(3 * Simd128DataSize);
+-    masm.storeAlignedSimd128Int(lhs, Address(StackPointer, Simd128DataSize));
+-    masm.storeAlignedSimd128Int(rhs, Address(StackPointer, 2 * Simd128DataSize));
+-    for (unsigned i = 0; i < 16; i++) {
+-        masm.load8ZeroExtend(Address(StackPointer, Simd128DataSize + bLane[i]), temp);
+-        masm.store8(temp, Address(StackPointer, i));
+-    }
+-    masm.loadAlignedSimd128Int(Address(StackPointer, 0), output);
+-    masm.freeStack(3 * Simd128DataSize);
++    Maybe<FloatRegister> maybeFloatTemp;
++    Maybe<Register> maybeTemp;
++    if (AssemblerX86Shared::HasSSSE3())
++        maybeFloatTemp.emplace(ToFloatRegister(ins->temp()));
++    else
++        maybeTemp.emplace(ToRegister(ins->temp()));
++
++    masm.shuffleInt8x16(lhs, rhs, output, maybeFloatTemp, maybeTemp, lanes);
+ }
+ 
+ void
+ CodeGenerator::visitSimdShuffleX4(LSimdShuffleX4* ins)
+ {
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     Operand rhs = ToOperand(ins->rhs());
+     FloatRegister out = ToFloatRegister(ins->output());
+-
+-    uint32_t x = ins->lane(0);
+-    uint32_t y = ins->lane(1);
+-    uint32_t z = ins->lane(2);
+-    uint32_t w = ins->lane(3);
+-
+-    // Check that lanes come from LHS in majority:
+-    unsigned numLanesFromLHS = (x < 4) + (y < 4) + (z < 4) + (w < 4);
+-    MOZ_ASSERT(numLanesFromLHS >= 2);
+-
+-    // When reading this method, remember that vshufps takes the two first
+-    // inputs of the destination operand (right operand) and the two last
+-    // inputs of the source operand (left operand).
+-    //
+-    // Legend for explanations:
+-    // - L: LHS
+-    // - R: RHS
+-    // - T: temporary
+-
+-    uint32_t mask;
+-
+-    // If all lanes came from a single vector, we should have constructed a
+-    // MSimdSwizzle instead.
+-    MOZ_ASSERT(numLanesFromLHS < 4);
+-
+-    // If all values stay in their lane, this is a blend.
+-    if (AssemblerX86Shared::HasSSE41()) {
+-        if (x % 4 == 0 && y % 4 == 1 && z % 4 == 2 && w % 4 == 3) {
+-            masm.vblendps(masm.blendpsMask(x >= 4, y >= 4, z >= 4, w >= 4), rhs, lhs, out);
+-            return;
+-        }
++    unsigned lanes[4];
++    for (unsigned i = 0; i < 4; i++)
++        lanes[i] = ins->lane(i);
++    Maybe<FloatRegister> maybeTemp;
++    if (!ins->temp()->isBogusTemp())
++        maybeTemp.emplace(ToFloatRegister(ins->temp()));
++    masm.shuffleX4(lhs, rhs, out, maybeTemp, lanes);
++}
++
++static inline Assembler::Condition
++ToCondition(MSimdBinaryComp::Operation op)
++{
++    switch (op) {
++      case MSimdBinaryComp::greaterThan: return Assembler::GreaterThan;
++      case MSimdBinaryComp::equal: return Assembler::Equal;
++      case MSimdBinaryComp::lessThan: return Assembler::LessThan;
++      case MSimdBinaryComp::notEqual: return Assembler::NotEqual;
++      case MSimdBinaryComp::greaterThanOrEqual: return Assembler::GreaterThanOrEqual;
++      case MSimdBinaryComp::lessThanOrEqual: return Assembler::LessThanOrEqual;
+     }
+-
+-    // One element of the second, all other elements of the first
+-    if (numLanesFromLHS == 3) {
+-        unsigned firstMask = -1, secondMask = -1;
+-
+-        // register-register vmovss preserves the high lanes.
+-        if (ins->lanesMatch(4, 1, 2, 3) && rhs.kind() == Operand::FPREG) {
+-            masm.vmovss(FloatRegister::FromCode(rhs.fpu()), lhs, out);
+-            return;
+-        }
+-
+-        // SSE4.1 vinsertps can handle any single element.
+-        unsigned numLanesUnchanged = (x == 0) + (y == 1) + (z == 2) + (w == 3);
+-        if (AssemblerX86Shared::HasSSE41() && numLanesUnchanged == 3) {
+-            unsigned srcLane;
+-            unsigned dstLane;
+-            if (x >= 4) {
+-                srcLane = x - 4;
+-                dstLane = 0;
+-            } else if (y >= 4) {
+-                srcLane = y - 4;
+-                dstLane = 1;
+-            } else if (z >= 4) {
+-                srcLane = z - 4;
+-                dstLane = 2;
+-            } else {
+-                MOZ_ASSERT(w >= 4);
+-                srcLane = w - 4;
+-                dstLane = 3;
+-            }
+-            masm.vinsertps(masm.vinsertpsMask(srcLane, dstLane), rhs, lhs, out);
+-            return;
+-        }
+-
+-        FloatRegister rhsCopy = ToFloatRegister(ins->temp());
+-
+-        if (x < 4 && y < 4) {
+-            if (w >= 4) {
+-                w %= 4;
+-                // T = (Rw Rw Lz Lz) = vshufps(firstMask, lhs, rhs, rhsCopy)
+-                firstMask = MacroAssembler::ComputeShuffleMask(w, w, z, z);
+-                // (Lx Ly Lz Rw) = (Lx Ly Tz Tx) = vshufps(secondMask, T, lhs, out)
+-                secondMask = MacroAssembler::ComputeShuffleMask(x, y, 2, 0);
+-            } else {
+-                MOZ_ASSERT(z >= 4);
+-                z %= 4;
+-                // T = (Rz Rz Lw Lw) = vshufps(firstMask, lhs, rhs, rhsCopy)
+-                firstMask = MacroAssembler::ComputeShuffleMask(z, z, w, w);
+-                // (Lx Ly Rz Lw) = (Lx Ly Tx Tz) = vshufps(secondMask, T, lhs, out)
+-                secondMask = MacroAssembler::ComputeShuffleMask(x, y, 0, 2);
+-            }
+-
+-            masm.vshufps(firstMask, lhs, rhsCopy, rhsCopy);
+-            masm.vshufps(secondMask, rhsCopy, lhs, out);
+-            return;
+-        }
+-
+-        MOZ_ASSERT(z < 4 && w < 4);
+-
+-        if (y >= 4) {
+-            y %= 4;
+-            // T = (Ry Ry Lx Lx) = vshufps(firstMask, lhs, rhs, rhsCopy)
+-            firstMask = MacroAssembler::ComputeShuffleMask(y, y, x, x);
+-            // (Lx Ry Lz Lw) = (Tz Tx Lz Lw) = vshufps(secondMask, lhs, T, out)
+-            secondMask = MacroAssembler::ComputeShuffleMask(2, 0, z, w);
+-        } else {
+-            MOZ_ASSERT(x >= 4);
+-            x %= 4;
+-            // T = (Rx Rx Ly Ly) = vshufps(firstMask, lhs, rhs, rhsCopy)
+-            firstMask = MacroAssembler::ComputeShuffleMask(x, x, y, y);
+-            // (Rx Ly Lz Lw) = (Tx Tz Lz Lw) = vshufps(secondMask, lhs, T, out)
+-            secondMask = MacroAssembler::ComputeShuffleMask(0, 2, z, w);
+-        }
+-
+-        masm.vshufps(firstMask, lhs, rhsCopy, rhsCopy);
+-        if (AssemblerX86Shared::HasAVX()) {
+-            masm.vshufps(secondMask, lhs, rhsCopy, out);
+-        } else {
+-            masm.vshufps(secondMask, lhs, rhsCopy, rhsCopy);
+-            masm.moveSimd128Float(rhsCopy, out);
+-        }
+-        return;
+-    }
+-
+-    // Two elements from one vector, two other elements from the other
+-    MOZ_ASSERT(numLanesFromLHS == 2);
+-
+-    // TODO Here and below, symmetric case would be more handy to avoid a move,
+-    // but can't be reached because operands would get swapped (bug 1084404).
+-    if (ins->lanesMatch(2, 3, 6, 7)) {
+-        ScratchSimd128Scope scratch(masm);
+-        if (AssemblerX86Shared::HasAVX()) {
+-            FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
+-            masm.vmovhlps(lhs, rhsCopy, out);
+-        } else {
+-            masm.loadAlignedSimd128Float(rhs, scratch);
+-            masm.vmovhlps(lhs, scratch, scratch);
+-            masm.moveSimd128Float(scratch, out);
+-        }
+-        return;
+-    }
+-
+-    if (ins->lanesMatch(0, 1, 4, 5)) {
+-        FloatRegister rhsCopy;
+-        ScratchSimd128Scope scratch(masm);
+-        if (rhs.kind() == Operand::FPREG) {
+-            // No need to make an actual copy, since the operand is already
+-            // in a register, and it won't be clobbered by the vmovlhps.
+-            rhsCopy = FloatRegister::FromCode(rhs.fpu());
+-        } else {
+-            masm.loadAlignedSimd128Float(rhs, scratch);
+-            rhsCopy = scratch;
+-        }
+-        masm.vmovlhps(rhsCopy, lhs, out);
+-        return;
+-    }
+-
+-    if (ins->lanesMatch(0, 4, 1, 5)) {
+-        masm.vunpcklps(rhs, lhs, out);
+-        return;
+-    }
+-
+-    // TODO swapped case would be better (bug 1084404)
+-    if (ins->lanesMatch(4, 0, 5, 1)) {
+-        ScratchSimd128Scope scratch(masm);
+-        if (AssemblerX86Shared::HasAVX()) {
+-            FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
+-            masm.vunpcklps(lhs, rhsCopy, out);
+-        } else {
+-            masm.loadAlignedSimd128Float(rhs, scratch);
+-            masm.vunpcklps(lhs, scratch, scratch);
+-            masm.moveSimd128Float(scratch, out);
+-        }
+-        return;
+-    }
+-
+-    if (ins->lanesMatch(2, 6, 3, 7)) {
+-        masm.vunpckhps(rhs, lhs, out);
+-        return;
+-    }
+-
+-    // TODO swapped case would be better (bug 1084404)
+-    if (ins->lanesMatch(6, 2, 7, 3)) {
+-        ScratchSimd128Scope scratch(masm);
+-        if (AssemblerX86Shared::HasAVX()) {
+-            FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
+-            masm.vunpckhps(lhs, rhsCopy, out);
+-        } else {
+-            masm.loadAlignedSimd128Float(rhs, scratch);
+-            masm.vunpckhps(lhs, scratch, scratch);
+-            masm.moveSimd128Float(scratch, out);
+-        }
+-        return;
+-    }
+-
+-    // In one vshufps
+-    if (x < 4 && y < 4) {
+-        mask = MacroAssembler::ComputeShuffleMask(x, y, z % 4, w % 4);
+-        masm.vshufps(mask, rhs, lhs, out);
+-        return;
+-    }
+-
+-    // At creation, we should have explicitly swapped in this case.
+-    MOZ_ASSERT(!(z >= 4 && w >= 4));
+-
+-    // In two vshufps, for the most generic case:
+-    uint32_t firstMask[4], secondMask[4];
+-    unsigned i = 0, j = 2, k = 0;
+-
+-#define COMPUTE_MASK(lane)       \
+-    if (lane >= 4) {             \
+-        firstMask[j] = lane % 4; \
+-        secondMask[k++] = j++;   \
+-    } else {                     \
+-        firstMask[i] = lane;     \
+-        secondMask[k++] = i++;   \
+-    }
+-
+-    COMPUTE_MASK(x)
+-    COMPUTE_MASK(y)
+-    COMPUTE_MASK(z)
+-    COMPUTE_MASK(w)
+-#undef COMPUTE_MASK
+-
+-    MOZ_ASSERT(i == 2 && j == 4 && k == 4);
+-
+-    mask = MacroAssembler::ComputeShuffleMask(firstMask[0], firstMask[1],
+-                                              firstMask[2], firstMask[3]);
+-    masm.vshufps(mask, rhs, lhs, lhs);
+-
+-    mask = MacroAssembler::ComputeShuffleMask(secondMask[0], secondMask[1],
+-                                              secondMask[2], secondMask[3]);
+-    masm.vshufps(mask, lhs, lhs, lhs);
++    MOZ_CRASH("unexpected cond");
+ }
+ 
+ void
+ CodeGenerator::visitSimdBinaryCompIx16(LSimdBinaryCompIx16* ins)
+ {
+-    static const SimdConstant allOnes = SimdConstant::SplatX16(-1);
+-
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     Operand rhs = ToOperand(ins->rhs());
+     FloatRegister output = ToFloatRegister(ins->output());
+     MOZ_ASSERT_IF(!Assembler::HasAVX(), output == lhs);
+-
+-    ScratchSimd128Scope scratch(masm);
+-
+-    MSimdBinaryComp::Operation op = ins->operation();
+-    switch (op) {
+-      case MSimdBinaryComp::greaterThan:
+-        masm.vpcmpgtb(rhs, lhs, output);
+-        return;
+-      case MSimdBinaryComp::equal:
+-        masm.vpcmpeqb(rhs, lhs, output);
+-        return;
+-      case MSimdBinaryComp::lessThan:
+-        // src := rhs
+-        if (rhs.kind() == Operand::FPREG)
+-            masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+-        else
+-            masm.loadAlignedSimd128Int(rhs, scratch);
+-
+-        // src := src > lhs (i.e. lhs < rhs)
+-        // Improve by doing custom lowering (rhs is tied to the output register)
+-        masm.vpcmpgtb(ToOperand(ins->lhs()), scratch, scratch);
+-        masm.moveSimd128Int(scratch, output);
+-        return;
+-      case MSimdBinaryComp::notEqual:
+-        // Ideally for notEqual, greaterThanOrEqual, and lessThanOrEqual, we
+-        // should invert the comparison by, e.g. swapping the arms of a select
+-        // if that's what it's used in.
+-        masm.loadConstantSimd128Int(allOnes, scratch);
+-        masm.vpcmpeqb(rhs, lhs, output);
+-        masm.bitwiseXorSimd128(Operand(scratch), output);
+-        return;
+-      case MSimdBinaryComp::greaterThanOrEqual:
+-        // src := rhs
+-        if (rhs.kind() == Operand::FPREG)
+-            masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+-        else
+-            masm.loadAlignedSimd128Int(rhs, scratch);
+-        masm.vpcmpgtb(ToOperand(ins->lhs()), scratch, scratch);
+-        masm.loadConstantSimd128Int(allOnes, output);
+-        masm.bitwiseXorSimd128(Operand(scratch), output);
+-        return;
+-      case MSimdBinaryComp::lessThanOrEqual:
+-        // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
+-        masm.loadConstantSimd128Int(allOnes, scratch);
+-        masm.vpcmpgtb(rhs, lhs, output);
+-        masm.bitwiseXorSimd128(Operand(scratch), output);
+-        return;
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
++    masm.compareInt8x16(lhs, rhs, ToCondition(ins->operation()), output);
+ }
+ 
+ void
+ CodeGenerator::visitSimdBinaryCompIx8(LSimdBinaryCompIx8* ins)
+ {
+-    static const SimdConstant allOnes = SimdConstant::SplatX8(-1);
+-
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     Operand rhs = ToOperand(ins->rhs());
+     FloatRegister output = ToFloatRegister(ins->output());
+     MOZ_ASSERT_IF(!Assembler::HasAVX(), output == lhs);
+-
+-    ScratchSimd128Scope scratch(masm);
+-
+-    MSimdBinaryComp::Operation op = ins->operation();
+-    switch (op) {
+-      case MSimdBinaryComp::greaterThan:
+-        masm.vpcmpgtw(rhs, lhs, output);
+-        return;
+-      case MSimdBinaryComp::equal:
+-        masm.vpcmpeqw(rhs, lhs, output);
+-        return;
+-      case MSimdBinaryComp::lessThan:
+-        // src := rhs
+-        if (rhs.kind() == Operand::FPREG)
+-            masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+-        else
+-            masm.loadAlignedSimd128Int(rhs, scratch);
+-
+-        // src := src > lhs (i.e. lhs < rhs)
+-        // Improve by doing custom lowering (rhs is tied to the output register)
+-        masm.vpcmpgtw(ToOperand(ins->lhs()), scratch, scratch);
+-        masm.moveSimd128Int(scratch, output);
+-        return;
+-      case MSimdBinaryComp::notEqual:
+-        // Ideally for notEqual, greaterThanOrEqual, and lessThanOrEqual, we
+-        // should invert the comparison by, e.g. swapping the arms of a select
+-        // if that's what it's used in.
+-        masm.loadConstantSimd128Int(allOnes, scratch);
+-        masm.vpcmpeqw(rhs, lhs, output);
+-        masm.bitwiseXorSimd128(Operand(scratch), output);
+-        return;
+-      case MSimdBinaryComp::greaterThanOrEqual:
+-        // src := rhs
+-        if (rhs.kind() == Operand::FPREG)
+-            masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+-        else
+-            masm.loadAlignedSimd128Int(rhs, scratch);
+-        masm.vpcmpgtw(ToOperand(ins->lhs()), scratch, scratch);
+-        masm.loadConstantSimd128Int(allOnes, output);
+-        masm.bitwiseXorSimd128(Operand(scratch), output);
+-        return;
+-      case MSimdBinaryComp::lessThanOrEqual:
+-        // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
+-        masm.loadConstantSimd128Int(allOnes, scratch);
+-        masm.vpcmpgtw(rhs, lhs, output);
+-        masm.bitwiseXorSimd128(Operand(scratch), output);
+-        return;
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
++    masm.compareInt16x8(lhs, rhs, ToCondition(ins->operation()), output);
+ }
+ 
+ void
+ CodeGenerator::visitSimdBinaryCompIx4(LSimdBinaryCompIx4* ins)
+ {
+-    static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
+-
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     Operand rhs = ToOperand(ins->rhs());
+     MOZ_ASSERT(ToFloatRegister(ins->output()) == lhs);
+-
+-    ScratchSimd128Scope scratch(masm);
+-
+-    MSimdBinaryComp::Operation op = ins->operation();
+-    switch (op) {
+-      case MSimdBinaryComp::greaterThan:
+-        masm.packedGreaterThanInt32x4(rhs, lhs);
+-        return;
+-      case MSimdBinaryComp::equal:
+-        masm.packedEqualInt32x4(rhs, lhs);
+-        return;
+-      case MSimdBinaryComp::lessThan:
+-        // src := rhs
+-        if (rhs.kind() == Operand::FPREG)
+-            masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+-        else
+-            masm.loadAlignedSimd128Int(rhs, scratch);
+-
+-        // src := src > lhs (i.e. lhs < rhs)
+-        // Improve by doing custom lowering (rhs is tied to the output register)
+-        masm.packedGreaterThanInt32x4(ToOperand(ins->lhs()), scratch);
+-        masm.moveSimd128Int(scratch, lhs);
+-        return;
+-      case MSimdBinaryComp::notEqual:
+-        // Ideally for notEqual, greaterThanOrEqual, and lessThanOrEqual, we
+-        // should invert the comparison by, e.g. swapping the arms of a select
+-        // if that's what it's used in.
+-        masm.loadConstantSimd128Int(allOnes, scratch);
+-        masm.packedEqualInt32x4(rhs, lhs);
+-        masm.bitwiseXorSimd128(Operand(scratch), lhs);
+-        return;
+-      case MSimdBinaryComp::greaterThanOrEqual:
+-        // src := rhs
+-        if (rhs.kind() == Operand::FPREG)
+-            masm.moveSimd128Int(ToFloatRegister(ins->rhs()), scratch);
+-        else
+-            masm.loadAlignedSimd128Int(rhs, scratch);
+-        masm.packedGreaterThanInt32x4(ToOperand(ins->lhs()), scratch);
+-        masm.loadConstantSimd128Int(allOnes, lhs);
+-        masm.bitwiseXorSimd128(Operand(scratch), lhs);
+-        return;
+-      case MSimdBinaryComp::lessThanOrEqual:
+-        // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
+-        masm.loadConstantSimd128Int(allOnes, scratch);
+-        masm.packedGreaterThanInt32x4(rhs, lhs);
+-        masm.bitwiseXorSimd128(Operand(scratch), lhs);
+-        return;
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
++    masm.compareInt32x4(lhs, rhs, ToCondition(ins->operation()), lhs);
+ }
+ 
+ void
+ CodeGenerator::visitSimdBinaryCompFx4(LSimdBinaryCompFx4* ins)
+ {
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     Operand rhs = ToOperand(ins->rhs());
+     FloatRegister output = ToFloatRegister(ins->output());
+-
+-    MSimdBinaryComp::Operation op = ins->operation();
+-    switch (op) {
+-      case MSimdBinaryComp::equal:
+-        masm.vcmpeqps(rhs, lhs, output);
+-        return;
+-      case MSimdBinaryComp::lessThan:
+-        masm.vcmpltps(rhs, lhs, output);
+-        return;
+-      case MSimdBinaryComp::lessThanOrEqual:
+-        masm.vcmpleps(rhs, lhs, output);
+-        return;
+-      case MSimdBinaryComp::notEqual:
+-        masm.vcmpneqps(rhs, lhs, output);
+-        return;
+-      case MSimdBinaryComp::greaterThanOrEqual:
+-      case MSimdBinaryComp::greaterThan:
+-        // We reverse these before register allocation so that we don't have to
+-        // copy into and out of temporaries after codegen.
+-        MOZ_CRASH("lowering should have reversed this");
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
++    masm.compareFloat32x4(lhs, rhs, ToCondition(ins->operation()), output);
+ }
+ 
+ void
+ CodeGenerator::visitSimdBinaryArithIx16(LSimdBinaryArithIx16* ins)
+ {
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     Operand rhs = ToOperand(ins->rhs());
+     FloatRegister output = ToFloatRegister(ins->output());
+ 
+     MSimdBinaryArith::Operation op = ins->operation();
+     switch (op) {
+       case MSimdBinaryArith::Op_add:
+-        masm.vpaddb(rhs, lhs, output);
++        masm.addInt8x16(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_sub:
+-        masm.vpsubb(rhs, lhs, output);
++        masm.subInt8x16(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_mul:
+         // 8x16 mul is a valid operation, but not supported in SSE or AVX.
+         // The operation is synthesized from 16x8 multiplies by
+         // MSimdBinaryArith::AddLegalized().
+         break;
+       case MSimdBinaryArith::Op_div:
+       case MSimdBinaryArith::Op_max:
+@@ -3744,23 +2971,23 @@ CodeGenerator::visitSimdBinaryArithIx8(L
+ {
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     Operand rhs = ToOperand(ins->rhs());
+     FloatRegister output = ToFloatRegister(ins->output());
+ 
+     MSimdBinaryArith::Operation op = ins->operation();
+     switch (op) {
+       case MSimdBinaryArith::Op_add:
+-        masm.vpaddw(rhs, lhs, output);
++        masm.addInt16x8(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_sub:
+-        masm.vpsubw(rhs, lhs, output);
++        masm.subInt16x8(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_mul:
+-        masm.vpmullw(rhs, lhs, output);
++        masm.mulInt16x8(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_div:
+       case MSimdBinaryArith::Op_max:
+       case MSimdBinaryArith::Op_min:
+       case MSimdBinaryArith::Op_minNum:
+       case MSimdBinaryArith::Op_maxNum:
+         break;
+     }
+@@ -3769,45 +2996,29 @@ CodeGenerator::visitSimdBinaryArithIx8(L
+ 
+ void
+ CodeGenerator::visitSimdBinaryArithIx4(LSimdBinaryArithIx4* ins)
+ {
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     Operand rhs = ToOperand(ins->rhs());
+     FloatRegister output = ToFloatRegister(ins->output());
+ 
+-    ScratchSimd128Scope scratch(masm);
+-
+     MSimdBinaryArith::Operation op = ins->operation();
+     switch (op) {
+       case MSimdBinaryArith::Op_add:
+-        masm.vpaddd(rhs, lhs, output);
++        masm.addInt32x4(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_sub:
+-        masm.vpsubd(rhs, lhs, output);
++        masm.subInt32x4(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_mul: {
+-        if (AssemblerX86Shared::HasSSE41()) {
+-            masm.vpmulld(rhs, lhs, output);
+-            return;
+-        }
+-
+-        masm.loadAlignedSimd128Int(rhs, scratch);
+-        masm.vpmuludq(lhs, scratch, scratch);
+-        // scratch contains (Rx, _, Rz, _) where R is the resulting vector.
+-
+-        FloatRegister temp = ToFloatRegister(ins->temp());
+-        masm.vpshufd(MacroAssembler::ComputeShuffleMask(1, 1, 3, 3), lhs, lhs);
+-        masm.vpshufd(MacroAssembler::ComputeShuffleMask(1, 1, 3, 3), rhs, temp);
+-        masm.vpmuludq(temp, lhs, lhs);
+-        // lhs contains (Ry, _, Rw, _) where R is the resulting vector.
+-
+-        masm.vshufps(MacroAssembler::ComputeShuffleMask(0, 2, 0, 2), scratch, lhs, lhs);
+-        // lhs contains (Ry, Rw, Rx, Rz)
+-        masm.vshufps(MacroAssembler::ComputeShuffleMask(2, 0, 3, 1), lhs, lhs, lhs);
++        Maybe<FloatRegister> maybeTemp;
++        if (!AssemblerX86Shared::HasSSE41())
++            maybeTemp.emplace(ToFloatRegister(ins->getTemp(0)));
++        masm.mulInt32x4(lhs, rhs, maybeTemp, output);
+         return;
+       }
+       case MSimdBinaryArith::Op_div:
+         // x86 doesn't have SIMD i32 div.
+         break;
+       case MSimdBinaryArith::Op_max:
+         // we can do max with a single instruction only if we have SSE4.1
+         // using the PMAXSD instruction.
+@@ -3825,114 +3036,44 @@ CodeGenerator::visitSimdBinaryArithIx4(L
+ 
+ void
+ CodeGenerator::visitSimdBinaryArithFx4(LSimdBinaryArithFx4* ins)
+ {
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     Operand rhs = ToOperand(ins->rhs());
+     FloatRegister output = ToFloatRegister(ins->output());
+ 
+-    ScratchSimd128Scope scratch(masm);
+-
+     MSimdBinaryArith::Operation op = ins->operation();
+     switch (op) {
+       case MSimdBinaryArith::Op_add:
+-        masm.vaddps(rhs, lhs, output);
++        masm.addFloat32x4(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_sub:
+-        masm.vsubps(rhs, lhs, output);
++        masm.subFloat32x4(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_mul:
+-        masm.vmulps(rhs, lhs, output);
++        masm.mulFloat32x4(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_div:
+-        masm.vdivps(rhs, lhs, output);
++        masm.divFloat32x4(lhs, rhs, output);
+         return;
+       case MSimdBinaryArith::Op_max: {
+-        FloatRegister lhsCopy = masm.reusedInputFloat32x4(lhs, scratch);
+-        masm.vcmpunordps(rhs, lhsCopy, scratch);
+-
+-        FloatRegister tmp = ToFloatRegister(ins->temp());
+-        FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, tmp);
+-        masm.vmaxps(Operand(lhs), rhsCopy, tmp);
+-        masm.vmaxps(rhs, lhs, output);
+-
+-        masm.vandps(tmp, output, output);
+-        masm.vorps(scratch, output, output); // or in the all-ones NaNs
++        masm.maxFloat32x4(lhs, rhs, ToFloatRegister(ins->temp()), output);
+         return;
+       }
+       case MSimdBinaryArith::Op_min: {
+-        FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
+-        masm.vminps(Operand(lhs), rhsCopy, scratch);
+-        masm.vminps(rhs, lhs, output);
+-        masm.vorps(scratch, output, output); // NaN or'd with arbitrary bits is NaN
++        masm.minFloat32x4(lhs, rhs, output);
+         return;
+       }
+       case MSimdBinaryArith::Op_minNum: {
+-        FloatRegister tmp = ToFloatRegister(ins->temp());
+-        masm.loadConstantSimd128Int(SimdConstant::SplatX4(int32_t(0x80000000)), tmp);
+-
+-        FloatRegister mask = scratch;
+-        FloatRegister tmpCopy = masm.reusedInputFloat32x4(tmp, scratch);
+-        masm.vpcmpeqd(Operand(lhs), tmpCopy, mask);
+-        masm.vandps(tmp, mask, mask);
+-
+-        FloatRegister lhsCopy = masm.reusedInputFloat32x4(lhs, tmp);
+-        masm.vminps(rhs, lhsCopy, tmp);
+-        masm.vorps(mask, tmp, tmp);
+-
+-        FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, mask);
+-        masm.vcmpneqps(rhs, rhsCopy, mask);
+-
+-        if (AssemblerX86Shared::HasAVX()) {
+-            masm.vblendvps(mask, lhs, tmp, output);
+-        } else {
+-            // Emulate vblendvps.
+-            // With SSE.4.1 we could use blendvps, however it's awkward since
+-            // it requires the mask to be in xmm0.
+-            if (lhs != output)
+-                masm.moveSimd128Float(lhs, output);
+-            masm.vandps(Operand(mask), output, output);
+-            masm.vandnps(Operand(tmp), mask, mask);
+-            masm.vorps(Operand(mask), output, output);
+-        }
++        masm.minNumFloat32x4(lhs, rhs, ToFloatRegister(ins->temp()), output);
+         return;
+       }
+       case MSimdBinaryArith::Op_maxNum: {
+-        FloatRegister mask = scratch;
+-        masm.loadConstantSimd128Int(SimdConstant::SplatX4(0), mask);
+-        masm.vpcmpeqd(Operand(lhs), mask, mask);
+-
+-        FloatRegister tmp = ToFloatRegister(ins->temp());
+-        masm.loadConstantSimd128Int(SimdConstant::SplatX4(int32_t(0x80000000)), tmp);
+-        masm.vandps(tmp, mask, mask);
+-
+-        FloatRegister lhsCopy = masm.reusedInputFloat32x4(lhs, tmp);
+-        masm.vmaxps(rhs, lhsCopy, tmp);
+-        masm.vandnps(Operand(tmp), mask, mask);
+-
+-        // Ensure tmp always contains the temporary result
+-        mask = tmp;
+-        tmp = scratch;
+-
+-        FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, mask);
+-        masm.vcmpneqps(rhs, rhsCopy, mask);
+-
+-        if (AssemblerX86Shared::HasAVX()) {
+-            masm.vblendvps(mask, lhs, tmp, output);
+-        } else {
+-            // Emulate vblendvps.
+-            // With SSE.4.1 we could use blendvps, however it's awkward since
+-            // it requires the mask to be in xmm0.
+-            if (lhs != output)
+-                masm.moveSimd128Float(lhs, output);
+-            masm.vandps(Operand(mask), output, output);
+-            masm.vandnps(Operand(tmp), mask, mask);
+-            masm.vorps(Operand(mask), output, output);
+-        }
++        masm.maxNumFloat32x4(lhs, rhs, ToFloatRegister(ins->temp()), output);
+         return;
+       }
+     }
+     MOZ_CRASH("unexpected SIMD op");
+ }
+ 
+ void
+ CodeGenerator::visitSimdBinarySaturating(LSimdBinarySaturating* ins)
+@@ -3943,160 +3084,119 @@ CodeGenerator::visitSimdBinarySaturating
+ 
+     SimdSign sign = ins->signedness();
+     MOZ_ASSERT(sign != SimdSign::NotApplicable);
+ 
+     switch (ins->type()) {
+       case MIRType::Int8x16:
+         switch (ins->operation()) {
+           case MSimdBinarySaturating::add:
+-            if (sign == SimdSign::Signed)
+-                masm.vpaddsb(rhs, lhs, output);
+-            else
+-                masm.vpaddusb(rhs, lhs, output);
++            masm.addSatInt8x16(lhs, rhs, sign, output);
+             return;
+           case MSimdBinarySaturating::sub:
+-            if (sign == SimdSign::Signed)
+-                masm.vpsubsb(rhs, lhs, output);
+-            else
+-                masm.vpsubusb(rhs, lhs, output);
++            masm.subSatInt8x16(lhs, rhs, sign, output);
+             return;
+         }
+         break;
+ 
+       case MIRType::Int16x8:
+         switch (ins->operation()) {
+           case MSimdBinarySaturating::add:
+-            if (sign == SimdSign::Signed)
+-                masm.vpaddsw(rhs, lhs, output);
+-            else
+-                masm.vpaddusw(rhs, lhs, output);
++            masm.addSatInt16x8(lhs, rhs, sign, output);
+             return;
+           case MSimdBinarySaturating::sub:
+-            if (sign == SimdSign::Signed)
+-                masm.vpsubsw(rhs, lhs, output);
+-            else
+-                masm.vpsubusw(rhs, lhs, output);
++            masm.subSatInt16x8(lhs, rhs, sign, output);
+             return;
+         }
+         break;
+ 
+       default:
+         break;
+     }
+     MOZ_CRASH("unsupported type for SIMD saturating arithmetic");
+ }
+ 
+ void
+ CodeGenerator::visitSimdUnaryArithIx16(LSimdUnaryArithIx16* ins)
+ {
+     Operand in = ToOperand(ins->input());
+     FloatRegister out = ToFloatRegister(ins->output());
+-
+-    static const SimdConstant allOnes = SimdConstant::SplatX16(-1);
+-
+     switch (ins->operation()) {
+       case MSimdUnaryArith::neg:
+-        masm.zeroSimd128Int(out);
+-        masm.packedSubInt8(in, out);
++        masm.negInt8x16(in, out);
+         return;
+       case MSimdUnaryArith::not_:
+-        masm.loadConstantSimd128Int(allOnes, out);
+-        masm.bitwiseXorSimd128(in, out);
++        masm.notInt8x16(in, out);
+         return;
+       case MSimdUnaryArith::abs:
+       case MSimdUnaryArith::reciprocalApproximation:
+       case MSimdUnaryArith::reciprocalSqrtApproximation:
+       case MSimdUnaryArith::sqrt:
+         break;
+     }
+     MOZ_CRASH("unexpected SIMD op");
+ }
+ 
+ void
+ CodeGenerator::visitSimdUnaryArithIx8(LSimdUnaryArithIx8* ins)
+ {
+     Operand in = ToOperand(ins->input());
+     FloatRegister out = ToFloatRegister(ins->output());
+-
+-    static const SimdConstant allOnes = SimdConstant::SplatX8(-1);
+-
+     switch (ins->operation()) {
+       case MSimdUnaryArith::neg:
+-        masm.zeroSimd128Int(out);
+-        masm.packedSubInt16(in, out);
++        masm.negInt16x8(in, out);
+         return;
+       case MSimdUnaryArith::not_:
+-        masm.loadConstantSimd128Int(allOnes, out);
+-        masm.bitwiseXorSimd128(in, out);
++        masm.notInt16x8(in, out);
+         return;
+       case MSimdUnaryArith::abs:
+       case MSimdUnaryArith::reciprocalApproximation:
+       case MSimdUnaryArith::reciprocalSqrtApproximation:
+       case MSimdUnaryArith::sqrt:
+         break;
+     }
+     MOZ_CRASH("unexpected SIMD op");
+ }
+ 
+ void
+ CodeGenerator::visitSimdUnaryArithIx4(LSimdUnaryArithIx4* ins)
+ {
+     Operand in = ToOperand(ins->input());
+     FloatRegister out = ToFloatRegister(ins->output());
+-
+-    static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
+-
+     switch (ins->operation()) {
+       case MSimdUnaryArith::neg:
+-        masm.zeroSimd128Int(out);
+-        masm.packedSubInt32(in, out);
++        masm.negInt32x4(in, out);
+         return;
+       case MSimdUnaryArith::not_:
+-        masm.loadConstantSimd128Int(allOnes, out);
+-        masm.bitwiseXorSimd128(in, out);
++        masm.notInt32x4(in, out);
+         return;
+       case MSimdUnaryArith::abs:
+       case MSimdUnaryArith::reciprocalApproximation:
+       case MSimdUnaryArith::reciprocalSqrtApproximation:
+       case MSimdUnaryArith::sqrt:
+         break;
+     }
+     MOZ_CRASH("unexpected SIMD op");
+ }
+ 
+ void
+ CodeGenerator::visitSimdUnaryArithFx4(LSimdUnaryArithFx4* ins)
+ {
+     Operand in = ToOperand(ins->input());
+     FloatRegister out = ToFloatRegister(ins->output());
+ 
+-    // All ones but the sign bit
+-    float signMask = SpecificNaN<float>(0, FloatingPoint<float>::kSignificandBits);
+-    static const SimdConstant signMasks = SimdConstant::SplatX4(signMask);
+-
+-    // All ones including the sign bit
+-    float ones = SpecificNaN<float>(1, FloatingPoint<float>::kSignificandBits);
+-    static const SimdConstant allOnes = SimdConstant::SplatX4(ones);
+-
+-    // All zeros but the sign bit
+-    static const SimdConstant minusZero = SimdConstant::SplatX4(-0.f);
+-
+     switch (ins->operation()) {
+       case MSimdUnaryArith::abs:
+-        masm.loadConstantSimd128Float(signMasks, out);
+-        masm.bitwiseAndSimd128(in, out);
++        masm.absFloat32x4(in, out);
+         return;
+       case MSimdUnaryArith::neg:
+-        masm.loadConstantSimd128Float(minusZero, out);
+-        masm.bitwiseXorSimd128(in, out);
++        masm.negFloat32x4(in, out);
+         return;
+       case MSimdUnaryArith::not_:
+-        masm.loadConstantSimd128Float(allOnes, out);
+-        masm.bitwiseXorSimd128(in, out);
++        masm.notFloat32x4(in, out);
+         return;
+       case MSimdUnaryArith::reciprocalApproximation:
+         masm.packedRcpApproximationFloat32x4(in, out);
+         return;
+       case MSimdUnaryArith::reciprocalSqrtApproximation:
+         masm.packedRcpSqrtApproximationFloat32x4(in, out);
+         return;
+       case MSimdUnaryArith::sqrt:
+@@ -4112,51 +3212,48 @@ CodeGenerator::visitSimdBinaryBitwise(LS
+     FloatRegister lhs = ToFloatRegister(ins->lhs());
+     Operand rhs = ToOperand(ins->rhs());
+     FloatRegister output = ToFloatRegister(ins->output());
+ 
+     MSimdBinaryBitwise::Operation op = ins->operation();
+     switch (op) {
+       case MSimdBinaryBitwise::and_:
+         if (ins->type() == MIRType::Float32x4)
+-            masm.vandps(rhs, lhs, output);
++            masm.bitwiseAndFloat32x4(lhs, rhs, output);
+         else
+-            masm.vpand(rhs, lhs, output);
++            masm.bitwiseAndSimdInt(lhs, rhs, output);
+         return;
+       case MSimdBinaryBitwise::or_:
+         if (ins->type() == MIRType::Float32x4)
+-            masm.vorps(rhs, lhs, output);
++            masm.bitwiseOrFloat32x4(lhs, rhs, output);
+         else
+-            masm.vpor(rhs, lhs, output);
++            masm.bitwiseOrSimdInt(lhs, rhs, output);
+         return;
+       case MSimdBinaryBitwise::xor_:
+         if (ins->type() == MIRType::Float32x4)
+-            masm.vxorps(rhs, lhs, output);
++            masm.bitwiseXorFloat32x4(lhs, rhs, output);
+         else
+-            masm.vpxor(rhs, lhs, output);
++            masm.bitwiseXorSimdInt(lhs, rhs, output);
+         return;
+     }
+     MOZ_CRASH("unexpected SIMD bitwise op");
+ }
+ 
+ void
+ CodeGenerator::visitSimdShift(LSimdShift* ins)
+ {
+     FloatRegister out = ToFloatRegister(ins->output());
+     MOZ_ASSERT(ToFloatRegister(ins->vector()) == out); // defineReuseInput(0);
+ 
+-    // The shift amount is masked to the number of bits in a lane.
+-    uint32_t shiftmask = (128u / SimdTypeToLength(ins->type())) - 1;
+-
+     // Note that SSE doesn't have instructions for shifting 8x16 vectors.
+     // These shifts are synthesized by the MSimdShift::AddLegalized() function.
+     const LAllocation* val = ins->value();
+     if (val->isConstant()) {
+         MOZ_ASSERT(ins->temp()->isBogusTemp());
+-        Imm32 count(uint32_t(ToInt32(val)) & shiftmask);
++        Imm32 count(uint32_t(ToInt32(val)));
+         switch (ins->type()) {
+           case MIRType::Int16x8:
+             switch (ins->operation()) {
+               case MSimdShift::lsh:
+                 masm.packedLeftShiftByScalarInt16x8(count, out);
+                 return;
+               case MSimdShift::rsh:
+                 masm.packedRightShiftByScalarInt16x8(count, out);
+@@ -4180,48 +3277,43 @@ CodeGenerator::visitSimdShift(LSimdShift
+             }
+             break;
+           default:
+             MOZ_CRASH("unsupported type for SIMD shifts");
+         }
+         MOZ_CRASH("unexpected SIMD bitwise op");
+     }
+ 
+-    // Truncate val to 5 bits. We should have a temp register for that.
+-    MOZ_ASSERT(val->isRegister());
+-    Register count = ToRegister(ins->temp());
+-    masm.mov(ToRegister(val), count);
+-    masm.andl(Imm32(shiftmask), count);
+-    ScratchFloat32Scope scratch(masm);
+-    masm.vmovd(count, scratch);
++    Register temp = ToRegister(ins->temp());
++    Register count = ToRegister(val);
+ 
+     switch (ins->type()) {
+       case MIRType::Int16x8:
+         switch (ins->operation()) {
+           case MSimdShift::lsh:
+-            masm.packedLeftShiftByScalarInt16x8(scratch, out);
++            masm.packedLeftShiftByScalarInt16x8(out, count, temp, out);
+             return;
+           case MSimdShift::rsh:
+-            masm.packedRightShiftByScalarInt16x8(scratch, out);
++            masm.packedRightShiftByScalarInt16x8(out, count, temp, out);
+             return;
+           case MSimdShift::ursh:
+-            masm.packedUnsignedRightShiftByScalarInt16x8(scratch, out);
++            masm.packedUnsignedRightShiftByScalarInt16x8(out, count, temp, out);
+             return;
+         }
+         break;
+       case MIRType::Int32x4:
+         switch (ins->operation()) {
+           case MSimdShift::lsh:
+-            masm.packedLeftShiftByScalarInt32x4(scratch, out);
++            masm.packedLeftShiftByScalarInt32x4(out, count, temp, out);
+             return;
+           case MSimdShift::rsh:
+-            masm.packedRightShiftByScalarInt32x4(scratch, out);
++            masm.packedRightShiftByScalarInt32x4(out, count, temp, out);
+             return;
+           case MSimdShift::ursh:
+-            masm.packedUnsignedRightShiftByScalarInt32x4(scratch, out);
++            masm.packedUnsignedRightShiftByScalarInt32x4(out, count, temp, out);
+             return;
+         }
+         break;
+       default:
+         MOZ_CRASH("unsupported type for SIMD shifts");
+     }
+     MOZ_CRASH("unexpected SIMD bitwise op");
+ }
+@@ -4230,36 +3322,22 @@ void
+ CodeGenerator::visitSimdSelect(LSimdSelect* ins)
+ {
+     FloatRegister mask = ToFloatRegister(ins->mask());
+     FloatRegister onTrue = ToFloatRegister(ins->lhs());
+     FloatRegister onFalse = ToFloatRegister(ins->rhs());
+     FloatRegister output = ToFloatRegister(ins->output());
+     FloatRegister temp = ToFloatRegister(ins->temp());
+ 
+-    if (onTrue != output)
+-        masm.vmovaps(onTrue, output);
+-    if (mask != temp)
+-        masm.vmovaps(mask, temp);
+-
+     MSimdSelect* mir = ins->mir();
+     unsigned lanes = SimdTypeToLength(mir->type());
+-
+-    if (AssemblerX86Shared::HasAVX() && lanes == 4) {
+-        // TBD: Use vpblendvb for lanes > 4, HasAVX.
+-        masm.vblendvps(mask, onTrue, onFalse, output);
+-        return;
+-    }
+-
+-    // SSE4.1 has plain blendvps which can do this, but it is awkward
+-    // to use because it requires the mask to be in xmm0.
+-
+-    masm.bitwiseAndSimd128(Operand(temp), output);
+-    masm.bitwiseAndNotSimd128(Operand(onFalse), temp);
+-    masm.bitwiseOrSimd128(Operand(temp), output);
++    if (lanes == 4)
++        masm.selectX4(mask, onTrue, onFalse, temp, output);
++    else
++        masm.selectSimd128(mask, onTrue, onFalse, temp, output);
+ }
+ 
+ void
+ CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir)
+ {
+     Register elements = ToRegister(lir->elements());
+     AnyRegister output = ToAnyRegister(lir->output());
+     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
++++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+@@ -168,22 +168,16 @@ class CodeGeneratorX86Shared : public Co
+     {
+         MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+         masm.cmpPtr(reg, ImmWord(0));
+         emitBranch(cond, ifTrue, ifFalse);
+     }
+ 
+     void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
+ 
+-    void emitSimdExtractLane8x16(FloatRegister input, Register output, unsigned lane,
+-                                 SimdSign signedness);
+-    void emitSimdExtractLane16x8(FloatRegister input, Register output, unsigned lane,
+-                                 SimdSign signedness);
+-    void emitSimdExtractLane32x4(FloatRegister input, Register output, unsigned lane);
+-
+     template <class T, class Reg> void visitSimdGeneralShuffle(LSimdGeneralShuffleBase* lir, Reg temp);
+ 
+     void generateInvalidateEpilogue();
+ 
+     void canonicalizeIfDeterministic(Scalar::Type type, const LAllocation* value);
+ 
+   public:
+     // Out of line visitors.
+diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-SIMD.cpp b/js/src/jit/x86-shared/MacroAssembler-x86-shared-SIMD.cpp
+new file mode 100644
+--- /dev/null
++++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-SIMD.cpp
+@@ -0,0 +1,1228 @@
++/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
++ * vim: set ts=8 sts=4 et sw=4 tw=99:
++ * This Source Code Form is subject to the terms of the Mozilla Public
++ * License, v. 2.0. If a copy of the MPL was not distributed with this
++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
++
++#include "jit/MacroAssembler.h"
++#include "jit/x86-shared/MacroAssembler-x86-shared.h"
++
++#include "jit/MacroAssembler-inl.h"
++
++using namespace js;
++using namespace js::jit;
++
++using mozilla::DebugOnly;
++using mozilla::FloatingPoint;
++using mozilla::Maybe;
++using mozilla::SpecificNaN;
++
++void
++MacroAssemblerX86Shared::checkedConvertFloat32x4ToInt32x4(FloatRegister src, FloatRegister dest,
++                                                          Register temp, Label* oolEntry,
++                                                          Label* rejoin)
++{
++    // Does the conversion and jumps to the OOL entry if the result value
++    // is the undefined integer pattern.
++    static const SimdConstant InvalidResult = SimdConstant::SplatX4(int32_t(-2147483648));
++    convertFloat32x4ToInt32x4(src, dest);
++
++    ScratchSimd128Scope scratch(asMasm());
++    asMasm().loadConstantSimd128Int(InvalidResult, scratch);
++    packedEqualInt32x4(Operand(dest), scratch);
++    // TODO (bug 1156228): If we have SSE4.1, we can use PTEST here instead of
++    // the two following instructions.
++    vmovmskps(scratch, temp);
++    cmp32(temp, Imm32(0));
++    j(Assembler::NotEqual, oolEntry);
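++    // A set bit in |temp| marks a lane that converted to 0x80000000,
++    // i.e. NaN, an out-of-range input, or a legitimate INT32_MIN; the
++    // OOL path distinguishes these.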
++    bind(rejoin);
++}
++
++void
++MacroAssemblerX86Shared::oolConvertFloat32x4ToInt32x4(FloatRegister src, Register temp,
++                                                      Label* rejoin, Label* onConversionError)
++{
++    static const SimdConstant Int32MaxX4 = SimdConstant::SplatX4(2147483647.f);
++    static const SimdConstant Int32MinX4 = SimdConstant::SplatX4(-2147483648.f);
++
++    ScratchSimd128Scope scratch(asMasm());
++    asMasm().loadConstantSimd128Float(Int32MinX4, scratch);
++    vcmpleps(Operand(src), scratch, scratch);
++    vmovmskps(scratch, temp);
++    cmp32(temp, Imm32(15));
++    j(Assembler::NotEqual, onConversionError);
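++    // (A full mask, 0b1111 == 15, means every lane satisfied the lower
++    // bound; any clear bit is an out-of-range or NaN lane.)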
++
++    asMasm().loadConstantSimd128Float(Int32MaxX4, scratch);
++    vcmpleps(Operand(src), scratch, scratch);
++    vmovmskps(scratch, temp);
++    cmp32(temp, Imm32(0));
++    j(Assembler::NotEqual, onConversionError);
++
++    jump(rejoin);
++}
++
++void
++MacroAssemblerX86Shared::checkedConvertFloat32x4ToUint32x4(FloatRegister in, FloatRegister out,
++                                                           Register temp, FloatRegister tempF,
++                                                           Label* failed)
++{
++    // Classify lane values into 4 disjoint classes:
++    //
++    //   N-lanes:             in <= -1.0
++    //   A-lanes:      -1.0 < in <= 0x0.ffffffp31
++    //   B-lanes: 0x1.0p31 <= in <= 0x0.ffffffp32
++    //   V-lanes: 0x1.0p32 <= in, or isnan(in)
++    //
++    // We need to bail out to throw a RangeError if we see any N-lanes or
++    // V-lanes.
++    //
++    // For A-lanes and B-lanes, we make two float -> int32 conversions:
++    //
++    //   A = cvttps2dq(in)
++    //   B = cvttps2dq(in - 0x1.0p31f)
++    //
++    // Note that the subtraction for the B computation is exact for B-lanes.
++    // There is no rounding, so B is the low 31 bits of the correctly converted
++    // result.
++    //
++    // The cvttps2dq instruction produces 0x80000000 when the input is NaN or
++    // out of range for a signed int32_t. This conveniently provides the missing
++    // high bit for B, so the desired result is A for A-lanes and A|B for
++    // B-lanes.
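++    //
++    // Worked example: for an input lane of 0x1.8p31 (a B-lane),
++    // A = 0x80000000 (out of signed range) and
++    // B = cvttps2dq(0x1.8p31 - 0x1.0p31) = 0x40000000, so
++    // A|B = 0xC0000000, the correct unsigned result.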
++
++    ScratchSimd128Scope scratch(asMasm());
++
++    // TODO: If the majority of lanes are A-lanes, it could be faster to compute
++    // A first, use vmovmskps to check for any non-A-lanes and handle them in
++    // ool code. OTOH, if we're wrong about the lane distribution, that would be
++    // slower.
++
++    // Compute B in |scratch|.
++    static const float Adjust = 0x80000000; // 0x1.0p31f for the benefit of MSVC.
++    static const SimdConstant Bias = SimdConstant::SplatX4(-Adjust);
++    asMasm().loadConstantSimd128Float(Bias, scratch);
++    packedAddFloat32(Operand(in), scratch);
++    convertFloat32x4ToInt32x4(scratch, scratch);
++
++    // Compute A in |out|. This is the last time we use |in| and the first time
++    // we use |out|, so we can tolerate if they are the same register.
++    convertFloat32x4ToInt32x4(in, out);
++
++    // We can identify A-lanes by the sign bits in A: Any A-lanes will be
++    // positive in A, and N, B, and V-lanes will be 0x80000000 in A. Compute a
++    // mask of non-A-lanes into |tempF|.
++    zeroSimd128Float(tempF);
++    packedGreaterThanInt32x4(Operand(out), tempF);
++
++    // Clear the A-lanes in B.
++    bitwiseAndSimdInt(scratch, Operand(tempF), scratch);
++
++    // Compute the final result: A for A-lanes, A|B for B-lanes.
++    bitwiseOrSimdInt(out, Operand(scratch), out);
++
++    // We still need to filter out the V-lanes. They would show up as 0x80000000
++    // in both A and B. Since we cleared the valid A-lanes in B, the V-lanes are
++    // the remaining negative lanes in B.
++    vmovmskps(scratch, temp);
++    cmp32(temp, Imm32(0));
++    j(Assembler::NotEqual, failed);
++}
++
++void
++MacroAssemblerX86Shared::createInt32x4(Register lane0, Register lane1, Register lane2,
++                                       Register lane3, FloatRegister dest)
++{
++    if (AssemblerX86Shared::HasSSE41()) {
++        vmovd(lane0, dest);
++        vpinsrd(1, lane1, dest, dest);
++        vpinsrd(2, lane2, dest, dest);
++        vpinsrd(3, lane3, dest, dest);
++        return;
++    }
++
++    asMasm().reserveStack(Simd128DataSize);
++    store32(lane0, Address(StackPointer, 0 * sizeof(int32_t)));
++    store32(lane1, Address(StackPointer, 1 * sizeof(int32_t)));
++    store32(lane2, Address(StackPointer, 2 * sizeof(int32_t)));
++    store32(lane3, Address(StackPointer, 3 * sizeof(int32_t)));
++    loadAlignedSimd128Int(Address(StackPointer, 0), dest);
++    asMasm().freeStack(Simd128DataSize);
++}
++
++void
++MacroAssemblerX86Shared::createFloat32x4(FloatRegister lane0, FloatRegister lane1,
++                                         FloatRegister lane2, FloatRegister lane3,
++                                         FloatRegister temp, FloatRegister output)
++{
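++    // The two interleaves below arrange the scalars so that the final
++    // vunpcklps produces [lane0, lane1, lane2, lane3] in |output|.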
++    FloatRegister lane0Copy = reusedInputFloat32x4(lane0, output);
++    FloatRegister lane1Copy = reusedInputFloat32x4(lane1, temp);
++    vunpcklps(lane3, lane1Copy, temp);
++    vunpcklps(lane2, lane0Copy, output);
++    vunpcklps(temp, output, output);
++}
++
++void
++MacroAssemblerX86Shared::splatX16(Register input, FloatRegister output)
++{
++    vmovd(input, output);
++    if (AssemblerX86Shared::HasSSSE3()) {
++        zeroSimd128Int(ScratchSimd128Reg);
++        vpshufb(ScratchSimd128Reg, output, output);
++    } else {
++        // Use two shifts to duplicate the low 8 bits into the low 16 bits.
++        vpsllw(Imm32(8), output, output);
++        vmovdqa(output, ScratchSimd128Reg);
++        vpsrlw(Imm32(8), ScratchSimd128Reg, ScratchSimd128Reg);
++        vpor(ScratchSimd128Reg, output, output);
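++        // e.g. a low byte 0xAB now yields 0xABAB in the low 16 bits.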
++        // Then do an X8 splat.
++        vpshuflw(0, output, output);
++        vpshufd(0, output, output);
++    }
++}
++
++void
++MacroAssemblerX86Shared::splatX8(Register input, FloatRegister output)
++{
++    vmovd(input, output);
++    vpshuflw(0, output, output);
++    vpshufd(0, output, output);
++}
++
++void
++MacroAssemblerX86Shared::splatX4(Register input, FloatRegister output)
++{
++    vmovd(input, output);
++    vpshufd(0, output, output);
++}
++
++void
++MacroAssemblerX86Shared::splatX4(FloatRegister input, FloatRegister output)
++{
++    FloatRegister inputCopy = reusedInputFloat32x4(input, output);
++    vshufps(0, inputCopy, inputCopy, output);
++}
++
++void
++MacroAssemblerX86Shared::reinterpretSimd(bool isIntegerLaneType, FloatRegister input,
++                                         FloatRegister output)
++{
++    if (input.aliases(output))
++        return;
++    if (isIntegerLaneType)
++        vmovdqa(input, output);
++    else
++        vmovaps(input, output);
++}
++
++void
++MacroAssemblerX86Shared::extractLaneInt32x4(FloatRegister input, Register output, unsigned lane)
++{
++    if (lane == 0) {
++        // The value we want to extract is in the low double-word
++        moveLowInt32(input, output);
++    } else if (AssemblerX86Shared::HasSSE41()) {
++        vpextrd(lane, input, output);
++    } else {
++        uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
++        shuffleInt32(mask, input, ScratchSimd128Reg);
++        moveLowInt32(ScratchSimd128Reg, output);
++    }
++}
++
++void
++MacroAssemblerX86Shared::extractLaneFloat32x4(FloatRegister input, FloatRegister output,
++                                              unsigned lane, bool canonicalize)
++{
++    if (lane == 0) {
++        // The value we want to extract is in the low double-word
++        if (input != output)
++            moveFloat32(input, output);
++    } else if (lane == 2) {
++        moveHighPairToLowPairFloat32(input, output);
++    } else {
++        uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
++        shuffleFloat32(mask, input, output);
++    }
++    // NaNs contained within SIMD values are not enforced to be canonical, so
++    // when we extract an element into a "regular" scalar JS value, we have to
++    // canonicalize. In wasm code, we can skip this, as wasm only has to
++    // canonicalize NaNs at FFI boundaries.
++    if (canonicalize)
++        asMasm().canonicalizeFloat(output);
++}
++
++void
++MacroAssemblerX86Shared::extractLaneInt16x8(FloatRegister input, Register output, unsigned lane,
++                                            SimdSign sign)
++{
++    // Unlike pextrd and pextrb, this is available in SSE2.
++    vpextrw(lane, input, output);
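++    // vpextrw zero-extends, so only signed lanes need the fixup below.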
++    if (sign == SimdSign::Signed)
++        movswl(output, output);
++}
++
++void
++MacroAssemblerX86Shared::extractLaneInt8x16(FloatRegister input, Register output, unsigned lane,
++                                            SimdSign sign)
++{
++    if (AssemblerX86Shared::HasSSE41()) {
++        vpextrb(lane, input, output);
++        // vpextrb clears the high bits, so no further extension required.
++        if (sign == SimdSign::Unsigned)
++            sign = SimdSign::NotApplicable;
++    } else {
++        // Extract the relevant 16 bits containing our lane, then shift the
++        // correct 8 bits into place.
++        extractLaneInt16x8(input, output, lane / 2, SimdSign::Unsigned);
++        if (lane % 2) {
++            shrl(Imm32(8), output);
++            // The shrl handles the zero-extension. Don't repeat it.
++            if (sign == SimdSign::Unsigned)
++                sign = SimdSign::NotApplicable;
++        }
++    }
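++    // e.g. lane 5 lives in 16-bit lane 2 and, being the odd (high) byte
++    // there, is shifted down by 8 in the pre-SSE4.1 path above.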
++
++    // We have the right low 8 bits in |output|, but we may need to fix the high
++    // bits. Note that this requires |output| to be one of the %eax-%edx
++    // registers.
++    switch (sign) {
++      case SimdSign::Signed:
++        movsbl(output, output);
++        break;
++      case SimdSign::Unsigned:
++        movzbl(output, output);
++        break;
++      case SimdSign::NotApplicable:
++        // No adjustment needed.
++        break;
++    }
++}
++
++void
++MacroAssemblerX86Shared::extractLaneSimdBool(FloatRegister input, Register output, unsigned numLanes,
++                                             unsigned lane)
++{
++    switch (numLanes) {
++      case 4:
++        extractLaneInt32x4(input, output, lane);
++        break;
++      case 8:
++        // Get a lane; don't bother fixing the high bits since we'll mask below.
++        extractLaneInt16x8(input, output, lane, SimdSign::NotApplicable);
++        break;
++      case 16:
++        extractLaneInt8x16(input, output, lane, SimdSign::NotApplicable);
++        break;
++      default:
++        MOZ_CRASH("Unhandled SIMD number of lanes");
++    }
++    // We need to generate a 0/1 value. We have 0/-1 and possibly dirty high bits.
++    asMasm().and32(Imm32(1), output);
++}
++
++void
++MacroAssemblerX86Shared::insertLaneSimdInt(FloatRegister input, Register value, FloatRegister output,
++                                           unsigned lane, unsigned numLanes)
++{
++    if (numLanes == 8) {
++        // Available in SSE 2.
++        vpinsrw(lane, value, input, output);
++        return;
++    }
++
++    // Note that, unlike float32x4, we cannot use vmovd if the inserted
++    // value goes into the first component, as vmovd clears out the higher
++    // lanes of the output.
++    if (AssemblerX86Shared::HasSSE41()) {
++        // TODO: Teach Lowering that we don't need defineReuseInput if we have AVX.
++        switch (numLanes) {
++          case 4:
++            vpinsrd(lane, value, input, output);
++            return;
++          case 16:
++            vpinsrb(lane, value, input, output);
++            return;
++        }
++    }
++
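++    // No single-lane insert instruction is available: spill the vector to the
++    // stack, overwrite the target lane in memory, then reload the vector.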
++    asMasm().reserveStack(Simd128DataSize);
++    storeAlignedSimd128Int(input, Address(StackPointer, 0));
++    switch (numLanes) {
++      case 4:
++        store32(value, Address(StackPointer, lane * sizeof(int32_t)));
++        break;
++      case 16:
++        // Note that this requires `value` to be in one of the registers whose
++        // low 8 bits are addressable (%eax-%edx on x86, all of them on x86-64).
++        store8(value, Address(StackPointer, lane * sizeof(int8_t)));
++        break;
++      default:
++        MOZ_CRASH("Unsupported SIMD numLanes");
++    }
++    loadAlignedSimd128Int(Address(StackPointer, 0), output);
++    asMasm().freeStack(Simd128DataSize);
++}
++
++void
++MacroAssemblerX86Shared::insertLaneFloat32x4(FloatRegister input, FloatRegister value,
++                                             FloatRegister output, unsigned lane)
++{
++    if (lane == 0) {
++        // As both operands are registers, vmovss doesn't modify the upper bits
++        // of the destination operand.
++        if (value != output)
++            vmovss(value, input, output);
++        return;
++    }
++
++    if (AssemblerX86Shared::HasSSE41()) {
++        // The input value is in the low float32 of the 'value' FloatRegister.
++        vinsertps(vinsertpsMask(0, lane), value, output, output);
++        return;
++    }
++
++    asMasm().reserveStack(Simd128DataSize);
++    storeAlignedSimd128Float(input, Address(StackPointer, 0));
++    asMasm().storeFloat32(value, Address(StackPointer, lane * sizeof(int32_t)));
++    loadAlignedSimd128Float(Address(StackPointer, 0), output);
++    asMasm().freeStack(Simd128DataSize);
++}
++
++void
++MacroAssemblerX86Shared::allTrueSimdBool(FloatRegister input, Register output)
++{
++    // We know that the input lanes are boolean, so they are either 0 or -1.
++    // The all-true vector has all 128 bits set, no matter the lane geometry.
++    vpmovmskb(input, output);
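++    // vpmovmskb gathers the sign bit of each byte into the low 16 bits of
++    // output, so an all-true vector yields exactly 0xffff.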
++    cmp32(output, Imm32(0xffff));
++    emitSet(Assembler::Zero, output);
++}
++
++void
++MacroAssemblerX86Shared::anyTrueSimdBool(FloatRegister input, Register output)
++{
++    vpmovmskb(input, output);
++    cmp32(output, Imm32(0x0));
++    emitSet(Assembler::NonZero, output);
++}
++
++void
++MacroAssemblerX86Shared::swizzleInt32x4(FloatRegister input, FloatRegister output,
++                                        unsigned lanes[4])
++{
++    uint32_t mask = MacroAssembler::ComputeShuffleMask(lanes[0], lanes[1], lanes[2], lanes[3]);
++    shuffleInt32(mask, input, output);
++}
++
++void
++MacroAssemblerX86Shared::swizzleInt8x16(FloatRegister input, FloatRegister output,
++                                        const Maybe<Register>& temp, int8_t lanes[16])
++{
++    if (AssemblerX86Shared::HasSSSE3()) {
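++        // vpshufb picks, for every output byte, the input byte named by the
++        // corresponding byte of the control vector loaded into scratch.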
++        ScratchSimd128Scope scratch(asMasm());
++        asMasm().loadConstantSimd128Int(SimdConstant::CreateX16(lanes), scratch);
++        FloatRegister inputCopy = reusedInputInt32x4(input, output);
++        vpshufb(scratch, inputCopy, output);
++        return;
++    }
++
++    // Worst-case fallback for pre-SSSE3 machines. Bounce through memory.
++    MOZ_ASSERT(!!temp, "needs a temp for the memory fallback");
++    asMasm().reserveStack(2 * Simd128DataSize);
++    storeAlignedSimd128Int(input, Address(StackPointer, Simd128DataSize));
++    for (unsigned i = 0; i < 16; i++) {
++        load8ZeroExtend(Address(StackPointer, Simd128DataSize + lanes[i]), *temp);
++        store8(*temp, Address(StackPointer, i));
++    }
++    loadAlignedSimd128Int(Address(StackPointer, 0), output);
++    asMasm().freeStack(2 * Simd128DataSize);
++}
++
++static inline bool
++LanesMatch(unsigned lanes[4], unsigned x, unsigned y, unsigned z, unsigned w)
++{
++    return lanes[0] == x && lanes[1] == y && lanes[2] == z && lanes[3] == w;
++}
++
++void
++MacroAssemblerX86Shared::swizzleFloat32x4(FloatRegister input, FloatRegister output,
++                                          unsigned lanes[4])
++{
++    if (AssemblerX86Shared::HasSSE3()) {
++        if (LanesMatch(lanes, 0, 0, 2, 2)) {
++            vmovsldup(input, output);
++            return;
++        }
++        if (LanesMatch(lanes, 1, 1, 3, 3)) {
++            vmovshdup(input, output);
++            return;
++        }
++    }
++
++    // TODO Here and below, arch-specific lowering could identify this pattern
++    // and use defineReuseInput to avoid this move (bug 1084404)
++    if (LanesMatch(lanes, 2, 3, 2, 3)) {
++        FloatRegister inputCopy = reusedInputFloat32x4(input, output);
++        vmovhlps(input, inputCopy, output);
++        return;
++    }
++
++    if (LanesMatch(lanes, 0, 1, 0, 1)) {
++        if (AssemblerX86Shared::HasSSE3() && !AssemblerX86Shared::HasAVX()) {
++            vmovddup(input, output);
++            return;
++        }
++        FloatRegister inputCopy = reusedInputFloat32x4(input, output);
++        vmovlhps(input, inputCopy, output);
++        return;
++    }
++
++    if (LanesMatch(lanes, 0, 0, 1, 1)) {
++        FloatRegister inputCopy = reusedInputFloat32x4(input, output);
++        vunpcklps(input, inputCopy, output);
++        return;
++    }
++
++    if (LanesMatch(lanes, 2, 2, 3, 3)) {
++        FloatRegister inputCopy = reusedInputFloat32x4(input, output);
++        vunpckhps(input, inputCopy, output);
++        return;
++    }
++
++    uint32_t x = lanes[0];
++    uint32_t y = lanes[1];
++    uint32_t z = lanes[2];
++    uint32_t w = lanes[3];
++
++    uint32_t mask = MacroAssembler::ComputeShuffleMask(x, y, z, w);
++    shuffleFloat32(mask, input, output);
++}
++
++void
++MacroAssemblerX86Shared::shuffleInt8x16(FloatRegister lhs, FloatRegister rhs, FloatRegister output,
++                                        const Maybe<FloatRegister>& maybeFloatTemp,
++                                        const Maybe<Register>& maybeTemp, uint8_t lanes[16])
++{
++    DebugOnly<bool> hasSSSE3 = AssemblerX86Shared::HasSSSE3();
++    MOZ_ASSERT(hasSSSE3 == !!maybeFloatTemp);
++    MOZ_ASSERT(!hasSSSE3 == !!maybeTemp);
++
++    // Use pshufb if it is available.
++    if (AssemblerX86Shared::HasSSSE3()) {
++        ScratchSimd128Scope scratch(asMasm());
++
++        // Use pshufb instructions to gather the lanes from each source vector.
++        // A negative index creates a zero lane, so the two vectors can be combined.
++
++        // Set scratch = lanes from lhs.
++        int8_t idx[16];
++        for (unsigned i = 0; i < 16; i++)
++            idx[i] = lanes[i] < 16 ? lanes[i] : -1;
++        asMasm().loadConstantSimd128Int(SimdConstant::CreateX16(idx), *maybeFloatTemp);
++        FloatRegister lhsCopy = reusedInputInt32x4(lhs, scratch);
++        vpshufb(*maybeFloatTemp, lhsCopy, scratch);
++
++        // Set output = lanes from rhs.
++        for (unsigned i = 0; i < 16; i++)
++            idx[i] = lanes[i] >= 16 ? lanes[i] - 16 : -1;
++        asMasm().loadConstantSimd128Int(SimdConstant::CreateX16(idx), *maybeFloatTemp);
++        FloatRegister rhsCopy = reusedInputInt32x4(rhs, output);
++        vpshufb(*maybeFloatTemp, rhsCopy, output);
++
++        // Combine.
++        vpor(scratch, output, output);
++        return;
++    }
++
++    // Worst-case fallback for pre-SSSE3 machines. Bounce through memory.
++    asMasm().reserveStack(3 * Simd128DataSize);
++    storeAlignedSimd128Int(lhs, Address(StackPointer, Simd128DataSize));
++    storeAlignedSimd128Int(rhs, Address(StackPointer, 2 * Simd128DataSize));
++    for (unsigned i = 0; i < 16; i++) {
++        load8ZeroExtend(Address(StackPointer, Simd128DataSize + lanes[i]), *maybeTemp);
++        store8(*maybeTemp, Address(StackPointer, i));
++    }
++    loadAlignedSimd128Int(Address(StackPointer, 0), output);
++    asMasm().freeStack(3 * Simd128DataSize);
++}
++
++void
++MacroAssemblerX86Shared::shuffleX4(FloatRegister lhs, Operand rhs, FloatRegister out,
++                                   const Maybe<FloatRegister>& maybeTemp, unsigned lanes[4])
++{
++    uint32_t x = lanes[0];
++    uint32_t y = lanes[1];
++    uint32_t z = lanes[2];
++    uint32_t w = lanes[3];
++
++    // Check that lanes come from LHS in majority:
++    unsigned numLanesFromLHS = (x < 4) + (y < 4) + (z < 4) + (w < 4);
++    MOZ_ASSERT(numLanesFromLHS >= 2);
++
++    // When reading this method, remember that vshufps takes its first two
++    // output lanes from the destination operand (right operand) and its last
++    // two from the source operand (left operand).
++    //
++    // Legend for explanations:
++    // - L: LHS
++    // - R: RHS
++    // - T: temporary
++
++    uint32_t mask;
++
++    // If all lanes came from a single vector, we should use swizzle instead.
++    MOZ_ASSERT(numLanesFromLHS < 4);
++
++    // If all values stay in their lane, this is a blend.
++    if (AssemblerX86Shared::HasSSE41()) {
++        if (x % 4 == 0 && y % 4 == 1 && z % 4 == 2 && w % 4 == 3) {
++            vblendps(blendpsMask(x >= 4, y >= 4, z >= 4, w >= 4), rhs, lhs, out);
++            return;
++        }
++    }
++
++    // One element of the second, all other elements of the first
++    if (numLanesFromLHS == 3) {
++        unsigned firstMask = -1, secondMask = -1;
++
++        // register-register vmovss preserves the high lanes.
++        if (LanesMatch(lanes, 4, 1, 2, 3) && rhs.kind() == Operand::FPREG) {
++            vmovss(FloatRegister::FromCode(rhs.fpu()), lhs, out);
++            return;
++        }
++
++        // SSE4.1 vinsertps can handle any single element.
++        unsigned numLanesUnchanged = (x == 0) + (y == 1) + (z == 2) + (w == 3);
++        if (AssemblerX86Shared::HasSSE41() && numLanesUnchanged == 3) {
++            unsigned srcLane;
++            unsigned dstLane;
++            if (x >= 4) {
++                srcLane = x - 4;
++                dstLane = 0;
++            } else if (y >= 4) {
++                srcLane = y - 4;
++                dstLane = 1;
++            } else if (z >= 4) {
++                srcLane = z - 4;
++                dstLane = 2;
++            } else {
++                MOZ_ASSERT(w >= 4);
++                srcLane = w - 4;
++                dstLane = 3;
++            }
++            vinsertps(vinsertpsMask(srcLane, dstLane), rhs, lhs, out);
++            return;
++        }
++
++        MOZ_ASSERT(!!maybeTemp);
++        FloatRegister rhsCopy = *maybeTemp;
++        loadAlignedSimd128Float(rhs, rhsCopy);
++
++        if (x < 4 && y < 4) {
++            if (w >= 4) {
++                w %= 4;
++                // T = (Rw Rw Lz Lz) = vshufps(firstMask, lhs, rhs, rhsCopy)
++                firstMask = MacroAssembler::ComputeShuffleMask(w, w, z, z);
++                // (Lx Ly Lz Rw) = (Lx Ly Tz Tx) = vshufps(secondMask, T, lhs, out)
++                secondMask = MacroAssembler::ComputeShuffleMask(x, y, 2, 0);
++            } else {
++                MOZ_ASSERT(z >= 4);
++                z %= 4;
++                // T = (Rz Rz Lw Lw) = vshufps(firstMask, lhs, rhs, rhsCopy)
++                firstMask = MacroAssembler::ComputeShuffleMask(z, z, w, w);
++                // (Lx Ly Rz Lw) = (Lx Ly Tx Tz) = vshufps(secondMask, T, lhs, out)
++                secondMask = MacroAssembler::ComputeShuffleMask(x, y, 0, 2);
++            }
++
++            vshufps(firstMask, lhs, rhsCopy, rhsCopy);
++            vshufps(secondMask, rhsCopy, lhs, out);
++            return;
++        }
++
++        MOZ_ASSERT(z < 4 && w < 4);
++
++        if (y >= 4) {
++            y %= 4;
++            // T = (Ry Ry Lx Lx) = vshufps(firstMask, lhs, rhs, rhsCopy)
++            firstMask = MacroAssembler::ComputeShuffleMask(y, y, x, x);
++            // (Lx Ry Lz Lw) = (Tz Tx Lz Lw) = vshufps(secondMask, lhs, T, out)
++            secondMask = MacroAssembler::ComputeShuffleMask(2, 0, z, w);
++        } else {
++            MOZ_ASSERT(x >= 4);
++            x %= 4;
++            // T = (Rx Rx Ly Ly) = vshufps(firstMask, lhs, rhs, rhsCopy)
++            firstMask = MacroAssembler::ComputeShuffleMask(x, x, y, y);
++            // (Rx Ly Lz Lw) = (Tx Tz Lz Lw) = vshufps(secondMask, lhs, T, out)
++            secondMask = MacroAssembler::ComputeShuffleMask(0, 2, z, w);
++        }
++
++        vshufps(firstMask, lhs, rhsCopy, rhsCopy);
++        if (AssemblerX86Shared::HasAVX()) {
++            vshufps(secondMask, lhs, rhsCopy, out);
++        } else {
++            vshufps(secondMask, lhs, rhsCopy, rhsCopy);
++            moveSimd128Float(rhsCopy, out);
++        }
++        return;
++    }
++
++    // Two elements from one vector, two other elements from the other
++    MOZ_ASSERT(numLanesFromLHS == 2);
++
++    // TODO Here and below, handling the symmetric case would avoid a move, but
++    // it can't be reached because operands would get swapped (bug 1084404).
++    if (LanesMatch(lanes, 2, 3, 6, 7)) {
++        ScratchSimd128Scope scratch(asMasm());
++        if (AssemblerX86Shared::HasAVX()) {
++            FloatRegister rhsCopy = reusedInputAlignedFloat32x4(rhs, scratch);
++            vmovhlps(lhs, rhsCopy, out);
++        } else {
++            loadAlignedSimd128Float(rhs, scratch);
++            vmovhlps(lhs, scratch, scratch);
++            moveSimd128Float(scratch, out);
++        }
++        return;
++    }
++
++    if (LanesMatch(lanes, 0, 1, 4, 5)) {
++        FloatRegister rhsCopy;
++        ScratchSimd128Scope scratch(asMasm());
++        if (rhs.kind() == Operand::FPREG) {
++            // No need to make an actual copy, since the operand is already
++            // in a register, and it won't be clobbered by the vmovlhps.
++            rhsCopy = FloatRegister::FromCode(rhs.fpu());
++        } else {
++            loadAlignedSimd128Float(rhs, scratch);
++            rhsCopy = scratch;
++        }
++        vmovlhps(rhsCopy, lhs, out);
++        return;
++    }
++
++    if (LanesMatch(lanes, 0, 4, 1, 5)) {
++        vunpcklps(rhs, lhs, out);
++        return;
++    }
++
++    // TODO swapped case would be better (bug 1084404)
++    if (LanesMatch(lanes, 4, 0, 5, 1)) {
++        ScratchSimd128Scope scratch(asMasm());
++        if (AssemblerX86Shared::HasAVX()) {
++            FloatRegister rhsCopy = reusedInputAlignedFloat32x4(rhs, scratch);
++            vunpcklps(lhs, rhsCopy, out);
++        } else {
++            loadAlignedSimd128Float(rhs, scratch);
++            vunpcklps(lhs, scratch, scratch);
++            moveSimd128Float(scratch, out);
++        }
++        return;
++    }
++
++    if (LanesMatch(lanes, 2, 6, 3, 7)) {
++        vunpckhps(rhs, lhs, out);
++        return;
++    }
++
++    // TODO swapped case would be better (bug 1084404)
++    if (LanesMatch(lanes, 6, 2, 7, 3)) {
++        ScratchSimd128Scope scratch(asMasm());
++        if (AssemblerX86Shared::HasAVX()) {
++            FloatRegister rhsCopy = reusedInputAlignedFloat32x4(rhs, scratch);
++            vunpckhps(lhs, rhsCopy, out);
++        } else {
++            loadAlignedSimd128Float(rhs, scratch);
++            vunpckhps(lhs, scratch, scratch);
++            moveSimd128Float(scratch, out);
++        }
++        return;
++    }
++
++    // In one vshufps
++    if (x < 4 && y < 4) {
++        mask = MacroAssembler::ComputeShuffleMask(x, y, z % 4, w % 4);
++        vshufps(mask, rhs, lhs, out);
++        return;
++    }
++
++    // At creation, we should have explicitly swapped in this case.
++    MOZ_ASSERT(!(z >= 4 && w >= 4));
++
++    // In two vshufps, for the most generic case:
++    uint32_t firstMask[4], secondMask[4];
++    unsigned i = 0, j = 2, k = 0;
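++    // The first vshufps gathers the needed LHS lanes into slots 0-1 and the
++    // needed RHS lanes into slots 2-3; the second one then moves each lane to
++    // its requested position.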
++
++#define COMPUTE_MASK(lane)       \
++    if (lane >= 4) {             \
++        firstMask[j] = lane % 4; \
++        secondMask[k++] = j++;   \
++    } else {                     \
++        firstMask[i] = lane;     \
++        secondMask[k++] = i++;   \
++    }
++
++    COMPUTE_MASK(x)
++    COMPUTE_MASK(y)
++    COMPUTE_MASK(z)
++    COMPUTE_MASK(w)
++#undef COMPUTE_MASK
++
++    MOZ_ASSERT(i == 2 && j == 4 && k == 4);
++
++    mask = MacroAssembler::ComputeShuffleMask(firstMask[0], firstMask[1],
++                                              firstMask[2], firstMask[3]);
++    vshufps(mask, rhs, lhs, lhs);
++
++    mask = MacroAssembler::ComputeShuffleMask(secondMask[0], secondMask[1],
++                                              secondMask[2], secondMask[3]);
++    vshufps(mask, lhs, lhs, lhs);
++}
++
++static inline FloatRegister
++ToSimdFloatRegister(const Operand& op)
++{
++    return FloatRegister(op.fpu(), FloatRegister::Codes::ContentType::Simd128);
++}
++
++void
++MacroAssemblerX86Shared::compareInt8x16(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
++                                        FloatRegister output)
++{
++    static const SimdConstant allOnes = SimdConstant::SplatX16(-1);
++    ScratchSimd128Scope scratch(asMasm());
++    switch (cond) {
++      case Assembler::Condition::GreaterThan:
++        vpcmpgtb(rhs, lhs, output);
++        break;
++      case Assembler::Condition::Equal:
++        vpcmpeqb(rhs, lhs, output);
++        break;
++      case Assembler::Condition::LessThan:
++        // src := rhs
++        if (rhs.kind() == Operand::FPREG)
++            moveSimd128Int(ToSimdFloatRegister(rhs), scratch);
++        else
++            loadAlignedSimd128Int(rhs, scratch);
++
++        // src := src > lhs (i.e. lhs < rhs)
++        // This could be improved by custom lowering (rhs is tied to the output register)
++        vpcmpgtb(Operand(lhs), scratch, scratch);
++        moveSimd128Int(scratch, output);
++        break;
++      case Assembler::Condition::NotEqual:
++        // Ideally for notEqual, greaterThanOrEqual, and lessThanOrEqual, we
++        // should invert the comparison by, e.g., swapping the arms of a select
++        // when that is how the result is consumed.
++        asMasm().loadConstantSimd128Int(allOnes, scratch);
++        vpcmpeqb(rhs, lhs, output);
++        bitwiseXorSimdInt(output, Operand(scratch), output);
++        break;
++      case Assembler::Condition::GreaterThanOrEqual:
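++        // lhs >= rhs is computed as !(rhs > lhs).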
++        // src := rhs
++        if (rhs.kind() == Operand::FPREG)
++            moveSimd128Int(ToSimdFloatRegister(rhs), scratch);
++        else
++            loadAlignedSimd128Int(rhs, scratch);
++        vpcmpgtb(Operand(lhs), scratch, scratch);
++        asMasm().loadConstantSimd128Int(allOnes, output);
++        bitwiseXorSimdInt(output, Operand(scratch), output);
++        break;
++      case Assembler::Condition::LessThanOrEqual:
++        // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
++        asMasm().loadConstantSimd128Int(allOnes, scratch);
++        vpcmpgtb(rhs, lhs, output);
++        bitwiseXorSimdInt(output, Operand(scratch), output);
++        break;
++      default:
++        MOZ_CRASH("unexpected condition op");
++    }
++}
++
++void
++MacroAssemblerX86Shared::compareInt16x8(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
++                                        FloatRegister output)
++{
++    static const SimdConstant allOnes = SimdConstant::SplatX8(-1);
++
++    ScratchSimd128Scope scratch(asMasm());
++    switch (cond) {
++      case Assembler::Condition::GreaterThan:
++        vpcmpgtw(rhs, lhs, output);
++        break;
++      case Assembler::Condition::Equal:
++        vpcmpeqw(rhs, lhs, output);
++        break;
++      case Assembler::Condition::LessThan:
++        // src := rhs
++        if (rhs.kind() == Operand::FPREG)
++            moveSimd128Int(ToSimdFloatRegister(rhs), scratch);
++        else
++            loadAlignedSimd128Int(rhs, scratch);
++
++        // src := src > lhs (i.e. lhs < rhs)
++        // This could be improved by custom lowering (rhs is tied to the output register)
++        vpcmpgtw(Operand(lhs), scratch, scratch);
++        moveSimd128Int(scratch, output);
++        break;
++      case Assembler::Condition::NotEqual:
++        // Ideally for notEqual, greaterThanOrEqual, and lessThanOrEqual, we
++        // should invert the comparison by, e.g., swapping the arms of a select
++        // when that is how the result is consumed.
++        asMasm().loadConstantSimd128Int(allOnes, scratch);
++        vpcmpeqw(rhs, lhs, output);
++        bitwiseXorSimdInt(output, Operand(scratch), output);
++        break;
++      case Assembler::Condition::GreaterThanOrEqual:
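++        // lhs >= rhs is computed as !(rhs > lhs).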
++        // src := rhs
++        if (rhs.kind() == Operand::FPREG)
++            moveSimd128Int(ToSimdFloatRegister(rhs), scratch);
++        else
++            loadAlignedSimd128Int(rhs, scratch);
++        vpcmpgtw(Operand(lhs), scratch, scratch);
++        asMasm().loadConstantSimd128Int(allOnes, output);
++        bitwiseXorSimdInt(output, Operand(scratch), output);
++        break;
++      case Assembler::Condition::LessThanOrEqual:
++        // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
++        asMasm().loadConstantSimd128Int(allOnes, scratch);
++        vpcmpgtw(rhs, lhs, output);
++        bitwiseXorSimdInt(output, Operand(scratch), output);
++        break;
++      default:
++        MOZ_CRASH("unexpected condition op");
++    }
++}
++
++void
++MacroAssemblerX86Shared::compareInt32x4(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
++                                        FloatRegister output)
++{
++    static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
++    ScratchSimd128Scope scratch(asMasm());
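++    // The two-operand packed*Int32x4 helpers used below write their result
++    // into lhs, so this path relies on the caller aliasing output to lhs.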
++    switch (cond) {
++      case Assembler::Condition::GreaterThan:
++        packedGreaterThanInt32x4(rhs, lhs);
++        break;
++      case Assembler::Condition::Equal:
++        packedEqualInt32x4(rhs, lhs);
++        break;
++      case Assembler::Condition::LessThan:
++        // src := rhs
++        if (rhs.kind() == Operand::FPREG)
++            moveSimd128Int(ToSimdFloatRegister(rhs), scratch);
++        else
++            loadAlignedSimd128Int(rhs, scratch);
++
++        // src := src > lhs (i.e. lhs < rhs)
++        // This could be improved by custom lowering (rhs is tied to the output register)
++        packedGreaterThanInt32x4(Operand(lhs), scratch);
++        moveSimd128Int(scratch, lhs);
++        break;
++      case Assembler::Condition::NotEqual:
++        // Ideally for notEqual, greaterThanOrEqual, and lessThanOrEqual, we
++        // should invert the comparison by, e.g., swapping the arms of a select
++        // when that is how the result is consumed.
++        asMasm().loadConstantSimd128Int(allOnes, scratch);
++        packedEqualInt32x4(rhs, lhs);
++        bitwiseXorSimdInt(lhs, Operand(scratch), lhs);
++        break;
++      case Assembler::Condition::GreaterThanOrEqual:
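++        // lhs >= rhs is computed as !(rhs > lhs).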
++        // src := rhs
++        if (rhs.kind() == Operand::FPREG)
++            moveSimd128Int(ToSimdFloatRegister(rhs), scratch);
++        else
++            loadAlignedSimd128Int(rhs, scratch);
++        packedGreaterThanInt32x4(Operand(lhs), scratch);
++        asMasm().loadConstantSimd128Int(allOnes, lhs);
++        bitwiseXorSimdInt(lhs, Operand(scratch), lhs);
++        break;
++      case Assembler::Condition::LessThanOrEqual:
++        // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
++        asMasm().loadConstantSimd128Int(allOnes, scratch);
++        packedGreaterThanInt32x4(rhs, lhs);
++        bitwiseXorSimdInt(lhs, Operand(scratch), lhs);
++        break;
++      default:
++        MOZ_CRASH("unexpected condition op");
++    }
++}
++
++void
++MacroAssemblerX86Shared::compareFloat32x4(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
++                                          FloatRegister output)
++{
++    switch (cond) {
++      case Assembler::Condition::Equal:
++        vcmpeqps(rhs, lhs, output);
++        break;
++      case Assembler::Condition::LessThan:
++        vcmpltps(rhs, lhs, output);
++        break;
++      case Assembler::Condition::LessThanOrEqual:
++        vcmpleps(rhs, lhs, output);
++        break;
++      case Assembler::Condition::NotEqual:
++        vcmpneqps(rhs, lhs, output);
++        break;
++      case Assembler::Condition::GreaterThanOrEqual:
++      case Assembler::Condition::GreaterThan:
++        // We reverse these before register allocation so that we don't have to
++        // copy into and out of temporaries after codegen.
++        MOZ_CRASH("should have reversed this");
++      default:
++        MOZ_CRASH("unexpected condition op");
++    }
++}
++
++void
++MacroAssemblerX86Shared::mulInt32x4(FloatRegister lhs, Operand rhs,
++                                    const Maybe<FloatRegister>& temp, FloatRegister output)
++{
++    if (AssemblerX86Shared::HasSSE41()) {
++        vpmulld(rhs, lhs, output);
++        return;
++    }
++
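++    // Without SSE4.1's pmulld there is no packed 32-bit multiply; build it
++    // from two vpmuludq (64-bit products of the even 32-bit lanes) and then
++    // interleave the low halves of the products.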
++    ScratchSimd128Scope scratch(asMasm());
++    loadAlignedSimd128Int(rhs, scratch);
++    vpmuludq(lhs, scratch, scratch);
++    // scratch contains (Rx, _, Rz, _) where R is the resulting vector.
++
++    MOZ_ASSERT(!!temp);
++    vpshufd(MacroAssembler::ComputeShuffleMask(1, 1, 3, 3), lhs, lhs);
++    vpshufd(MacroAssembler::ComputeShuffleMask(1, 1, 3, 3), rhs, *temp);
++    vpmuludq(*temp, lhs, lhs);
++    // lhs contains (Ry, _, Rw, _) where R is the resulting vector.
++
++    vshufps(MacroAssembler::ComputeShuffleMask(0, 2, 0, 2), scratch, lhs, lhs);
++    // lhs contains (Ry, Rw, Rx, Rz)
++    vshufps(MacroAssembler::ComputeShuffleMask(2, 0, 3, 1), lhs, lhs, lhs);
++}
++
++void
++MacroAssemblerX86Shared::minFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister output)
++{
++    ScratchSimd128Scope scratch(asMasm());
++    FloatRegister rhsCopy = reusedInputAlignedFloat32x4(rhs, scratch);
++    vminps(Operand(lhs), rhsCopy, scratch);
++    vminps(rhs, lhs, output);
++    vorps(scratch, output, output); // NaN or'd with arbitrary bits is NaN
++}
++
++void
++MacroAssemblerX86Shared::maxFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister temp,
++                                      FloatRegister output)
++{
++    ScratchSimd128Scope scratch(asMasm());
++    FloatRegister lhsCopy = reusedInputFloat32x4(lhs, scratch);
++    vcmpunordps(rhs, lhsCopy, scratch);
++
++    FloatRegister rhsCopy = reusedInputAlignedFloat32x4(rhs, temp);
++    vmaxps(Operand(lhs), rhsCopy, temp);
++    vmaxps(rhs, lhs, output);
++
++    vandps(temp, output, output);
++    vorps(scratch, output, output); // or in the all-ones NaNs
++}
++
++void
++MacroAssemblerX86Shared::minNumFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister temp,
++                                         FloatRegister output)
++{
++    ScratchSimd128Scope scratch(asMasm());
++    asMasm().loadConstantSimd128Int(SimdConstant::SplatX4(int32_t(0x80000000)), temp);
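++    // 0x80000000 is the bit pattern of -0.f: wherever lhs is exactly -0, the
++    // sign bit gets forced onto the min result below, since vminps on its own
++    // treats -0 and +0 as equal.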
++
++    FloatRegister mask = scratch;
++    FloatRegister tmpCopy = reusedInputFloat32x4(temp, scratch);
++    vpcmpeqd(Operand(lhs), tmpCopy, mask);
++    vandps(temp, mask, mask);
++
++    FloatRegister lhsCopy = reusedInputFloat32x4(lhs, temp);
++    vminps(rhs, lhsCopy, temp);
++    vorps(mask, temp, temp);
++
++    FloatRegister rhsCopy = reusedInputAlignedFloat32x4(rhs, mask);
++    vcmpneqps(rhs, rhsCopy, mask);
++
++    if (AssemblerX86Shared::HasAVX()) {
++        vblendvps(mask, lhs, temp, output);
++    } else {
++        // Emulate vblendvps.
++        // With SSE 4.1 we could use blendvps, but it's awkward since it
++        // requires the mask to be in xmm0.
++        if (lhs != output)
++            moveSimd128Float(lhs, output);
++        vandps(Operand(mask), output, output);
++        vandnps(Operand(temp), mask, mask);
++        vorps(Operand(mask), output, output);
++    }
++}
++
++void
++MacroAssemblerX86Shared::maxNumFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister temp,
++                                         FloatRegister output)
++{
++    ScratchSimd128Scope scratch(asMasm());
++    FloatRegister mask = scratch;
++
++    asMasm().loadConstantSimd128Int(SimdConstant::SplatX4(0), mask);
++    vpcmpeqd(Operand(lhs), mask, mask);
++
++    asMasm().loadConstantSimd128Int(SimdConstant::SplatX4(int32_t(0x80000000)), temp);
++    vandps(temp, mask, mask);
++
++    FloatRegister lhsCopy = reusedInputFloat32x4(lhs, temp);
++    vmaxps(rhs, lhsCopy, temp);
++    vandnps(Operand(temp), mask, mask);
++
++    // Ensure temp always contains the temporary result
++    mask = temp;
++    temp = scratch;
++
++    FloatRegister rhsCopy = reusedInputAlignedFloat32x4(rhs, mask);
++    vcmpneqps(rhs, rhsCopy, mask);
++
++    if (AssemblerX86Shared::HasAVX()) {
++        vblendvps(mask, lhs, temp, output);
++    } else {
++        // Emulate vblendvps.
++        // With SSE 4.1 we could use blendvps, but it's awkward since it
++        // requires the mask to be in xmm0.
++        if (lhs != output)
++            moveSimd128Float(lhs, output);
++        vandps(Operand(mask), output, output);
++        vandnps(Operand(temp), mask, mask);
++        vorps(Operand(mask), output, output);
++    }
++}
++
++void
++MacroAssemblerX86Shared::negFloat32x4(Operand in, FloatRegister out)
++{
++    // All zeros but the sign bit
++    static const SimdConstant minusZero = SimdConstant::SplatX4(-0.f);
++    asMasm().loadConstantSimd128Float(minusZero, out);
++    bitwiseXorFloat32x4(out, in, out);
++}
++
++void
++MacroAssemblerX86Shared::notInt8x16(Operand in, FloatRegister out)
++{
++    static const SimdConstant allOnes = SimdConstant::SplatX16(-1);
++    asMasm().loadConstantSimd128Int(allOnes, out);
++    bitwiseXorSimdInt(out, in, out);
++}
++
++void
++MacroAssemblerX86Shared::notInt16x8(Operand in, FloatRegister out)
++{
++    static const SimdConstant allOnes = SimdConstant::SplatX8(-1);
++    asMasm().loadConstantSimd128Int(allOnes, out);
++    bitwiseXorSimdInt(out, in, out);
++}
++
++void
++MacroAssemblerX86Shared::notInt32x4(Operand in, FloatRegister out)
++{
++    static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
++    asMasm().loadConstantSimd128Int(allOnes, out);
++    bitwiseXorSimdInt(out, in, out);
++}
++
++void
++MacroAssemblerX86Shared::notFloat32x4(Operand in, FloatRegister out)
++{
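++    // A NaN with the sign bit and every significand bit set has the all-ones
++    // bit pattern 0xffffffff, giving the all-ones mask as a float constant.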
++    float ones = SpecificNaN<float>(1, FloatingPoint<float>::kSignificandBits);
++    static const SimdConstant allOnes = SimdConstant::SplatX4(ones);
++    asMasm().loadConstantSimd128Float(allOnes, out);
++    bitwiseXorFloat32x4(out, in, out);
++}
++
++void
++MacroAssemblerX86Shared::absFloat32x4(Operand in, FloatRegister out)
++{
++    // All ones but the sign bit
++    float signMask = SpecificNaN<float>(0, FloatingPoint<float>::kSignificandBits);
++    static const SimdConstant signMasks = SimdConstant::SplatX4(signMask);
++    asMasm().loadConstantSimd128Float(signMasks, out);
++    bitwiseAndFloat32x4(out, in, out);
++}
++
++static inline void
++MaskSimdShiftCount(MacroAssembler& masm, unsigned shiftmask, Register count, Register temp,
++                   FloatRegister dest)
++{
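++    // Mask the count so oversized shift amounts wrap modulo the lane width,
++    // matching the masking done by the immediate-count shift variants.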
++    masm.mov(count, temp);
++    masm.andl(Imm32(shiftmask), temp);
++    masm.vmovd(temp, dest);
++}
++
++void
++MacroAssemblerX86Shared::packedLeftShiftByScalarInt16x8(FloatRegister in, Register count,
++                                                        Register temp, FloatRegister dest)
++{
++    ScratchSimd128Scope scratch(asMasm());
++    MaskSimdShiftCount(asMasm(), 15, count, temp, scratch);
++    vpsllw(scratch, in, dest);
++}
++
++void
++MacroAssemblerX86Shared::packedRightShiftByScalarInt16x8(FloatRegister in, Register count,
++                                                         Register temp, FloatRegister dest)
++{
++    ScratchSimd128Scope scratch(asMasm());
++    MaskSimdShiftCount(asMasm(), 15, count, temp, scratch);
++    vpsraw(scratch, in, dest);
++}
++
++void
++MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt16x8(FloatRegister in, Register count,
++                                                                 Register temp, FloatRegister dest)
++{
++    ScratchSimd128Scope scratch(asMasm());
++    MaskSimdShiftCount(asMasm(), 15, count, temp, scratch);
++    vpsrlw(scratch, in, dest);
++}
++
++void
++MacroAssemblerX86Shared::packedLeftShiftByScalarInt32x4(FloatRegister in, Register count,
++                                                        Register temp, FloatRegister dest)
++{
++    ScratchSimd128Scope scratch(asMasm());
++    MaskSimdShiftCount(asMasm(), 31, count, temp, scratch);
++    vpslld(scratch, in, dest);
++}
++
++void
++MacroAssemblerX86Shared::packedRightShiftByScalarInt32x4(FloatRegister in, Register count,
++                                                         Register temp, FloatRegister dest)
++{
++    ScratchSimd128Scope scratch(asMasm());
++    MaskSimdShiftCount(asMasm(), 31, count, temp, scratch);
++    vpsrad(scratch, in, dest);
++}
++
++void
++MacroAssemblerX86Shared::packedUnsignedRightShiftByScalarInt32x4(FloatRegister in, Register count,
++                                                                 Register temp, FloatRegister dest)
++{
++    ScratchSimd128Scope scratch(asMasm());
++    MaskSimdShiftCount(asMasm(), 31, count, temp, scratch);
++    vpsrld(scratch, in, dest);
++}
++
++void
++MacroAssemblerX86Shared::selectSimd128(FloatRegister mask, FloatRegister onTrue, FloatRegister onFalse,
++                       FloatRegister temp, FloatRegister output)
++{
++    if (onTrue != output)
++        vmovaps(onTrue, output);
++    if (mask != temp)
++        vmovaps(mask, temp);
++
++    // SSE4.1 has plain blendvps which can do this, but it is awkward
++    // to use because it requires the mask to be in xmm0.
++
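++    // Bitwise select: output = (mask & onTrue) | (~mask & onFalse).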
++    bitwiseAndSimdInt(output, Operand(temp), output);
++    bitwiseAndNotSimdInt(temp, Operand(onFalse), temp);
++    bitwiseOrSimdInt(output, Operand(temp), output);
++}
+diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
++++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+@@ -1162,19 +1162,19 @@ MacroAssembler::canonicalizeFloat32x4(Fl
+ 
+     FloatRegister mask = scratch;
+     vcmpordps(Operand(reg), reg, mask);
+ 
+     FloatRegister ifFalse = scratch2;
+     float nanf = float(JS::GenericNaN());
+     loadConstantSimd128Float(SimdConstant::SplatX4(nanf), ifFalse);
+ 
+-    bitwiseAndSimd128(Operand(mask), reg);
+-    bitwiseAndNotSimd128(Operand(ifFalse), mask);
+-    bitwiseOrSimd128(Operand(mask), reg);
++    bitwiseAndFloat32x4(reg, Operand(mask), reg);
++    bitwiseAndNotFloat32x4(mask, Operand(ifFalse), mask);
++    bitwiseOrFloat32x4(reg, Operand(mask), reg);
+ }
+ 
+ // ========================================================================
+ // Memory access primitives.
+ void
+ MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
+ {
+     vmovsd(src, dest);
+diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
++++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+@@ -101,17 +101,18 @@ class MacroAssemblerX86Shared : public A
+ 
+     void compareFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
+         if (cond & DoubleConditionBitInvert)
+             vucomiss(lhs, rhs);
+         else
+             vucomiss(rhs, lhs);
+     }
+ 
+-    void branchNegativeZero(FloatRegister reg, Register scratch, Label* label, bool  maybeNonZero = true);
++    void branchNegativeZero(FloatRegister reg, Register scratch, Label* label,
++                            bool maybeNonZero = true);
+     void branchNegativeZeroFloat32(FloatRegister reg, Register scratch, Label* label);
+ 
+     void move32(Imm32 imm, Register dest) {
+         // Use the ImmWord version of mov to register, which has special
+         // optimizations. Casting to uint32_t here ensures that the value
+         // is zero-extended.
+         mov(ImmWord(uint32_t(imm.value)), dest);
+     }
+@@ -405,37 +406,206 @@ class MacroAssemblerX86Shared : public A
+         // least signed int32, or NaN), this will return the undefined integer
+         // value (0x80000000).
+         vcvttps2dq(src, dest);
+     }
+     void convertInt32x4ToFloat32x4(FloatRegister src, FloatRegister dest) {
+         vcvtdq2ps(src, dest);
+     }
+ 
+-    void bitwiseAndSimd128(const Operand& src, FloatRegister dest) {
+-        // TODO Using the "ps" variant for all types incurs a domain crossing
+-        // penalty for integer types and double.
+-        vandps(src, dest, dest);
++    // SIMD methods, defined in MacroAssembler-x86-shared-SIMD.cpp.
++    void checkedConvertFloat32x4ToInt32x4(FloatRegister src, FloatRegister dest, Register temp,
++                                          Label* oolCheck, Label* rejoin);
++    void oolConvertFloat32x4ToInt32x4(FloatRegister src, Register temp, Label* rejoin,
++                                      Label* onConversionError);
++    void checkedConvertFloat32x4ToUint32x4(FloatRegister src, FloatRegister dest, Register temp,
++                                           FloatRegister tempF, Label* failed);
++
++    void createInt32x4(Register lane0, Register lane1, Register lane2, Register lane3,
++                       FloatRegister dest);
++    void createFloat32x4(FloatRegister lane0, FloatRegister lane1, FloatRegister lane2,
++                         FloatRegister lane3, FloatRegister temp, FloatRegister output);
++
++    void splatX16(Register input, FloatRegister output);
++    void splatX8(Register input, FloatRegister output);
++    void splatX4(Register input, FloatRegister output);
++    void splatX4(FloatRegister input, FloatRegister output);
++
++    void reinterpretSimd(bool isIntegerLaneType, FloatRegister input, FloatRegister output);
++
++    void extractLaneInt32x4(FloatRegister input, Register output, unsigned lane);
++    void extractLaneFloat32x4(FloatRegister input, FloatRegister output, unsigned lane,
++                              bool canonicalize);
++    void extractLaneInt16x8(FloatRegister input, Register output, unsigned lane, SimdSign sign);
++    void extractLaneInt8x16(FloatRegister input, Register output, unsigned lane, SimdSign sign);
++    void extractLaneSimdBool(FloatRegister input, Register output, unsigned numLanes, unsigned lane);
++
++    void insertLaneSimdInt(FloatRegister input, Register value, FloatRegister output,
++                           unsigned lane, unsigned numLanes);
++    void insertLaneFloat32x4(FloatRegister input, FloatRegister value, FloatRegister output,
++                             unsigned lane);
++
++    void allTrueSimdBool(FloatRegister input, Register output);
++    void anyTrueSimdBool(FloatRegister input, Register output);
++
++    void swizzleInt32x4(FloatRegister input, FloatRegister output, unsigned lanes[4]);
++    void swizzleFloat32x4(FloatRegister input, FloatRegister output, unsigned lanes[4]);
++    void swizzleInt8x16(FloatRegister input, FloatRegister output,
++                        const mozilla::Maybe<Register>& temp, int8_t lanes[16]);
++
++    void shuffleX4(FloatRegister lhs, Operand rhs, FloatRegister out,
++                   const mozilla::Maybe<FloatRegister>& maybeTemp, unsigned lanes[4]);
++    void shuffleInt8x16(FloatRegister lhs, FloatRegister rhs, FloatRegister output,
++                        const mozilla::Maybe<FloatRegister>& maybeFloatTemp,
++                        const mozilla::Maybe<Register>& maybeTemp, uint8_t lanes[16]);
++
++    void compareInt8x16(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
++                        FloatRegister output);
++    void compareInt16x8(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
++                        FloatRegister output);
++    void compareInt32x4(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
++                        FloatRegister output);
++    void compareFloat32x4(FloatRegister lhs, Operand rhs, Assembler::Condition cond,
++                          FloatRegister output);
++
++    void addInt8x16(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vpaddb(rhs, lhs, output);
++    }
++    void addInt16x8(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vpaddw(rhs, lhs, output);
++    }
++    void addInt32x4(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vpaddd(rhs, lhs, output);
++    }
++    void addFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vaddps(rhs, lhs, output);
++    }
++
++    void addSatInt8x16(FloatRegister lhs, Operand rhs, SimdSign sign, FloatRegister output) {
++        if (sign == SimdSign::Signed)
++            vpaddsb(rhs, lhs, output);
++        else
++            vpaddusb(rhs, lhs, output);
++    }
++    void addSatInt16x8(FloatRegister lhs, Operand rhs, SimdSign sign, FloatRegister output) {
++        if (sign == SimdSign::Signed)
++            vpaddsw(rhs, lhs, output);
++        else
++            vpaddusw(rhs, lhs, output);
++    }
++
++    void subInt8x16(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vpsubb(rhs, lhs, output);
++    }
++    void subInt16x8(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vpsubw(rhs, lhs, output);
+     }
+-    void bitwiseAndNotSimd128(const Operand& src, FloatRegister dest) {
+-        vandnps(src, dest, dest);
++    void subInt32x4(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vpsubd(rhs, lhs, output);
++    }
++    void subFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vsubps(rhs, lhs, output);
++    }
++
++    void subSatInt8x16(FloatRegister lhs, Operand rhs, SimdSign sign, FloatRegister output) {
++        if (sign == SimdSign::Signed)
++            vpsubsb(rhs, lhs, output);
++        else
++            vpsubusb(rhs, lhs, output);
++    }
++    void subSatInt16x8(FloatRegister lhs, Operand rhs, SimdSign sign, FloatRegister output) {
++        if (sign == SimdSign::Signed)
++            vpsubsw(rhs, lhs, output);
++        else
++            vpsubusw(rhs, lhs, output);
++    }
++
++    void mulInt16x8(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vpmullw(rhs, lhs, output);
++    }
++    void mulInt32x4(FloatRegister lhs, Operand rhs, const mozilla::Maybe<FloatRegister>& temp,
++                    FloatRegister output);
++    void mulFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vmulps(rhs, lhs, output);
++    }
++
++    void negInt8x16(Operand in, FloatRegister out) {
++        zeroSimd128Int(out);
++        packedSubInt8(in, out);
++    }
++    void negInt16x8(Operand in, FloatRegister out) {
++        zeroSimd128Int(out);
++        packedSubInt16(in, out);
++    }
++    void negInt32x4(Operand in, FloatRegister out) {
++        zeroSimd128Int(out);
++        packedSubInt32(in, out);
+     }
+-    void bitwiseOrSimd128(const Operand& src, FloatRegister dest) {
+-        vorps(src, dest, dest);
++    void negFloat32x4(Operand in, FloatRegister out);
++
++    void notInt8x16(Operand in, FloatRegister out);
++    void notInt16x8(Operand in, FloatRegister out);
++    void notInt32x4(Operand in, FloatRegister out);
++    void notFloat32x4(Operand in, FloatRegister out);
++
++    void divFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister output) {
++        vdivps(rhs, lhs, output);
++    }
++    void minFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister output);
++    void maxFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister temp, FloatRegister output);
++    void minNumFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister temp, FloatRegister output);
++    void maxNumFloat32x4(FloatRegister lhs, Operand rhs, FloatRegister temp, FloatRegister output);
++
++    void absFloat32x4(Operand in, FloatRegister out);
++
++    void bitwiseAndFloat32x4(FloatRegister lhs, const Operand& rhs, FloatRegister dest) {
++        vandps(rhs, lhs, dest);
++    }
++    void bitwiseAndSimdInt(FloatRegister lhs, const Operand& rhs, FloatRegister dest) {
++        vpand(rhs, lhs, dest);
+     }
+-    void bitwiseXorSimd128(const Operand& src, FloatRegister dest) {
+-        vxorps(src, dest, dest);
++
++    void bitwiseOrFloat32x4(FloatRegister lhs, const Operand& rhs, FloatRegister dest) {
++        vorps(rhs, lhs, dest);
++    }
++    void bitwiseOrSimdInt(FloatRegister lhs, const Operand& rhs, FloatRegister dest) {
++        vpor(rhs, lhs, dest);
++    }
++
++    void bitwiseXorFloat32x4(FloatRegister lhs, const Operand& rhs, FloatRegister dest) {
++        vxorps(rhs, lhs, dest);
+     }
++    void bitwiseXorSimdInt(FloatRegister lhs, const Operand& rhs, FloatRegister dest) {
++        vpxor(rhs, lhs, dest);
++    }
++
++    void bitwiseAndNotFloat32x4(FloatRegister lhs, const Operand& rhs, FloatRegister dest) {
++        vandnps(rhs, lhs, dest);
++    }
++    void bitwiseAndNotSimdInt(FloatRegister lhs, const Operand& rhs, FloatRegister dest) {
++        vpandn(rhs, lhs, dest);
++    }
++
+     void zeroSimd128Float(FloatRegister dest) {
+         vxorps(dest, dest, dest);
+     }
+     void zeroSimd128Int(FloatRegister dest) {
+         vpxor(dest, dest, dest);
+     }
+ 
++    void selectSimd128(FloatRegister mask, FloatRegister onTrue, FloatRegister onFalse,
++                       FloatRegister temp, FloatRegister output);
++    void selectX4(FloatRegister mask, FloatRegister onTrue, FloatRegister onFalse,
++                  FloatRegister temp, FloatRegister output) {
++        if (AssemblerX86Shared::HasAVX())
++            vblendvps(mask, onTrue, onFalse, output);
++        else
++            selectSimd128(mask, onTrue, onFalse, temp, output);
++    }
++
+     template <class T, class Reg> inline void loadScalar(const Operand& src, Reg dest);
+     template <class T, class Reg> inline void storeScalar(Reg src, const Address& dest);
+     template <class T> inline void loadAlignedVector(const Address& src, FloatRegister dest);
+     template <class T> inline void storeAlignedVector(FloatRegister src, const Address& dest);
+ 
+     void loadInt32x1(const Address& src, FloatRegister dest) {
+         vmovd(Operand(src), dest);
+     }
+@@ -572,51 +742,48 @@ class MacroAssemblerX86Shared : public A
+     void packedRcpSqrtApproximationFloat32x4(const Operand& src, FloatRegister dest) {
+         // TODO See comment above. See also bug 1068028.
+         vrsqrtps(src, dest);
+     }
+     void packedSqrtFloat32x4(const Operand& src, FloatRegister dest) {
+         vsqrtps(src, dest);
+     }
+ 
+-    void packedLeftShiftByScalarInt16x8(FloatRegister src, FloatRegister dest) {
+-        vpsllw(src, dest, dest);
+-    }
++  public:
++    void packedLeftShiftByScalarInt16x8(FloatRegister in, Register count, Register temp, FloatRegister dest);
++    void packedRightShiftByScalarInt16x8(FloatRegister in, Register count, Register temp, FloatRegister dest);
++    void packedUnsignedRightShiftByScalarInt16x8(FloatRegister in, Register count, Register temp, FloatRegister dest);
++
+     void packedLeftShiftByScalarInt16x8(Imm32 count, FloatRegister dest) {
++        count.value &= 15;
+         vpsllw(count, dest, dest);
+     }
+-    void packedRightShiftByScalarInt16x8(FloatRegister src, FloatRegister dest) {
+-        vpsraw(src, dest, dest);
+-    }
+     void packedRightShiftByScalarInt16x8(Imm32 count, FloatRegister dest) {
++        count.value &= 15;
+         vpsraw(count, dest, dest);
+     }
+-    void packedUnsignedRightShiftByScalarInt16x8(FloatRegister src, FloatRegister dest) {
+-        vpsrlw(src, dest, dest);
+-    }
+     void packedUnsignedRightShiftByScalarInt16x8(Imm32 count, FloatRegister dest) {
++        count.value &= 15;
+         vpsrlw(count, dest, dest);
+     }
+ 
+-    void packedLeftShiftByScalarInt32x4(FloatRegister src, FloatRegister dest) {
+-        vpslld(src, dest, dest);
+-    }
++    void packedLeftShiftByScalarInt32x4(FloatRegister in, Register count, Register temp, FloatRegister dest);
++    void packedRightShiftByScalarInt32x4(FloatRegister in, Register count, Register temp, FloatRegister dest);
++    void packedUnsignedRightShiftByScalarInt32x4(FloatRegister in, Register count, Register temp, FloatRegister dest);
++
+     void packedLeftShiftByScalarInt32x4(Imm32 count, FloatRegister dest) {
++        count.value &= 31;
+         vpslld(count, dest, dest);
+     }
+-    void packedRightShiftByScalarInt32x4(FloatRegister src, FloatRegister dest) {
+-        vpsrad(src, dest, dest);
+-    }
+     void packedRightShiftByScalarInt32x4(Imm32 count, FloatRegister dest) {
++        count.value &= 31;
+         vpsrad(count, dest, dest);
+     }
+-    void packedUnsignedRightShiftByScalarInt32x4(FloatRegister src, FloatRegister dest) {
+-        vpsrld(src, dest, dest);
+-    }
+     void packedUnsignedRightShiftByScalarInt32x4(Imm32 count, FloatRegister dest) {
++        count.value &= 31;
+         vpsrld(count, dest, dest);
+     }
+ 
+     void loadFloat32x3(const Address& src, FloatRegister dest) {
+         Address srcZ(src);
+         srcZ.offset += 2 * sizeof(float);
+         vmovsd(src, dest);
+         ScratchSimd128Scope scratch(asMasm());
+diff --git a/js/src/moz.build b/js/src/moz.build
+--- a/js/src/moz.build
++++ b/js/src/moz.build
+@@ -496,16 +496,17 @@ elif CONFIG['JS_CODEGEN_X86'] or CONFIG[
+     UNIFIED_SOURCES += [
+         'jit/x86-shared/Architecture-x86-shared.cpp',
+         'jit/x86-shared/Assembler-x86-shared.cpp',
+         'jit/x86-shared/AssemblerBuffer-x86-shared.cpp',
+         'jit/x86-shared/BaselineCompiler-x86-shared.cpp',
+         'jit/x86-shared/BaselineIC-x86-shared.cpp',
+         'jit/x86-shared/CodeGenerator-x86-shared.cpp',
+         'jit/x86-shared/Lowering-x86-shared.cpp',
++        'jit/x86-shared/MacroAssembler-x86-shared-SIMD.cpp',
+         'jit/x86-shared/MacroAssembler-x86-shared.cpp',
+         'jit/x86-shared/MoveEmitter-x86-shared.cpp',
+     ]
+     SOURCES += [
+         'jit/x86-shared/Disassembler-x86-shared.cpp',  # using namespace js::jit::X86Encoding;
+     ]
+     if CONFIG['JS_CODEGEN_X64']:
+         LOpcodesGenerated.inputs += ['jit/x64/LIR-x64.h']
+

+ 36709 - 0
frg/work-js/mozilla-release/patches/1416723-2-63a1.patch

@@ -0,0 +1,36709 @@
+# HG changeset patch
+# User Benjamin Bouvier <benj@benj.me>
+# Date 1532438101 -7200
+# Node ID 4534ae540e86d686f29098cbcc2f932a6b83117b
+# Parent  7b9d8efc7cbd35507cb5dcde5ce197aba4b256d2
+Bug 1416723: Remove SIMD.js support; r=luke, r=nbp
+
+diff --git a/dom/serviceworkers/test/test_serviceworker_interfaces.js.1416723-2.later b/dom/serviceworkers/test/test_serviceworker_interfaces.js.1416723-2.later
+new file mode 100644
+--- /dev/null
++++ b/dom/serviceworkers/test/test_serviceworker_interfaces.js.1416723-2.later
+@@ -0,0 +1,20 @@
++--- test_serviceworker_interfaces.js
+++++ test_serviceworker_interfaces.js
++@@ -52,17 +52,16 @@ var ecmaGlobals =
++     "Proxy",
++     "RangeError",
++     {name: "ReadableStream", optional: true},
++     "ReferenceError",
++     "Reflect",
++     "RegExp",
++     "Set",
++     {name: "SharedArrayBuffer", disabled: true},
++-    {name: "SIMD", nightly: true},
++     "String",
++     "Symbol",
++     "SyntaxError",
++     {name: "TypedObject", nightly: true},
++     "TypeError",
++     "Uint16Array",
++     "Uint32Array",
++     "Uint8Array",
+diff --git a/dom/tests/mochitest/general/test_interfaces.js b/dom/tests/mochitest/general/test_interfaces.js
+--- a/dom/tests/mochitest/general/test_interfaces.js
++++ b/dom/tests/mochitest/general/test_interfaces.js
+@@ -57,17 +57,16 @@ var ecmaGlobals =
+     "Proxy",
+     "RangeError",
+     {name: "ReadableStream", disabled: true},
+     "ReferenceError",
+     "Reflect",
+     "RegExp",
+     "Set",
+     {name: "SharedArrayBuffer", disabled: true},
+-    {name: "SIMD", nightly: true},
+     "String",
+     "Symbol",
+     "SyntaxError",
+     {name: "TypedObject", nightly: true},
+     "TypeError",
+     "Uint16Array",
+     "Uint32Array",
+     "Uint8Array",
+diff --git a/dom/workers/test/serviceworkers/test_serviceworker_interfaces.js b/dom/workers/test/serviceworkers/test_serviceworker_interfaces.js
+--- a/dom/workers/test/serviceworkers/test_serviceworker_interfaces.js
++++ b/dom/workers/test/serviceworkers/test_serviceworker_interfaces.js
+@@ -52,17 +52,16 @@ var ecmaGlobals =
+     "Proxy",
+     "RangeError",
+     {name: "ReadableStream", optional: true},
+     "ReferenceError",
+     "Reflect",
+     "RegExp",
+     "Set",
+     {name: "SharedArrayBuffer", disabled: true},
+-    {name: "SIMD", nightly: true},
+     "String",
+     "Symbol",
+     "SyntaxError",
+     {name: "TypedObject", nightly: true},
+     "TypeError",
+     "Uint16Array",
+     "Uint32Array",
+     "Uint8Array",
+diff --git a/dom/workers/test/test_worker_interfaces.js b/dom/workers/test/test_worker_interfaces.js
+--- a/dom/workers/test/test_worker_interfaces.js
++++ b/dom/workers/test/test_worker_interfaces.js
+@@ -52,17 +52,16 @@ var ecmaGlobals =
+     "Proxy",
+     "RangeError",
+     {name: "ReadableStream", disabled: true},
+     "ReferenceError",
+     "Reflect",
+     "RegExp",
+     "Set",
+     {name: "SharedArrayBuffer", disabled: true},
+-    {name: "SIMD", nightly: true},
+     "String",
+     "Symbol",
+     "SyntaxError",
+     {name: "TypedObject", nightly: true},
+     "TypeError",
+     "Uint16Array",
+     "Uint32Array",
+     "Uint8Array",
+diff --git a/js/public/ProtoKey.h b/js/public/ProtoKey.h
+--- a/js/public/ProtoKey.h
++++ b/js/public/ProtoKey.h
+@@ -43,22 +43,16 @@
+ #endif
+ 
+ #ifdef ENABLE_BINARYDATA
+ #define IF_BDATA(real,imaginary) real
+ #else
+ #define IF_BDATA(real,imaginary) imaginary
+ #endif
+ 
+-#ifdef ENABLE_SIMD
+-# define IF_SIMD(real,imaginary) real
+-#else
+-# define IF_SIMD(real,imaginary) imaginary
+-#endif
+-
+ #ifdef ENABLE_SHARED_ARRAY_BUFFER
+ #define IF_SAB(real,imaginary) real
+ #else
+ #define IF_SAB(real,imaginary) imaginary
+ #endif
+ 
+ #define JS_FOR_PROTOTYPES(real,imaginary) \
+     imaginary(Null,             InitNullClass,          dummy) \
+@@ -101,17 +95,16 @@ IF_BIGINT(real,imaginary)(BigInt, InitVi
+     real(Map,                   InitViaClassSpec,       OCLASP(Map)) \
+     real(Set,                   InitViaClassSpec,       OCLASP(Set)) \
+     real(DataView,              InitViaClassSpec,       OCLASP(DataView)) \
+     real(Symbol,                InitSymbolClass,        OCLASP(Symbol)) \
+ IF_SAB(real,imaginary)(SharedArrayBuffer,       InitViaClassSpec, OCLASP(SharedArrayBuffer)) \
+ IF_INTL(real,imaginary) (Intl,                  InitIntlClass,          CLASP(Intl)) \
+ IF_BDATA(real,imaginary)(TypedObject,           InitTypedObjectModuleObject,   OCLASP(TypedObjectModule)) \
+     real(Reflect,               InitReflect,            nullptr) \
+-IF_SIMD(real,imaginary)(SIMD,                   InitSimdClass, OCLASP(Simd)) \
+     real(WeakSet,               InitWeakSetClass,       OCLASP(WeakSet)) \
+     real(TypedArray,            InitViaClassSpec,       &js::TypedArrayObject::sharedTypedArrayPrototypeClass) \
+ IF_SAB(real,imaginary)(Atomics, InitAtomicsClass, OCLASP(Atomics)) \
+     real(SavedFrame,            InitViaClassSpec,       &js::SavedFrame::class_) \
+     real(Promise,               InitViaClassSpec,       OCLASP(Promise)) \
+     real(ReadableStream,        InitViaClassSpec,       &js::ReadableStream::class_) \
+     real(ReadableStreamDefaultReader,           InitViaClassSpec, &js::ReadableStreamDefaultReader::class_) \
+     real(ReadableStreamBYOBReader,              InitViaClassSpec, &js::ReadableStreamBYOBReader::class_) \
+diff --git a/js/src/builtin/SIMD.cpp b/js/src/builtin/SIMD.cpp
+deleted file mode 100644
+--- a/js/src/builtin/SIMD.cpp
++++ /dev/null
+@@ -1,1644 +0,0 @@
+-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+- * vim: set ts=8 sts=4 et sw=4 tw=99:
+- * This Source Code Form is subject to the terms of the Mozilla Public
+- * License, v. 2.0. If a copy of the MPL was not distributed with this
+- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+-
+-/*
+- * JS SIMD pseudo-module.
+- * Specification matches polyfill:
+- * https://github.com/johnmccutchan/ecmascript_simd/blob/master/src/ecmascript_simd.js
+- * The objects float32x4 and int32x4 are installed on the SIMD pseudo-module.
+- */
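+-
+-// For illustration, a minimal JS-level sketch of the API implemented here,
+-// assuming the polyfill semantics linked above:
+-//
+-//   var a = SIMD.Float32x4(1, 2, 3, 4);
+-//   var b = SIMD.Float32x4.splat(2);
+-//   SIMD.Float32x4.extractLane(SIMD.Float32x4.add(a, b), 0); // 3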
+-
+-#include "builtin/SIMD.h"
+-
+-#include "mozilla/FloatingPoint.h"
+-#include "mozilla/IntegerTypeTraits.h"
+-#include "mozilla/Sprintf.h"
+-#include "mozilla/TypeTraits.h"
+-
+-#include "jsapi.h"
+-#include "jsfriendapi.h"
+-#include "jsnum.h"
+-
+-#include "builtin/TypedObject.h"
+-#include "jit/AtomicOperations.h"
+-#include "jit/InlinableNatives.h"
+-#include "js/Value.h"
+-
+-#include "vm/JSObject-inl.h"
+-
+-using namespace js;
+-
+-using mozilla::IsNaN;
+-using mozilla::EnableIf;
+-using mozilla::IsIntegral;
+-using mozilla::IsFloatingPoint;
+-using mozilla::IsSigned;
+-using mozilla::MakeUnsigned;
+-
+-///////////////////////////////////////////////////////////////////////////
+-// SIMD
+-
+-static_assert(unsigned(SimdType::Count) == 12, "sync with TypedObjectConstants.h");
+-
+-static bool ArgumentToLaneIndex(JSContext* cx, JS::HandleValue v, unsigned limit, unsigned* lane);
+-
+-static bool
+-CheckVectorObject(HandleValue v, SimdType expectedType)
+-{
+-    if (!v.isObject())
+-        return false;
+-
+-    JSObject& obj = v.toObject();
+-    if (!obj.is<TypedObject>())
+-        return false;
+-
+-    TypeDescr& typeRepr = obj.as<TypedObject>().typeDescr();
+-    if (typeRepr.kind() != type::Simd)
+-        return false;
+-
+-    return typeRepr.as<SimdTypeDescr>().type() == expectedType;
+-}
+-
+-template<class V>
+-bool
+-js::IsVectorObject(HandleValue v)
+-{
+-    return CheckVectorObject(v, V::type);
+-}
+-
+-#define FOR_EACH_SIMD(macro) \
+-  macro(Int8x16)             \
+-  macro(Int16x8)             \
+-  macro(Int32x4)             \
+-  macro(Uint8x16)            \
+-  macro(Uint16x8)            \
+-  macro(Uint32x4)            \
+-  macro(Float32x4)           \
+-  macro(Float64x2)           \
+-  macro(Bool8x16)            \
+-  macro(Bool16x8)            \
+-  macro(Bool32x4)            \
+-  macro(Bool64x2)
+-
+-#define InstantiateIsVectorObject_(T) \
+-    template bool js::IsVectorObject<T>(HandleValue v);
+-FOR_EACH_SIMD(InstantiateIsVectorObject_)
+-#undef InstantiateIsVectorObject_
+-
+-const char*
+-js::SimdTypeToString(SimdType type)
+-{
+-    switch (type) {
+-#define RETSTR_(TYPE) case SimdType::TYPE: return #TYPE;
+-      FOR_EACH_SIMD(RETSTR_)
+-#undef RETSTR_
+-      case SimdType::Count: break;
+-    }
+-    return "<bad SimdType>";
+-}
+-
+-PropertyName*
+-js::SimdTypeToName(const JSAtomState& atoms, SimdType type)
+-{
+-    switch (type) {
+-#define CASE_(TypeName) case SimdType::TypeName: return atoms.TypeName;
+-      FOR_EACH_SIMD(CASE_)
+-#undef CASE_
+-      case SimdType::Count: break;
+-    }
+-    MOZ_CRASH("bad SIMD type");
+-}
+-
+-bool
+-js::IsSimdTypeName(const JSAtomState& atoms, const PropertyName* name, SimdType* type)
+-{
+-#define CHECK_(TypeName) if (name == atoms.TypeName) {   \
+-                             *type = SimdType::TypeName; \
+-                             return true;                \
+-                         }
+-    FOR_EACH_SIMD(CHECK_)
+-#undef CHECK_
+-    return false;
+-}
+-
+-static inline bool
+-ErrorBadArgs(JSContext* cx)
+-{
+-    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS);
+-    return false;
+-}
+-
+-static inline bool
+-ErrorWrongTypeArg(JSContext* cx, unsigned argIndex, Handle<TypeDescr*> typeDescr)
+-{
+-    MOZ_ASSERT(argIndex < 10);
+-    char charArgIndex[2];
+-    SprintfLiteral(charArgIndex, "%u", argIndex);
+-
+-    HeapSlot& typeNameSlot = typeDescr->getReservedSlotRef(JS_DESCR_SLOT_STRING_REPR);
+-    char* typeNameStr = JS_EncodeString(cx, typeNameSlot.toString());
+-    if (!typeNameStr)
+-        return false;
+-
+-    JS_ReportErrorNumberLatin1(cx, GetErrorMessage, nullptr, JSMSG_SIMD_NOT_A_VECTOR,
+-                               typeNameStr, charArgIndex);
+-    JS_free(cx, typeNameStr);
+-    return false;
+-}
+-
+-static inline bool
+-ErrorBadIndex(JSContext* cx)
+-{
+-    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
+-    return false;
+-}
+-
+-/* Non-standard: convert and range check an index value for SIMD operations.
+- *
+- *   1. numericIndex = ToNumber(argument)            (may throw TypeError)
+- *   2. intIndex = ToInteger(numericIndex)
+- *   3. if intIndex != numericIndex throw RangeError
+- *
+- * This function additionally bounds the range to the non-negative contiguous
+- * integers:
+- *
+- *   4. if intIndex < 0 or intIndex > 2^53 throw RangeError
+- *
+- * Return true and set |*index| to the integer value if |argument| is a valid
+- * array index argument. Otherwise report a TypeError or RangeError and return
+- * false.
+- *
+- * The returned index will always be in the range 0 <= *index <= 2^53.
+- */
+-static bool
+-NonStandardToIndex(JSContext* cx, HandleValue v, uint64_t* index)
+-{
+-    // Fast common case.
+-    if (v.isInt32()) {
+-        int32_t i = v.toInt32();
+-        if (i >= 0) {
+-            *index = i;
+-            return true;
+-        }
+-    }
+-
+-    // Slow case. Use ToNumber() to coerce. This may throw a TypeError.
+-    double d;
+-    if (!ToNumber(cx, v, &d))
+-        return false;
+-
+-    // Check that |d| is an integer in the valid range.
+-    //
+-    // Not all floating point integers fit in the range of a uint64_t, so we
+-    // need a rough range check before the real range check in our caller. We
+-    // could limit indexes to UINT64_MAX, but this would mean that our callers
+-    // have to be very careful about integer overflow. The contiguous integer
+-    // floating point numbers end at 2^53, so make that our upper limit. If we
+-    // ever support arrays with more than 2^53 elements, this will need to
+-    // change.
+-    //
+-    // Reject infinities, NaNs, and numbers outside the contiguous integer range
+-    // with a RangeError.
+-
+-    // Write the relation so NaNs throw a RangeError.
+-    if (!(0 <= d && d <= (uint64_t(1) << 53))) {
+-        JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
+-        return false;
+-    }
+-
+-    // Check that d is an integer, throw a RangeError if not.
+-    // Note that this conversion could invoke undefined behavior without the
+-    // range check above.
+-    uint64_t i(d);
+-    if (d != double(i)) {
+-        JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
+-        return false;
+-    }
+-
+-    *index = i;
+-    return true;
+-}
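+-
+-// For illustration (assuming the semantics above), NonStandardToIndex maps:
+-//   Int32Value(3)    -> true,  *index == 3  (fast path)
+-//   DoubleValue(4.5) -> false  (RangeError: intIndex != numericIndex)
+-//   DoubleValue(-1)  -> false  (RangeError: below the valid range)
+-//   DoubleValue(NaN) -> false  (RangeError via the !(0 <= d) comparison)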
+-
+-template<typename T>
+-static SimdTypeDescr*
+-GetTypeDescr(JSContext* cx)
+-{
+-    RootedGlobalObject global(cx, cx->global());
+-    return GlobalObject::getOrCreateSimdTypeDescr(cx, global, T::type);
+-}
+-
+-template<typename V>
+-bool
+-js::ToSimdConstant(JSContext* cx, HandleValue v, jit::SimdConstant* out)
+-{
+-    typedef typename V::Elem Elem;
+-    Rooted<TypeDescr*> typeDescr(cx, GetTypeDescr<V>(cx));
+-    if (!typeDescr)
+-        return false;
+-    if (!IsVectorObject<V>(v))
+-        return ErrorWrongTypeArg(cx, 1, typeDescr);
+-
+-    JS::AutoCheckCannotGC nogc(cx);
+-    Elem* mem = reinterpret_cast<Elem*>(v.toObject().as<TypedObject>().typedMem(nogc));
+-    *out = jit::SimdConstant::CreateSimd128(mem);
+-    return true;
+-}
+-
+-template bool js::ToSimdConstant<Int8x16>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
+-template bool js::ToSimdConstant<Int16x8>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
+-template bool js::ToSimdConstant<Int32x4>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
+-template bool js::ToSimdConstant<Float32x4>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
+-template bool js::ToSimdConstant<Bool8x16>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
+-template bool js::ToSimdConstant<Bool16x8>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
+-template bool js::ToSimdConstant<Bool32x4>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
+-
+-template<typename Elem>
+-static Elem
+-TypedObjectMemory(HandleValue v, const JS::AutoRequireNoGC& nogc)
+-{
+-    TypedObject& obj = v.toObject().as<TypedObject>();
+-    return reinterpret_cast<Elem>(obj.typedMem(nogc));
+-}
+-
+-static const ClassOps SimdTypeDescrClassOps = {
+-    nullptr, /* addProperty */
+-    nullptr, /* delProperty */
+-    nullptr, /* enumerate */
+-    nullptr, /* newEnumerate */
+-    nullptr, /* resolve */
+-    nullptr, /* mayResolve */
+-    TypeDescr::finalize,
+-    SimdTypeDescr::call
+-};
+-
+-const Class SimdTypeDescr::class_ = {
+-    "SIMD",
+-    JSCLASS_HAS_RESERVED_SLOTS(JS_DESCR_SLOTS) | JSCLASS_BACKGROUND_FINALIZE,
+-    &SimdTypeDescrClassOps
+-};
+-
+-namespace {
+-
+-// Define classes (Int8x16Defn, Int16x8Defn, etc.) to group together each
+-// type's methods and related properties.
+-#define DEFINE_DEFN_(TypeName)                                       \
+-class TypeName##Defn {                                               \
+-  public:                                                            \
+-    static const JSFunctionSpec Methods[];                           \
+-};
+-
+-FOR_EACH_SIMD(DEFINE_DEFN_)
+-#undef DEFINE_DEFN_
+-
+-} // namespace
+-
+-// Shared type descriptor methods for all SIMD types.
+-static const JSFunctionSpec TypeDescriptorMethods[] = {
+-    JS_SELF_HOSTED_FN("toSource", "DescrToSource", 0, 0),
+-    JS_SELF_HOSTED_FN("array", "ArrayShorthand", 1, 0),
+-    JS_SELF_HOSTED_FN("equivalent", "TypeDescrEquivalent", 1, 0),
+-    JS_FS_END
+-};
+-
+-// Shared TypedObject methods for all SIMD types.
+-static const JSFunctionSpec SimdTypedObjectMethods[] = {
+-    JS_SELF_HOSTED_FN("toString", "SimdToString", 0, 0),
+-    JS_SELF_HOSTED_FN("valueOf", "SimdValueOf", 0, 0),
+-    JS_SELF_HOSTED_FN("toSource", "SimdToSource", 0, 0),
+-    JS_FS_END
+-};
+-
+-// Provide JSJitInfo structs for those types that are supported by Ion.
+-// The controlling SIMD type is encoded as the InlinableNative primary opcode.
+-// The SimdOperation within the type is encoded in the .depth field.
+-//
+-// The JS_INLINABLE_FN macro refers to js::JitInfo_##native which we provide as
+-// Simd##Type##_##Operation
+-//
+-// /!\ Don't forget to keep this list in sync with the SIMD intrinsics used in
+-// SelfHosting.cpp.
+-
+-namespace js {
+-namespace jit {
+-
+-static_assert(uint64_t(SimdOperation::Last) <= UINT16_MAX, "SimdOperation must fit in uint16_t");
+-
+-// See also JitInfo_* in MCallOptimize.cpp. We provide a JSJitInfo for all the
+-// named functions here. The default JitInfo_SimdInt32x4 etc. structs represent the
+-// SimdOperation::Constructor.
+-#define DEFN(TYPE, OP) const JSJitInfo JitInfo_Simd##TYPE##_##OP = {                             \
+-     /* .getter, unused for inlinable natives. */                                                \
+-    { nullptr },                                                                                 \
+-    /* .inlinableNative, but we have to init first union member: .protoID. */                    \
+-    { uint16_t(InlinableNative::Simd##TYPE) },                                                   \
+-    /* .nativeOp. Actually initializing first union member .depth. */                            \
+-    { uint16_t(SimdOperation::Fn_##OP) },                                                        \
+-    /* .type_ bitfield says this is an inlinable native function. */                             \
+-    JSJitInfo::InlinableNative                                                                   \
+-    /* Remaining fields are not used for inlinable natives. They are zero-initialized. */        \
+-};
+-
+-// This list of inlinable types should match the one in jit/InlinableNatives.h.
+-#define TDEFN(Name, Func, Operands) DEFN(Float32x4, Name)
+-FLOAT32X4_FUNCTION_LIST(TDEFN)
+-#undef TDEFN
+-
+-#define TDEFN(Name, Func, Operands) DEFN(Int8x16, Name)
+-INT8X16_FUNCTION_LIST(TDEFN)
+-#undef TDEFN
+-
+-#define TDEFN(Name, Func, Operands) DEFN(Uint8x16, Name)
+-UINT8X16_FUNCTION_LIST(TDEFN)
+-#undef TDEFN
+-
+-#define TDEFN(Name, Func, Operands) DEFN(Int16x8, Name)
+-INT16X8_FUNCTION_LIST(TDEFN)
+-#undef TDEFN
+-
+-#define TDEFN(Name, Func, Operands) DEFN(Uint16x8, Name)
+-UINT16X8_FUNCTION_LIST(TDEFN)
+-#undef TDEFN
+-
+-#define TDEFN(Name, Func, Operands) DEFN(Int32x4, Name)
+-INT32X4_FUNCTION_LIST(TDEFN)
+-#undef TDEFN
+-
+-#define TDEFN(Name, Func, Operands) DEFN(Uint32x4, Name)
+-UINT32X4_FUNCTION_LIST(TDEFN)
+-#undef TDEFN
+-
+-#define TDEFN(Name, Func, Operands) DEFN(Bool8x16, Name)
+-BOOL8X16_FUNCTION_LIST(TDEFN)
+-#undef TDEFN
+-
+-#define TDEFN(Name, Func, Operands) DEFN(Bool16x8, Name)
+-BOOL16X8_FUNCTION_LIST(TDEFN)
+-#undef TDEFN
+-
+-#define TDEFN(Name, Func, Operands) DEFN(Bool32x4, Name)
+-BOOL32X4_FUNCTION_LIST(TDEFN)
+-#undef TDEFN
+-
+-} // namespace jit
+-} // namespace js
+-
+-const JSFunctionSpec Float32x4Defn::Methods[] = {
+-#define SIMD_FLOAT32X4_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_INLINABLE_FN(#Name, js::simd_float32x4_##Name, Operands, 0, SimdFloat32x4_##Name),
+-    FLOAT32X4_FUNCTION_LIST(SIMD_FLOAT32X4_FUNCTION_ITEM)
+-#undef SIMD_FLOAT32X4_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Float64x2Defn::Methods[]  = {
+-#define SIMD_FLOAT64X2_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_FN(#Name, js::simd_float64x2_##Name, Operands, 0),
+-    FLOAT64X2_FUNCTION_LIST(SIMD_FLOAT64X2_FUNCTION_ITEM)
+-#undef SIMD_FLOAT64X2_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Int8x16Defn::Methods[] = {
+-#define SIMD_INT8X16_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_INLINABLE_FN(#Name, js::simd_int8x16_##Name, Operands, 0, SimdInt8x16_##Name),
+-    INT8X16_FUNCTION_LIST(SIMD_INT8X16_FUNCTION_ITEM)
+-#undef SIMD_INT8X16_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Int16x8Defn::Methods[] = {
+-#define SIMD_INT16X8_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_INLINABLE_FN(#Name, js::simd_int16x8_##Name, Operands, 0, SimdInt16x8_##Name),
+-    INT16X8_FUNCTION_LIST(SIMD_INT16X8_FUNCTION_ITEM)
+-#undef SIMD_INT16X8_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Int32x4Defn::Methods[] = {
+-#define SIMD_INT32X4_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_INLINABLE_FN(#Name, js::simd_int32x4_##Name, Operands, 0, SimdInt32x4_##Name),
+-    INT32X4_FUNCTION_LIST(SIMD_INT32X4_FUNCTION_ITEM)
+-#undef SIMD_INT32X4_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Uint8x16Defn::Methods[] = {
+-#define SIMD_UINT8X16_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_INLINABLE_FN(#Name, js::simd_uint8x16_##Name, Operands, 0, SimdUint8x16_##Name),
+-    UINT8X16_FUNCTION_LIST(SIMD_UINT8X16_FUNCTION_ITEM)
+-#undef SIMD_UINT8X16_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Uint16x8Defn::Methods[] = {
+-#define SIMD_UINT16X8_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_INLINABLE_FN(#Name, js::simd_uint16x8_##Name, Operands, 0, SimdUint16x8_##Name),
+-    UINT16X8_FUNCTION_LIST(SIMD_UINT16X8_FUNCTION_ITEM)
+-#undef SIMD_UINT16X8_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Uint32x4Defn::Methods[] = {
+-#define SIMD_UINT32X4_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_INLINABLE_FN(#Name, js::simd_uint32x4_##Name, Operands, 0, SimdUint32x4_##Name),
+-    UINT32X4_FUNCTION_LIST(SIMD_UINT32X4_FUNCTION_ITEM)
+-#undef SIMD_UINT32X4_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Bool8x16Defn::Methods[] = {
+-#define SIMD_BOOL8X16_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_INLINABLE_FN(#Name, js::simd_bool8x16_##Name, Operands, 0, SimdBool8x16_##Name),
+-    BOOL8X16_FUNCTION_LIST(SIMD_BOOL8X16_FUNCTION_ITEM)
+-#undef SIMD_BOOL8X16_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Bool16x8Defn::Methods[] = {
+-#define SIMD_BOOL16X8_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_INLINABLE_FN(#Name, js::simd_bool16x8_##Name, Operands, 0, SimdBool16x8_##Name),
+-    BOOL16X8_FUNCTION_LIST(SIMD_BOOL16X8_FUNCTION_ITEM)
+-#undef SIMD_BOOL16X8_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Bool32x4Defn::Methods[] = {
+-#define SIMD_BOOL32X4_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_INLINABLE_FN(#Name, js::simd_bool32x4_##Name, Operands, 0, SimdBool32x4_##Name),
+-    BOOL32X4_FUNCTION_LIST(SIMD_BOOL32X4_FUNCTION_ITEM)
+-#undef SIMD_BOOL32X4_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-const JSFunctionSpec Bool64x2Defn::Methods[] = {
+-#define SIMD_BOOL64X2_FUNCTION_ITEM(Name, Func, Operands) \
+-    JS_FN(#Name, js::simd_bool64x2_##Name, Operands, 0),
+-    BOOL64X2_FUNCTION_LIST(SIMD_BOOL64X2_FUNCTION_ITEM)
+-#undef SIMD_BOOL64X2_FUNCTION_ITEM
+-    JS_FS_END
+-};
+-
+-template <typename T>
+-static bool
+-FillLanes(JSContext* cx, Handle<TypedObject*> result, const CallArgs& args)
+-{
+-    typedef typename T::Elem Elem;
+-    Elem tmp;
+-    for (unsigned i = 0; i < T::lanes; i++) {
+-        if (!T::Cast(cx, args.get(i), &tmp))
+-            return false;
+-        // Reassure typedMem() that we won't GC while holding onto the returned
+-        // pointer, even though we could GC on every iteration of this loop
+-        // (but it is safe because we re-fetch each time).
+-        JS::AutoCheckCannotGC nogc(cx);
+-        reinterpret_cast<Elem*>(result->typedMem(nogc))[i] = tmp;
+-    }
+-    args.rval().setObject(*result);
+-    return true;
+-}
+-
+-bool
+-SimdTypeDescr::call(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-
+-    Rooted<SimdTypeDescr*> descr(cx, &args.callee().as<SimdTypeDescr>());
+-    Rooted<TypedObject*> result(cx, TypedObject::createZeroed(cx, descr));
+-    if (!result)
+-        return false;
+-
+-#define CASE_CALL_(Type) \
+-      case SimdType::Type:   return FillLanes< ::Type>(cx, result, args);
+-
+-    switch (descr->type()) {
+-      FOR_EACH_SIMD(CASE_CALL_)
+-      case SimdType::Count: break;
+-    }
+-
+-#undef CASE_CALL_
+-    MOZ_CRASH("unexpected SIMD descriptor");
+-    return false;
+-}
+-
+-///////////////////////////////////////////////////////////////////////////
+-// SIMD class
+-
+-static const ClassOps SimdObjectClassOps = {
+-    nullptr, /* addProperty */
+-    nullptr, /* delProperty */
+-    nullptr, /* enumerate */
+-    nullptr, /* newEnumerate */
+-    SimdObject::resolve
+-};
+-
+-const Class SimdObject::class_ = {
+-    "SIMD",
+-    JSCLASS_HAS_RESERVED_SLOTS(uint32_t(SimdType::Count)),
+-    &SimdObjectClassOps
+-};
+-
+-/* static */ bool
+-GlobalObject::initSimdObject(JSContext* cx, Handle<GlobalObject*> global)
+-{
+-    // SIMD relies on the TypedObject module being initialized.
+-    // In particular, the self-hosted code for array() wants
+-    // to be able to call GetTypedObjectModule(). It is NOT necessary
+-    // to install the TypedObjectModule global, but at the moment
+-    // those two things are not separable.
+-    if (!GlobalObject::getOrCreateTypedObjectModule(cx, global))
+-        return false;
+-
+-    RootedObject globalSimdObject(cx);
+-    RootedObject objProto(cx, GlobalObject::getOrCreateObjectPrototype(cx, global));
+-    if (!objProto)
+-        return false;
+-
+-    globalSimdObject = NewObjectWithGivenProto(cx, &SimdObject::class_, objProto, SingletonObject);
+-    if (!globalSimdObject)
+-        return false;
+-
+-    RootedValue globalSimdValue(cx, ObjectValue(*globalSimdObject));
+-    if (!DefineDataProperty(cx, global, cx->names().SIMD, globalSimdValue, JSPROP_RESOLVING))
+-        return false;
+-
+-    global->setConstructor(JSProto_SIMD, globalSimdValue);
+-    return true;
+-}
+-
+-static bool
+-CreateSimdType(JSContext* cx, Handle<GlobalObject*> global, HandlePropertyName stringRepr,
+-               SimdType simdType, const JSFunctionSpec* methods)
+-{
+-    RootedObject funcProto(cx, GlobalObject::getOrCreateFunctionPrototype(cx, global));
+-    if (!funcProto)
+-        return false;
+-
+-    // Create type constructor itself and initialize its reserved slots.
+-    Rooted<SimdTypeDescr*> typeDescr(cx);
+-    typeDescr = NewObjectWithGivenProto<SimdTypeDescr>(cx, funcProto, SingletonObject);
+-    if (!typeDescr)
+-        return false;
+-
+-    typeDescr->initReservedSlot(JS_DESCR_SLOT_KIND, Int32Value(type::Simd));
+-    typeDescr->initReservedSlot(JS_DESCR_SLOT_STRING_REPR, StringValue(stringRepr));
+-    typeDescr->initReservedSlot(JS_DESCR_SLOT_ALIGNMENT, Int32Value(SimdTypeDescr::alignment(simdType)));
+-    typeDescr->initReservedSlot(JS_DESCR_SLOT_SIZE, Int32Value(SimdTypeDescr::size(simdType)));
+-    typeDescr->initReservedSlot(JS_DESCR_SLOT_OPAQUE, BooleanValue(false));
+-    typeDescr->initReservedSlot(JS_DESCR_SLOT_TYPE, Int32Value(uint8_t(simdType)));
+-
+-    if (!CreateUserSizeAndAlignmentProperties(cx, typeDescr))
+-        return false;
+-
+-    // Create prototype property, which inherits from Object.prototype.
+-    RootedObject objProto(cx, GlobalObject::getOrCreateObjectPrototype(cx, global));
+-    if (!objProto)
+-        return false;
+-    Rooted<TypedProto*> proto(cx);
+-    proto = NewObjectWithGivenProto<TypedProto>(cx, objProto, SingletonObject);
+-    if (!proto)
+-        return false;
+-    typeDescr->initReservedSlot(JS_DESCR_SLOT_TYPROTO, ObjectValue(*proto));
+-
+-    // Link constructor to prototype and install properties.
+-    if (!JS_DefineFunctions(cx, typeDescr, TypeDescriptorMethods))
+-        return false;
+-
+-    if (!LinkConstructorAndPrototype(cx, typeDescr, proto) ||
+-        !JS_DefineFunctions(cx, proto, SimdTypedObjectMethods))
+-    {
+-        return false;
+-    }
+-
+-    // Bind type descriptor to the global SIMD object
+-    RootedObject globalSimdObject(cx, GlobalObject::getOrCreateSimdGlobalObject(cx, global));
+-    MOZ_ASSERT(globalSimdObject);
+-
+-    RootedValue typeValue(cx, ObjectValue(*typeDescr));
+-    if (!JS_DefineFunctions(cx, typeDescr, methods) ||
+-        !DefineDataProperty(cx, globalSimdObject, stringRepr, typeValue,
+-                            JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_RESOLVING))
+-    {
+-        return false;
+-    }
+-
+-    uint32_t slot = uint32_t(typeDescr->type());
+-    MOZ_ASSERT(globalSimdObject->as<NativeObject>().getReservedSlot(slot).isUndefined());
+-    globalSimdObject->as<NativeObject>().setReservedSlot(slot, ObjectValue(*typeDescr));
+-    return !!typeDescr;
+-}
+-
+-/* static */ bool
+-GlobalObject::initSimdType(JSContext* cx, Handle<GlobalObject*> global, SimdType simdType)
+-{
+-#define CREATE_(Type) \
+-    case SimdType::Type: \
+-      return CreateSimdType(cx, global, cx->names().Type, simdType, Type##Defn::Methods);
+-
+-    switch (simdType) {
+-      FOR_EACH_SIMD(CREATE_)
+-      case SimdType::Count: break;
+-    }
+-    MOZ_CRASH("unexpected simd type");
+-
+-#undef CREATE_
+-}
+-
+-/* static */ SimdTypeDescr*
+-GlobalObject::getOrCreateSimdTypeDescr(JSContext* cx, Handle<GlobalObject*> global,
+-                                       SimdType simdType)
+-{
+-    MOZ_ASSERT(unsigned(simdType) < unsigned(SimdType::Count), "Invalid SIMD type");
+-
+-    RootedObject globalSimdObject(cx, GlobalObject::getOrCreateSimdGlobalObject(cx, global));
+-    if (!globalSimdObject)
+-       return nullptr;
+-
+-    uint32_t typeSlotIndex = uint32_t(simdType);
+-    if (globalSimdObject->as<NativeObject>().getReservedSlot(typeSlotIndex).isUndefined() &&
+-        !GlobalObject::initSimdType(cx, global, simdType))
+-    {
+-        return nullptr;
+-    }
+-
+-    const Value& slot = globalSimdObject->as<NativeObject>().getReservedSlot(typeSlotIndex);
+-    MOZ_ASSERT(slot.isObject());
+-    return &slot.toObject().as<SimdTypeDescr>();
+-}
+-
+-bool
+-SimdObject::resolve(JSContext* cx, JS::HandleObject obj, JS::HandleId id, bool* resolved)
+-{
+-    *resolved = false;
+-    if (!JSID_IS_ATOM(id))
+-        return true;
+-    JSAtom* str = JSID_TO_ATOM(id);
+-    Rooted<GlobalObject*> global(cx, cx->global());
+-#define TRY_RESOLVE_(Type)                                                    \
+-    if (str == cx->names().Type) {                                            \
+-        *resolved = CreateSimdType(cx, global, cx->names().Type,              \
+-                                   SimdType::Type, Type##Defn::Methods);      \
+-        return *resolved;                                                     \
+-    }
+-    FOR_EACH_SIMD(TRY_RESOLVE_)
+-#undef TRY_RESOLVE_
+-    return true;
+-}
+-
+-JSObject*
+-js::InitSimdClass(JSContext* cx, Handle<GlobalObject*> global)
+-{
+-    return GlobalObject::getOrCreateSimdGlobalObject(cx, global);
+-}
+-
+-template<typename V>
+-JSObject*
+-js::CreateSimd(JSContext* cx, const typename V::Elem* data)
+-{
+-    typedef typename V::Elem Elem;
+-    Rooted<TypeDescr*> typeDescr(cx, GetTypeDescr<V>(cx));
+-    if (!typeDescr)
+-        return nullptr;
+-
+-    Rooted<TypedObject*> result(cx, TypedObject::createZeroed(cx, typeDescr));
+-    if (!result)
+-        return nullptr;
+-
+-    JS::AutoCheckCannotGC nogc(cx);
+-    Elem* resultMem = reinterpret_cast<Elem*>(result->typedMem(nogc));
+-    memcpy(resultMem, data, sizeof(Elem) * V::lanes);
+-    return result;
+-}
+-
+-#define InstantiateCreateSimd_(Type) \
+-    template JSObject* js::CreateSimd<Type>(JSContext* cx, const Type::Elem* data);
+-
+-FOR_EACH_SIMD(InstantiateCreateSimd_)
+-
+-#undef InstantiateCreateSimd_
+-
+-#undef FOR_EACH_SIMD
+-
+-namespace js {
+-
+-namespace detail {
+-
+-template<typename T, typename Enable = void>
+-struct MaybeMakeUnsigned {
+-    using Type = T;
+-};
+-
+-template<typename T>
+-struct MaybeMakeUnsigned<T, typename EnableIf<IsIntegral<T>::value && IsSigned<T>::value>::Type> {
+-    using Type = typename MakeUnsigned<T>::Type;
+-};
+-
+-} // namespace detail
+-
+-// Unary SIMD operators
+-template<typename T>
+-struct Identity {
+-    static T apply(T x) { return x; }
+-};
+-template<typename T>
+-struct Abs {
+-    static T apply(T x) { return mozilla::Abs(x); }
+-};
+-template<typename T>
+-struct Neg {
+-    using MaybeUnsignedT = typename detail::MaybeMakeUnsigned<T>::Type;
+-    static T apply(T x) {
+-        // Prepend |1U| to force integral promotion through *unsigned* types.
+-        // Otherwise when |T = uint16_t| and |int| is 32-bit, we could have
+-        // |uint16_t(-1) * uint16_t(65535)| which would really be
+-        // |int(65535) * int(65535)|, but as |4294836225 > 2147483647| would
+-        // perform signed integer overflow.
+-        // https://stackoverflow.com/questions/24795651/whats-the-best-c-way-to-multiply-unsigned-integers-modularly-safely
+-        return static_cast<MaybeUnsignedT>(1U * MaybeUnsignedT(-1) * MaybeUnsignedT(x));
+-    }
+-};
+-template<typename T>
+-struct Not {
+-    static T apply(T x) { return ~x; }
+-};
+-template<typename T>
+-struct LogicalNot {
+-    static T apply(T x) { return !x; }
+-};
+-template<typename T>
+-struct RecApprox {
+-    static_assert(IsFloatingPoint<T>::value, "RecApprox only supported for floating points");
+-    static T apply(T x) { return 1 / x; }
+-};
+-template<typename T>
+-struct RecSqrtApprox {
+-    static_assert(IsFloatingPoint<T>::value, "RecSqrtApprox only supported for floating points");
+-    static T apply(T x) { return 1 / sqrt(x); }
+-};
+-template<typename T>
+-struct Sqrt {
+-    static_assert(IsFloatingPoint<T>::value, "Sqrt only supported for floating points");
+-    static T apply(T x) { return sqrt(x); }
+-};
+-
+-// Binary SIMD operators
+-template<typename T>
+-struct Add {
+-    using MaybeUnsignedT = typename detail::MaybeMakeUnsigned<T>::Type;
+-    static T apply(T l, T r) { return MaybeUnsignedT(l) + MaybeUnsignedT(r); }
+-};
+-template<typename T>
+-struct Sub {
+-    using MaybeUnsignedT = typename detail::MaybeMakeUnsigned<T>::Type;
+-    static T apply(T l, T r) { return MaybeUnsignedT(l) - MaybeUnsignedT(r); }
+-};
+-template<typename T>
+-struct Div {
+-    static_assert(IsFloatingPoint<T>::value, "Div only supported for floating points");
+-    static T apply(T l, T r) { return l / r; }
+-};
+-template<typename T>
+-struct Mul {
+-    using MaybeUnsignedT = typename detail::MaybeMakeUnsigned<T>::Type;
+-    static T apply(T l, T r) { return MaybeUnsignedT(l) * MaybeUnsignedT(r); }
+-};
+-template<typename T>
+-struct Minimum {
+-    static T apply(T l, T r) { return math_min_impl(l, r); }
+-};
+-template<typename T>
+-struct MinNum {
+-    static T apply(T l, T r) { return IsNaN(l) ? r : (IsNaN(r) ? l : math_min_impl(l, r)); }
+-};
+-template<typename T>
+-struct Maximum {
+-    static T apply(T l, T r) { return math_max_impl(l, r); }
+-};
+-template<typename T>
+-struct MaxNum {
+-    static T apply(T l, T r) { return IsNaN(l) ? r : (IsNaN(r) ? l : math_max_impl(l, r)); }
+-};
+-template<typename T>
+-struct LessThan {
+-    static bool apply(T l, T r) { return l < r; }
+-};
+-template<typename T>
+-struct LessThanOrEqual {
+-    static bool apply(T l, T r) { return l <= r; }
+-};
+-template<typename T>
+-struct GreaterThan {
+-    static bool apply(T l, T r) { return l > r; }
+-};
+-template<typename T>
+-struct GreaterThanOrEqual {
+-    static bool apply(T l, T r) { return l >= r; }
+-};
+-template<typename T>
+-struct Equal {
+-    static bool apply(T l, T r) { return l == r; }
+-};
+-template<typename T>
+-struct NotEqual {
+-    static bool apply(T l, T r) { return l != r; }
+-};
+-template<typename T>
+-struct Xor {
+-    static T apply(T l, T r) { return l ^ r; }
+-};
+-template<typename T>
+-struct And {
+-    static T apply(T l, T r) { return l & r; }
+-};
+-template<typename T>
+-struct Or {
+-    static T apply(T l, T r) { return l | r; }
+-};
+-
+-// For the following three operators, if the value v we're trying to shift is
+-// such that v << bits can't fit in the int32 range, then we have undefined
+-// behavior, according to C++11 [expr.shift]p2. However, left-shifting an
+-// unsigned type is well-defined.
+-//
+-// In C++, shifting by an amount outside the range [0;N-1] is undefined
+-// behavior. SIMD.js reduces the shift amount modulo the number of bits in a
+-// lane and has defined behavior for all shift amounts.
+-template<typename T>
+-struct ShiftLeft {
+-    static T apply(T v, int32_t bits) {
+-        typedef typename mozilla::MakeUnsigned<T>::Type UnsignedT;
+-        uint32_t maskedBits = uint32_t(bits) % (sizeof(T) * 8);
+-        return UnsignedT(v) << maskedBits;
+-    }
+-};
+-template<typename T>
+-struct ShiftRightArithmetic {
+-    static T apply(T v, int32_t bits) {
+-        typedef typename mozilla::MakeSigned<T>::Type SignedT;
+-        uint32_t maskedBits = uint32_t(bits) % (sizeof(T) * 8);
+-        return SignedT(v) >> maskedBits;
+-    }
+-};
+-template<typename T>
+-struct ShiftRightLogical {
+-    static T apply(T v, int32_t bits) {
+-        typedef typename mozilla::MakeUnsigned<T>::Type UnsignedT;
+-        uint32_t maskedBits = uint32_t(bits) % (sizeof(T) * 8);
+-        return UnsignedT(v) >> maskedBits;
+-    }
+-};
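+-
+-// For example (a sketch of the masking rule above, with 32-bit lanes):
+-//   ShiftLeft<int32_t>::apply(1, 33)                    == 2  (33 % 32 == 1)
+-//   ShiftRightLogical<uint32_t>::apply(0x80000000u, 31) == 1
+-//   ShiftRightArithmetic<int32_t>::apply(-8, 1)         == -4 (sign-extending)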
+-
+-// Saturating arithmetic is only defined on types smaller than int.
+-// Clamp `x` into the range supported by the integral type T.
+-template<typename T>
+-static T
+-Saturate(int x)
+-{
+-    static_assert(mozilla::IsIntegral<T>::value, "Only integer saturation supported");
+-    static_assert(sizeof(T) < sizeof(int), "Saturating int-sized arithmetic is not safe");
+-    const T lower = mozilla::MinValue<T>::value;
+-    const T upper = mozilla::MaxValue<T>::value;
+-    if (x > int(upper))
+-        return upper;
+-    if (x < int(lower))
+-        return lower;
+-    return T(x);
+-}
+-
+-// Since signed integer overflow is undefined behavior in C++, it would be
+-// wildly irresponsible to attempt something as dangerous as adding two numbers
+-// coming from user code. However, in this case we know that T is smaller than
+-// int, so there is no way these operations can cause overflow. The
+-// static_assert in Saturate() enforces this for us.
+-template<typename T>
+-struct AddSaturate {
+-    static T apply(T l, T r) { return Saturate<T>(l + r); }
+-};
+-template<typename T>
+-struct SubSaturate {
+-    static T apply(T l, T r) { return Saturate<T>(l - r); }
+-};
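+-
+-// For example, with T = int8_t (range [-128, 127]):
+-//   Saturate<int8_t>(300)                 == 127
+-//   AddSaturate<int8_t>::apply(100, 100)  == 127   (200 clamps to the max)
+-//   SubSaturate<int8_t>::apply(-100, 100) == -128  (-200 clamps to the min)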
+-
+-} // namespace js
+-
+-template<typename Out>
+-static bool
+-StoreResult(JSContext* cx, CallArgs& args, typename Out::Elem* result)
+-{
+-    RootedObject obj(cx, CreateSimd<Out>(cx, result));
+-    if (!obj)
+-        return false;
+-    args.rval().setObject(*obj);
+-    return true;
+-}
+-
+-// StoreResult can GC, and it is commonly used after pulling something out of a
+-// TypedObject:
+-//
+-//   Elem result = op(TypedObjectMemory<Elem>(args[0]));
+-//   StoreResult<Out>(..., result);
+-//
+-// The pointer extracted from the typed object in args[0] in the above example
+-// could be an interior pointer, and therefore be invalidated by GC.
+-// TypedObjectMemory() requires an assertion token to be passed in to prove
+-// that we won't GC, but the scope of, e.g., an AutoCheckCannotGC RAII object
+-// extends to the end of its containing scope -- which would include the call
+-// to StoreResult, resulting in a rooting hazard.
+-//
+-// TypedObjectElemArray fixes this by wrapping the problematic pointer in a
+-// type, and the analysis is able to see that it is dead before calling
+-// StoreResult. (But if another GC call is made before the pointer is dead,
+-// it will correctly report a hazard.)
+-//
+-template <typename Elem>
+-class TypedObjectElemArray {
+-    Elem* elements;
+-  public:
+-    explicit TypedObjectElemArray(HandleValue objVal) {
+-        JS::AutoCheckCannotGC nogc;
+-        elements = TypedObjectMemory<Elem*>(objVal, nogc);
+-    }
+-    Elem& operator[](int i) { return elements[i]; }
+-} JS_HAZ_GC_POINTER;
+-
+-// Coerces the inputs of type In to the type Coercion, applies the operator Op,
+-// and converts the result to the type Out.
+-template<typename In, typename Coercion, template<typename C> class Op, typename Out>
+-static bool
+-CoercedUnaryFunc(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename Coercion::Elem CoercionElem;
+-    typedef typename Out::Elem RetElem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != 1 || !IsVectorObject<In>(args[0]))
+-        return ErrorBadArgs(cx);
+-
+-    CoercionElem result[Coercion::lanes];
+-    TypedObjectElemArray<CoercionElem> val(args[0]);
+-    for (unsigned i = 0; i < Coercion::lanes; i++)
+-        result[i] = Op<CoercionElem>::apply(val[i]);
+-    return StoreResult<Out>(cx, args, (RetElem*) result);
+-}
+-
+-// Coerces the inputs of type In to the type Coercion, applies the operator Op,
+-// and converts the result to the type Out.
+-template<typename In, typename Coercion, template<typename C> class Op, typename Out>
+-static bool
+-CoercedBinaryFunc(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename Coercion::Elem CoercionElem;
+-    typedef typename Out::Elem RetElem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != 2 || !IsVectorObject<In>(args[0]) || !IsVectorObject<In>(args[1]))
+-        return ErrorBadArgs(cx);
+-
+-    CoercionElem result[Coercion::lanes];
+-    TypedObjectElemArray<CoercionElem> left(args[0]);
+-    TypedObjectElemArray<CoercionElem> right(args[1]);
+-    for (unsigned i = 0; i < Coercion::lanes; i++)
+-        result[i] = Op<CoercionElem>::apply(left[i], right[i]);
+-    return StoreResult<Out>(cx, args, (RetElem*) result);
+-}
+-
+-// Same as above, with no coercion, i.e. Coercion == In.
+-template<typename In, template<typename C> class Op, typename Out>
+-static bool
+-UnaryFunc(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    return CoercedUnaryFunc<In, Out, Op, Out>(cx, argc, vp);
+-}
+-
+-template<typename In, template<typename C> class Op, typename Out>
+-static bool
+-BinaryFunc(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    return CoercedBinaryFunc<In, Out, Op, Out>(cx, argc, vp);
+-}
+-
+-template<typename V>
+-static bool
+-ExtractLane(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() < 2 || !IsVectorObject<V>(args[0]))
+-        return ErrorBadArgs(cx);
+-
+-    unsigned lane;
+-    if (!ArgumentToLaneIndex(cx, args[1], V::lanes, &lane))
+-        return false;
+-
+-    JS::AutoCheckCannotGC nogc(cx);
+-    Elem* vec = TypedObjectMemory<Elem*>(args[0], nogc);
+-    Elem val = vec[lane];
+-    args.rval().set(V::ToValue(val));
+-    return true;
+-}
+-
+-template<typename V>
+-static bool
+-AllTrue(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() < 1 || !IsVectorObject<V>(args[0]))
+-        return ErrorBadArgs(cx);
+-
+-    JS::AutoCheckCannotGC nogc(cx);
+-    Elem* vec = TypedObjectMemory<Elem*>(args[0], nogc);
+-    bool allTrue = true;
+-    for (unsigned i = 0; allTrue && i < V::lanes; i++)
+-        allTrue = vec[i];
+-
+-    args.rval().setBoolean(allTrue);
+-    return true;
+-}
+-
+-template<typename V>
+-static bool
+-AnyTrue(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() < 1 || !IsVectorObject<V>(args[0]))
+-        return ErrorBadArgs(cx);
+-
+-    JS::AutoCheckCannotGC nogc(cx);
+-    Elem* vec = TypedObjectMemory<Elem*>(args[0], nogc);
+-    bool anyTrue = false;
+-    for (unsigned i = 0; !anyTrue && i < V::lanes; i++)
+-        anyTrue = vec[i];
+-
+-    args.rval().setBoolean(anyTrue);
+-    return true;
+-}
+-
+-template<typename V>
+-static bool
+-ReplaceLane(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    // Only the first and second arguments are mandatory
+-    if (args.length() < 2 || !IsVectorObject<V>(args[0]))
+-        return ErrorBadArgs(cx);
+-
+-    unsigned lane;
+-    if (!ArgumentToLaneIndex(cx, args[1], V::lanes, &lane))
+-        return false;
+-
+-    Elem value;
+-    if (!V::Cast(cx, args.get(2), &value))
+-        return false;
+-
+-    TypedObjectElemArray<Elem> vec(args[0]);
+-    Elem result[V::lanes];
+-    for (unsigned i = 0; i < V::lanes; i++)
+-        result[i] = i == lane ? value : vec[i];
+-
+-    return StoreResult<V>(cx, args, result);
+-}
+-
+-template<typename V>
+-static bool
+-Swizzle(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != (V::lanes + 1) || !IsVectorObject<V>(args[0]))
+-        return ErrorBadArgs(cx);
+-
+-    unsigned lanes[V::lanes];
+-    for (unsigned i = 0; i < V::lanes; i++) {
+-        if (!ArgumentToLaneIndex(cx, args[i + 1], V::lanes, &lanes[i]))
+-            return false;
+-    }
+-
+-    TypedObjectElemArray<Elem> val(args[0]);
+-    Elem result[V::lanes];
+-    for (unsigned i = 0; i < V::lanes; i++)
+-        result[i] = val[lanes[i]];
+-
+-    return StoreResult<V>(cx, args, result);
+-}
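+-
+-// For example (assuming Int32x4): swizzling (10, 20, 30, 40) with lane
+-// indices (3, 2, 1, 0) produces (40, 30, 20, 10).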
+-
+-template<typename V>
+-static bool
+-Shuffle(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != (V::lanes + 2) || !IsVectorObject<V>(args[0]) || !IsVectorObject<V>(args[1]))
+-        return ErrorBadArgs(cx);
+-
+-    unsigned lanes[V::lanes];
+-    for (unsigned i = 0; i < V::lanes; i++) {
+-        if (!ArgumentToLaneIndex(cx, args[i + 2], 2 * V::lanes, &lanes[i]))
+-            return false;
+-    }
+-
+-    Elem result[V::lanes];
+-    {
+-        JS::AutoCheckCannotGC nogc(cx);
+-        Elem* lhs = TypedObjectMemory<Elem*>(args[0], nogc);
+-        Elem* rhs = TypedObjectMemory<Elem*>(args[1], nogc);
+-
+-        for (unsigned i = 0; i < V::lanes; i++) {
+-            Elem* selectedInput = lanes[i] < V::lanes ? lhs : rhs;
+-            result[i] = selectedInput[lanes[i] % V::lanes];
+-        }
+-    }
+-
+-    return StoreResult<V>(cx, args, result);
+-}
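+-
+-// For example (assuming Int32x4): shuffling lhs = (1, 2, 3, 4) and
+-// rhs = (5, 6, 7, 8) with lane indices (0, 4, 1, 5) produces (1, 5, 2, 6);
+-// indices below V::lanes select from lhs, the rest from rhs modulo V::lanes.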
+-
+-template<typename V, template<typename T> class Op>
+-static bool
+-BinaryScalar(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != 2)
+-        return ErrorBadArgs(cx);
+-
+-    if (!IsVectorObject<V>(args[0]))
+-        return ErrorBadArgs(cx);
+-
+-    int32_t bits;
+-    if (!ToInt32(cx, args[1], &bits))
+-        return false;
+-
+-    TypedObjectElemArray<Elem> val(args[0]);
+-    Elem result[V::lanes];
+-    for (unsigned i = 0; i < V::lanes; i++)
+-        result[i] = Op<Elem>::apply(val[i], bits);
+-
+-    return StoreResult<V>(cx, args, result);
+-}
+-
+-template<typename In, template<typename C> class Op, typename Out>
+-static bool
+-CompareFunc(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename In::Elem InElem;
+-    typedef typename Out::Elem OutElem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != 2 || !IsVectorObject<In>(args[0]) || !IsVectorObject<In>(args[1]))
+-        return ErrorBadArgs(cx);
+-
+-    OutElem result[Out::lanes];
+-    TypedObjectElemArray<InElem> left(args[0]);
+-    TypedObjectElemArray<InElem> right(args[1]);
+-    for (unsigned i = 0; i < Out::lanes; i++) {
+-        unsigned j = (i * In::lanes) / Out::lanes;
+-        result[i] = Op<InElem>::apply(left[j], right[j]) ? -1 : 0;
+-    }
+-
+-    return StoreResult<Out>(cx, args, result);
+-}
+-
+-// This struct defines whether we should throw during a conversion attempt,
+-// when trying to convert a value from the type From to the type To.  This
+-// happens whenever a C++ conversion would have undefined behavior (and perhaps
+-// be platform-dependent).
+-template<typename From, typename To>
+-struct ThrowOnConvert;
+-
+-struct NeverThrow
+-{
+-    static bool value(int32_t v) {
+-        return false;
+-    }
+-};
+-
+-// While int32 to float conversions can be lossy, these conversions have
+-// defined behavior in C++, so we don't need to care about them here. In practice,
+-// this means round to nearest, ties to even (zero bit in significand).
+-template<>
+-struct ThrowOnConvert<int32_t, float> : public NeverThrow {};
+-
+-template<>
+-struct ThrowOnConvert<uint32_t, float> : public NeverThrow {};
+-
+-// All int32 can be safely converted to doubles.
+-template<>
+-struct ThrowOnConvert<int32_t, double> : public NeverThrow {};
+-
+-template<>
+-struct ThrowOnConvert<uint32_t, double> : public NeverThrow {};
+-
+-// All floats can be safely converted to doubles.
+-template<>
+-struct ThrowOnConvert<float, double> : public NeverThrow {};
+-
+-// Double to float conversion for inputs which aren't in the float range are
+-// undefined behavior in C++, but they're defined in IEEE754.
+-template<>
+-struct ThrowOnConvert<double, float> : public NeverThrow {};
+-
+-// Float to integer conversions have undefined behavior if the float value
+-// is out of the representable integer range (on x86, this yields the undefined
+-// value pattern, namely 0x80000000; on ARM, it clamps the input value), so
+-// check this here.
+-template<typename From, typename IntegerType>
+-struct ThrowIfNotInRange
+-{
+-    static_assert(mozilla::IsIntegral<IntegerType>::value, "bad destination type");
+-
+-    static bool value(From v) {
+-        // Truncate to integer value before the range check.
+-        double d = trunc(double(v));
+-        // Arrange relations so NaN returns true (i.e., it throws a RangeError).
+-        return !(d >= double(mozilla::MinValue<IntegerType>::value) &&
+-                 d <= double(mozilla::MaxValue<IntegerType>::value));
+-    }
+-};
+-
+-template<>
+-struct ThrowOnConvert<double, int32_t> : public ThrowIfNotInRange<double, int32_t> {};
+-
+-template<>
+-struct ThrowOnConvert<double, uint32_t> : public ThrowIfNotInRange<double, uint32_t> {};
+-
+-template<>
+-struct ThrowOnConvert<float, int32_t> : public ThrowIfNotInRange<float, int32_t> {};
+-
+-template<>
+-struct ThrowOnConvert<float, uint32_t> : public ThrowIfNotInRange<float, uint32_t> {};
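+-
+-// For illustration (assuming float -> int32_t lanes):
+-//   ThrowOnConvert<float, int32_t>::value(2.5f) == false (truncates to 2, in range)
+-//   ThrowOnConvert<float, int32_t>::value(3e9f) == true  (trunc(3e9) > INT32_MAX),
+-// in which case FuncConvert below reports JSMSG_SIMD_FAILED_CONVERSION.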
+-
+-template<typename V, typename Vret>
+-static bool
+-FuncConvert(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-    typedef typename Vret::Elem RetElem;
+-
+-    static_assert(!mozilla::IsSame<V,Vret>::value, "Can't convert SIMD type to itself");
+-    static_assert(V::lanes == Vret::lanes, "Can only convert from same number of lanes");
+-    static_assert(!mozilla::IsIntegral<Elem>::value || !mozilla::IsIntegral<RetElem>::value,
+-                  "Cannot convert between integer SIMD types");
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != 1 || !IsVectorObject<V>(args[0]))
+-        return ErrorBadArgs(cx);
+-
+-    TypedObjectElemArray<Elem> val(args[0]);
+-    RetElem result[Vret::lanes];
+-    for (unsigned i = 0; i < V::lanes; i++) {
+-        if (ThrowOnConvert<Elem, RetElem>::value(val[i])) {
+-            JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_SIMD_FAILED_CONVERSION);
+-            return false;
+-        }
+-        result[i] = ConvertScalar<RetElem>(val[i]);
+-    }
+-
+-    return StoreResult<Vret>(cx, args, result);
+-}
+-
+-template<typename V, typename Vret>
+-static bool
+-FuncConvertBits(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-    typedef typename Vret::Elem RetElem;
+-
+-    static_assert(!mozilla::IsSame<V, Vret>::value, "Can't convert SIMD type to itself");
+-    static_assert(V::lanes * sizeof(Elem) == Vret::lanes * sizeof(RetElem),
+-                  "Can only bitcast from the same number of bits");
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != 1 || !IsVectorObject<V>(args[0]))
+-        return ErrorBadArgs(cx);
+-
+-    // While we could just pass the typedMem of args[0] as StoreResult's last
+-    // argument, a GC could move the pointer to its memory in the meantime.
+-    // For consistency with other SIMD functions, simply copy the input in a
+-    // temporary array.
+-    RetElem copy[Vret::lanes];
+-    {
+-        JS::AutoCheckCannotGC nogc(cx);
+-        memcpy(copy, TypedObjectMemory<RetElem*>(args[0], nogc), Vret::lanes * sizeof(RetElem));
+-    }
+-    return StoreResult<Vret>(cx, args, copy);
+-}
+-
+-template<typename Vret>
+-static bool
+-FuncSplat(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename Vret::Elem RetElem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    RetElem arg;
+-    if (!Vret::Cast(cx, args.get(0), &arg))
+-        return false;
+-
+-    RetElem result[Vret::lanes];
+-    for (unsigned i = 0; i < Vret::lanes; i++)
+-        result[i] = arg;
+-    return StoreResult<Vret>(cx, args, result);
+-}
+-
+-template<typename V>
+-static bool
+-Bool(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-
+-    Elem result[V::lanes];
+-    for (unsigned i = 0; i < V::lanes; i++)
+-        result[i] = ToBoolean(args.get(i)) ? -1 : 0;
+-    return StoreResult<V>(cx, args, result);
+-}
+-
+-template<typename V, typename MaskType>
+-static bool
+-SelectBits(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-    typedef typename MaskType::Elem MaskTypeElem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != 3 || !IsVectorObject<MaskType>(args[0]) ||
+-        !IsVectorObject<V>(args[1]) || !IsVectorObject<V>(args[2]))
+-    {
+-        return ErrorBadArgs(cx);
+-    }
+-
+-    TypedObjectElemArray<MaskTypeElem> val(args[0]);
+-    TypedObjectElemArray<MaskTypeElem> tv(args[1]);
+-    TypedObjectElemArray<MaskTypeElem> fv(args[2]);
+-
+-    MaskTypeElem tr[MaskType::lanes];
+-    for (unsigned i = 0; i < MaskType::lanes; i++)
+-        tr[i] = And<MaskTypeElem>::apply(val[i], tv[i]);
+-
+-    MaskTypeElem fr[MaskType::lanes];
+-    for (unsigned i = 0; i < MaskType::lanes; i++)
+-        fr[i] = And<MaskTypeElem>::apply(Not<MaskTypeElem>::apply(val[i]), fv[i]);
+-
+-    MaskTypeElem orInt[MaskType::lanes];
+-    for (unsigned i = 0; i < MaskType::lanes; i++)
+-        orInt[i] = Or<MaskTypeElem>::apply(tr[i], fr[i]);
+-
+-    Elem* result = reinterpret_cast<Elem*>(orInt);
+-    return StoreResult<V>(cx, args, result);
+-}
+-
+-template<typename V, typename MaskType>
+-static bool
+-Select(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-    typedef typename MaskType::Elem MaskTypeElem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != 3 || !IsVectorObject<MaskType>(args[0]) ||
+-        !IsVectorObject<V>(args[1]) || !IsVectorObject<V>(args[2]))
+-    {
+-        return ErrorBadArgs(cx);
+-    }
+-
+-    TypedObjectElemArray<MaskTypeElem> mask(args[0]);
+-    TypedObjectElemArray<Elem> tv(args[1]);
+-    TypedObjectElemArray<Elem> fv(args[2]);
+-
+-    Elem result[V::lanes];
+-    for (unsigned i = 0; i < V::lanes; i++)
+-        result[i] = mask[i] ? tv[i] : fv[i];
+-
+-    return StoreResult<V>(cx, args, result);
+-}
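+-
+-// For example (assuming Int32x4 with a Bool32x4 mask): selecting between
+-// tv = (1, 2, 3, 4) and fv = (5, 6, 7, 8) with mask lanes (true, false,
+-// true, false) produces (1, 6, 3, 8).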
+-
+-// Extract an integer lane index from a function argument.
+-//
+-// Register an exception and return false if the argument is not suitable.
+-static bool
+-ArgumentToLaneIndex(JSContext* cx, JS::HandleValue v, unsigned limit, unsigned* lane)
+-{
+-    uint64_t arg;
+-    if (!NonStandardToIndex(cx, v, &arg))
+-        return false;
+-    if (arg >= limit)
+-        return ErrorBadIndex(cx);
+-
+-    *lane = unsigned(arg);
+-    return true;
+-}
+-
+-// Look for arguments (ta, idx) where ta is a TypedArray and idx is a
+-// non-negative integer.
+-// Check that accessBytes can be accessed starting from index idx in the array.
+-// Return the array handle in typedArray and idx converted to a byte offset in byteStart.
+-static bool
+-TypedArrayFromArgs(JSContext* cx, const CallArgs& args, uint32_t accessBytes,
+-                   MutableHandleObject typedArray, size_t* byteStart)
+-{
+-    if (!args[0].isObject())
+-        return ErrorBadArgs(cx);
+-
+-    JSObject& argobj = args[0].toObject();
+-    if (!argobj.is<TypedArrayObject>())
+-        return ErrorBadArgs(cx);
+-
+-    typedArray.set(&argobj);
+-
+-    uint64_t index;
+-    if (!NonStandardToIndex(cx, args[1], &index))
+-        return false;
+-
+-    // Do the range check in 64 bits even when size_t is 32 bits.
+-    // This can't overflow because index <= 2^53.
+-    uint64_t bytes = index * typedArray->as<TypedArrayObject>().bytesPerElement();
+-    // Keep in sync with AsmJS OnOutOfBounds function.
+-    if ((bytes + accessBytes) > typedArray->as<TypedArrayObject>().byteLength())
+-        return ErrorBadIndex(cx);
+-
+-    *byteStart = bytes;
+-
+-    return true;
+-}
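+-
+-// For example (assuming an Int32Array of length 4, byteLength 16): loading an
+-// Int32x4 (accessBytes == 16) at index 1 gives bytes == 4, and since
+-// 4 + 16 > 16 the access is rejected via ErrorBadIndex.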
+-
+-template<class V, unsigned NumElem>
+-static bool
+-Load(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != 2)
+-        return ErrorBadArgs(cx);
+-
+-    size_t byteStart;
+-    RootedObject typedArray(cx);
+-    if (!TypedArrayFromArgs(cx, args, sizeof(Elem) * NumElem, &typedArray, &byteStart))
+-        return false;
+-
+-    Rooted<TypeDescr*> typeDescr(cx, GetTypeDescr<V>(cx));
+-    if (!typeDescr)
+-        return false;
+-
+-    Rooted<TypedObject*> result(cx, TypedObject::createZeroed(cx, typeDescr));
+-    if (!result)
+-        return false;
+-
+-    JS::AutoCheckCannotGC nogc(cx);
+-    SharedMem<Elem*> src =
+-        typedArray->as<TypedArrayObject>().viewDataEither().addBytes(byteStart).cast<Elem*>();
+-    Elem* dst = reinterpret_cast<Elem*>(result->typedMem(nogc));
+-    jit::AtomicOperations::podCopySafeWhenRacy(SharedMem<Elem*>::unshared(dst), src, NumElem);
+-
+-    args.rval().setObject(*result);
+-    return true;
+-}
+-
+-template<class V, unsigned NumElem>
+-static bool
+-Store(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    typedef typename V::Elem Elem;
+-
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    if (args.length() != 3)
+-        return ErrorBadArgs(cx);
+-
+-    size_t byteStart;
+-    RootedObject typedArray(cx);
+-    if (!TypedArrayFromArgs(cx, args, sizeof(Elem) * NumElem, &typedArray, &byteStart))
+-        return false;
+-
+-    if (!IsVectorObject<V>(args[2]))
+-        return ErrorBadArgs(cx);
+-
+-    JS::AutoCheckCannotGC nogc(cx);
+-    Elem* src = TypedObjectMemory<Elem*>(args[2], nogc);
+-    SharedMem<Elem*> dst =
+-        typedArray->as<TypedArrayObject>().viewDataEither().addBytes(byteStart).cast<Elem*>();
+-    js::jit::AtomicOperations::podCopySafeWhenRacy(dst, SharedMem<Elem*>::unshared(src), NumElem);
+-
+-    args.rval().setObject(args[2].toObject());
+-    return true;
+-}
+-
+-#define DEFINE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands)       \
+-bool                                                               \
+-js::simd_float32x4_##Name(JSContext* cx, unsigned argc, Value* vp) \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-FLOAT32X4_FUNCTION_LIST(DEFINE_SIMD_FLOAT32X4_FUNCTION)
+-#undef DEFINE_SIMD_FLOAT32X4_FUNCTION
+-
+-#define DEFINE_SIMD_FLOAT64X2_FUNCTION(Name, Func, Operands)       \
+-bool                                                               \
+-js::simd_float64x2_##Name(JSContext* cx, unsigned argc, Value* vp) \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-FLOAT64X2_FUNCTION_LIST(DEFINE_SIMD_FLOAT64X2_FUNCTION)
+-#undef DEFINE_SIMD_FLOAT64X2_FUNCTION
+-
+-#define DEFINE_SIMD_INT8X16_FUNCTION(Name, Func, Operands)         \
+-bool                                                               \
+-js::simd_int8x16_##Name(JSContext* cx, unsigned argc, Value* vp)   \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-INT8X16_FUNCTION_LIST(DEFINE_SIMD_INT8X16_FUNCTION)
+-#undef DEFINE_SIMD_INT8X16_FUNCTION
+-
+-#define DEFINE_SIMD_INT16X8_FUNCTION(Name, Func, Operands)         \
+-bool                                                               \
+-js::simd_int16x8_##Name(JSContext* cx, unsigned argc, Value* vp)   \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-INT16X8_FUNCTION_LIST(DEFINE_SIMD_INT16X8_FUNCTION)
+-#undef DEFINE_SIMD_INT16X8_FUNCTION
+-
+-#define DEFINE_SIMD_INT32X4_FUNCTION(Name, Func, Operands)         \
+-bool                                                               \
+-js::simd_int32x4_##Name(JSContext* cx, unsigned argc, Value* vp)   \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-INT32X4_FUNCTION_LIST(DEFINE_SIMD_INT32X4_FUNCTION)
+-#undef DEFINE_SIMD_INT32X4_FUNCTION
+-
+-#define DEFINE_SIMD_UINT8X16_FUNCTION(Name, Func, Operands)        \
+-bool                                                               \
+-js::simd_uint8x16_##Name(JSContext* cx, unsigned argc, Value* vp)  \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-UINT8X16_FUNCTION_LIST(DEFINE_SIMD_UINT8X16_FUNCTION)
+-#undef DEFINE_SIMD_UINT8X16_FUNCTION
+-
+-#define DEFINE_SIMD_UINT16X8_FUNCTION(Name, Func, Operands)        \
+-bool                                                               \
+-js::simd_uint16x8_##Name(JSContext* cx, unsigned argc, Value* vp)  \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-UINT16X8_FUNCTION_LIST(DEFINE_SIMD_UINT16X8_FUNCTION)
+-#undef DEFINE_SIMD_UINT16X8_FUNCTION
+-
+-#define DEFINE_SIMD_UINT32X4_FUNCTION(Name, Func, Operands)        \
+-bool                                                               \
+-js::simd_uint32x4_##Name(JSContext* cx, unsigned argc, Value* vp)  \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-UINT32X4_FUNCTION_LIST(DEFINE_SIMD_UINT32X4_FUNCTION)
+-#undef DEFINE_SIMD_UINT32X4_FUNCTION
+-
+-#define DEFINE_SIMD_BOOL8X16_FUNCTION(Name, Func, Operands)        \
+-bool                                                               \
+-js::simd_bool8x16_##Name(JSContext* cx, unsigned argc, Value* vp)  \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-
+-BOOL8X16_FUNCTION_LIST(DEFINE_SIMD_BOOL8X16_FUNCTION)
+-#undef DEFINE_SIMD_BOOL8X16_FUNCTION
+-
+-#define DEFINE_SIMD_BOOL16X8_FUNCTION(Name, Func, Operands)        \
+-bool                                                               \
+-js::simd_bool16x8_##Name(JSContext* cx, unsigned argc, Value* vp)  \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-BOOL16X8_FUNCTION_LIST(DEFINE_SIMD_BOOL16X8_FUNCTION)
+-#undef DEFINE_SIMD_BOOL16X8_FUNCTION
+-
+-#define DEFINE_SIMD_BOOL32X4_FUNCTION(Name, Func, Operands)        \
+-bool                                                               \
+-js::simd_bool32x4_##Name(JSContext* cx, unsigned argc, Value* vp)  \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-BOOL32X4_FUNCTION_LIST(DEFINE_SIMD_BOOL32X4_FUNCTION)
+-#undef DEFINE_SIMD_BOOL32X4_FUNCTION
+-
+-#define DEFINE_SIMD_BOOL64X2_FUNCTION(Name, Func, Operands)        \
+-bool                                                               \
+-js::simd_bool64x2_##Name(JSContext* cx, unsigned argc, Value* vp)  \
+-{                                                                  \
+-    return Func(cx, argc, vp);                                     \
+-}
+-BOOL64X2_FUNCTION_LIST(DEFINE_SIMD_BOOL64X2_FUNCTION)
+-#undef DEFINE_SIMD_BOOL64X2_FUNCTION
+diff --git a/js/src/builtin/SIMD.h b/js/src/builtin/SIMD.h
+deleted file mode 100644
+--- a/js/src/builtin/SIMD.h
++++ /dev/null
+@@ -1,298 +0,0 @@
+-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+- * vim: set ts=8 sts=4 et sw=4 tw=99:
+- * This Source Code Form is subject to the terms of the Mozilla Public
+- * License, v. 2.0. If a copy of the MPL was not distributed with this
+- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+-
+-#ifndef builtin_SIMD_h
+-#define builtin_SIMD_h
+-
+-#include "jsapi.h"
+-#include "NamespaceImports.h"
+-
+-#include "builtin/SIMDConstants.h"
+-#include "jit/IonTypes.h"
+-#include "js/Conversions.h"
+-
+-/*
+- * JS SIMD functions.
+- * Spec matching polyfill:
+- * https://github.com/tc39/ecmascript_simd/blob/master/src/ecmascript_simd.js
+- */
+-
+-namespace js {
+-
+-class GlobalObject;
+-
+-// These classes implement a concept with the following constraints:
+-// - requires typename Elem: this is the scalar lane type, stored in each lane
+-// of the SIMD vector.
+-// - requires static const unsigned lanes: this is the number of lanes (length)
+-// of the SIMD vector.
+-// - requires static const SimdType type: this is the SimdType enum value
+-// corresponding to the SIMD type.
+-// - requires static bool Cast(JSContext*, JS::HandleValue, Elem*): casts a
+-// given Value to the current scalar lane type and saves it in the Elem
+-// out-param.
+-// - requires static Value ToValue(Elem): returns a Value of the right type
+-// containing the given value.
+-//
+-// This concept is used in the templates above to define the functions
+-// associated with a given type and in their implementations, to avoid code
+-// redundancy.
+-
+-struct Float32x4 {
+-    typedef float Elem;
+-    static const unsigned lanes = 4;
+-    static const SimdType type = SimdType::Float32x4;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        double d;
+-        if (!ToNumber(cx, v, &d))
+-            return false;
+-        *out = float(d);
+-        return true;
+-    }
+-    static Value ToValue(Elem value) {
+-        return DoubleValue(JS::CanonicalizeNaN(value));
+-    }
+-};
+-
+-struct Float64x2 {
+-    typedef double Elem;
+-    static const unsigned lanes = 2;
+-    static const SimdType type = SimdType::Float64x2;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        return ToNumber(cx, v, out);
+-    }
+-    static Value ToValue(Elem value) {
+-        return DoubleValue(JS::CanonicalizeNaN(value));
+-    }
+-};
+-
+-struct Int8x16 {
+-    typedef int8_t Elem;
+-    static const unsigned lanes = 16;
+-    static const SimdType type = SimdType::Int8x16;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        return ToInt8(cx, v, out);
+-    }
+-    static Value ToValue(Elem value) {
+-        return NumberValue(value);
+-    }
+-};
+-
+-struct Int16x8 {
+-    typedef int16_t Elem;
+-    static const unsigned lanes = 8;
+-    static const SimdType type = SimdType::Int16x8;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        return ToInt16(cx, v, out);
+-    }
+-    static Value ToValue(Elem value) {
+-        return NumberValue(value);
+-    }
+-};
+-
+-struct Int32x4 {
+-    typedef int32_t Elem;
+-    static const unsigned lanes = 4;
+-    static const SimdType type = SimdType::Int32x4;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        return ToInt32(cx, v, out);
+-    }
+-    static Value ToValue(Elem value) {
+-        return NumberValue(value);
+-    }
+-};
+-
+-struct Uint8x16 {
+-    typedef uint8_t Elem;
+-    static const unsigned lanes = 16;
+-    static const SimdType type = SimdType::Uint8x16;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        return ToUint8(cx, v, out);
+-    }
+-    static Value ToValue(Elem value) {
+-        return NumberValue(value);
+-    }
+-};
+-
+-struct Uint16x8 {
+-    typedef uint16_t Elem;
+-    static const unsigned lanes = 8;
+-    static const SimdType type = SimdType::Uint16x8;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        return ToUint16(cx, v, out);
+-    }
+-    static Value ToValue(Elem value) {
+-        return NumberValue(value);
+-    }
+-};
+-
+-struct Uint32x4 {
+-    typedef uint32_t Elem;
+-    static const unsigned lanes = 4;
+-    static const SimdType type = SimdType::Uint32x4;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        return ToUint32(cx, v, out);
+-    }
+-    static Value ToValue(Elem value) {
+-        return NumberValue(value);
+-    }
+-};
+-
+-struct Bool8x16 {
+-    typedef int8_t Elem;
+-    static const unsigned lanes = 16;
+-    static const SimdType type = SimdType::Bool8x16;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        *out = ToBoolean(v) ? -1 : 0;
+-        return true;
+-    }
+-    static Value ToValue(Elem value) {
+-        return BooleanValue(value);
+-    }
+-};
+-
+-struct Bool16x8 {
+-    typedef int16_t Elem;
+-    static const unsigned lanes = 8;
+-    static const SimdType type = SimdType::Bool16x8;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        *out = ToBoolean(v) ? -1 : 0;
+-        return true;
+-    }
+-    static Value ToValue(Elem value) {
+-        return BooleanValue(value);
+-    }
+-};
+-
+-struct Bool32x4 {
+-    typedef int32_t Elem;
+-    static const unsigned lanes = 4;
+-    static const SimdType type = SimdType::Bool32x4;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        *out = ToBoolean(v) ? -1 : 0;
+-        return true;
+-    }
+-    static Value ToValue(Elem value) {
+-        return BooleanValue(value);
+-    }
+-};
+-
+-struct Bool64x2 {
+-    typedef int64_t Elem;
+-    static const unsigned lanes = 2;
+-    static const SimdType type = SimdType::Bool64x2;
+-    static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
+-        *out = ToBoolean(v) ? -1 : 0;
+-        return true;
+-    }
+-    static Value ToValue(Elem value) {
+-        return BooleanValue(value);
+-    }
+-};
+-
+-// Get the well-known name of the SIMD.* object corresponding to type.
+-PropertyName* SimdTypeToName(const JSAtomState& atoms, SimdType type);
+-
+-// Check if name is the well-known name of a SIMD type.
+-// Returns true and sets *type iff name is known.
+-bool IsSimdTypeName(const JSAtomState& atoms, const PropertyName* name, SimdType* type);
+-
+-const char* SimdTypeToString(SimdType type);
+-
+-template<typename V>
+-JSObject* CreateSimd(JSContext* cx, const typename V::Elem* data);
+-
+-template<typename V>
+-bool IsVectorObject(HandleValue v);
+-
+-template<typename V>
+-MOZ_MUST_USE bool ToSimdConstant(JSContext* cx, HandleValue v, jit::SimdConstant* out);
+-
+-JSObject*
+-InitSimdClass(JSContext* cx, Handle<GlobalObject*> global);
+-
+-namespace jit {
+-
+-extern const JSJitInfo JitInfo_SimdInt32x4_extractLane;
+-extern const JSJitInfo JitInfo_SimdFloat32x4_extractLane;
+-
+-} // namespace jit
+-
+-#define DECLARE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands)   \
+-extern MOZ_MUST_USE bool                                        \
+-simd_float32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
+-FLOAT32X4_FUNCTION_LIST(DECLARE_SIMD_FLOAT32X4_FUNCTION)
+-#undef DECLARE_SIMD_FLOAT32X4_FUNCTION
+-
+-#define DECLARE_SIMD_FLOAT64X2_FUNCTION(Name, Func, Operands)   \
+-extern MOZ_MUST_USE bool                                        \
+-simd_float64x2_##Name(JSContext* cx, unsigned argc, Value* vp);
+-FLOAT64X2_FUNCTION_LIST(DECLARE_SIMD_FLOAT64X2_FUNCTION)
+-#undef DECLARE_SIMD_FLOAT64X2_FUNCTION
+-
+-#define DECLARE_SIMD_INT8X16_FUNCTION(Name, Func, Operands)     \
+-extern MOZ_MUST_USE bool                                        \
+-simd_int8x16_##Name(JSContext* cx, unsigned argc, Value* vp);
+-INT8X16_FUNCTION_LIST(DECLARE_SIMD_INT8X16_FUNCTION)
+-#undef DECLARE_SIMD_INT8X16_FUNCTION
+-
+-#define DECLARE_SIMD_INT16X8_FUNCTION(Name, Func, Operands)     \
+-extern MOZ_MUST_USE bool                                        \
+-simd_int16x8_##Name(JSContext* cx, unsigned argc, Value* vp);
+-INT16X8_FUNCTION_LIST(DECLARE_SIMD_INT16X8_FUNCTION)
+-#undef DECLARE_SIMD_INT16X8_FUNCTION
+-
+-#define DECLARE_SIMD_INT32X4_FUNCTION(Name, Func, Operands)     \
+-extern MOZ_MUST_USE bool                                        \
+-simd_int32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
+-INT32X4_FUNCTION_LIST(DECLARE_SIMD_INT32X4_FUNCTION)
+-#undef DECLARE_SIMD_INT32X4_FUNCTION
+-
+-#define DECLARE_SIMD_UINT8X16_FUNCTION(Name, Func, Operands)    \
+-extern MOZ_MUST_USE bool                                        \
+-simd_uint8x16_##Name(JSContext* cx, unsigned argc, Value* vp);
+-UINT8X16_FUNCTION_LIST(DECLARE_SIMD_UINT8X16_FUNCTION)
+-#undef DECLARE_SIMD_UINT8X16_FUNCTION
+-
+-#define DECLARE_SIMD_UINT16X8_FUNCTION(Name, Func, Operands)    \
+-extern MOZ_MUST_USE bool                                        \
+-simd_uint16x8_##Name(JSContext* cx, unsigned argc, Value* vp);
+-UINT16X8_FUNCTION_LIST(DECLARE_SIMD_UINT16X8_FUNCTION)
+-#undef DECLARE_SIMD_UINT16X8_FUNCTION
+-
+-#define DECLARE_SIMD_UINT32X4_FUNCTION(Name, Func, Operands)    \
+-extern MOZ_MUST_USE bool                                        \
+-simd_uint32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
+-UINT32X4_FUNCTION_LIST(DECLARE_SIMD_UINT32X4_FUNCTION)
+-#undef DECLARE_SIMD_UINT32X4_FUNCTION
+-
+-#define DECLARE_SIMD_BOOL8X16_FUNCTION(Name, Func, Operands)    \
+-extern MOZ_MUST_USE bool                                        \
+-simd_bool8x16_##Name(JSContext* cx, unsigned argc, Value* vp);
+-BOOL8X16_FUNCTION_LIST(DECLARE_SIMD_BOOL8X16_FUNCTION)
+-#undef DECLARE_SIMD_BOOL8X16_FUNCTION
+-
+-#define DECLARE_SIMD_BOOL16X8_FUNCTION(Name, Func, Operands)    \
+-extern MOZ_MUST_USE bool                                        \
+-simd_bool16x8_##Name(JSContext* cx, unsigned argc, Value* vp);
+-BOOL16X8_FUNCTION_LIST(DECLARE_SIMD_BOOL16X8_FUNCTION)
+-#undef DECLARE_SIMD_BOOL16X8_FUNCTION
+-
+-#define DECLARE_SIMD_BOOL32X4_FUNCTION(Name, Func, Operands)    \
+-extern MOZ_MUST_USE bool                                        \
+-simd_bool32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
+-BOOL32X4_FUNCTION_LIST(DECLARE_SIMD_BOOL32X4_FUNCTION)
+-#undef DECLARE_SIMD_BOOL32X4_FUNCTION
+-
+-#define DECLARE_SIMD_BOOL64X2_FUNCTION(Name, Func, Operands)    \
+-extern MOZ_MUST_USE bool                                        \
+-simd_bool64x2_##Name(JSContext* cx, unsigned argc, Value* vp);
+-BOOL64X2_FUNCTION_LIST(DECLARE_SIMD_BOOL64X2_FUNCTION)
+-#undef DECLARE_SIMD_BOOL64X2_FUNCTION
+-
+-}  /* namespace js */
+-
+-#endif /* builtin_SIMD_h */
+diff --git a/js/src/builtin/SIMDConstants.h b/js/src/builtin/SIMDConstants.h
+deleted file mode 100644
+--- a/js/src/builtin/SIMDConstants.h
++++ /dev/null
+@@ -1,941 +0,0 @@
+-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+- * vim: set ts=8 sts=4 et sw=4 tw=99:
+- * This Source Code Form is subject to the terms of the Mozilla Public
+- * License, v. 2.0. If a copy of the MPL was not distributed with this
+- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+-
+-#ifndef builtin_SIMDConstants_h
+-#define builtin_SIMDConstants_h
+-
+-#include "mozilla/Assertions.h"
+-
+-#include "builtin/TypedObjectConstants.h"
+-
+-// Bool8x16.
+-#define BOOL8X16_UNARY_FUNCTION_LIST(V)                                               \
+-  V(not, (UnaryFunc<Bool8x16, LogicalNot, Bool8x16>), 1)                              \
+-  V(check, (UnaryFunc<Bool8x16, Identity, Bool8x16>), 1)                              \
+-  V(splat, (FuncSplat<Bool8x16>), 1)                                                  \
+-  V(allTrue, (AllTrue<Bool8x16>), 1)                                                  \
+-  V(anyTrue, (AnyTrue<Bool8x16>), 1)
+-
+-#define BOOL8X16_BINARY_FUNCTION_LIST(V)                                              \
+-  V(extractLane, (ExtractLane<Bool8x16>), 2)                                          \
+-  V(and, (BinaryFunc<Bool8x16, And, Bool8x16>), 2)                                    \
+-  V(or, (BinaryFunc<Bool8x16, Or, Bool8x16>), 2)                                      \
+-  V(xor, (BinaryFunc<Bool8x16, Xor, Bool8x16>), 2)                                    \
+-
+-#define BOOL8X16_TERNARY_FUNCTION_LIST(V)                                             \
+-  V(replaceLane, (ReplaceLane<Bool8x16>), 3)
+-
+-#define BOOL8X16_FUNCTION_LIST(V)                                                     \
+-  BOOL8X16_UNARY_FUNCTION_LIST(V)                                                     \
+-  BOOL8X16_BINARY_FUNCTION_LIST(V)                                                    \
+-  BOOL8X16_TERNARY_FUNCTION_LIST(V)
+-
+-// Bool16x8.
+-#define BOOL16X8_UNARY_FUNCTION_LIST(V)                                               \
+-  V(not, (UnaryFunc<Bool16x8, LogicalNot, Bool16x8>), 1)                              \
+-  V(check, (UnaryFunc<Bool16x8, Identity, Bool16x8>), 1)                              \
+-  V(splat, (FuncSplat<Bool16x8>), 1)                                                  \
+-  V(allTrue, (AllTrue<Bool16x8>), 1)                                                  \
+-  V(anyTrue, (AnyTrue<Bool16x8>), 1)
+-
+-#define BOOL16X8_BINARY_FUNCTION_LIST(V)                                              \
+-  V(extractLane, (ExtractLane<Bool16x8>), 2)                                          \
+-  V(and, (BinaryFunc<Bool16x8, And, Bool16x8>), 2)                                    \
+-  V(or, (BinaryFunc<Bool16x8, Or, Bool16x8>), 2)                                      \
+-  V(xor, (BinaryFunc<Bool16x8, Xor, Bool16x8>), 2)                                    \
+-
+-#define BOOL16X8_TERNARY_FUNCTION_LIST(V)                                             \
+-  V(replaceLane, (ReplaceLane<Bool16x8>), 3)
+-
+-#define BOOL16X8_FUNCTION_LIST(V)                                                     \
+-  BOOL16X8_UNARY_FUNCTION_LIST(V)                                                     \
+-  BOOL16X8_BINARY_FUNCTION_LIST(V)                                                    \
+-  BOOL16X8_TERNARY_FUNCTION_LIST(V)
+-
+-// Bool32x4.
+-#define BOOL32X4_UNARY_FUNCTION_LIST(V)                                               \
+-  V(not, (UnaryFunc<Bool32x4, LogicalNot, Bool32x4>), 1)                              \
+-  V(check, (UnaryFunc<Bool32x4, Identity, Bool32x4>), 1)                              \
+-  V(splat, (FuncSplat<Bool32x4>), 1)                                                  \
+-  V(allTrue, (AllTrue<Bool32x4>), 1)                                                  \
+-  V(anyTrue, (AnyTrue<Bool32x4>), 1)
+-
+-#define BOOL32X4_BINARY_FUNCTION_LIST(V)                                              \
+-  V(extractLane, (ExtractLane<Bool32x4>), 2)                                          \
+-  V(and, (BinaryFunc<Bool32x4, And, Bool32x4>), 2)                                    \
+-  V(or, (BinaryFunc<Bool32x4, Or, Bool32x4>), 2)                                      \
+-  V(xor, (BinaryFunc<Bool32x4, Xor, Bool32x4>), 2)                                    \
+-
+-#define BOOL32X4_TERNARY_FUNCTION_LIST(V)                                             \
+-  V(replaceLane, (ReplaceLane<Bool32x4>), 3)
+-
+-#define BOOL32X4_FUNCTION_LIST(V)                                                     \
+-  BOOL32X4_UNARY_FUNCTION_LIST(V)                                                     \
+-  BOOL32X4_BINARY_FUNCTION_LIST(V)                                                    \
+-  BOOL32X4_TERNARY_FUNCTION_LIST(V)
+-
+-// Bool64x2.
+-#define BOOL64X2_UNARY_FUNCTION_LIST(V)                                               \
+-  V(not, (UnaryFunc<Bool64x2, LogicalNot, Bool64x2>), 1)                              \
+-  V(check, (UnaryFunc<Bool64x2, Identity, Bool64x2>), 1)                              \
+-  V(splat, (FuncSplat<Bool64x2>), 1)                                                  \
+-  V(allTrue, (AllTrue<Bool64x2>), 1)                                                  \
+-  V(anyTrue, (AnyTrue<Bool64x2>), 1)
+-
+-#define BOOL64X2_BINARY_FUNCTION_LIST(V)                                              \
+-  V(extractLane, (ExtractLane<Bool64x2>), 2)                                          \
+-  V(and, (BinaryFunc<Bool64x2, And, Bool64x2>), 2)                                    \
+-  V(or, (BinaryFunc<Bool64x2, Or, Bool64x2>), 2)                                      \
+-  V(xor, (BinaryFunc<Bool64x2, Xor, Bool64x2>), 2)                                    \
+-
+-#define BOOL64X2_TERNARY_FUNCTION_LIST(V)                                             \
+-  V(replaceLane, (ReplaceLane<Bool64x2>), 3)
+-
+-#define BOOL64X2_FUNCTION_LIST(V)                                                     \
+-  BOOL64X2_UNARY_FUNCTION_LIST(V)                                                     \
+-  BOOL64X2_BINARY_FUNCTION_LIST(V)                                                    \
+-  BOOL64X2_TERNARY_FUNCTION_LIST(V)
+-
+-// Float32x4.
+-#define FLOAT32X4_UNARY_FUNCTION_LIST(V)                                              \
+-  V(abs, (UnaryFunc<Float32x4, Abs, Float32x4>), 1)                                   \
+-  V(check, (UnaryFunc<Float32x4, Identity, Float32x4>), 1)                            \
+-  V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Float32x4>), 1)                    \
+-  V(fromInt8x16Bits,   (FuncConvertBits<Int8x16,   Float32x4>), 1)                    \
+-  V(fromInt16x8Bits,   (FuncConvertBits<Int16x8,   Float32x4>), 1)                    \
+-  V(fromInt32x4,       (FuncConvert<Int32x4,       Float32x4>), 1)                    \
+-  V(fromInt32x4Bits,   (FuncConvertBits<Int32x4,   Float32x4>), 1)                    \
+-  V(fromUint8x16Bits,  (FuncConvertBits<Uint8x16,  Float32x4>), 1)                    \
+-  V(fromUint16x8Bits,  (FuncConvertBits<Uint16x8,  Float32x4>), 1)                    \
+-  V(fromUint32x4,      (FuncConvert<Uint32x4,      Float32x4>), 1)                    \
+-  V(fromUint32x4Bits,  (FuncConvertBits<Uint32x4,  Float32x4>), 1)                    \
+-  V(neg, (UnaryFunc<Float32x4, Neg, Float32x4>), 1)                                   \
+-  V(reciprocalApproximation, (UnaryFunc<Float32x4, RecApprox, Float32x4>), 1)         \
+-  V(reciprocalSqrtApproximation, (UnaryFunc<Float32x4, RecSqrtApprox, Float32x4>), 1) \
+-  V(splat, (FuncSplat<Float32x4>), 1)                                                 \
+-  V(sqrt, (UnaryFunc<Float32x4, Sqrt, Float32x4>), 1)
+-
+-#define FLOAT32X4_BINARY_FUNCTION_LIST(V)                                             \
+-  V(add, (BinaryFunc<Float32x4, Add, Float32x4>), 2)                                  \
+-  V(div, (BinaryFunc<Float32x4, Div, Float32x4>), 2)                                  \
+-  V(equal, (CompareFunc<Float32x4, Equal, Bool32x4>), 2)                              \
+-  V(extractLane, (ExtractLane<Float32x4>), 2)                                         \
+-  V(greaterThan, (CompareFunc<Float32x4, GreaterThan, Bool32x4>), 2)                  \
+-  V(greaterThanOrEqual, (CompareFunc<Float32x4, GreaterThanOrEqual, Bool32x4>), 2)    \
+-  V(lessThan, (CompareFunc<Float32x4, LessThan, Bool32x4>), 2)                        \
+-  V(lessThanOrEqual, (CompareFunc<Float32x4, LessThanOrEqual, Bool32x4>), 2)          \
+-  V(load,  (Load<Float32x4, 4>), 2)                                                   \
+-  V(load3, (Load<Float32x4, 3>), 2)                                                   \
+-  V(load2, (Load<Float32x4, 2>), 2)                                                   \
+-  V(load1, (Load<Float32x4, 1>), 2)                                                   \
+-  V(max, (BinaryFunc<Float32x4, Maximum, Float32x4>), 2)                              \
+-  V(maxNum, (BinaryFunc<Float32x4, MaxNum, Float32x4>), 2)                            \
+-  V(min, (BinaryFunc<Float32x4, Minimum, Float32x4>), 2)                              \
+-  V(minNum, (BinaryFunc<Float32x4, MinNum, Float32x4>), 2)                            \
+-  V(mul, (BinaryFunc<Float32x4, Mul, Float32x4>), 2)                                  \
+-  V(notEqual, (CompareFunc<Float32x4, NotEqual, Bool32x4>), 2)                        \
+-  V(sub, (BinaryFunc<Float32x4, Sub, Float32x4>), 2)
+-
+-#define FLOAT32X4_TERNARY_FUNCTION_LIST(V)                                            \
+-  V(replaceLane, (ReplaceLane<Float32x4>), 3)                                         \
+-  V(select, (Select<Float32x4, Bool32x4>), 3)                                         \
+-  V(store,  (Store<Float32x4, 4>), 3)                                                 \
+-  V(store3, (Store<Float32x4, 3>), 3)                                                 \
+-  V(store2, (Store<Float32x4, 2>), 3)                                                 \
+-  V(store1, (Store<Float32x4, 1>), 3)
+-
+-#define FLOAT32X4_SHUFFLE_FUNCTION_LIST(V)                                            \
+-  V(swizzle, Swizzle<Float32x4>, 5)                                                   \
+-  V(shuffle, Shuffle<Float32x4>, 6)
+-
+-#define FLOAT32X4_FUNCTION_LIST(V)                                                    \
+-  FLOAT32X4_UNARY_FUNCTION_LIST(V)                                                    \
+-  FLOAT32X4_BINARY_FUNCTION_LIST(V)                                                   \
+-  FLOAT32X4_TERNARY_FUNCTION_LIST(V)                                                  \
+-  FLOAT32X4_SHUFFLE_FUNCTION_LIST(V)
+-
+-// Float64x2.
+-#define FLOAT64X2_UNARY_FUNCTION_LIST(V)                                              \
+-  V(abs, (UnaryFunc<Float64x2, Abs, Float64x2>), 1)                                   \
+-  V(check, (UnaryFunc<Float64x2, Identity, Float64x2>), 1)                            \
+-  V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Float64x2>), 1)                    \
+-  V(fromInt8x16Bits,   (FuncConvertBits<Int8x16,   Float64x2>), 1)                    \
+-  V(fromInt16x8Bits,   (FuncConvertBits<Int16x8,   Float64x2>), 1)                    \
+-  V(fromInt32x4Bits,   (FuncConvertBits<Int32x4,   Float64x2>), 1)                    \
+-  V(fromUint8x16Bits,  (FuncConvertBits<Uint8x16,  Float64x2>), 1)                    \
+-  V(fromUint16x8Bits,  (FuncConvertBits<Uint16x8,  Float64x2>), 1)                    \
+-  V(fromUint32x4Bits,  (FuncConvertBits<Uint32x4,  Float64x2>), 1)                    \
+-  V(neg, (UnaryFunc<Float64x2, Neg, Float64x2>), 1)                                   \
+-  V(reciprocalApproximation, (UnaryFunc<Float64x2, RecApprox, Float64x2>), 1)         \
+-  V(reciprocalSqrtApproximation, (UnaryFunc<Float64x2, RecSqrtApprox, Float64x2>), 1) \
+-  V(splat, (FuncSplat<Float64x2>), 1)                                                 \
+-  V(sqrt, (UnaryFunc<Float64x2, Sqrt, Float64x2>), 1)
+-
+-#define FLOAT64X2_BINARY_FUNCTION_LIST(V)                                             \
+-  V(add, (BinaryFunc<Float64x2, Add, Float64x2>), 2)                                  \
+-  V(div, (BinaryFunc<Float64x2, Div, Float64x2>), 2)                                  \
+-  V(equal, (CompareFunc<Float64x2, Equal, Bool64x2>), 2)                              \
+-  V(extractLane, (ExtractLane<Float64x2>), 2)                                         \
+-  V(greaterThan, (CompareFunc<Float64x2, GreaterThan, Bool64x2>), 2)                  \
+-  V(greaterThanOrEqual, (CompareFunc<Float64x2, GreaterThanOrEqual, Bool64x2>), 2)    \
+-  V(lessThan, (CompareFunc<Float64x2, LessThan, Bool64x2>), 2)                        \
+-  V(lessThanOrEqual, (CompareFunc<Float64x2, LessThanOrEqual, Bool64x2>), 2)          \
+-  V(load,  (Load<Float64x2, 2>), 2)                                                   \
+-  V(load1, (Load<Float64x2, 1>), 2)                                                   \
+-  V(max, (BinaryFunc<Float64x2, Maximum, Float64x2>), 2)                              \
+-  V(maxNum, (BinaryFunc<Float64x2, MaxNum, Float64x2>), 2)                            \
+-  V(min, (BinaryFunc<Float64x2, Minimum, Float64x2>), 2)                              \
+-  V(minNum, (BinaryFunc<Float64x2, MinNum, Float64x2>), 2)                            \
+-  V(mul, (BinaryFunc<Float64x2, Mul, Float64x2>), 2)                                  \
+-  V(notEqual, (CompareFunc<Float64x2, NotEqual, Bool64x2>), 2)                        \
+-  V(sub, (BinaryFunc<Float64x2, Sub, Float64x2>), 2)
+-
+-#define FLOAT64X2_TERNARY_FUNCTION_LIST(V)                                            \
+-  V(replaceLane, (ReplaceLane<Float64x2>), 3)                                         \
+-  V(select, (Select<Float64x2, Bool64x2>), 3)                                         \
+-  V(store,  (Store<Float64x2, 2>), 3)                                                 \
+-  V(store1, (Store<Float64x2, 1>), 3)
+-
+-#define FLOAT64X2_SHUFFLE_FUNCTION_LIST(V)                                            \
+-  V(swizzle, Swizzle<Float64x2>, 3)                                                   \
+-  V(shuffle, Shuffle<Float64x2>, 4)
+-
+-#define FLOAT64X2_FUNCTION_LIST(V)                                                    \
+-  FLOAT64X2_UNARY_FUNCTION_LIST(V)                                                    \
+-  FLOAT64X2_BINARY_FUNCTION_LIST(V)                                                   \
+-  FLOAT64X2_TERNARY_FUNCTION_LIST(V)                                                  \
+-  FLOAT64X2_SHUFFLE_FUNCTION_LIST(V)
+-
+-// Int8x16.
+-#define INT8X16_UNARY_FUNCTION_LIST(V)                                                \
+-  V(check, (UnaryFunc<Int8x16, Identity, Int8x16>), 1)                                \
+-  V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Int8x16>), 1)                      \
+-  V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Int8x16>), 1)                      \
+-  V(fromInt16x8Bits,   (FuncConvertBits<Int16x8,   Int8x16>), 1)                      \
+-  V(fromInt32x4Bits,   (FuncConvertBits<Int32x4,   Int8x16>), 1)                      \
+-  V(fromUint8x16Bits,  (FuncConvertBits<Uint8x16,  Int8x16>), 1)                      \
+-  V(fromUint16x8Bits,  (FuncConvertBits<Uint16x8,  Int8x16>), 1)                      \
+-  V(fromUint32x4Bits,  (FuncConvertBits<Uint32x4,  Int8x16>), 1)                      \
+-  V(neg, (UnaryFunc<Int8x16, Neg, Int8x16>), 1)                                       \
+-  V(not, (UnaryFunc<Int8x16, Not, Int8x16>), 1)                                       \
+-  V(splat, (FuncSplat<Int8x16>), 1)
+-
+-#define INT8X16_BINARY_FUNCTION_LIST(V)                                               \
+-  V(add, (BinaryFunc<Int8x16, Add, Int8x16>), 2)                                      \
+-  V(addSaturate, (BinaryFunc<Int8x16, AddSaturate, Int8x16>), 2)                      \
+-  V(and, (BinaryFunc<Int8x16, And, Int8x16>), 2)                                      \
+-  V(equal, (CompareFunc<Int8x16, Equal, Bool8x16>), 2)                                \
+-  V(extractLane, (ExtractLane<Int8x16>), 2)                                           \
+-  V(greaterThan, (CompareFunc<Int8x16, GreaterThan, Bool8x16>), 2)                    \
+-  V(greaterThanOrEqual, (CompareFunc<Int8x16, GreaterThanOrEqual, Bool8x16>), 2)      \
+-  V(lessThan, (CompareFunc<Int8x16, LessThan, Bool8x16>), 2)                          \
+-  V(lessThanOrEqual, (CompareFunc<Int8x16, LessThanOrEqual, Bool8x16>), 2)            \
+-  V(load, (Load<Int8x16, 16>), 2)                                                     \
+-  V(mul, (BinaryFunc<Int8x16, Mul, Int8x16>), 2)                                      \
+-  V(notEqual, (CompareFunc<Int8x16, NotEqual, Bool8x16>), 2)                          \
+-  V(or, (BinaryFunc<Int8x16, Or, Int8x16>), 2)                                        \
+-  V(sub, (BinaryFunc<Int8x16, Sub, Int8x16>), 2)                                      \
+-  V(subSaturate, (BinaryFunc<Int8x16, SubSaturate, Int8x16>), 2)                      \
+-  V(shiftLeftByScalar, (BinaryScalar<Int8x16, ShiftLeft>), 2)                         \
+-  V(shiftRightByScalar, (BinaryScalar<Int8x16, ShiftRightArithmetic>), 2)             \
+-  V(xor, (BinaryFunc<Int8x16, Xor, Int8x16>), 2)
+-
+-#define INT8X16_TERNARY_FUNCTION_LIST(V)                                              \
+-  V(replaceLane, (ReplaceLane<Int8x16>), 3)                                           \
+-  V(select, (Select<Int8x16, Bool8x16>), 3)                                           \
+-  V(store, (Store<Int8x16, 16>), 3)
+-
+-#define INT8X16_SHUFFLE_FUNCTION_LIST(V)                                              \
+-  V(swizzle, Swizzle<Int8x16>, 17)                                                    \
+-  V(shuffle, Shuffle<Int8x16>, 18)
+-
+-#define INT8X16_FUNCTION_LIST(V)                                                      \
+-  INT8X16_UNARY_FUNCTION_LIST(V)                                                      \
+-  INT8X16_BINARY_FUNCTION_LIST(V)                                                     \
+-  INT8X16_TERNARY_FUNCTION_LIST(V)                                                    \
+-  INT8X16_SHUFFLE_FUNCTION_LIST(V)
+-
+-// Uint8x16.
+-#define UINT8X16_UNARY_FUNCTION_LIST(V)                                               \
+-  V(check, (UnaryFunc<Uint8x16, Identity, Uint8x16>), 1)                              \
+-  V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Uint8x16>), 1)                     \
+-  V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Uint8x16>), 1)                     \
+-  V(fromInt8x16Bits,   (FuncConvertBits<Int8x16,   Uint8x16>), 1)                     \
+-  V(fromInt16x8Bits,   (FuncConvertBits<Int16x8,   Uint8x16>), 1)                     \
+-  V(fromInt32x4Bits,   (FuncConvertBits<Int32x4,   Uint8x16>), 1)                     \
+-  V(fromUint16x8Bits,  (FuncConvertBits<Uint16x8,  Uint8x16>), 1)                     \
+-  V(fromUint32x4Bits,  (FuncConvertBits<Uint32x4,  Uint8x16>), 1)                     \
+-  V(neg, (UnaryFunc<Uint8x16, Neg, Uint8x16>), 1)                                     \
+-  V(not, (UnaryFunc<Uint8x16, Not, Uint8x16>), 1)                                     \
+-  V(splat, (FuncSplat<Uint8x16>), 1)
+-
+-#define UINT8X16_BINARY_FUNCTION_LIST(V)                                              \
+-  V(add, (BinaryFunc<Uint8x16, Add, Uint8x16>), 2)                                    \
+-  V(addSaturate, (BinaryFunc<Uint8x16, AddSaturate, Uint8x16>), 2)                    \
+-  V(and, (BinaryFunc<Uint8x16, And, Uint8x16>), 2)                                    \
+-  V(equal, (CompareFunc<Uint8x16, Equal, Bool8x16>), 2)                               \
+-  V(extractLane, (ExtractLane<Uint8x16>), 2)                                          \
+-  V(greaterThan, (CompareFunc<Uint8x16, GreaterThan, Bool8x16>), 2)                   \
+-  V(greaterThanOrEqual, (CompareFunc<Uint8x16, GreaterThanOrEqual, Bool8x16>), 2)     \
+-  V(lessThan, (CompareFunc<Uint8x16, LessThan, Bool8x16>), 2)                         \
+-  V(lessThanOrEqual, (CompareFunc<Uint8x16, LessThanOrEqual, Bool8x16>), 2)           \
+-  V(load, (Load<Uint8x16, 16>), 2)                                                    \
+-  V(mul, (BinaryFunc<Uint8x16, Mul, Uint8x16>), 2)                                    \
+-  V(notEqual, (CompareFunc<Uint8x16, NotEqual, Bool8x16>), 2)                         \
+-  V(or, (BinaryFunc<Uint8x16, Or, Uint8x16>), 2)                                      \
+-  V(sub, (BinaryFunc<Uint8x16, Sub, Uint8x16>), 2)                                    \
+-  V(subSaturate, (BinaryFunc<Uint8x16, SubSaturate, Uint8x16>), 2)                    \
+-  V(shiftLeftByScalar, (BinaryScalar<Uint8x16, ShiftLeft>), 2)                        \
+-  V(shiftRightByScalar, (BinaryScalar<Uint8x16, ShiftRightLogical>), 2)               \
+-  V(xor, (BinaryFunc<Uint8x16, Xor, Uint8x16>), 2)
+-
+-#define UINT8X16_TERNARY_FUNCTION_LIST(V)                                             \
+-  V(replaceLane, (ReplaceLane<Uint8x16>), 3)                                          \
+-  V(select, (Select<Uint8x16, Bool8x16>), 3)                                          \
+-  V(store, (Store<Uint8x16, 16>), 3)
+-
+-#define UINT8X16_SHUFFLE_FUNCTION_LIST(V)                                             \
+-  V(swizzle, Swizzle<Uint8x16>, 17)                                                   \
+-  V(shuffle, Shuffle<Uint8x16>, 18)
+-
+-#define UINT8X16_FUNCTION_LIST(V)                                                     \
+-  UINT8X16_UNARY_FUNCTION_LIST(V)                                                     \
+-  UINT8X16_BINARY_FUNCTION_LIST(V)                                                    \
+-  UINT8X16_TERNARY_FUNCTION_LIST(V)                                                   \
+-  UINT8X16_SHUFFLE_FUNCTION_LIST(V)
+-
+-// Int16x8.
+-#define INT16X8_UNARY_FUNCTION_LIST(V)                                                \
+-  V(check, (UnaryFunc<Int16x8, Identity, Int16x8>), 1)                                \
+-  V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Int16x8>), 1)                      \
+-  V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Int16x8>), 1)                      \
+-  V(fromInt8x16Bits,   (FuncConvertBits<Int8x16,   Int16x8>), 1)                      \
+-  V(fromInt32x4Bits,   (FuncConvertBits<Int32x4,   Int16x8>), 1)                      \
+-  V(fromUint8x16Bits,  (FuncConvertBits<Uint8x16,  Int16x8>), 1)                      \
+-  V(fromUint16x8Bits,  (FuncConvertBits<Uint16x8,  Int16x8>), 1)                      \
+-  V(fromUint32x4Bits,  (FuncConvertBits<Uint32x4,  Int16x8>), 1)                      \
+-  V(neg, (UnaryFunc<Int16x8, Neg, Int16x8>), 1)                                       \
+-  V(not, (UnaryFunc<Int16x8, Not, Int16x8>), 1)                                       \
+-  V(splat, (FuncSplat<Int16x8>), 1)
+-
+-#define INT16X8_BINARY_FUNCTION_LIST(V)                                               \
+-  V(add, (BinaryFunc<Int16x8, Add, Int16x8>), 2)                                      \
+-  V(addSaturate, (BinaryFunc<Int16x8, AddSaturate, Int16x8>), 2)                      \
+-  V(and, (BinaryFunc<Int16x8, And, Int16x8>), 2)                                      \
+-  V(equal, (CompareFunc<Int16x8, Equal, Bool16x8>), 2)                                \
+-  V(extractLane, (ExtractLane<Int16x8>), 2)                                           \
+-  V(greaterThan, (CompareFunc<Int16x8, GreaterThan, Bool16x8>), 2)                    \
+-  V(greaterThanOrEqual, (CompareFunc<Int16x8, GreaterThanOrEqual, Bool16x8>), 2)      \
+-  V(lessThan, (CompareFunc<Int16x8, LessThan, Bool16x8>), 2)                          \
+-  V(lessThanOrEqual, (CompareFunc<Int16x8, LessThanOrEqual, Bool16x8>), 2)            \
+-  V(load, (Load<Int16x8, 8>), 2)                                                      \
+-  V(mul, (BinaryFunc<Int16x8, Mul, Int16x8>), 2)                                      \
+-  V(notEqual, (CompareFunc<Int16x8, NotEqual, Bool16x8>), 2)                          \
+-  V(or, (BinaryFunc<Int16x8, Or, Int16x8>), 2)                                        \
+-  V(sub, (BinaryFunc<Int16x8, Sub, Int16x8>), 2)                                      \
+-  V(subSaturate, (BinaryFunc<Int16x8, SubSaturate, Int16x8>), 2)                      \
+-  V(shiftLeftByScalar, (BinaryScalar<Int16x8, ShiftLeft>), 2)                         \
+-  V(shiftRightByScalar, (BinaryScalar<Int16x8, ShiftRightArithmetic>), 2)             \
+-  V(xor, (BinaryFunc<Int16x8, Xor, Int16x8>), 2)
+-
+-#define INT16X8_TERNARY_FUNCTION_LIST(V)                                              \
+-  V(replaceLane, (ReplaceLane<Int16x8>), 3)                                           \
+-  V(select, (Select<Int16x8, Bool16x8>), 3)                                           \
+-  V(store, (Store<Int16x8, 8>), 3)
+-
+-#define INT16X8_SHUFFLE_FUNCTION_LIST(V)                                              \
+-  V(swizzle, Swizzle<Int16x8>, 9)                                                     \
+-  V(shuffle, Shuffle<Int16x8>, 10)
+-
+-#define INT16X8_FUNCTION_LIST(V)                                                      \
+-  INT16X8_UNARY_FUNCTION_LIST(V)                                                      \
+-  INT16X8_BINARY_FUNCTION_LIST(V)                                                     \
+-  INT16X8_TERNARY_FUNCTION_LIST(V)                                                    \
+-  INT16X8_SHUFFLE_FUNCTION_LIST(V)
+-
+-// Uint16x8.
+-#define UINT16X8_UNARY_FUNCTION_LIST(V)                                               \
+-  V(check, (UnaryFunc<Uint16x8, Identity, Uint16x8>), 1)                              \
+-  V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Uint16x8>), 1)                     \
+-  V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Uint16x8>), 1)                     \
+-  V(fromInt8x16Bits,   (FuncConvertBits<Int8x16,   Uint16x8>), 1)                     \
+-  V(fromInt16x8Bits,   (FuncConvertBits<Int16x8,   Uint16x8>), 1)                     \
+-  V(fromInt32x4Bits,   (FuncConvertBits<Int32x4,   Uint16x8>), 1)                     \
+-  V(fromUint8x16Bits,  (FuncConvertBits<Uint8x16,  Uint16x8>), 1)                     \
+-  V(fromUint32x4Bits,  (FuncConvertBits<Uint32x4,  Uint16x8>), 1)                     \
+-  V(neg, (UnaryFunc<Uint16x8, Neg, Uint16x8>), 1)                                     \
+-  V(not, (UnaryFunc<Uint16x8, Not, Uint16x8>), 1)                                     \
+-  V(splat, (FuncSplat<Uint16x8>), 1)
+-
+-#define UINT16X8_BINARY_FUNCTION_LIST(V)                                              \
+-  V(add, (BinaryFunc<Uint16x8, Add, Uint16x8>), 2)                                    \
+-  V(addSaturate, (BinaryFunc<Uint16x8, AddSaturate, Uint16x8>), 2)                    \
+-  V(and, (BinaryFunc<Uint16x8, And, Uint16x8>), 2)                                    \
+-  V(equal, (CompareFunc<Uint16x8, Equal, Bool16x8>), 2)                               \
+-  V(extractLane, (ExtractLane<Uint16x8>), 2)                                          \
+-  V(greaterThan, (CompareFunc<Uint16x8, GreaterThan, Bool16x8>), 2)                   \
+-  V(greaterThanOrEqual, (CompareFunc<Uint16x8, GreaterThanOrEqual, Bool16x8>), 2)     \
+-  V(lessThan, (CompareFunc<Uint16x8, LessThan, Bool16x8>), 2)                         \
+-  V(lessThanOrEqual, (CompareFunc<Uint16x8, LessThanOrEqual, Bool16x8>), 2)           \
+-  V(load, (Load<Uint16x8, 8>), 2)                                                     \
+-  V(mul, (BinaryFunc<Uint16x8, Mul, Uint16x8>), 2)                                    \
+-  V(notEqual, (CompareFunc<Uint16x8, NotEqual, Bool16x8>), 2)                         \
+-  V(or, (BinaryFunc<Uint16x8, Or, Uint16x8>), 2)                                      \
+-  V(sub, (BinaryFunc<Uint16x8, Sub, Uint16x8>), 2)                                    \
+-  V(subSaturate, (BinaryFunc<Uint16x8, SubSaturate, Uint16x8>), 2)                    \
+-  V(shiftLeftByScalar, (BinaryScalar<Uint16x8, ShiftLeft>), 2)                        \
+-  V(shiftRightByScalar, (BinaryScalar<Uint16x8, ShiftRightLogical>), 2)               \
+-  V(xor, (BinaryFunc<Uint16x8, Xor, Uint16x8>), 2)
+-
+-#define UINT16X8_TERNARY_FUNCTION_LIST(V)                                             \
+-  V(replaceLane, (ReplaceLane<Uint16x8>), 3)                                          \
+-  V(select, (Select<Uint16x8, Bool16x8>), 3)                                          \
+-  V(store, (Store<Uint16x8, 8>), 3)
+-
+-#define UINT16X8_SHUFFLE_FUNCTION_LIST(V)                                             \
+-  V(swizzle, Swizzle<Uint16x8>, 9)                                                    \
+-  V(shuffle, Shuffle<Uint16x8>, 10)
+-
+-#define UINT16X8_FUNCTION_LIST(V)                                                     \
+-  UINT16X8_UNARY_FUNCTION_LIST(V)                                                     \
+-  UINT16X8_BINARY_FUNCTION_LIST(V)                                                    \
+-  UINT16X8_TERNARY_FUNCTION_LIST(V)                                                   \
+-  UINT16X8_SHUFFLE_FUNCTION_LIST(V)
+-
+-// Int32x4.
+-#define INT32X4_UNARY_FUNCTION_LIST(V)                                                \
+-  V(check, (UnaryFunc<Int32x4, Identity, Int32x4>), 1)                                \
+-  V(fromFloat32x4,     (FuncConvert<Float32x4,     Int32x4>), 1)                      \
+-  V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Int32x4>), 1)                      \
+-  V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Int32x4>), 1)                      \
+-  V(fromInt8x16Bits,   (FuncConvertBits<Int8x16,   Int32x4>), 1)                      \
+-  V(fromInt16x8Bits,   (FuncConvertBits<Int16x8,   Int32x4>), 1)                      \
+-  V(fromUint8x16Bits,  (FuncConvertBits<Uint8x16,  Int32x4>), 1)                      \
+-  V(fromUint16x8Bits,  (FuncConvertBits<Uint16x8,  Int32x4>), 1)                      \
+-  V(fromUint32x4Bits,  (FuncConvertBits<Uint32x4,  Int32x4>), 1)                      \
+-  V(neg, (UnaryFunc<Int32x4, Neg, Int32x4>), 1)                                       \
+-  V(not, (UnaryFunc<Int32x4, Not, Int32x4>), 1)                                       \
+-  V(splat, (FuncSplat<Int32x4>), 1)
+-
+-#define INT32X4_BINARY_FUNCTION_LIST(V)                                               \
+-  V(add, (BinaryFunc<Int32x4, Add, Int32x4>), 2)                                      \
+-  V(and, (BinaryFunc<Int32x4, And, Int32x4>), 2)                                      \
+-  V(equal, (CompareFunc<Int32x4, Equal, Bool32x4>), 2)                                \
+-  V(extractLane, (ExtractLane<Int32x4>), 2)                                           \
+-  V(greaterThan, (CompareFunc<Int32x4, GreaterThan, Bool32x4>), 2)                    \
+-  V(greaterThanOrEqual, (CompareFunc<Int32x4, GreaterThanOrEqual, Bool32x4>), 2)      \
+-  V(lessThan, (CompareFunc<Int32x4, LessThan, Bool32x4>), 2)                          \
+-  V(lessThanOrEqual, (CompareFunc<Int32x4, LessThanOrEqual, Bool32x4>), 2)            \
+-  V(load,  (Load<Int32x4, 4>), 2)                                                     \
+-  V(load3, (Load<Int32x4, 3>), 2)                                                     \
+-  V(load2, (Load<Int32x4, 2>), 2)                                                     \
+-  V(load1, (Load<Int32x4, 1>), 2)                                                     \
+-  V(mul, (BinaryFunc<Int32x4, Mul, Int32x4>), 2)                                      \
+-  V(notEqual, (CompareFunc<Int32x4, NotEqual, Bool32x4>), 2)                          \
+-  V(or, (BinaryFunc<Int32x4, Or, Int32x4>), 2)                                        \
+-  V(sub, (BinaryFunc<Int32x4, Sub, Int32x4>), 2)                                      \
+-  V(shiftLeftByScalar, (BinaryScalar<Int32x4, ShiftLeft>), 2)                         \
+-  V(shiftRightByScalar, (BinaryScalar<Int32x4, ShiftRightArithmetic>), 2)             \
+-  V(xor, (BinaryFunc<Int32x4, Xor, Int32x4>), 2)
+-
+-#define INT32X4_TERNARY_FUNCTION_LIST(V)                                              \
+-  V(replaceLane, (ReplaceLane<Int32x4>), 3)                                           \
+-  V(select, (Select<Int32x4, Bool32x4>), 3)                                           \
+-  V(store,  (Store<Int32x4, 4>), 3)                                                   \
+-  V(store3, (Store<Int32x4, 3>), 3)                                                   \
+-  V(store2, (Store<Int32x4, 2>), 3)                                                   \
+-  V(store1, (Store<Int32x4, 1>), 3)
+-
+-#define INT32X4_SHUFFLE_FUNCTION_LIST(V)                                              \
+-  V(swizzle, Swizzle<Int32x4>, 5)                                                     \
+-  V(shuffle, Shuffle<Int32x4>, 6)
+-
+-#define INT32X4_FUNCTION_LIST(V)                                                      \
+-  INT32X4_UNARY_FUNCTION_LIST(V)                                                      \
+-  INT32X4_BINARY_FUNCTION_LIST(V)                                                     \
+-  INT32X4_TERNARY_FUNCTION_LIST(V)                                                    \
+-  INT32X4_SHUFFLE_FUNCTION_LIST(V)
+-
+-// Uint32x4.
+-#define UINT32X4_UNARY_FUNCTION_LIST(V)                                               \
+-  V(check, (UnaryFunc<Uint32x4, Identity, Uint32x4>), 1)                              \
+-  V(fromFloat32x4,     (FuncConvert<Float32x4,     Uint32x4>), 1)                     \
+-  V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Uint32x4>), 1)                     \
+-  V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Uint32x4>), 1)                     \
+-  V(fromInt8x16Bits,   (FuncConvertBits<Int8x16,   Uint32x4>), 1)                     \
+-  V(fromInt16x8Bits,   (FuncConvertBits<Int16x8,   Uint32x4>), 1)                     \
+-  V(fromInt32x4Bits,   (FuncConvertBits<Int32x4,   Uint32x4>), 1)                     \
+-  V(fromUint8x16Bits,  (FuncConvertBits<Uint8x16,  Uint32x4>), 1)                     \
+-  V(fromUint16x8Bits,  (FuncConvertBits<Uint16x8,  Uint32x4>), 1)                     \
+-  V(neg, (UnaryFunc<Uint32x4, Neg, Uint32x4>), 1)                                     \
+-  V(not, (UnaryFunc<Uint32x4, Not, Uint32x4>), 1)                                     \
+-  V(splat, (FuncSplat<Uint32x4>), 1)
+-
+-#define UINT32X4_BINARY_FUNCTION_LIST(V)                                              \
+-  V(add, (BinaryFunc<Uint32x4, Add, Uint32x4>), 2)                                    \
+-  V(and, (BinaryFunc<Uint32x4, And, Uint32x4>), 2)                                    \
+-  V(equal, (CompareFunc<Uint32x4, Equal, Bool32x4>), 2)                               \
+-  V(extractLane, (ExtractLane<Uint32x4>), 2)                                          \
+-  V(greaterThan, (CompareFunc<Uint32x4, GreaterThan, Bool32x4>), 2)                   \
+-  V(greaterThanOrEqual, (CompareFunc<Uint32x4, GreaterThanOrEqual, Bool32x4>), 2)     \
+-  V(lessThan, (CompareFunc<Uint32x4, LessThan, Bool32x4>), 2)                         \
+-  V(lessThanOrEqual, (CompareFunc<Uint32x4, LessThanOrEqual, Bool32x4>), 2)           \
+-  V(load,  (Load<Uint32x4, 4>), 2)                                                    \
+-  V(load3, (Load<Uint32x4, 3>), 2)                                                    \
+-  V(load2, (Load<Uint32x4, 2>), 2)                                                    \
+-  V(load1, (Load<Uint32x4, 1>), 2)                                                    \
+-  V(mul, (BinaryFunc<Uint32x4, Mul, Uint32x4>), 2)                                    \
+-  V(notEqual, (CompareFunc<Uint32x4, NotEqual, Bool32x4>), 2)                         \
+-  V(or, (BinaryFunc<Uint32x4, Or, Uint32x4>), 2)                                      \
+-  V(sub, (BinaryFunc<Uint32x4, Sub, Uint32x4>), 2)                                    \
+-  V(shiftLeftByScalar, (BinaryScalar<Uint32x4, ShiftLeft>), 2)                        \
+-  V(shiftRightByScalar, (BinaryScalar<Uint32x4, ShiftRightLogical>), 2)               \
+-  V(xor, (BinaryFunc<Uint32x4, Xor, Uint32x4>), 2)
+-
+-#define UINT32X4_TERNARY_FUNCTION_LIST(V)                                             \
+-  V(replaceLane, (ReplaceLane<Uint32x4>), 3)                                          \
+-  V(select, (Select<Uint32x4, Bool32x4>), 3)                                          \
+-  V(store,  (Store<Uint32x4, 4>), 3)                                                  \
+-  V(store3, (Store<Uint32x4, 3>), 3)                                                  \
+-  V(store2, (Store<Uint32x4, 2>), 3)                                                  \
+-  V(store1, (Store<Uint32x4, 1>), 3)
+-
+-#define UINT32X4_SHUFFLE_FUNCTION_LIST(V)                                             \
+-  V(swizzle, Swizzle<Uint32x4>, 5)                                                    \
+-  V(shuffle, Shuffle<Uint32x4>, 6)
+-
+-#define UINT32X4_FUNCTION_LIST(V)                                                     \
+-  UINT32X4_UNARY_FUNCTION_LIST(V)                                                     \
+-  UINT32X4_BINARY_FUNCTION_LIST(V)                                                    \
+-  UINT32X4_TERNARY_FUNCTION_LIST(V)                                                   \
+-  UINT32X4_SHUFFLE_FUNCTION_LIST(V)
+-
+-/*
+- * The FOREACH macros below partition all of the SIMD operations into disjoint
+- * sets.
+- */
+-
+-// Operations available on all SIMD types. Mixed arity.
+-#define FOREACH_COMMON_SIMD_OP(_)     \
+-    _(extractLane)                    \
+-    _(replaceLane)                    \
+-    _(check)                          \
+-    _(splat)
+-
+-// Lane-wise operations available on numeric SIMD types.
+-// Include lane-wise select here since it is not arithmetic and is defined on
+-// numeric types too.
+-#define FOREACH_LANE_SIMD_OP(_)       \
+-    _(select)                         \
+-    _(swizzle)                        \
+-    _(shuffle)
+-
+-// Memory operations available on numeric SIMD types.
+-#define FOREACH_MEMORY_SIMD_OP(_)     \
+-    _(load)                           \
+-    _(store)
+-
+-// Memory operations available on numeric X4 SIMD types.
+-#define FOREACH_MEMORY_X4_SIMD_OP(_)  \
+-    _(load1)                          \
+-    _(load2)                          \
+-    _(load3)                          \
+-    _(store1)                         \
+-    _(store2)                         \
+-    _(store3)
+-
+-// Unary operations on Bool vectors.
+-#define FOREACH_BOOL_SIMD_UNOP(_)     \
+-    _(allTrue)                        \
+-    _(anyTrue)
+-
+-// Unary bitwise SIMD operators defined on all integer and boolean SIMD types.
+-#define FOREACH_BITWISE_SIMD_UNOP(_)  \
+-    _(not)
+-
+-// Binary bitwise SIMD operators defined on all integer and boolean SIMD types.
+-#define FOREACH_BITWISE_SIMD_BINOP(_) \
+-    _(and)                            \
+-    _(or)                             \
+-    _(xor)
+-
+-// Bitwise shifts defined on integer SIMD types.
+-#define FOREACH_SHIFT_SIMD_OP(_)      \
+-    _(shiftLeftByScalar)              \
+-    _(shiftRightByScalar)
+-
+-// Unary arithmetic operators defined on numeric SIMD types.
+-#define FOREACH_NUMERIC_SIMD_UNOP(_)  \
+-    _(neg)
+-
+-// Binary arithmetic operators defined on numeric SIMD types.
+-#define FOREACH_NUMERIC_SIMD_BINOP(_) \
+-    _(add)                            \
+-    _(sub)                            \
+-    _(mul)
+-
+-// Unary arithmetic operators defined on floating point SIMD types.
+-#define FOREACH_FLOAT_SIMD_UNOP(_)    \
+-    _(abs)                            \
+-    _(sqrt)                           \
+-    _(reciprocalApproximation)        \
+-    _(reciprocalSqrtApproximation)
+-
+-// Binary arithmetic operators defined on floating point SIMD types.
+-#define FOREACH_FLOAT_SIMD_BINOP(_)   \
+-    _(div)                            \
+-    _(max)                            \
+-    _(min)                            \
+-    _(maxNum)                         \
+-    _(minNum)
+-
+-// Binary operations on small integer (< 32 bits) vectors.
+-#define FOREACH_SMINT_SIMD_BINOP(_)   \
+-    _(addSaturate)                    \
+-    _(subSaturate)
+-
+-// Comparison operators defined on numeric SIMD types.
+-#define FOREACH_COMP_SIMD_OP(_)       \
+-    _(lessThan)                       \
+-    _(lessThanOrEqual)                \
+-    _(equal)                          \
+-    _(notEqual)                       \
+-    _(greaterThan)                    \
+-    _(greaterThanOrEqual)
+-
+-/*
+- * All SIMD operations, excluding casts.
+- */
+-#define FORALL_SIMD_NONCAST_OP(_)     \
+-    FOREACH_COMMON_SIMD_OP(_)         \
+-    FOREACH_LANE_SIMD_OP(_)           \
+-    FOREACH_MEMORY_SIMD_OP(_)         \
+-    FOREACH_MEMORY_X4_SIMD_OP(_)      \
+-    FOREACH_BOOL_SIMD_UNOP(_)         \
+-    FOREACH_BITWISE_SIMD_UNOP(_)      \
+-    FOREACH_BITWISE_SIMD_BINOP(_)     \
+-    FOREACH_SHIFT_SIMD_OP(_)          \
+-    FOREACH_NUMERIC_SIMD_UNOP(_)      \
+-    FOREACH_NUMERIC_SIMD_BINOP(_)     \
+-    FOREACH_FLOAT_SIMD_UNOP(_)        \
+-    FOREACH_FLOAT_SIMD_BINOP(_)       \
+-    FOREACH_SMINT_SIMD_BINOP(_)       \
+-    FOREACH_COMP_SIMD_OP(_)
+-
+-/*
+- * All operations on integer SIMD types, excluding casts and
+- * FOREACH_MEMORY_X4_SIMD_OP.
+- */
+-#define FORALL_INT_SIMD_OP(_)         \
+-    FOREACH_COMMON_SIMD_OP(_)         \
+-    FOREACH_LANE_SIMD_OP(_)           \
+-    FOREACH_MEMORY_SIMD_OP(_)         \
+-    FOREACH_BITWISE_SIMD_UNOP(_)      \
+-    FOREACH_BITWISE_SIMD_BINOP(_)     \
+-    FOREACH_SHIFT_SIMD_OP(_)          \
+-    FOREACH_NUMERIC_SIMD_UNOP(_)      \
+-    FOREACH_NUMERIC_SIMD_BINOP(_)     \
+-    FOREACH_COMP_SIMD_OP(_)
+-
+-/*
+- * All operations on floating point SIMD types, excluding casts and
+- * FOREACH_MEMORY_X4_SIMD_OP.
+- */
+-#define FORALL_FLOAT_SIMD_OP(_)       \
+-    FOREACH_COMMON_SIMD_OP(_)         \
+-    FOREACH_LANE_SIMD_OP(_)           \
+-    FOREACH_MEMORY_SIMD_OP(_)         \
+-    FOREACH_NUMERIC_SIMD_UNOP(_)      \
+-    FOREACH_NUMERIC_SIMD_BINOP(_)     \
+-    FOREACH_FLOAT_SIMD_UNOP(_)        \
+-    FOREACH_FLOAT_SIMD_BINOP(_)       \
+-    FOREACH_COMP_SIMD_OP(_)
+-
+-/*
+- * All operations on Bool SIMD types.
+- *
+- * These types don't have casts, so no need to specialize.
+- */
+-#define FORALL_BOOL_SIMD_OP(_)        \
+-    FOREACH_COMMON_SIMD_OP(_)         \
+-    FOREACH_BOOL_SIMD_UNOP(_)         \
+-    FOREACH_BITWISE_SIMD_UNOP(_)      \
+-    FOREACH_BITWISE_SIMD_BINOP(_)
+-
+-/*
+- * The sets of cast operations are listed per type below.
+- *
+- * These sets are not disjoint.
+- */
+-
+-#define FOREACH_INT8X16_SIMD_CAST(_)  \
+-    _(fromFloat32x4Bits)              \
+-    _(fromFloat64x2Bits)              \
+-    _(fromInt16x8Bits)                \
+-    _(fromInt32x4Bits)
+-
+-#define FOREACH_INT16X8_SIMD_CAST(_)  \
+-    _(fromFloat32x4Bits)              \
+-    _(fromFloat64x2Bits)              \
+-    _(fromInt8x16Bits)                \
+-    _(fromInt32x4Bits)
+-
+-#define FOREACH_INT32X4_SIMD_CAST(_)  \
+-    _(fromFloat32x4)                  \
+-    _(fromFloat32x4Bits)              \
+-    _(fromFloat64x2Bits)              \
+-    _(fromInt8x16Bits)                \
+-    _(fromInt16x8Bits)
+-
+-#define FOREACH_FLOAT32X4_SIMD_CAST(_)\
+-    _(fromFloat64x2Bits)              \
+-    _(fromInt8x16Bits)                \
+-    _(fromInt16x8Bits)                \
+-    _(fromInt32x4)                    \
+-    _(fromInt32x4Bits)
+-
+-#define FOREACH_FLOAT64X2_SIMD_CAST(_)\
+-    _(fromFloat32x4Bits)              \
+-    _(fromInt8x16Bits)                \
+-    _(fromInt16x8Bits)                \
+-    _(fromInt32x4Bits)
+-
+-// All operations on Int32x4.
+-#define FORALL_INT32X4_SIMD_OP(_)     \
+-    FORALL_INT_SIMD_OP(_)             \
+-    FOREACH_MEMORY_X4_SIMD_OP(_)      \
+-    FOREACH_INT32X4_SIMD_CAST(_)
+-
+-// All operations on Float32X4
+-#define FORALL_FLOAT32X4_SIMD_OP(_)   \
+-    FORALL_FLOAT_SIMD_OP(_)           \
+-    FOREACH_MEMORY_X4_SIMD_OP(_)      \
+-    FOREACH_FLOAT32X4_SIMD_CAST(_)
+-
+-/*
+- * All SIMD operations assuming only 32x4 types exist.
+- * This is used in the current asm.js impl.
+- */
+-#define FORALL_SIMD_ASMJS_OP(_)       \
+-    FORALL_SIMD_NONCAST_OP(_)         \
+-    _(fromFloat32x4)                  \
+-    _(fromFloat32x4Bits)              \
+-    _(fromInt8x16Bits)                \
+-    _(fromInt16x8Bits)                \
+-    _(fromInt32x4)                    \
+-    _(fromInt32x4Bits)                \
+-    _(fromUint8x16Bits)               \
+-    _(fromUint16x8Bits)               \
+-    _(fromUint32x4)                   \
+-    _(fromUint32x4Bits)
+-
+-// All operations on Int8x16 or Uint8x16 in the asm.js world.
+-// Note: this does not include conversions and casts to/from Uint8x16 because
+-// this list is shared between Int8x16 and Uint8x16.
+-#define FORALL_INT8X16_ASMJS_OP(_)    \
+-    FORALL_INT_SIMD_OP(_)             \
+-    FOREACH_SMINT_SIMD_BINOP(_)       \
+-    _(fromInt16x8Bits)                \
+-    _(fromInt32x4Bits)                \
+-    _(fromFloat32x4Bits)
+-
+-// All operations on Int16x8 or Uint16x8 in the asm.js world.
+-// Note: this does not include conversions and casts to/from Uint16x8 because
+-// this list is shared between Int16x8 and Uint16x8.
+-#define FORALL_INT16X8_ASMJS_OP(_)    \
+-    FORALL_INT_SIMD_OP(_)             \
+-    FOREACH_SMINT_SIMD_BINOP(_)       \
+-    _(fromInt8x16Bits)                \
+-    _(fromInt32x4Bits)                \
+-    _(fromFloat32x4Bits)
+-
+-// All operations on Int32x4 or Uint32x4 in the asm.js world.
+-// Note: this does not include conversions and casts to/from Uint32x4 because
+-// this list is shared between Int32x4 and Uint32x4.
+-#define FORALL_INT32X4_ASMJS_OP(_)    \
+-    FORALL_INT_SIMD_OP(_)             \
+-    FOREACH_MEMORY_X4_SIMD_OP(_)      \
+-    _(fromInt8x16Bits)                \
+-    _(fromInt16x8Bits)                \
+-    _(fromFloat32x4)                  \
+-    _(fromFloat32x4Bits)
+-
+-// All operations on Float32X4 in the asm.js world.
+-#define FORALL_FLOAT32X4_ASMJS_OP(_)  \
+-    FORALL_FLOAT_SIMD_OP(_)           \
+-    FOREACH_MEMORY_X4_SIMD_OP(_)      \
+-    _(fromInt8x16Bits)                \
+-    _(fromInt16x8Bits)                \
+-    _(fromInt32x4Bits)                \
+-    _(fromInt32x4)                    \
+-    _(fromUint32x4)
+-
+-namespace js {
+-
+-// Complete set of SIMD types.
+-// It must be kept in sync with the enumeration of values in
+-// TypedObjectConstants.h; in particular we need to ensure that Count is
+-// appropriately set with respect to the number of actual types.
+-enum class SimdType {
+-    Int8x16   = JS_SIMDTYPEREPR_INT8X16,
+-    Int16x8   = JS_SIMDTYPEREPR_INT16X8,
+-    Int32x4   = JS_SIMDTYPEREPR_INT32X4,
+-    Uint8x16  = JS_SIMDTYPEREPR_UINT8X16,
+-    Uint16x8  = JS_SIMDTYPEREPR_UINT16X8,
+-    Uint32x4  = JS_SIMDTYPEREPR_UINT32X4,
+-    Float32x4 = JS_SIMDTYPEREPR_FLOAT32X4,
+-    Float64x2 = JS_SIMDTYPEREPR_FLOAT64X2,
+-    Bool8x16  = JS_SIMDTYPEREPR_BOOL8X16,
+-    Bool16x8  = JS_SIMDTYPEREPR_BOOL16X8,
+-    Bool32x4  = JS_SIMDTYPEREPR_BOOL32X4,
+-    Bool64x2  = JS_SIMDTYPEREPR_BOOL64X2,
+-    Count
+-};
+-
+-// The integer SIMD types have a lot of operations that do the exact same thing
+-// for signed and unsigned integer types. Sometimes it is simpler to treat
+-// signed and unsigned integer SIMD types as the same type, using a SimdSign to
+-// distinguish the few cases where there is a difference.
+-enum class SimdSign {
+-    // Signedness is not applicable to this type. (i.e., Float or Bool).
+-    NotApplicable,
+-    // Treat as an unsigned integer with a range 0 .. 2^N-1.
+-    Unsigned,
+-    // Treat as a signed integer in two's complement encoding.
+-    Signed,
+-};
+-
+-// Get the signedness of a SIMD type.
+-inline SimdSign
+-GetSimdSign(SimdType t)
+-{
+-    switch(t) {
+-      case SimdType::Int8x16:
+-      case SimdType::Int16x8:
+-      case SimdType::Int32x4:
+-        return SimdSign::Signed;
+-
+-      case SimdType::Uint8x16:
+-      case SimdType::Uint16x8:
+-      case SimdType::Uint32x4:
+-        return SimdSign::Unsigned;
+-
+-      default:
+-        return SimdSign::NotApplicable;
+-    }
+-}
+-
+-inline bool
+-IsSignedIntSimdType(SimdType type)
+-{
+-    return GetSimdSign(type) == SimdSign::Signed;
+-}
+-
+-// Get the boolean SIMD type with the same shape as t.
+-//
+-// This is the result type of a comparison operation, and it can also be used to
+-// identify the geometry of a SIMD type.
+-inline SimdType
+-GetBooleanSimdType(SimdType t)
+-{
+-    switch(t) {
+-      case SimdType::Int8x16:
+-      case SimdType::Uint8x16:
+-      case SimdType::Bool8x16:
+-        return SimdType::Bool8x16;
+-
+-      case SimdType::Int16x8:
+-      case SimdType::Uint16x8:
+-      case SimdType::Bool16x8:
+-        return SimdType::Bool16x8;
+-
+-      case SimdType::Int32x4:
+-      case SimdType::Uint32x4:
+-      case SimdType::Float32x4:
+-      case SimdType::Bool32x4:
+-        return SimdType::Bool32x4;
+-
+-      case SimdType::Float64x2:
+-      case SimdType::Bool64x2:
+-        return SimdType::Bool64x2;
+-
+-      case SimdType::Count:
+-        break;
+-    }
+-    MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad SIMD type");
+-}
+-
+-// Get the number of lanes in a SIMD type.
+-inline unsigned
+-GetSimdLanes(SimdType t)
+-{
+-    switch(t) {
+-      case SimdType::Int8x16:
+-      case SimdType::Uint8x16:
+-      case SimdType::Bool8x16:
+-        return 16;
+-
+-      case SimdType::Int16x8:
+-      case SimdType::Uint16x8:
+-      case SimdType::Bool16x8:
+-        return 8;
+-
+-      case SimdType::Int32x4:
+-      case SimdType::Uint32x4:
+-      case SimdType::Float32x4:
+-      case SimdType::Bool32x4:
+-        return 4;
+-
+-      case SimdType::Float64x2:
+-      case SimdType::Bool64x2:
+-        return 2;
+-
+-      case SimdType::Count:
+-        break;
+-    }
+-    MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad SIMD type");
+-}
+-
+-// Complete set of SIMD operations.
+-//
+-// No SIMD types implement all of these operations.
+-//
+-// C++ defines keywords and/or/xor/not, so prepend Fn_ to all named functions to
+-// avoid clashes.
+-//
+-// Note: because of a gcc < v4.8's compiler bug, uint8_t can't be used as the
+-// storage class here. See bug 1243810. See also
+-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64037 .
+-enum class SimdOperation {
+-    // The constructor call. No Fn_ prefix here.
+-    Constructor,
+-
+-    // All the operations, except for casts.
+-#define DEFOP(x) Fn_##x,
+-    FORALL_SIMD_NONCAST_OP(DEFOP)
+-#undef DEFOP
+-
+-    // Int <-> Float conversions.
+-    Fn_fromInt32x4,
+-    Fn_fromUint32x4,
+-    Fn_fromFloat32x4,
+-
+-    // Bitcasts. One for each type with a memory representation.
+-    Fn_fromInt8x16Bits,
+-    Fn_fromInt16x8Bits,
+-    Fn_fromInt32x4Bits,
+-    Fn_fromUint8x16Bits,
+-    Fn_fromUint16x8Bits,
+-    Fn_fromUint32x4Bits,
+-    Fn_fromFloat32x4Bits,
+-    Fn_fromFloat64x2Bits,
+-
+-    Last = Fn_fromFloat64x2Bits
+-};
+-
+-} // namespace js
+-
+-#endif /* builtin_SIMDConstants_h */
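The FOREACH_*/FORALL_* lists removed above are X-macros: each macro takes a per-entry macro `_` and applies it to every operation name, so a single list can be expanded into an enum (via `#define DEFOP(x) Fn_##x,`), a name table, dispatch code, and so on, without the copies drifting apart. A minimal standalone sketch of the pattern — the operation names here are invented for illustration:

    // One list of operations, expanded twice with different per-entry macros.
    #include <cstdio>

    #define FOREACH_DEMO_OP(_) \
        _(add)                 \
        _(sub)                 \
        _(mul)

    // Expansion 1: enum members, mirroring "#define DEFOP(x) Fn_##x,".
    enum class DemoOp {
    #define DEFOP(x) Fn_##x,
        FOREACH_DEMO_OP(DEFOP)
    #undef DEFOP
        Count
    };

    // Expansion 2: a matching name table generated from the same list.
    static const char* const kDemoOpNames[] = {
    #define OPNAME(x) #x,
        FOREACH_DEMO_OP(OPNAME)
    #undef OPNAME
    };

    int main() {
        for (int i = 0; i < int(DemoOp::Count); i++)
            std::printf("op %d = %s\n", i, kDemoOpNames[i]);
        return 0;
    }

Because both expansions come from FOREACH_DEMO_OP, adding or removing an operation in the list updates the enum and the name table together.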
+diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp
+--- a/js/src/builtin/TestingFunctions.cpp
++++ b/js/src/builtin/TestingFunctions.cpp
+@@ -3970,22 +3970,17 @@ ShellCloneAndExecuteScript(JSContext* cx
+     args.rval().setUndefined();
+     return true;
+ }
+ 
+ static bool
+ IsSimdAvailable(JSContext* cx, unsigned argc, Value* vp)
+ {
+     CallArgs args = CallArgsFromVp(argc, vp);
+-#if defined(JS_CODEGEN_NONE) || !defined(ENABLE_SIMD)
+-    bool available = false;
+-#else
+-    bool available = cx->jitSupportsSimd();
+-#endif
+-    args.rval().set(BooleanValue(available));
++    args.rval().set(BooleanValue(cx->jitSupportsSimd()));
+     return true;
+ }
+ 
+ static bool
+ ByteSize(JSContext* cx, unsigned argc, Value* vp)
+ {
+     CallArgs args = CallArgsFromVp(argc, vp);
+     mozilla::MallocSizeOf mallocSizeOf = cx->runtime()->debuggerMallocSizeOf;
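The IsSimdAvailable hunk above replaces a compile-time gate (`#if defined(JS_CODEGEN_NONE) || !defined(ENABLE_SIMD)`) with an unconditional runtime query. A small sketch of that before/after shape, using hypothetical names rather than the real JSContext API:

    #include <cstdio>

    struct FakeContext {
        bool jitSupportsSimd() const { return false; }  // stand-in for the CPU probe
    };

    static bool IsAvailableOld(const FakeContext& cx) {
    #if defined(ENABLE_SIMD_DEMO)
        return cx.jitSupportsSimd();   // runtime probe, but only when compiled in
    #else
        return false;                  // compile-time hard "no"
    #endif
    }

    static bool IsAvailableNew(const FakeContext& cx) {
        return cx.jitSupportsSimd();   // single runtime answer, no #ifdef
    }

    int main() {
        FakeContext cx;
        std::printf("old=%d new=%d\n", IsAvailableOld(cx), IsAvailableNew(cx));
        return 0;
    }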
+diff --git a/js/src/builtin/TypedObject.cpp b/js/src/builtin/TypedObject.cpp
+--- a/js/src/builtin/TypedObject.cpp
++++ b/js/src/builtin/TypedObject.cpp
+@@ -6,17 +6,16 @@
+ 
+ #include "builtin/TypedObject-inl.h"
+ 
+ #include "mozilla/Casting.h"
+ #include "mozilla/CheckedInt.h"
+ 
+ #include "jsutil.h"
+ 
+-#include "builtin/SIMDConstants.h"
+ #include "gc/Marking.h"
+ #include "js/Vector.h"
+ #include "util/StringBuffer.h"
+ #include "vm/GlobalObject.h"
+ #include "vm/JSCompartment.h"
+ #include "vm/JSFunction.h"
+ #include "vm/SelfHosting.h"
+ #include "vm/StringType.h"
+@@ -239,20 +238,16 @@ ScalarTypeDescr::alignment(Type t)
+ ScalarTypeDescr::typeName(Type type)
+ {
+     switch (type) {
+ #define NUMERIC_TYPE_TO_STRING(constant_, type_, name_) \
+         case constant_: return #name_;
+         JS_FOR_EACH_SCALAR_TYPE_REPR(NUMERIC_TYPE_TO_STRING)
+ #undef NUMERIC_TYPE_TO_STRING
+       case Scalar::Int64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+       case Scalar::MaxTypedArrayViewType:
+         break;
+     }
+     MOZ_CRASH("Invalid type");
+ }
+ 
+ bool
+ ScalarTypeDescr::call(JSContext* cx, unsigned argc, Value* vp)
+@@ -280,20 +275,16 @@ ScalarTypeDescr::call(JSContext* cx, uns
+           type_ converted = ConvertScalar<type_>(number);                     \
+           args.rval().setNumber((double) converted);                          \
+           return true;                                                        \
+       }
+ 
+         JS_FOR_EACH_SCALAR_TYPE_REPR(SCALARTYPE_CALL)
+ #undef SCALARTYPE_CALL
+       case Scalar::Int64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH();
+     }
+     return true;
+ }
+ 
+ /***************************************************************************
+  * Reference type objects
+@@ -397,60 +388,16 @@ js::ReferenceTypeDescr::call(JSContext* 
+         return true;
+       }
+     }
+ 
+     MOZ_CRASH("Unhandled Reference type");
+ }
+ 
+ /***************************************************************************
+- * SIMD type objects
+- *
+- * Note: these are partially defined in SIMD.cpp
+- */
+-
+-SimdType
+-SimdTypeDescr::type() const {
+-    uint32_t t = uint32_t(getReservedSlot(JS_DESCR_SLOT_TYPE).toInt32());
+-    MOZ_ASSERT(t < uint32_t(SimdType::Count));
+-    return SimdType(t);
+-}
+-
+-uint32_t
+-SimdTypeDescr::size(SimdType t)
+-{
+-    MOZ_ASSERT(unsigned(t) < unsigned(SimdType::Count));
+-    switch (t) {
+-      case SimdType::Int8x16:
+-      case SimdType::Int16x8:
+-      case SimdType::Int32x4:
+-      case SimdType::Uint8x16:
+-      case SimdType::Uint16x8:
+-      case SimdType::Uint32x4:
+-      case SimdType::Float32x4:
+-      case SimdType::Float64x2:
+-      case SimdType::Bool8x16:
+-      case SimdType::Bool16x8:
+-      case SimdType::Bool32x4:
+-      case SimdType::Bool64x2:
+-        return 16;
+-      case SimdType::Count:
+-        break;
+-    }
+-    MOZ_CRASH("unexpected SIMD type");
+-}
+-
+-uint32_t
+-SimdTypeDescr::alignment(SimdType t)
+-{
+-    MOZ_ASSERT(unsigned(t) < unsigned(SimdType::Count));
+-    return size(t);
+-}
+-
+-/***************************************************************************
+  * ArrayMetaTypeDescr class
+  */
+ 
+ /*
+  * For code like:
+  *
+  *   var A = new TypedObject.ArrayType(uint8, 10);
+  *   var S = new TypedObject.StructType({...});
+@@ -1659,17 +1606,16 @@ OutlineTypedObject::obj_trace(JSTracer* 
+ }
+ 
+ bool
+ TypeDescr::hasProperty(const JSAtomState& names, jsid id)
+ {
+     switch (kind()) {
+       case type::Scalar:
+       case type::Reference:
+-      case type::Simd:
+         return false;
+ 
+       case type::Array:
+       {
+         uint32_t index;
+         return IdIsIndex(id, &index) || JSID_IS_ATOM(id, names.length);
+       }
+ 
+@@ -1732,17 +1678,16 @@ TypedObject::obj_defineProperty(JSContex
+ 
+ bool
+ TypedObject::obj_hasProperty(JSContext* cx, HandleObject obj, HandleId id, bool* foundp)
+ {
+     Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+     switch (typedObj->typeDescr().kind()) {
+       case type::Scalar:
+       case type::Reference:
+-      case type::Simd:
+         break;
+ 
+       case type::Array: {
+         if (JSID_IS_ATOM(id, cx->names().length)) {
+             *foundp = true;
+             return true;
+         }
+         uint32_t index;
+@@ -1784,19 +1729,16 @@ TypedObject::obj_getProperty(JSContext* 
+ 
+     // Handle everything else here:
+ 
+     switch (typedObj->typeDescr().kind()) {
+       case type::Scalar:
+       case type::Reference:
+         break;
+ 
+-      case type::Simd:
+-        break;
+-
+       case type::Array:
+         if (JSID_IS_ATOM(id, cx->names().length)) {
+             if (!typedObj->isAttached()) {
+                 JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+                                           JSMSG_TYPEDOBJECT_HANDLE_UNATTACHED);
+                 return false;
+             }
+ 
+@@ -1833,17 +1775,16 @@ TypedObject::obj_getElement(JSContext* c
+ {
+     MOZ_ASSERT(obj->is<TypedObject>());
+     Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+     Rooted<TypeDescr*> descr(cx, &typedObj->typeDescr());
+ 
+     switch (descr->kind()) {
+       case type::Scalar:
+       case type::Reference:
+-      case type::Simd:
+       case type::Struct:
+         break;
+ 
+       case type::Array:
+         return obj_getArrayElement(cx, typedObj, descr, index, vp);
+     }
+ 
+     RootedObject proto(cx, obj->staticPrototype());
+@@ -1879,19 +1820,16 @@ TypedObject::obj_setProperty(JSContext* 
+ {
+     Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+ 
+     switch (typedObj->typeDescr().kind()) {
+       case type::Scalar:
+       case type::Reference:
+         break;
+ 
+-      case type::Simd:
+-        break;
+-
+       case type::Array: {
+         if (JSID_IS_ATOM(id, cx->names().length)) {
+             if (receiver.isObject() && obj == &receiver.toObject()) {
+                 JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+                                           JSMSG_CANT_REDEFINE_ARRAY_LENGTH);
+                 return false;
+             }
+             return result.failReadOnly();
+@@ -1949,17 +1887,16 @@ TypedObject::obj_getOwnPropertyDescripto
+         JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_TYPEDOBJECT_HANDLE_UNATTACHED);
+         return false;
+     }
+ 
+     Rooted<TypeDescr*> descr(cx, &typedObj->typeDescr());
+     switch (descr->kind()) {
+       case type::Scalar:
+       case type::Reference:
+-      case type::Simd:
+         break;
+ 
+       case type::Array:
+       {
+         uint32_t index;
+         if (IdIsIndex(id, &index)) {
+             if (!obj_getArrayElement(cx, typedObj, descr, index, desc.value()))
+                 return false;
+@@ -2003,17 +1940,16 @@ TypedObject::obj_getOwnPropertyDescripto
+ static bool
+ IsOwnId(JSContext* cx, HandleObject obj, HandleId id)
+ {
+     uint32_t index;
+     Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+     switch (typedObj->typeDescr().kind()) {
+       case type::Scalar:
+       case type::Reference:
+-      case type::Simd:
+         return false;
+ 
+       case type::Array:
+         return IdIsIndex(id, &index) || JSID_IS_ATOM(id, cx->names().length);
+ 
+       case type::Struct:
+         size_t index;
+         if (typedObj->typeDescr().as<StructTypeDescr>().fieldIndex(id, &index))
+@@ -2042,18 +1978,17 @@ TypedObject::obj_newEnumerate(JSContext*
+ {
+     MOZ_ASSERT(obj->is<TypedObject>());
+     Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+     Rooted<TypeDescr*> descr(cx, &typedObj->typeDescr());
+ 
+     RootedId id(cx);
+     switch (descr->kind()) {
+       case type::Scalar:
+-      case type::Reference:
+-      case type::Simd: {
++      case type::Reference: {
+         // Nothing to enumerate.
+         break;
+       }
+ 
+       case type::Array: {
+         if (!properties.reserve(typedObj->length()))
+             return false;
+ 
+@@ -2526,32 +2461,16 @@ js::GetTypedObjectModule(JSContext* cx, 
+ {
+     CallArgs args = CallArgsFromVp(argc, vp);
+     Rooted<GlobalObject*> global(cx, cx->global());
+     MOZ_ASSERT(global);
+     args.rval().setObject(global->getTypedObjectModule());
+     return true;
+ }
+ 
+-bool
+-js::GetSimdTypeDescr(JSContext* cx, unsigned argc, Value* vp)
+-{
+-    CallArgs args = CallArgsFromVp(argc, vp);
+-    MOZ_ASSERT(args.length() == 1);
+-    MOZ_ASSERT(args[0].isInt32());
+-    // One of the JS_SIMDTYPEREPR_* constants / a SimdType enum value.
+-    // getOrCreateSimdTypeDescr() will do the range check.
+-    int32_t simdTypeRepr = args[0].toInt32();
+-    Rooted<GlobalObject*> global(cx, cx->global());
+-    MOZ_ASSERT(global);
+-    auto* obj = GlobalObject::getOrCreateSimdTypeDescr(cx, global, SimdType(simdTypeRepr));
+-    args.rval().setObject(*obj);
+-    return true;
+-}
+-
+ #define JS_STORE_SCALAR_CLASS_IMPL(_constant, T, _name)                         \
+ bool                                                                            \
+ js::StoreScalar##T::Func(JSContext* cx, unsigned argc, Value* vp)               \
+ {                                                                               \
+     CallArgs args = CallArgsFromVp(argc, vp);                                   \
+     MOZ_ASSERT(args.length() == 3);                                             \
+     MOZ_ASSERT(args[0].isObject() && args[0].toObject().is<TypedObject>());     \
+     MOZ_ASSERT(args[1].isInt32());                                              \
+@@ -2732,17 +2651,16 @@ visitReferences(TypeDescr& descr,
+                 uint8_t* mem,
+                 V& visitor)
+ {
+     if (descr.transparent())
+         return;
+ 
+     switch (descr.kind()) {
+       case type::Scalar:
+-      case type::Simd:
+         return;
+ 
+       case type::Reference:
+         visitor.visitReference(descr.as<ReferenceTypeDescr>(), mem);
+         return;
+ 
+       case type::Array:
+       {
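The visitReferences hunk above shows the dispatch shape this file uses: switch on a type descriptor's kind and hand reference-bearing cases to a caller-supplied visitor. A minimal sketch of that shape, with invented stand-ins for TypeDescr and the visitor:

    #include <cstdio>

    enum class Kind { Scalar, Reference, Array };

    struct Descr {
        Kind kind;
    };

    template <typename V>
    void visitReferencesDemo(const Descr& d, unsigned char* mem, V& visitor) {
        switch (d.kind) {
          case Kind::Scalar:
            return;                      // scalars hold no GC references
          case Kind::Reference:
            visitor.visitReference(d, mem);
            return;
          case Kind::Array:
            // A real implementation would recurse into each element here.
            return;
        }
    }

    struct CountingVisitor {
        int count = 0;
        void visitReference(const Descr&, unsigned char*) { count++; }
    };

    int main() {
        unsigned char mem[16] = {};
        Descr ref{Kind::Reference};
        CountingVisitor v;
        visitReferencesDemo(ref, mem, v);
        std::printf("references visited: %d\n", v.count);
        return 0;
    }

Removing the SIMD kind shrinks every such switch by one case, which is most of what this part of the patch does.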
+diff --git a/js/src/builtin/TypedObject.h b/js/src/builtin/TypedObject.h
+--- a/js/src/builtin/TypedObject.h
++++ b/js/src/builtin/TypedObject.h
+@@ -116,29 +116,27 @@ static T ConvertScalar(double d)
+     return T(n);
+ }
+ 
+ namespace type {
+ 
+ enum Kind {
+     Scalar = JS_TYPEREPR_SCALAR_KIND,
+     Reference = JS_TYPEREPR_REFERENCE_KIND,
+-    Simd = JS_TYPEREPR_SIMD_KIND,
+     Struct = JS_TYPEREPR_STRUCT_KIND,
+     Array = JS_TYPEREPR_ARRAY_KIND
+ };
+ 
+ } // namespace type
+ 
+ ///////////////////////////////////////////////////////////////////////////
+ // Typed Prototypes
+ 
+ class SimpleTypeDescr;
+ class ComplexTypeDescr;
+-class SimdTypeDescr;
+ class StructTypeDescr;
+ class TypedProto;
+ 
+ /*
+  * The prototype for a typed object.
+  */
+ class TypedProto : public NativeObject
+ {
+@@ -250,24 +248,16 @@ class ScalarTypeDescr : public SimpleTyp
+         static_assert(Scalar::Uint32 == JS_SCALARTYPEREPR_UINT32,
+                       "TypedObjectConstants.h must be consistent with Scalar::Type");
+         static_assert(Scalar::Float32 == JS_SCALARTYPEREPR_FLOAT32,
+                       "TypedObjectConstants.h must be consistent with Scalar::Type");
+         static_assert(Scalar::Float64 == JS_SCALARTYPEREPR_FLOAT64,
+                       "TypedObjectConstants.h must be consistent with Scalar::Type");
+         static_assert(Scalar::Uint8Clamped == JS_SCALARTYPEREPR_UINT8_CLAMPED,
+                       "TypedObjectConstants.h must be consistent with Scalar::Type");
+-        static_assert(Scalar::Float32x4 == JS_SCALARTYPEREPR_FLOAT32X4,
+-                      "TypedObjectConstants.h must be consistent with Scalar::Type");
+-        static_assert(Scalar::Int8x16 == JS_SCALARTYPEREPR_INT8X16,
+-                      "TypedObjectConstants.h must be consistent with Scalar::Type");
+-        static_assert(Scalar::Int16x8 == JS_SCALARTYPEREPR_INT16X8,
+-                      "TypedObjectConstants.h must be consistent with Scalar::Type");
+-        static_assert(Scalar::Int32x4 == JS_SCALARTYPEREPR_INT32X4,
+-                      "TypedObjectConstants.h must be consistent with Scalar::Type");
+ 
+         return Type(getReservedSlot(JS_DESCR_SLOT_TYPE).toInt32());
+     }
+ 
+     static MOZ_MUST_USE bool call(JSContext* cx, unsigned argc, Value* vp);
+ };
+ 
+ // Enumerates the cases of ScalarTypeDescr::Type which have
+@@ -335,35 +325,16 @@ class ComplexTypeDescr : public TypeDesc
+   public:
+     // Returns the prototype that instances of this type descriptor
+     // will have.
+     TypedProto& instancePrototype() const {
+         return getReservedSlot(JS_DESCR_SLOT_TYPROTO).toObject().as<TypedProto>();
+     }
+ };
+ 
+-enum class SimdType;
+-
+-/*
+- * SIMD Type descriptors.
+- */
+-class SimdTypeDescr : public ComplexTypeDescr
+-{
+-  public:
+-    static const type::Kind Kind = type::Simd;
+-    static const bool Opaque = false;
+-    static const Class class_;
+-    static uint32_t size(SimdType t);
+-    static uint32_t alignment(SimdType t);
+-    static MOZ_MUST_USE bool call(JSContext* cx, unsigned argc, Value* vp);
+-    static bool is(const Value& v);
+-
+-    SimdType type() const;
+-};
+-
+ bool IsTypedObjectClass(const Class* clasp); // Defined below
+ bool IsTypedObjectArray(JSObject& obj);
+ 
+ MOZ_MUST_USE bool CreateUserSizeAndAlignmentProperties(JSContext* cx, HandleTypeDescr obj);
+ 
+ class ArrayTypeDescr;
+ 
+ /*
+@@ -789,26 +760,16 @@ class InlineTransparentTypedObject : pub
+ 
+ // Class for an opaque typed object with inline data and no array buffer.
+ class InlineOpaqueTypedObject : public InlineTypedObject
+ {
+   public:
+     static const Class class_;
+ };
+ 
+-// Class for the global SIMD object.
+-class SimdObject : public NativeObject
+-{
+-  public:
+-    static const Class class_;
+-    static MOZ_MUST_USE bool toString(JSContext* cx, unsigned int argc, Value* vp);
+-    static MOZ_MUST_USE bool resolve(JSContext* cx, JS::HandleObject obj, JS::HandleId,
+-                                     bool* resolved);
+-};
+-
+ /*
+  * Usage: NewOpaqueTypedObject(typeObj)
+  *
+  * Constructs a new, unattached instance of `Handle`.
+  */
+ MOZ_MUST_USE bool NewOpaqueTypedObject(JSContext* cx, unsigned argc, Value* vp);
+ 
+ /*
+@@ -897,26 +858,16 @@ MOZ_MUST_USE bool ClampToUint8(JSContext
+  * to the various builtin type descriptors. These are currently
+  * exported as immutable properties so it is safe for self-hosted code
+  * to access them; eventually this should be linked into the module
+  * system.
+  */
+ MOZ_MUST_USE bool GetTypedObjectModule(JSContext* cx, unsigned argc, Value* vp);
+ 
+ /*
+- * Usage: GetSimdTypeDescr(simdTypeRepr)
+- *
+- * Returns one of the SIMD type objects, identified by `simdTypeRepr` which must
+- * be one of the JS_SIMDTYPEREPR_* constants.
+- *
+- * The SIMD pseudo-module must have been initialized for this to be safe.
+- */
+-MOZ_MUST_USE bool GetSimdTypeDescr(JSContext* cx, unsigned argc, Value* vp);
+-
+-/*
+  * Usage: Store_int8(targetDatum, targetOffset, value)
+  *        ...
+  *        Store_uint8(targetDatum, targetOffset, value)
+  *        ...
+  *        Store_float32(targetDatum, targetOffset, value)
+  *        Store_float64(targetDatum, targetOffset, value)
+  *
+  * Intrinsic function. Stores `value` into the memory referenced by
+@@ -1040,18 +991,17 @@ IsSimpleTypeDescrClass(const Class* clas
+     return clasp == &ScalarTypeDescr::class_ ||
+            clasp == &ReferenceTypeDescr::class_;
+ }
+ 
+ inline bool
+ IsComplexTypeDescrClass(const Class* clasp)
+ {
+     return clasp == &StructTypeDescr::class_ ||
+-           clasp == &ArrayTypeDescr::class_ ||
+-           clasp == &SimdTypeDescr::class_;
++           clasp == &ArrayTypeDescr::class_;
+ }
+ 
+ inline bool
+ IsTypeDescrClass(const Class* clasp)
+ {
+     return IsSimpleTypeDescrClass(clasp) ||
+            IsComplexTypeDescrClass(clasp);
+ }
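IsSimpleTypeDescrClass/IsComplexTypeDescrClass above classify objects by comparing Class pointers: each descriptor kind has one static Class instance, so a membership test is just pointer equality. A sketch of the idiom with an invented Class type and instances:

    #include <cstdio>

    struct Class { const char* name; };

    static const Class ScalarClass{"Scalar"};
    static const Class StructClass{"Struct"};
    static const Class ArrayClass{"Array"};

    inline bool IsSimpleDemoClass(const Class* clasp) {
        return clasp == &ScalarClass;
    }

    inline bool IsComplexDemoClass(const Class* clasp) {
        // The patch removes the third "|| clasp == &SimdTypeDescr::class_" arm
        // from the real version of this predicate.
        return clasp == &StructClass || clasp == &ArrayClass;
    }

    inline bool IsDemoTypeDescrClass(const Class* clasp) {
        return IsSimpleDemoClass(clasp) || IsComplexDemoClass(clasp);
    }

    int main() {
        std::printf("%d %d\n", IsDemoTypeDescrClass(&ArrayClass),
                    IsDemoTypeDescrClass(nullptr));
        return 0;
    }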
+diff --git a/js/src/builtin/TypedObject.js b/js/src/builtin/TypedObject.js
+--- a/js/src/builtin/TypedObject.js
++++ b/js/src/builtin/TypedObject.js
+@@ -48,19 +48,16 @@ function TypedObjectGet(descr, typedObj,
+ 
+   switch (DESCR_KIND(descr)) {
+   case JS_TYPEREPR_SCALAR_KIND:
+     return TypedObjectGetScalar(descr, typedObj, offset);
+ 
+   case JS_TYPEREPR_REFERENCE_KIND:
+     return TypedObjectGetReference(descr, typedObj, offset);
+ 
+-  case JS_TYPEREPR_SIMD_KIND:
+-    return TypedObjectGetSimd(descr, typedObj, offset);
+-
+   case JS_TYPEREPR_ARRAY_KIND:
+   case JS_TYPEREPR_STRUCT_KIND:
+     return TypedObjectGetDerived(descr, typedObj, offset);
+   }
+ 
+   assert(false, "Unhandled kind: " + DESCR_KIND(descr));
+   return undefined;
+ }
+@@ -132,154 +129,16 @@ function TypedObjectGetReference(descr, 
+   case JS_REFERENCETYPEREPR_STRING:
+     return Load_string(typedObj, offset);
+   }
+ 
+   assert(false, "Unhandled scalar type: " + type);
+   return undefined;
+ }
+ 
+-function TypedObjectGetSimd(descr, typedObj, offset) {
+-  var type = DESCR_TYPE(descr);
+-  var simdTypeDescr = GetSimdTypeDescr(type);
+-  switch (type) {
+-  case JS_SIMDTYPEREPR_FLOAT32X4:
+-    var x = Load_float32(typedObj, offset + 0);
+-    var y = Load_float32(typedObj, offset + 4);
+-    var z = Load_float32(typedObj, offset + 8);
+-    var w = Load_float32(typedObj, offset + 12);
+-    return simdTypeDescr(x, y, z, w);
+-
+-  case JS_SIMDTYPEREPR_FLOAT64X2:
+-    var x = Load_float64(typedObj, offset + 0);
+-    var y = Load_float64(typedObj, offset + 8);
+-    return simdTypeDescr(x, y);
+-
+-  case JS_SIMDTYPEREPR_INT8X16:
+-    var s0 = Load_int8(typedObj, offset + 0);
+-    var s1 = Load_int8(typedObj, offset + 1);
+-    var s2 = Load_int8(typedObj, offset + 2);
+-    var s3 = Load_int8(typedObj, offset + 3);
+-    var s4 = Load_int8(typedObj, offset + 4);
+-    var s5 = Load_int8(typedObj, offset + 5);
+-    var s6 = Load_int8(typedObj, offset + 6);
+-    var s7 = Load_int8(typedObj, offset + 7);
+-    var s8 = Load_int8(typedObj, offset + 8);
+-    var s9 = Load_int8(typedObj, offset + 9);
+-    var s10 = Load_int8(typedObj, offset + 10);
+-    var s11 = Load_int8(typedObj, offset + 11);
+-    var s12 = Load_int8(typedObj, offset + 12);
+-    var s13 = Load_int8(typedObj, offset + 13);
+-    var s14 = Load_int8(typedObj, offset + 14);
+-    var s15 = Load_int8(typedObj, offset + 15);
+-    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15);
+-
+-  case JS_SIMDTYPEREPR_INT16X8:
+-    var s0 = Load_int16(typedObj, offset + 0);
+-    var s1 = Load_int16(typedObj, offset + 2);
+-    var s2 = Load_int16(typedObj, offset + 4);
+-    var s3 = Load_int16(typedObj, offset + 6);
+-    var s4 = Load_int16(typedObj, offset + 8);
+-    var s5 = Load_int16(typedObj, offset + 10);
+-    var s6 = Load_int16(typedObj, offset + 12);
+-    var s7 = Load_int16(typedObj, offset + 14);
+-    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7);
+-
+-  case JS_SIMDTYPEREPR_INT32X4:
+-    var x = Load_int32(typedObj, offset + 0);
+-    var y = Load_int32(typedObj, offset + 4);
+-    var z = Load_int32(typedObj, offset + 8);
+-    var w = Load_int32(typedObj, offset + 12);
+-    return simdTypeDescr(x, y, z, w);
+-
+-  case JS_SIMDTYPEREPR_UINT8X16:
+-    var s0 = Load_uint8(typedObj, offset + 0);
+-    var s1 = Load_uint8(typedObj, offset + 1);
+-    var s2 = Load_uint8(typedObj, offset + 2);
+-    var s3 = Load_uint8(typedObj, offset + 3);
+-    var s4 = Load_uint8(typedObj, offset + 4);
+-    var s5 = Load_uint8(typedObj, offset + 5);
+-    var s6 = Load_uint8(typedObj, offset + 6);
+-    var s7 = Load_uint8(typedObj, offset + 7);
+-    var s8 = Load_uint8(typedObj, offset + 8);
+-    var s9 = Load_uint8(typedObj, offset + 9);
+-    var s10 = Load_uint8(typedObj, offset + 10);
+-    var s11 = Load_uint8(typedObj, offset + 11);
+-    var s12 = Load_uint8(typedObj, offset + 12);
+-    var s13 = Load_uint8(typedObj, offset + 13);
+-    var s14 = Load_uint8(typedObj, offset + 14);
+-    var s15 = Load_uint8(typedObj, offset + 15);
+-    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15);
+-
+-  case JS_SIMDTYPEREPR_UINT16X8:
+-    var s0 = Load_uint16(typedObj, offset + 0);
+-    var s1 = Load_uint16(typedObj, offset + 2);
+-    var s2 = Load_uint16(typedObj, offset + 4);
+-    var s3 = Load_uint16(typedObj, offset + 6);
+-    var s4 = Load_uint16(typedObj, offset + 8);
+-    var s5 = Load_uint16(typedObj, offset + 10);
+-    var s6 = Load_uint16(typedObj, offset + 12);
+-    var s7 = Load_uint16(typedObj, offset + 14);
+-    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7);
+-
+-  case JS_SIMDTYPEREPR_UINT32X4:
+-    var x = Load_uint32(typedObj, offset + 0);
+-    var y = Load_uint32(typedObj, offset + 4);
+-    var z = Load_uint32(typedObj, offset + 8);
+-    var w = Load_uint32(typedObj, offset + 12);
+-    return simdTypeDescr(x, y, z, w);
+-
+-  case JS_SIMDTYPEREPR_BOOL8X16:
+-    var s0 = Load_int8(typedObj, offset + 0);
+-    var s1 = Load_int8(typedObj, offset + 1);
+-    var s2 = Load_int8(typedObj, offset + 2);
+-    var s3 = Load_int8(typedObj, offset + 3);
+-    var s4 = Load_int8(typedObj, offset + 4);
+-    var s5 = Load_int8(typedObj, offset + 5);
+-    var s6 = Load_int8(typedObj, offset + 6);
+-    var s7 = Load_int8(typedObj, offset + 7);
+-    var s8 = Load_int8(typedObj, offset + 8);
+-    var s9 = Load_int8(typedObj, offset + 9);
+-    var s10 = Load_int8(typedObj, offset + 10);
+-    var s11 = Load_int8(typedObj, offset + 11);
+-    var s12 = Load_int8(typedObj, offset + 12);
+-    var s13 = Load_int8(typedObj, offset + 13);
+-    var s14 = Load_int8(typedObj, offset + 14);
+-    var s15 = Load_int8(typedObj, offset + 15);
+-    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15);
+-
+-  case JS_SIMDTYPEREPR_BOOL16X8:
+-    var s0 = Load_int16(typedObj, offset + 0);
+-    var s1 = Load_int16(typedObj, offset + 2);
+-    var s2 = Load_int16(typedObj, offset + 4);
+-    var s3 = Load_int16(typedObj, offset + 6);
+-    var s4 = Load_int16(typedObj, offset + 8);
+-    var s5 = Load_int16(typedObj, offset + 10);
+-    var s6 = Load_int16(typedObj, offset + 12);
+-    var s7 = Load_int16(typedObj, offset + 14);
+-    return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7);
+-
+-  case JS_SIMDTYPEREPR_BOOL32X4:
+-    var x = Load_int32(typedObj, offset + 0);
+-    var y = Load_int32(typedObj, offset + 4);
+-    var z = Load_int32(typedObj, offset + 8);
+-    var w = Load_int32(typedObj, offset + 12);
+-    return simdTypeDescr(x, y, z, w);
+-
+-  case JS_SIMDTYPEREPR_BOOL64X2:
+-    var x = Load_int32(typedObj, offset + 0);
+-    var y = Load_int32(typedObj, offset + 8);
+-    return simdTypeDescr(x, y);
+-
+-  }
+-
+-  assert(false, "Unhandled SIMD type: " + type);
+-  return undefined;
+-}
+-
+ ///////////////////////////////////////////////////////////////////////////
+ // Setting values
+ //
+ // The methods in this section modify the data pointed at by `this`.
+ 
+ // Writes `fromValue` into the `typedObj` at offset `offset`, adapting
+ // it to `descr` as needed. This is the most general entry point
+ // and works for any type.
+@@ -291,20 +150,16 @@ function TypedObjectSet(descr, typedObj,
+   case JS_TYPEREPR_SCALAR_KIND:
+     TypedObjectSetScalar(descr, typedObj, offset, fromValue);
+     return;
+ 
+   case JS_TYPEREPR_REFERENCE_KIND:
+     TypedObjectSetReference(descr, typedObj, offset, name, fromValue);
+     return;
+ 
+-  case JS_TYPEREPR_SIMD_KIND:
+-    TypedObjectSetSimd(descr, typedObj, offset, fromValue);
+-    return;
+-
+   case JS_TYPEREPR_ARRAY_KIND:
+     var length = DESCR_ARRAY_LENGTH(descr);
+     if (TypedObjectSetArray(descr, length, typedObj, offset, fromValue))
+       return;
+     break;
+ 
+   case JS_TYPEREPR_STRUCT_KIND:
+     if (!IsObject(fromValue))
+@@ -409,116 +264,16 @@ function TypedObjectSetReference(descr, 
+     return Store_string(typedObj, offset, name, ToString(fromValue));
+   }
+ 
+   assert(false, "Unhandled scalar type: " + type);
+   return undefined;
+ }
+ 
+ // Sets `fromValue` to `this` assuming that `this` is a scalar type.
+-function TypedObjectSetSimd(descr, typedObj, offset, fromValue) {
+-  if (!IsObject(fromValue) || !ObjectIsTypedObject(fromValue))
+-    ThrowTypeError(JSMSG_CANT_CONVERT_TO,
+-                   typeof(fromValue),
+-                   DESCR_STRING_REPR(descr));
+-
+-  if (!DescrsEquiv(descr, TypedObjectTypeDescr(fromValue)))
+-    ThrowTypeError(JSMSG_CANT_CONVERT_TO,
+-                   typeof(fromValue),
+-                   DESCR_STRING_REPR(descr));
+-
+-  var type = DESCR_TYPE(descr);
+-  switch (type) {
+-    case JS_SIMDTYPEREPR_FLOAT32X4:
+-      Store_float32(typedObj, offset + 0, Load_float32(fromValue, 0));
+-      Store_float32(typedObj, offset + 4, Load_float32(fromValue, 4));
+-      Store_float32(typedObj, offset + 8, Load_float32(fromValue, 8));
+-      Store_float32(typedObj, offset + 12, Load_float32(fromValue, 12));
+-      break;
+-    case JS_SIMDTYPEREPR_FLOAT64X2:
+-      Store_float64(typedObj, offset + 0, Load_float64(fromValue, 0));
+-      Store_float64(typedObj, offset + 8, Load_float64(fromValue, 8));
+-      break;
+-    case JS_SIMDTYPEREPR_INT8X16:
+-    case JS_SIMDTYPEREPR_BOOL8X16:
+-      Store_int8(typedObj, offset + 0, Load_int8(fromValue, 0));
+-      Store_int8(typedObj, offset + 1, Load_int8(fromValue, 1));
+-      Store_int8(typedObj, offset + 2, Load_int8(fromValue, 2));
+-      Store_int8(typedObj, offset + 3, Load_int8(fromValue, 3));
+-      Store_int8(typedObj, offset + 4, Load_int8(fromValue, 4));
+-      Store_int8(typedObj, offset + 5, Load_int8(fromValue, 5));
+-      Store_int8(typedObj, offset + 6, Load_int8(fromValue, 6));
+-      Store_int8(typedObj, offset + 7, Load_int8(fromValue, 7));
+-      Store_int8(typedObj, offset + 8, Load_int8(fromValue, 8));
+-      Store_int8(typedObj, offset + 9, Load_int8(fromValue, 9));
+-      Store_int8(typedObj, offset + 10, Load_int8(fromValue, 10));
+-      Store_int8(typedObj, offset + 11, Load_int8(fromValue, 11));
+-      Store_int8(typedObj, offset + 12, Load_int8(fromValue, 12));
+-      Store_int8(typedObj, offset + 13, Load_int8(fromValue, 13));
+-      Store_int8(typedObj, offset + 14, Load_int8(fromValue, 14));
+-      Store_int8(typedObj, offset + 15, Load_int8(fromValue, 15));
+-      break;
+-    case JS_SIMDTYPEREPR_INT16X8:
+-    case JS_SIMDTYPEREPR_BOOL16X8:
+-      Store_int16(typedObj, offset + 0, Load_int16(fromValue, 0));
+-      Store_int16(typedObj, offset + 2, Load_int16(fromValue, 2));
+-      Store_int16(typedObj, offset + 4, Load_int16(fromValue, 4));
+-      Store_int16(typedObj, offset + 6, Load_int16(fromValue, 6));
+-      Store_int16(typedObj, offset + 8, Load_int16(fromValue, 8));
+-      Store_int16(typedObj, offset + 10, Load_int16(fromValue, 10));
+-      Store_int16(typedObj, offset + 12, Load_int16(fromValue, 12));
+-      Store_int16(typedObj, offset + 14, Load_int16(fromValue, 14));
+-      break;
+-    case JS_SIMDTYPEREPR_INT32X4:
+-    case JS_SIMDTYPEREPR_BOOL32X4:
+-    case JS_SIMDTYPEREPR_BOOL64X2:
+-      Store_int32(typedObj, offset + 0, Load_int32(fromValue, 0));
+-      Store_int32(typedObj, offset + 4, Load_int32(fromValue, 4));
+-      Store_int32(typedObj, offset + 8, Load_int32(fromValue, 8));
+-      Store_int32(typedObj, offset + 12, Load_int32(fromValue, 12));
+-      break;
+-    case JS_SIMDTYPEREPR_UINT8X16:
+-      Store_uint8(typedObj, offset + 0, Load_uint8(fromValue, 0));
+-      Store_uint8(typedObj, offset + 1, Load_uint8(fromValue, 1));
+-      Store_uint8(typedObj, offset + 2, Load_uint8(fromValue, 2));
+-      Store_uint8(typedObj, offset + 3, Load_uint8(fromValue, 3));
+-      Store_uint8(typedObj, offset + 4, Load_uint8(fromValue, 4));
+-      Store_uint8(typedObj, offset + 5, Load_uint8(fromValue, 5));
+-      Store_uint8(typedObj, offset + 6, Load_uint8(fromValue, 6));
+-      Store_uint8(typedObj, offset + 7, Load_uint8(fromValue, 7));
+-      Store_uint8(typedObj, offset + 8, Load_uint8(fromValue, 8));
+-      Store_uint8(typedObj, offset + 9, Load_uint8(fromValue, 9));
+-      Store_uint8(typedObj, offset + 10, Load_uint8(fromValue, 10));
+-      Store_uint8(typedObj, offset + 11, Load_uint8(fromValue, 11));
+-      Store_uint8(typedObj, offset + 12, Load_uint8(fromValue, 12));
+-      Store_uint8(typedObj, offset + 13, Load_uint8(fromValue, 13));
+-      Store_uint8(typedObj, offset + 14, Load_uint8(fromValue, 14));
+-      Store_uint8(typedObj, offset + 15, Load_uint8(fromValue, 15));
+-      break;
+-    case JS_SIMDTYPEREPR_UINT16X8:
+-      Store_uint16(typedObj, offset + 0, Load_uint16(fromValue, 0));
+-      Store_uint16(typedObj, offset + 2, Load_uint16(fromValue, 2));
+-      Store_uint16(typedObj, offset + 4, Load_uint16(fromValue, 4));
+-      Store_uint16(typedObj, offset + 6, Load_uint16(fromValue, 6));
+-      Store_uint16(typedObj, offset + 8, Load_uint16(fromValue, 8));
+-      Store_uint16(typedObj, offset + 10, Load_uint16(fromValue, 10));
+-      Store_uint16(typedObj, offset + 12, Load_uint16(fromValue, 12));
+-      Store_uint16(typedObj, offset + 14, Load_uint16(fromValue, 14));
+-      break;
+-    case JS_SIMDTYPEREPR_UINT32X4:
+-      Store_uint32(typedObj, offset + 0, Load_uint32(fromValue, 0));
+-      Store_uint32(typedObj, offset + 4, Load_uint32(fromValue, 4));
+-      Store_uint32(typedObj, offset + 8, Load_uint32(fromValue, 8));
+-      Store_uint32(typedObj, offset + 12, Load_uint32(fromValue, 12));
+-      break;
+-    default:
+-      assert(false, "Unhandled Simd type: " + type);
+-  }
+-}
+-
+ ///////////////////////////////////////////////////////////////////////////
+ // C++ Wrappers
+ //
+ // These helpers are invoked by C++ code or used as method bodies.
+ 
+ // Wrapper for use from C++ code.
+ function ConvertAndCopyTo(destDescr,
+                           destTypedObj,
+@@ -625,251 +380,16 @@ function TypedObjectArrayRedimension(new
+   assert(DESCR_SIZE(oldArrayType) == DESCR_SIZE(newArrayType),
+          "Byte sizes should be equal");
+ 
+   // Rewrap the data from `this` in a new type.
+   return NewDerivedTypedObject(newArrayType, this, 0);
+ }
+ 
+ ///////////////////////////////////////////////////////////////////////////
+-// SIMD
+-
+-function SimdProtoString(type) {
+-  switch (type) {
+-  case JS_SIMDTYPEREPR_INT8X16:
+-    return "Int8x16";
+-  case JS_SIMDTYPEREPR_INT16X8:
+-    return "Int16x8";
+-  case JS_SIMDTYPEREPR_INT32X4:
+-    return "Int32x4";
+-  case JS_SIMDTYPEREPR_UINT8X16:
+-    return "Uint8x16";
+-  case JS_SIMDTYPEREPR_UINT16X8:
+-    return "Uint16x8";
+-  case JS_SIMDTYPEREPR_UINT32X4:
+-    return "Uint32x4";
+-  case JS_SIMDTYPEREPR_FLOAT32X4:
+-    return "Float32x4";
+-  case JS_SIMDTYPEREPR_FLOAT64X2:
+-    return "Float64x2";
+-  case JS_SIMDTYPEREPR_BOOL8X16:
+-    return "Bool8x16";
+-  case JS_SIMDTYPEREPR_BOOL16X8:
+-    return "Bool16x8";
+-  case JS_SIMDTYPEREPR_BOOL32X4:
+-    return "Bool32x4";
+-  case JS_SIMDTYPEREPR_BOOL64X2:
+-    return "Bool64x2";
+-  }
+-
+-  assert(false, "Unhandled type constant");
+-  return undefined;
+-}
+-
+-function SimdTypeToLength(type) {
+-  switch (type) {
+-  case JS_SIMDTYPEREPR_INT8X16:
+-  case JS_SIMDTYPEREPR_BOOL8X16:
+-    return 16;
+-  case JS_SIMDTYPEREPR_INT16X8:
+-  case JS_SIMDTYPEREPR_BOOL16X8:
+-    return 8;
+-  case JS_SIMDTYPEREPR_INT32X4:
+-  case JS_SIMDTYPEREPR_FLOAT32X4:
+-  case JS_SIMDTYPEREPR_BOOL32X4:
+-    return 4;
+-  case JS_SIMDTYPEREPR_FLOAT64X2:
+-  case JS_SIMDTYPEREPR_BOOL64X2:
+-    return 2;
+-  }
+-
+-  assert(false, "Unhandled type constant");
+-  return undefined;
+-}
+-
+-// This implements SIMD.*.prototype.valueOf().
+-// Once we have proper value semantics for SIMD types, this function should just
+-// perform a type check and return this.
+-// For now, throw a TypeError unconditionally since valueOf() was probably
+-// called from ToNumber() which is supposed to throw when attempting to convert
+-// a SIMD value to a number.
+-function SimdValueOf() {
+-  if (!IsObject(this) || !ObjectIsTypedObject(this))
+-    ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD", "valueOf", typeof this);
+-
+-  var descr = TypedObjectTypeDescr(this);
+-
+-  if (DESCR_KIND(descr) != JS_TYPEREPR_SIMD_KIND)
+-    ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD", "valueOf", typeof this);
+-
+-  ThrowTypeError(JSMSG_SIMD_TO_NUMBER);
+-}
+-
+-function SimdToSource() {
+-  if (!IsObject(this) || !ObjectIsTypedObject(this))
+-    ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD.*", "toSource", typeof this);
+-
+-  var descr = TypedObjectTypeDescr(this);
+-
+-  if (DESCR_KIND(descr) != JS_TYPEREPR_SIMD_KIND)
+-    ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD.*", "toSource", typeof this);
+-
+-  return SimdFormatString(descr, this);
+-}
+-
+-function SimdToString() {
+-  if (!IsObject(this) || !ObjectIsTypedObject(this))
+-    ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD.*", "toString", typeof this);
+-
+-  var descr = TypedObjectTypeDescr(this);
+-
+-  if (DESCR_KIND(descr) != JS_TYPEREPR_SIMD_KIND)
+-    ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD.*", "toString", typeof this);
+-
+-  return SimdFormatString(descr, this);
+-}
+-
+-function SimdFormatString(descr, typedObj) {
+-  var typerepr = DESCR_TYPE(descr);
+-  var protoString = SimdProtoString(typerepr);
+-  switch (typerepr) {
+-      case JS_SIMDTYPEREPR_INT8X16: {
+-          var s1 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 0);
+-          var s2 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 1);
+-          var s3 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 2);
+-          var s4 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 3);
+-          var s5 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 4);
+-          var s6 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 5);
+-          var s7 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 6);
+-          var s8 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 7);
+-          var s9 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 8);
+-          var s10 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 9);
+-          var s11 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 10);
+-          var s12 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 11);
+-          var s13 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 12);
+-          var s14 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 13);
+-          var s15 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 14);
+-          var s16 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 15);
+-          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8}, ${s9}, ${s10}, ${s11}, ${s12}, ${s13}, ${s14}, ${s15}, ${s16})`;
+-      }
+-      case JS_SIMDTYPEREPR_INT16X8: {
+-          var s1 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 0);
+-          var s2 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 1);
+-          var s3 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 2);
+-          var s4 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 3);
+-          var s5 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 4);
+-          var s6 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 5);
+-          var s7 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 6);
+-          var s8 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 7);
+-          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8})`;
+-      }
+-      case JS_SIMDTYPEREPR_INT32X4: {
+-          var x = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 0);
+-          var y = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 1);
+-          var z = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 2);
+-          var w = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 3);
+-          return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
+-      }
+-      case JS_SIMDTYPEREPR_UINT8X16: {
+-          var s1 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 0);
+-          var s2 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 1);
+-          var s3 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 2);
+-          var s4 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 3);
+-          var s5 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 4);
+-          var s6 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 5);
+-          var s7 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 6);
+-          var s8 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 7);
+-          var s9 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 8);
+-          var s10 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 9);
+-          var s11 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 10);
+-          var s12 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 11);
+-          var s13 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 12);
+-          var s14 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 13);
+-          var s15 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 14);
+-          var s16 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 15);
+-          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8}, ${s9}, ${s10}, ${s11}, ${s12}, ${s13}, ${s14}, ${s15}, ${s16})`;
+-      }
+-      case JS_SIMDTYPEREPR_UINT16X8: {
+-          var s1 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 0);
+-          var s2 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 1);
+-          var s3 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 2);
+-          var s4 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 3);
+-          var s5 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 4);
+-          var s6 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 5);
+-          var s7 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 6);
+-          var s8 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 7);
+-          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8})`;
+-      }
+-      case JS_SIMDTYPEREPR_UINT32X4: {
+-          var x = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 0);
+-          var y = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 1);
+-          var z = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 2);
+-          var w = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 3);
+-          return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
+-      }
+-      case JS_SIMDTYPEREPR_FLOAT32X4: {
+-          var x = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 0);
+-          var y = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 1);
+-          var z = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 2);
+-          var w = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 3);
+-          return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
+-      }
+-      case JS_SIMDTYPEREPR_FLOAT64X2: {
+-          var x = callFunction(std_SIMD_Float64x2_extractLane, null, typedObj, 0);
+-          var y = callFunction(std_SIMD_Float64x2_extractLane, null, typedObj, 1);
+-          return `SIMD.${protoString}(${x}, ${y})`;
+-      }
+-      case JS_SIMDTYPEREPR_BOOL8X16: {
+-          var s1 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 0);
+-          var s2 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 1);
+-          var s3 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 2);
+-          var s4 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 3);
+-          var s5 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 4);
+-          var s6 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 5);
+-          var s7 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 6);
+-          var s8 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 7);
+-          var s9 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 8);
+-          var s10 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 9);
+-          var s11 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 10);
+-          var s12 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 11);
+-          var s13 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 12);
+-          var s14 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 13);
+-          var s15 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 14);
+-          var s16 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 15);
+-          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8}, ${s9}, ${s10}, ${s11}, ${s12}, ${s13}, ${s14}, ${s15}, ${s16})`;
+-      }
+-      case JS_SIMDTYPEREPR_BOOL16X8: {
+-          var s1 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 0);
+-          var s2 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 1);
+-          var s3 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 2);
+-          var s4 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 3);
+-          var s5 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 4);
+-          var s6 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 5);
+-          var s7 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 6);
+-          var s8 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 7);
+-          return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8})`;
+-      }
+-      case JS_SIMDTYPEREPR_BOOL32X4: {
+-          var x = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 0);
+-          var y = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 1);
+-          var z = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 2);
+-          var w = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 3);
+-          return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
+-      }
+-      case JS_SIMDTYPEREPR_BOOL64X2: {
+-          var x = callFunction(std_SIMD_Bool64x2_extractLane, null, typedObj, 0);
+-          var y = callFunction(std_SIMD_Bool64x2_extractLane, null, typedObj, 1);
+-          return `SIMD.${protoString}(${x}, ${y})`;
+-      }
+-  }
+-  assert(false, "unexpected SIMD kind");
+-  return "?";
+-}
+-
+-///////////////////////////////////////////////////////////////////////////
+ // Miscellaneous
+ 
+ function DescrsEquiv(descr1, descr2) {
+   assert(IsObject(descr1) && ObjectIsTypeDescr(descr1), "descr1 not descr");
+   assert(IsObject(descr2) && ObjectIsTypeDescr(descr2), "descr2 not descr");
+ 
+   // Potential optimization: these two strings are guaranteed to be
+   // atoms, and hence this string comparison can just be a pointer
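The DescrsEquiv comment above notes that the two string representations are guaranteed to be atoms, so the comparison could be a pointer check. A short sketch of why interning enables that, using standard C++ as a stand-in for the engine's atom table: interning maps equal strings to one shared instance, so identity implies equality.

    #include <cstdio>
    #include <string>
    #include <unordered_set>

    // Return a stable pointer to the single shared copy of s.
    // (unordered_set never invalidates pointers to elements on insert.)
    static const std::string* Atomize(std::unordered_set<std::string>& table,
                                      const std::string& s) {
        return &*table.insert(s).first;
    }

    int main() {
        std::unordered_set<std::string> atoms;
        const std::string* a = Atomize(atoms, "int32x4");
        const std::string* b = Atomize(atoms, "int32x4");
        const std::string* c = Atomize(atoms, "float32x4");
        // Pointer comparison now doubles as content comparison.
        std::printf("a==b: %d, a==c: %d\n", a == b, a == c);
        return 0;
    }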
+diff --git a/js/src/builtin/TypedObjectConstants.h b/js/src/builtin/TypedObjectConstants.h
+--- a/js/src/builtin/TypedObjectConstants.h
++++ b/js/src/builtin/TypedObjectConstants.h
+@@ -48,17 +48,17 @@
+ #define JS_DESCR_SLOT_STRING_REPR        1  // Atomized string representation
+ #define JS_DESCR_SLOT_ALIGNMENT          2  // Alignment in bytes
+ #define JS_DESCR_SLOT_SIZE               3  // Size in bytes, else 0
+ #define JS_DESCR_SLOT_OPAQUE             4  // Atomized string representation
+ #define JS_DESCR_SLOT_TYPROTO            5  // Prototype for instances, if any
+ #define JS_DESCR_SLOT_ARRAYPROTO         6  // Lazily created prototype for arrays
+ #define JS_DESCR_SLOT_TRACE_LIST         7  // List of references for use in tracing
+ 
+-// Slots on scalars, references, and SIMD objects
++// Slots on scalars and references
+ #define JS_DESCR_SLOT_TYPE               8  // Type code
+ 
+ // Slots on array descriptors
+ #define JS_DESCR_SLOT_ARRAY_ELEM_TYPE    8
+ #define JS_DESCR_SLOT_ARRAY_LENGTH       9
+ 
+ // Slots on struct type objects
+ #define JS_DESCR_SLOT_STRUCT_FIELD_NAMES 8
+@@ -70,52 +70,31 @@
+ 
+ // These constants are for use exclusively in JS code. In C++ code,
+ // prefer TypeRepresentation::Scalar etc, which allows you to
+ // write a switch which will receive a warning if you omit a case.
+ #define JS_TYPEREPR_SCALAR_KIND         1
+ #define JS_TYPEREPR_REFERENCE_KIND      2
+ #define JS_TYPEREPR_STRUCT_KIND         3
+ #define JS_TYPEREPR_ARRAY_KIND          4
+-#define JS_TYPEREPR_SIMD_KIND           5
+ 
+ // These constants are for use exclusively in JS code. In C++ code,
+ // prefer Scalar::Int8 etc, which allows you to write a switch which will
+ // receive a warning if you omit a case.
+ #define JS_SCALARTYPEREPR_INT8          0
+ #define JS_SCALARTYPEREPR_UINT8         1
+ #define JS_SCALARTYPEREPR_INT16         2
+ #define JS_SCALARTYPEREPR_UINT16        3
+ #define JS_SCALARTYPEREPR_INT32         4
+ #define JS_SCALARTYPEREPR_UINT32        5
+ #define JS_SCALARTYPEREPR_FLOAT32       6
+ #define JS_SCALARTYPEREPR_FLOAT64       7
+ #define JS_SCALARTYPEREPR_UINT8_CLAMPED 8
+-#define JS_SCALARTYPEREPR_FLOAT32X4     11
+-#define JS_SCALARTYPEREPR_INT8X16       12
+-#define JS_SCALARTYPEREPR_INT16X8       13
+-#define JS_SCALARTYPEREPR_INT32X4       14
+ 
+ // These constants are for use exclusively in JS code. In C++ code,
+ // prefer ReferenceTypeRepresentation::TYPE_ANY etc, which allows
+ // you to write a switch which will receive a warning if you omit a
+ // case.
+ #define JS_REFERENCETYPEREPR_ANY        0
+ #define JS_REFERENCETYPEREPR_OBJECT     1
+ #define JS_REFERENCETYPEREPR_STRING     2
+ 
+-// These constants are for use exclusively in JS code. In C++ code, prefer
+-// SimdType::Int32x4 etc, since that allows you to write a switch which will
+-// receive a warning if you omit a case.
+-#define JS_SIMDTYPEREPR_INT8X16         0
+-#define JS_SIMDTYPEREPR_INT16X8         1
+-#define JS_SIMDTYPEREPR_INT32X4         2
+-#define JS_SIMDTYPEREPR_UINT8X16        3
+-#define JS_SIMDTYPEREPR_UINT16X8        4
+-#define JS_SIMDTYPEREPR_UINT32X4        5
+-#define JS_SIMDTYPEREPR_FLOAT32X4       6
+-#define JS_SIMDTYPEREPR_FLOAT64X2       7
+-#define JS_SIMDTYPEREPR_BOOL8X16        8
+-#define JS_SIMDTYPEREPR_BOOL16X8        9
+-#define JS_SIMDTYPEREPR_BOOL32X4       10
+-#define JS_SIMDTYPEREPR_BOOL64X2       11
+-
+ #endif
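TypedObjectConstants.h defines plain #define constants for self-hosted JS, and the C++ side pins them to its enums with static_assert — the ScalarTypeDescr asserts in the TypedObject.h hunk above are exactly that check, which is why the JS_SCALARTYPEREPR_* and JS_SIMDTYPEREPR_* values can be deleted in lockstep with the enums. A minimal sketch of the idiom with made-up names:

    #include <cstdio>

    #define DEMO_KIND_SCALAR    1
    #define DEMO_KIND_REFERENCE 2

    enum class DemoKind {
        Scalar = DEMO_KIND_SCALAR,
        Reference = DEMO_KIND_REFERENCE,
    };

    // Compilation fails the moment either side is renumbered independently.
    static_assert(int(DemoKind::Scalar) == DEMO_KIND_SCALAR,
                  "JS-visible constant must match the C++ enum");
    static_assert(int(DemoKind::Reference) == DEMO_KIND_REFERENCE,
                  "JS-visible constant must match the C++ enum");

    int main() {
        std::printf("constants in sync\n");
        return 0;
    }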
+diff --git a/js/src/devtools/automation/cgc-jittest-timeouts.txt b/js/src/devtools/automation/cgc-jittest-timeouts.txt
+--- a/js/src/devtools/automation/cgc-jittest-timeouts.txt
++++ b/js/src/devtools/automation/cgc-jittest-timeouts.txt
+@@ -1,9 +1,8 @@
+-SIMD/nursery-overflow.js
+ asm.js/testBug1117235.js
+ asm.js/testParallelCompile.js
+ auto-regress/bug653395.js
+ auto-regress/bug654392.js
+ auto-regress/bug675251.js
+ auto-regress/bug729797.js
+ baseline/bug847446.js
+ baseline/bug852175.js
+diff --git a/js/src/doc/JITOptimizations/Outcomes.md b/js/src/doc/JITOptimizations/Outcomes.md
+--- a/js/src/doc/JITOptimizations/Outcomes.md
++++ b/js/src/doc/JITOptimizations/Outcomes.md
+@@ -147,21 +147,16 @@ Arrays at this element access location h
+ The storage for the typed object being accessed at this location might be a detached ArrayBuffer.  (This can happen if the typed object, or its underlying buffer as accessed using `TypedObject.storage(typedObject).buffer`, is transferred using the structured clone algorithm.)
+ 
+ ### TypedObjectArrayRange
+ 
+ Failed to do range check of element access on a typed object.
+ 
+ ### AccessNotDense
+ 
+-### AccessNotSimdObject
+-
+-The observed type of the target of the property access doesn't guarantee
+-that it is a SIMD object.
+-
+ ### AccessNotTypedObject
+ 
+ The observed type of the target of the property access doesn't guarantee
+ that it is a TypedObject.
+ 
+ ### AccessNotTypedArray
+ 
+ The observed type of the target of the property access doesn't guarantee
+@@ -217,25 +212,16 @@ target values which may be non-native ob
+ IonMonkey does not generate inline caches for element reads in which
+ the keys have never been observed to be a String, Symbol, or Int32.
+ 
+ ### SetElemNonDenseNonTANotCached
+ 
+ IonMonkey only generates inline caches for element accesses which are
+ either on dense objects (e.g. dense Arrays), or Typed Arrays.
+ 
+-### NoSimdJitSupport
+-
+-Optimization failed because SIMD JIT support was not enabled.
+-
+-### SimdTypeNotOptimized
+-
+-The type observed as being retrieved from this property access did not
+-match an optimizable type.
+-
+ ### HasCommonInliningPath
+ 
+ Inlining was abandoned because the inlining call path was repeated.  A
+ repeated call path is indicative of a potentially mutually recursive
+ function call chain.
+ 
+ ### Inlined
+ 
+diff --git a/js/src/jit-test/jit_test.py b/js/src/jit-test/jit_test.py
+--- a/js/src/jit-test/jit_test.py
++++ b/js/src/jit-test/jit_test.py
+@@ -248,19 +248,16 @@ def main(argv):
+ 
+     if read_all:
+         test_list = jittests.find_tests()
+ 
+     # Exclude tests when code coverage is enabled.
+     # This part is equivalent to:
+     # skip-if = coverage
+     if os.getenv('GCOV_PREFIX') is not None:
+-        # GCOV errors.
+-        options.exclude += [os.path.join('asm.js', 'testSIMD.js')]               # Bug 1347245
+-
+         # JSVM errors.
+         options.exclude += [os.path.join('basic', 'functionnames.js')]           # Bug 1369783
+         options.exclude += [os.path.join('debug', 'Debugger-findScripts-23.js')]
+         options.exclude += [os.path.join('debug', 'bug1160182.js')]
+         options.exclude += [os.path.join('xdr', 'incremental-encoder.js')]
+         options.exclude += [os.path.join('xdr', 'bug1186973.js')]                # Bug 1369785
+         options.exclude += [os.path.join('xdr', 'relazify.js')]
+         options.exclude += [os.path.join('basic', 'werror.js')]
+diff --git a/js/src/jit-test/lib/simd.js b/js/src/jit-test/lib/simd.js
+deleted file mode 100644
+--- a/js/src/jit-test/lib/simd.js
++++ /dev/null
+@@ -1,109 +0,0 @@
+-if (!this.hasOwnProperty("SIMD"))
+-    quit();
+-
+-function booleanBinaryX4(op, v, w) {
+-    var arr = [];
+-    var [varr, warr] = [simdToArray(v), simdToArray(w)];
+-    for (var i = 0; i < 4; i++)
+-        arr[i] = op(varr[i], warr[i]);
+-    return arr;
+-}
+-
+-function binaryX(op, v, w) {
+-    var arr = [];
+-    var [varr, warr] = [simdToArray(v), simdToArray(w)];
+-    [varr, warr] = [varr.map(Math.fround), warr.map(Math.fround)];
+-    for (var i = 0; i < varr.length; i++)
+-        arr[i] = op(varr[i], warr[i]);
+-    return arr.map(Math.fround);
+-}
+-
+-function unaryX4(op, v, coerceFunc) {
+-    var arr = [];
+-    var varr = simdToArray(v).map(coerceFunc);
+-    for (var i = 0; i < 4; i++)
+-        arr[i] = op(varr[i]);
+-    return arr.map(coerceFunc);
+-}
+-
+-function assertNear(a, b) {
+-    assertEq((a != a && b != b) || Math.abs(a - b) < 0.001, true);
+-}
+-
+-function GetType(v) {
+-    var pt = Object.getPrototypeOf(v);
+-    switch (pt) {
+-        case SIMD.Int8x16.prototype: return SIMD.Int8x16;
+-        case SIMD.Int16x8.prototype: return SIMD.Int16x8;
+-        case SIMD.Int32x4.prototype: return SIMD.Int32x4;
+-        case SIMD.Uint8x16.prototype: return SIMD.Uint8x16;
+-        case SIMD.Uint16x8.prototype: return SIMD.Uint16x8;
+-        case SIMD.Uint32x4.prototype: return SIMD.Uint32x4;
+-        case SIMD.Float32x4.prototype: return SIMD.Float32x4;
+-        case SIMD.Bool8x16.prototype: return SIMD.Bool8x16;
+-        case SIMD.Bool16x8.prototype: return SIMD.Bool16x8;
+-        case SIMD.Bool32x4.prototype: return SIMD.Bool32x4;
+-    }
+-    throw "unexpected SIMD type";
+-}
+-
+-function GetLength(t) {
+-    switch (t) {
+-      case SIMD.Int8x16: return 16;
+-      case SIMD.Int16x8: return 8;
+-      case SIMD.Int32x4: return 4;
+-      case SIMD.Uint8x16: return 16;
+-      case SIMD.Uint16x8: return 8;
+-      case SIMD.Uint32x4: return 4;
+-      case SIMD.Float32x4: return 4;
+-      case SIMD.Bool8x16: return 16;
+-      case SIMD.Bool16x8: return 8;
+-      case SIMD.Bool32x4: return 4;
+-    }
+-    throw "unexpected SIMD type";
+-}
+-
+-function assertEqVec(v, w) {
+-    var typeV = GetType(v);
+-    var lengthV = GetLength(typeV);
+-    var ext = typeV.extractLane;
+-    assertEq(GetType(w), typeV);
+-    for (var i = 0; i < lengthV; i++)
+-        assertEq(ext(v, i), ext(w, i));
+-}
+-
+-function assertEqVecArr(v, w) {
+-    var typeV = GetType(v);
+-    var lengthV = GetLength(typeV);
+-    var ext = typeV.extractLane;
+-    assertEq(w.length, lengthV);
+-
+-    for (var i = 0; i < lengthV; i++)
+-        assertEq(ext(v, i), w[i]);
+-}
+-
+-function assertEqX4(vec, arr, ...opts) {
+-
+-    var assertFunc;
+-    if (opts.length == 1 && typeof opts[0] !== 'undefined') {
+-        assertFunc = opts[0];
+-    } else {
+-        assertFunc = assertEq;
+-    }
+-
+-    var Type = GetType(vec);
+-
+-    assertFunc(Type.extractLane(vec, 0), arr[0]);
+-    assertFunc(Type.extractLane(vec, 1), arr[1]);
+-    assertFunc(Type.extractLane(vec, 2), arr[2]);
+-    assertFunc(Type.extractLane(vec, 3), arr[3]);
+-}
+-
+-function simdToArray(vec) {
+-    var Type = GetType(vec);
+-    var Length = GetLength(Type);
+-    var a = [];
+-    for (var i = 0; i < Length; i++)
+-        a.push(Type.extractLane(vec, i));
+-    return a;
+-}
+diff --git a/js/src/jit-test/tests/SIMD/anyall.js b/js/src/jit-test/tests/SIMD/anyall.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/anyall.js
++++ /dev/null
+@@ -1,38 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function all(B, n) {
+-    var a = B.splat(true);
+-    for (var i = 0; i < n; i++) {
+-        var b = B.replaceLane(a, i, false);
+-        assertEq(B.allTrue(b), false);
+-        var c = B.replaceLane(b, i, true);
+-        assertEq(B.allTrue(c), true);
+-    }
+-}
+-
+-function any(B, n) {
+-    var a = B.splat(false);
+-    for (var i = 0; i < n; i++) {
+-        var b = B.replaceLane(a, i, true);
+-        assertEq(B.anyTrue(b), true);
+-        var c = B.replaceLane(b, i, false);
+-        assertEq(B.anyTrue(c), false);
+-    }
+-}
+-
+-function f() {
+-    for (var j = 0; j < 200; j++) {
+-        all(SIMD.Bool64x2, 2)
+-        any(SIMD.Bool64x2, 2)
+-        all(SIMD.Bool32x4, 4)
+-        any(SIMD.Bool32x4, 4)
+-        all(SIMD.Bool16x8, 8)
+-        any(SIMD.Bool16x8, 8)
+-        all(SIMD.Bool8x16, 16)
+-        any(SIMD.Bool8x16, 16)
+-    }
+-}
+-
+-f()
+diff --git a/js/src/jit-test/tests/SIMD/binary-arith.js b/js/src/jit-test/tests/SIMD/binary-arith.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/binary-arith.js
++++ /dev/null
+@@ -1,30 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function f() {
+-    var i1 = SIMD.Int32x4(1, 2, 3, 4);
+-    var i2 = SIMD.Int32x4(4, 3, 2, 1);
+-
+-    var f1 = SIMD.Float32x4(1, 2, 3, 4);
+-    var f2 = SIMD.Float32x4(4, 3, 2, 1);
+-
+-    var i8_1 = SIMD.Int8x16(1, 2, 3, 4, 20, 30, 40, 50, 100, 115, 120, 125);
+-    var i8_2 = SIMD.Int8x16(4, 3, 2, 1,  8,  7,  6,  5,  12,  11,  10,   9);
+-
+-    for (var i = 0; i < 150; i++) {
+-        assertEqX4(SIMD.Float32x4.add(f1, f2), binaryX((x, y) => x + y, f1, f2));
+-        assertEqX4(SIMD.Float32x4.sub(f1, f2), binaryX((x, y) => x - y, f1, f2));
+-        assertEqX4(SIMD.Float32x4.mul(f1, f2), binaryX((x, y) => x * y, f1, f2));
+-
+-        assertEqX4(SIMD.Int32x4.add(i1, i2), binaryX((x, y) => x + y, i1, i2));
+-        assertEqX4(SIMD.Int32x4.sub(i1, i2), binaryX((x, y) => x - y, i1, i2));
+-        assertEqX4(SIMD.Int32x4.mul(i1, i2), binaryX((x, y) => x * y, i1, i2));
+-
+-        assertEqX4(SIMD.Int8x16.add(i8_1, i8_2), binaryX((x, y) => (x + y) << 24 >> 24, i8_1, i8_2));
+-        assertEqX4(SIMD.Int8x16.sub(i8_1, i8_2), binaryX((x, y) => (x - y) << 24 >> 24, i8_1, i8_2));
+-        assertEqX4(SIMD.Int8x16.mul(i8_1, i8_2), binaryX((x, y) => (x * y) << 24 >> 24, i8_1, i8_2));
+-    }
+-}
+-
+-f();
+diff --git a/js/src/jit-test/tests/SIMD/bool32x4-arith.js b/js/src/jit-test/tests/SIMD/bool32x4-arith.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bool32x4-arith.js
++++ /dev/null
+@@ -1,15 +0,0 @@
+-load(libdir + "simd.js");
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function f() {
+-    var b1 = SIMD.Bool32x4(true, false, true, false);
+-    var b2 = SIMD.Bool32x4(true, true, true, true);
+-    do {
+-        assertEqX4(SIMD.Bool32x4.and(b1, b2), booleanBinaryX4((x, y) => x && y, b1, b2));
+-        assertEqX4(SIMD.Bool32x4.or(b1, b2),  booleanBinaryX4((x, y) => x || y, b1, b2));
+-        assertEqX4(SIMD.Bool32x4.xor(b1, b2), booleanBinaryX4((x, y) => x != y, b1, b2));
+-    } while (!inIon());
+-}
+-
+-f();
+diff --git a/js/src/jit-test/tests/SIMD/bool32x4-const.js b/js/src/jit-test/tests/SIMD/bool32x4-const.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bool32x4-const.js
++++ /dev/null
+@@ -1,65 +0,0 @@
+-load(libdir + "simd.js");
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-// Test constant folding into the Bool32x4 constructor.
+-// Verify that we get the truthiness right, c.f. the ECMA ToBoolean() function.
+-function f1() {
+-    var B = SIMD.Bool32x4;
+-    var S = SIMD.Bool32x4.splat;
+-    return [
+-        B(false, false, false, true),
+-        B(true),
+-        B(undefined, null, "", "x"),
+-        B({}, 0, 1, -0.0),
+-        B(NaN, -NaN, Symbol(), createIsHTMLDDA()),
+-
+-        S(false),
+-        S(true),
+-        S(undefined),
+-        S(null),
+-
+-        S(""),
+-        S("x"),
+-        S(0),
+-        S(1),
+-
+-        S({}),
+-        S(-0.0),
+-        S(NaN),
+-        S(Symbol()),
+-
+-        S(createIsHTMLDDA())
+-    ];
+-}
+-
+-function f() {
+-    for (var i = 0; i < 100; i++) {
+-        var a = f1()
+-        assertEqX4(a[0], [false, false, false, true]);
+-        assertEqX4(a[1], [true,  false, false, false]);
+-        assertEqX4(a[2], [false, false, false, true]);
+-        assertEqX4(a[3], [true,  false, true,  false]);
+-        assertEqX4(a[4], [false, false, true,  false]);
+-
+-        // Splats.
+-        assertEqX4(a[5], [false, false, false, false]);
+-        assertEqX4(a[6], [true, true, true, true]);
+-        assertEqX4(a[7], [false, false, false, false]);
+-        assertEqX4(a[8], [false, false, false, false]);
+-
+-        assertEqX4(a[9], [false, false, false, false]);
+-        assertEqX4(a[10], [true, true, true, true]);
+-        assertEqX4(a[11], [false, false, false, false]);
+-        assertEqX4(a[12], [true, true, true, true]);
+-
+-        assertEqX4(a[13], [true, true, true, true]);
+-        assertEqX4(a[14], [false, false, false, false]);
+-        assertEqX4(a[15], [false, false, false, false]);
+-        assertEqX4(a[16], [true, true, true, true]);
+-
+-        assertEqX4(a[17], [false, false, false, false]);
+-    }
+-}
+-
+-f();
+diff --git a/js/src/jit-test/tests/SIMD/bug1109911.js b/js/src/jit-test/tests/SIMD/bug1109911.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug1109911.js
++++ /dev/null
+@@ -1,11 +0,0 @@
+-if (typeof TypedObject === "undefined" || typeof SIMD === 'undefined')
+-  quit();
+-
+-var Int32x4 = SIMD.Int32x4;
+-var a = Int32x4((4294967295), 200, 300, 400);
+-addCase( new Array(Math.pow(2,12)) );
+-for ( var arg = "", i = 0; i < Math.pow(2,12); i++ ) {}
+-addCase( a );
+-function addCase(object) {
+-  object.length
+-}
+diff --git a/js/src/jit-test/tests/SIMD/bug1121299.js b/js/src/jit-test/tests/SIMD/bug1121299.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug1121299.js
++++ /dev/null
+@@ -1,31 +0,0 @@
+-if (!this.hasOwnProperty("SIMD"))
+-  quit();
+-
+-setJitCompilerOption("baseline.warmup.trigger", 10);
+-setJitCompilerOption("ion.warmup.trigger", 30);
+-
+-function test_1(i) {
+-  if (i >= 40)
+-    return;
+-  var a = SIMD.Float32x4(1.1, 2.2, 3.3, 4.6);
+-  SIMD.Int32x4.fromFloat32x4(a);
+-  test_1(i + 1);
+-}
+-test_1(0);
+-
+-
+-var Float32x4 = SIMD.Float32x4;
+-function test_2() {
+-    var Array = Float32x4.array(3);
+-    var array = new Array([
+-        Float32x4(1, 2, 3, 4),
+-        Float32x4(5, 6, 7, 8),
+-        Float32x4(9, 10, 11, 12)
+-    ]);
+-    if (typeof reportCompare === "function")
+-        reportCompare(true, true);
+-}
+-test_2();
+-evaluate("test_2(); test_2();", {
+-    isRunOnce: true,
+-});
+diff --git a/js/src/jit-test/tests/SIMD/bug1123631.js b/js/src/jit-test/tests/SIMD/bug1123631.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug1123631.js
++++ /dev/null
+@@ -1,9 +0,0 @@
+-if (!this.hasOwnProperty("SIMD"))
+-  quit();
+-
+-var Float64x2 = SIMD.Float64x2;
+-function test() {
+-  var a = Float64x2(1, 2);
+-}
+-test();
+-test();
+diff --git a/js/src/jit-test/tests/SIMD/bug1130845.js b/js/src/jit-test/tests/SIMD/bug1130845.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug1130845.js
++++ /dev/null
+@@ -1,15 +0,0 @@
+-if (!this.hasOwnProperty("SIMD"))
+-  quit();
+-
+-var Int32x4 = SIMD.Int32x4;
+-function test() {
+-  var a = Int32x4();
+-  var b = Int32x4(10, 20, 30, 40);
+-  var c = SIMD.Int32x4.and(a, b);
+-  assertEq(Int32x4.extractLane(c, 0), 0);
+-  return 0;
+-}
+-test();
+-var u = [], v = [];
+-for (var j=0; j<u.length; ++j)
+-    v[test()] = t;
+diff --git a/js/src/jit-test/tests/SIMD/bug1241872.js b/js/src/jit-test/tests/SIMD/bug1241872.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug1241872.js
++++ /dev/null
+@@ -1,10 +0,0 @@
+-if (typeof SIMD !== 'object')
+-    quit(0);
+-
+-function test() {
+-    return SIMD.Float32x4().toSource();
+-}
+-
+-var r = '';
+-for (var i = 0; i < 10000; i++)
+-    r = test();
+diff --git a/js/src/jit-test/tests/SIMD/bug1248503.js b/js/src/jit-test/tests/SIMD/bug1248503.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug1248503.js
++++ /dev/null
+@@ -1,16 +0,0 @@
+-if (typeof SIMD !== 'object')
+-    quit(0);
+-
+-function assertEqVec(v, w) {
+-    [0].forEach(i => v, w);
+-    function assertEqX4(...opts) {}
+-}
+-gczeal(1);
+-function f() {
+-    SIMD.Float32x4();
+-    var i1 = SIMD.Int32x4();
+-    for (j = 0; j < 100000; ++j, eval.eval)
+-        assertEqVec(SIMD.Int32x4.check(i1), i1);
+-}
+-f();
+-
+diff --git a/js/src/jit-test/tests/SIMD/bug1273483.js b/js/src/jit-test/tests/SIMD/bug1273483.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug1273483.js
++++ /dev/null
+@@ -1,9 +0,0 @@
+-if (typeof SIMD === 'undefined')
+-    quit();
+-
+-Int8x16 = SIMD.Int8x16;
+-var Int32x4 = SIMD.Int32x4;
+-function testSwizzleForType(type) { return type(); }
+-testSwizzleForType(Int8x16);
+-function testSwizzleInt32x4() { return testSwizzleForType(Int32x4); }
+-testSwizzleInt32x4();
+diff --git a/js/src/jit-test/tests/SIMD/bug1296640-gc-args.js b/js/src/jit-test/tests/SIMD/bug1296640-gc-args.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug1296640-gc-args.js
++++ /dev/null
+@@ -1,9 +0,0 @@
+-if (typeof gczeal === 'undefined' || typeof SIMD === 'undefined') {
+-    quit();
+-}
+-
+-gczeal(9, 2);
+-var Int8x16 = SIMD.Int8x16;
+-var v = Int8x16();
+-var good = { valueOf: () => 21 };
+-Int8x16.shiftLeftByScalar(v, good);
+diff --git a/js/src/jit-test/tests/SIMD/bug1303780-gc-args.js b/js/src/jit-test/tests/SIMD/bug1303780-gc-args.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug1303780-gc-args.js
++++ /dev/null
+@@ -1,12 +0,0 @@
+-if (typeof gczeal === 'undefined' || typeof SIMD === 'undefined') {
+-    quit();
+-}
+-
+-gczeal(14,2);
+-var Float32x4 = SIMD.Float32x4;
+-function test() {
+-    var v = Float32x4(1,2,3,4);
+-    var good = {valueOf: () => 42};
+-    Float32x4.replaceLane(v, 0, good);
+-}
+-test();
+diff --git a/js/src/jit-test/tests/SIMD/bug1435317.js b/js/src/jit-test/tests/SIMD/bug1435317.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug1435317.js
++++ /dev/null
+@@ -1,25 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-var ab = new ArrayBuffer(64 * 1024);
+-var arr = new Uint8Array(ab);
+-
+-(function(glob, imp, b) {
+-  "use asm";
+-  var arr = new glob.Uint8Array(b);
+-  return {}
+-})(this, null, ab);
+-
+-function testSimdX4() {
+-    for (var i = 10; i --> 0;) {
+-        var caught;
+-        try {
+-            v = SIMD.Int32x4.load(arr, 65534);
+-        } catch (e) {
+-            caught = e;
+-        }
+-        assertEq(caught instanceof RangeError, true);
+-    }
+-}
+-
+-setJitCompilerOption('ion.warmup.trigger', 0);
+-testSimdX4();
+diff --git a/js/src/jit-test/tests/SIMD/bug953108.js b/js/src/jit-test/tests/SIMD/bug953108.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/bug953108.js
++++ /dev/null
+@@ -1,10 +0,0 @@
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * http://creativecommons.org/licenses/publicdomain/
+- */
+-
+-if (!this.hasOwnProperty("TypedObject") || !this.hasOwnProperty("SIMD"))
+-  quit();
+-
+-var Float32x4 = SIMD.Float32x4;
+-Float32x4.array(1);
+diff --git a/js/src/jit-test/tests/SIMD/check.js b/js/src/jit-test/tests/SIMD/check.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/check.js
++++ /dev/null
+@@ -1,25 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function f() {
+-    var f1 = SIMD.Float32x4(1, 2, 3, 4);
+-    var i1 = SIMD.Int32x4(1, 2, -3, 4);
+-    var b1 = SIMD.Bool32x4(true, true, false, true);
+-    var i = 0;
+-    try {
+-        for (; i < 150; i++) {
+-            if (i > 148)
+-                i1 = f1;
+-            assertEqVec(SIMD.Int32x4.check(i1), i1);
+-            assertEqVec(SIMD.Float32x4.check(f1), f1);
+-            assertEqVec(SIMD.Bool32x4.check(b1), b1);
+-        }
+-    } catch (ex) {
+-        assertEq(i, 149);
+-        assertEq(ex instanceof TypeError, true);
+-    }
+-}
+-
+-f();
+-
+diff --git a/js/src/jit-test/tests/SIMD/compare.js b/js/src/jit-test/tests/SIMD/compare.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/compare.js
++++ /dev/null
+@@ -1,39 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function f() {
+-    var f1 = SIMD.Float32x4(1, 2, 3, 4);
+-    var f2 = SIMD.Float32x4(NaN, Infinity, 3.14, -0);
+-
+-    var i1 = SIMD.Int32x4(1, 2, -3, 4);
+-    var i2 = SIMD.Int32x4(1, -2, 3, 0);
+-
+-    var u1 = SIMD.Uint32x4(1, 2, -3, 4);
+-    var u2 = SIMD.Uint32x4(1, -2, 3, 0x80000000);
+-
+-    for (var i = 0; i < 150; i++) {
+-        assertEqX4(SIMD.Int32x4.lessThan(i1, i2),             [false, false, true, false]);
+-        assertEqX4(SIMD.Int32x4.lessThanOrEqual(i1, i2),      [true, false, true, false]);
+-        assertEqX4(SIMD.Int32x4.equal(i1, i2),                [true, false, false, false]);
+-        assertEqX4(SIMD.Int32x4.notEqual(i1, i2),             [false, true, true, true]);
+-        assertEqX4(SIMD.Int32x4.greaterThan(i1, i2),          [false, true, false, true]);
+-        assertEqX4(SIMD.Int32x4.greaterThanOrEqual(i1, i2),   [true, true, false, true]);
+-
+-        assertEqX4(SIMD.Uint32x4.lessThan(u1, u2),             [false, true, false, true]);
+-        assertEqX4(SIMD.Uint32x4.lessThanOrEqual(u1, u2),      [true,  true, false, true]);
+-        assertEqX4(SIMD.Uint32x4.equal(u1, u2),                [true, false, false, false]);
+-        assertEqX4(SIMD.Uint32x4.notEqual(u1, u2),             [false, true, true, true]);
+-        assertEqX4(SIMD.Uint32x4.greaterThan(u1, u2),          [false, false, true, false]);
+-        assertEqX4(SIMD.Uint32x4.greaterThanOrEqual(u1, u2),   [true, false, true, false]);
+-
+-        assertEqX4(SIMD.Float32x4.lessThan(f1, f2),             [false, true, true, false]);
+-        assertEqX4(SIMD.Float32x4.lessThanOrEqual(f1, f2),      [false, true, true, false]);
+-        assertEqX4(SIMD.Float32x4.equal(f1, f2),                [false, false, false, false]);
+-        assertEqX4(SIMD.Float32x4.notEqual(f1, f2),             [true, true, true, true]);
+-        assertEqX4(SIMD.Float32x4.greaterThan(f1, f2),          [false, false, false, true]);
+-        assertEqX4(SIMD.Float32x4.greaterThanOrEqual(f1, f2),   [false, false, false, true]);
+-    }
+-}
+-
+-f();
+diff --git a/js/src/jit-test/tests/SIMD/complex-4.js b/js/src/jit-test/tests/SIMD/complex-4.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/complex-4.js
++++ /dev/null
+@@ -1,70 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-if (typeof SIMD === "undefined")
+-    quit();
+-
+-setJitCompilerOption("baseline.warmup.trigger", 10);
+-setJitCompilerOption("ion.warmup.trigger", 90);
+-var max = 100; // Make have the warm-up counter high enough to
+-               // consider inlining functions.
+-
+-var f4 = SIMD.Int32x4; // :TODO: Support Float32x4 arith.
+-var f4add = f4.add;
+-var f4sub = f4.sub;
+-var f4mul = f4.mul;
+-
+-function c4mul(z1, z2) {
+-  var { re: re1, im: im1 } = z1;
+-  var { re: re2, im: im2 } = z2;
+-  var rere = f4mul(re1, re2);
+-  var reim = f4mul(re1, im2);
+-  var imre = f4mul(im1, re2);
+-  var imim = f4mul(im1, im2);
+-  return { re: f4sub(rere, imim), im: f4add(reim, imre) };
+-}
+-
+-function c4inv(z) {
+-  var { re: re, im: im } = z;
+-  var minus = f4(-1, -1, -1, -1);
+-  return { re: re, im: f4mul(im, minus) };
+-}
+-
+-function c4inv_inplace(z) {
+-  var res = c4inv(z);
+-  z.re = res.re;
+-  z.im = res.im;
+-}
+-
+-function c4norm(z) {
+-  var { re: re, im: im } = c4mul(z, c4inv(z));
+-  return re;
+-}
+-
+-function c4scale(z, s) {
+-  var { re: re, im: im } = z;
+-  var f4s = f4(s, s, s, s);
+-  return { re: f4mul(re, f4s), im: f4mul(im, f4s) };
+-}
+-
+-var rotate90 = { re: f4(0, 0, 0, 0), im: f4(1, 1, 1, 1) };
+-var cardinals = { re: f4(1, 0, -1, 0), im: f4(0, 1, 0, -1) };
+-
+-function test(dots) {
+-  for (var j = 0; j < 4; j++) {
+-    dots = c4mul(rotate90, dots);
+-    if (j % 2 == 0) // Magic !
+-      c4inv_inplace(dots);
+-    dots = c4scale(dots, 2);
+-  }
+-  return dots;
+-}
+-
+-assertEqX4(c4norm(cardinals), simdToArray(f4.splat(1)));
+-var cardinals16 = c4scale(cardinals, 16);
+-
+-for (var i = 0; i < max; i++) {
+-  var res = test(cardinals);
+-  assertEqX4(c4norm(res), simdToArray(f4.splat(16 * 16)));
+-  assertEqX4(res.re, simdToArray(cardinals16.re));
+-  assertEqX4(res.im, simdToArray(cardinals16.im));
+-}
+diff --git a/js/src/jit-test/tests/SIMD/convert.js b/js/src/jit-test/tests/SIMD/convert.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/convert.js
++++ /dev/null
+@@ -1,68 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 30);
+-
+-var cast = (function() {
+-    var i32 = new Int32Array(1);
+-    var f32 = new Float32Array(i32.buffer);
+-    return {
+-        fromInt32Bits(x) {
+-            i32[0] = x;
+-            return f32[0];
+-        },
+-
+-        fromFloat32Bits(x) {
+-            f32[0] = x;
+-            return i32[0];
+-        }
+-    }
+-})();
+-
+-function f() {
+-    // No bailout here.
+-    var f4 = SIMD.Float32x4(1, 2, 3, 4);
+-    var i4 = SIMD.Int32x4(1, 2, 3, 4);
+-    var BitOrZero = (x) => x | 0;
+-    for (var i = 0; i < 150; i++) {
+-        assertEqX4(SIMD.Float32x4.fromInt32x4(i4), unaryX4(BitOrZero, f4, Math.fround));
+-        assertEqX4(SIMD.Float32x4.fromInt32x4Bits(i4), unaryX4(cast.fromInt32Bits, f4, Math.fround));
+-        assertEqX4(SIMD.Int32x4.fromFloat32x4(f4), unaryX4(Math.fround, i4, BitOrZero));
+-        assertEqX4(SIMD.Int32x4.fromFloat32x4Bits(f4), unaryX4(cast.fromFloat32Bits, i4, BitOrZero));
+-    }
+-}
+-
+-function uglyDuckling(val) {
+-    // We bail out when i == 149 because the conversion will return
+-    // 0x80000000 and the input actually wasn't in bounds.
+-    val = Math.fround(val);
+-    for (var i = 0; i < 150; i++) {
+-        var caught = false;
+-        try {
+-            var v = SIMD.Float32x4(i < 149 ? 0 : val, 0, 0, 0)
+-            SIMD.Int32x4.fromFloat32x4(v);
+-        } catch(e) {
+-            assertEq(e instanceof RangeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-}
+-
+-function dontBail() {
+-    // On x86, the conversion will return 0x80000000, which will imply that we
+-    // check the input values. However, we shouldn't bail out in this case.
+-    for (var i = 0; i < 150; i++) {
+-        var v = SIMD.Float32x4(i < 149 ? 0 : -Math.pow(2, 31), 0, 0, 0)
+-        SIMD.Int32x4.fromFloat32x4(v);
+-    }
+-}
+-
+-f();
+-
+-dontBail();
+-dontBail();
+-
+-uglyDuckling(Math.pow(2, 31));
+-uglyDuckling(NaN);
+-uglyDuckling(-Math.pow(2, 32));
+diff --git a/js/src/jit-test/tests/SIMD/float32x4-binary-arith.js b/js/src/jit-test/tests/SIMD/float32x4-binary-arith.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/float32x4-binary-arith.js
++++ /dev/null
+@@ -1,33 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function maxNum(x, y) {
+-    if (x != x)
+-        return y;
+-    if (y != y)
+-        return x;
+-    return Math.max(x, y);
+-}
+-
+-function minNum(x, y) {
+-    if (x != x)
+-        return y;
+-    if (y != y)
+-        return x;
+-    return Math.min(x, y);
+-}
+-
+-function f() {
+-    var f1 = SIMD.Float32x4(1, 2, 3, 4);
+-    var f2 = SIMD.Float32x4(4, 3, 2, 1);
+-    for (var i = 0; i < 150; i++) {
+-        assertEqX4(SIMD.Float32x4.div(f1, f2), binaryX((x, y) => x / y, f1, f2));
+-        assertEqX4(SIMD.Float32x4.min(f1, f2), binaryX(Math.min, f1, f2));
+-        assertEqX4(SIMD.Float32x4.max(f1, f2), binaryX(Math.max, f1, f2));
+-        assertEqX4(SIMD.Float32x4.minNum(f1, f2), binaryX(minNum, f1, f2));
+-        assertEqX4(SIMD.Float32x4.maxNum(f1, f2), binaryX(maxNum, f1, f2));
+-    }
+-}
+-
+-f();
+diff --git a/js/src/jit-test/tests/SIMD/getters.js b/js/src/jit-test/tests/SIMD/getters.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/getters.js
++++ /dev/null
+@@ -1,48 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function f() {
+-    var i4 = SIMD.Int32x4(1, -2, 3, -4);
+-    var u4 = SIMD.Uint32x4(1, -2, 3, 0x88000000);
+-    var b4 = SIMD.Bool32x4(true, true, false, true);
+-
+-
+-    var bt4 = SIMD.Bool32x4(true, true, true, true);
+-    var bf4 = SIMD.Bool32x4(false, false, false, false);
+-
+-    var v = Math.fround(13.37);
+-    var f4 = SIMD.Float32x4(13.37, NaN, Infinity, -0);
+-
+-    for (var i = 0; i < 150; i++) {
+-        assertEq(SIMD.Int32x4.extractLane(i4, 0), 1);
+-        assertEq(SIMD.Int32x4.extractLane(i4, 1), -2);
+-        assertEq(SIMD.Int32x4.extractLane(i4, 2), 3);
+-        assertEq(SIMD.Int32x4.extractLane(i4, 3), -4);
+-
+-        assertEq(SIMD.Uint32x4.extractLane(u4, 0), 1);
+-        assertEq(SIMD.Uint32x4.extractLane(u4, 1), -2 >>> 0);
+-        assertEq(SIMD.Uint32x4.extractLane(u4, 2), 3);
+-        assertEq(SIMD.Uint32x4.extractLane(u4, 3), 0x88000000);
+-
+-        assertEq(SIMD.Float32x4.extractLane(f4, 0), v);
+-        assertEq(SIMD.Float32x4.extractLane(f4, 1), NaN);
+-        assertEq(SIMD.Float32x4.extractLane(f4, 2), Infinity);
+-        assertEq(SIMD.Float32x4.extractLane(f4, 3), -0);
+-
+-        assertEq(SIMD.Bool32x4.extractLane(b4, 0), true);
+-        assertEq(SIMD.Bool32x4.extractLane(b4, 1), true);
+-        assertEq(SIMD.Bool32x4.extractLane(b4, 2), false);
+-        assertEq(SIMD.Bool32x4.extractLane(b4, 3), true);
+-
+-        assertEq(SIMD.Bool32x4.anyTrue(b4), true);
+-        assertEq(SIMD.Bool32x4.allTrue(b4), false);
+-
+-        assertEq(SIMD.Bool32x4.anyTrue(bt4), true);
+-        assertEq(SIMD.Bool32x4.allTrue(bt4), true);
+-        assertEq(SIMD.Bool32x4.anyTrue(bf4), false);
+-        assertEq(SIMD.Bool32x4.allTrue(bf4), false);
+-    }
+-}
+-
+-f();
+diff --git a/js/src/jit-test/tests/SIMD/inline-missing-arguments.js b/js/src/jit-test/tests/SIMD/inline-missing-arguments.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/inline-missing-arguments.js
++++ /dev/null
+@@ -1,81 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function test(i) {
+-    assertEqX4(SIMD.Int32x4(),              [0, 0, 0, 0]);
+-    assertEqX4(SIMD.Int32x4(i),             [i, 0, 0, 0]);
+-    assertEqX4(SIMD.Int32x4(i, 1),          [i, 1, 0, 0]);
+-    assertEqX4(SIMD.Int32x4(i, 1, 2),       [i, 1, 2, 0]);
+-    assertEqX4(SIMD.Int32x4(i, 1, 2, 3),    [i, 1, 2, 3]);
+-    assertEqX4(SIMD.Int32x4(i, 1, 2, 3, 4), [i, 1, 2, 3]);
+-
+-    assertEqVecArr(SIMD.Int16x8(),              [0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int16x8(i),             [i, 0, 0, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int16x8(i, 1),          [i, 1, 0, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int16x8(i, 1, 2),       [i, 1, 2, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int16x8(i, 1, 2, 3),    [i, 1, 2, 3, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int16x8(i, 1, 2, 3, 4), [i, 1, 2, 3, 4, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int16x8(i, 1, 2, 3, 4, 5, 6),
+-                               [i, 1, 2, 3, 4, 5, 6, 0]);
+-    j = i & 32
+-    assertEqVecArr(SIMD.Int8x16(),              [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int8x16(j),             [j, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int8x16(j, 1),          [j, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int8x16(j, 1, 2),       [j, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3),    [j, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3, 4), [j, 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3, 4, 5, 6),
+-                               [j, 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),
+-                               [j, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 0, 0]);
+-
+-    assertEqX4(SIMD.Float32x4(),                [NaN, NaN, NaN, NaN]);
+-    assertEqX4(SIMD.Float32x4(i),               [i,   NaN, NaN, NaN]);
+-    assertEqX4(SIMD.Float32x4(i, 1),            [i,   1,   NaN, NaN]);
+-    assertEqX4(SIMD.Float32x4(i, 1, 2),         [i,   1,   2,   NaN]);
+-    assertEqX4(SIMD.Float32x4(i, 1, 2, 3),      [i,   1,   2,   3  ]);
+-    assertEqX4(SIMD.Float32x4(i, 1, 2, 3, 4),   [i,   1,   2,   3  ]);
+-
+-    var b = i % 2 > 0 ;
+-    assertEqX4(SIMD.Bool32x4(),                           [false, false, false, false]);
+-    assertEqX4(SIMD.Bool32x4(b),                          [b,     false, false, false]);
+-    assertEqX4(SIMD.Bool32x4(b, true),                    [b,     true,  false, false]);
+-    assertEqX4(SIMD.Bool32x4(b, false, true),             [b,     false, true,  false]);
+-    assertEqX4(SIMD.Bool32x4(b, false, true, true),       [b,     false, true,  true ]);
+-    assertEqX4(SIMD.Bool32x4(b, false, true, true, true), [b,     false, true,  true ]);
+-
+-    assertEqVecArr(SIMD.Bool16x8(),
+-                                [false, false, false, false, false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool16x8(b),
+-                                [b,     false, false, false, false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool16x8(b,     true),
+-                                [b,     true,  false, false, false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool16x8(b,     false, true),
+-                                [b,     false, true,  false, false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool16x8(b,     false, true,  true),
+-                                [b,     false, true,  true,  false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool16x8(b,     false, true,  true,  true),
+-                                [b,     false, true,  true,  true,  false, false, false]);
+-    assertEqVecArr(SIMD.Bool16x8(b,     false, true,  true,  true,  true),
+-                                [b,     false, true,  true,  true,  true,  false, false]);
+-
+-    assertEqVecArr(SIMD.Bool8x16(),
+-                                [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool8x16(b),
+-                                [b,     false, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool8x16(b,     true),
+-                                [b,     true,  false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool8x16(b,     false, true),
+-                                [b,     false, true,  false, false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool8x16(b,     false, true,  true),
+-                                [b,     false, true,  true,  false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool8x16(b,     false, true,  true,  true),
+-                                [b,     false, true,  true,  true,  false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqVecArr(SIMD.Bool8x16(b,     false, true,  true,  true,  true,  false, true,  true,  true),
+-                                [b,     false, true,  true,  true,  true,  false, true,  true,  true,  false, false, false, false, false, false]);
+-}
+-
+-for(var i=0; i<300; i++) {
+-    test(i);
+-}
+diff --git a/js/src/jit-test/tests/SIMD/load.js b/js/src/jit-test/tests/SIMD/load.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/load.js
++++ /dev/null
+@@ -1,123 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 40);
+-
+-function f() {
+-    var f32 = new Float32Array(16);
+-    for (var i = 0; i < 16; i++)
+-        f32[i] = i + 1;
+-
+-    var f64 = new Float64Array(f32.buffer);
+-    var i32 = new Int32Array(f32.buffer);
+-    var u32 = new Uint32Array(f32.buffer);
+-    var i16 = new Int16Array(f32.buffer);
+-    var u16 = new Uint16Array(f32.buffer);
+-    var i8  = new Int8Array(f32.buffer);
+-    var u8  = new Uint8Array(f32.buffer);
+-
+-    function testLoad() {
+-        assertEqX4(SIMD.Float32x4.load(f64, 0),      [1,2,3,4]);
+-        assertEqX4(SIMD.Float32x4.load(f32, 1),      [2,3,4,5]);
+-        assertEqX4(SIMD.Float32x4.load(i32, 2),      [3,4,5,6]);
+-        assertEqX4(SIMD.Float32x4.load(i16, 3 << 1), [4,5,6,7]);
+-        assertEqX4(SIMD.Float32x4.load(u16, 4 << 1), [5,6,7,8]);
+-        assertEqX4(SIMD.Float32x4.load(i8 , 5 << 2), [6,7,8,9]);
+-        assertEqX4(SIMD.Float32x4.load(u8 , 6 << 2), [7,8,9,10]);
+-
+-        assertEqX4(SIMD.Float32x4.load(f64, (16 >> 1) - (4 >> 1)), [13,14,15,16]);
+-        assertEqX4(SIMD.Float32x4.load(f32, 16 - 4),               [13,14,15,16]);
+-        assertEqX4(SIMD.Float32x4.load(i32, 16 - 4),               [13,14,15,16]);
+-        assertEqX4(SIMD.Float32x4.load(i16, (16 << 1) - (4 << 1)), [13,14,15,16]);
+-        assertEqX4(SIMD.Float32x4.load(u16, (16 << 1) - (4 << 1)), [13,14,15,16]);
+-        assertEqX4(SIMD.Float32x4.load(i8,  (16 << 2) - (4 << 2)), [13,14,15,16]);
+-        assertEqX4(SIMD.Float32x4.load(u8,  (16 << 2) - (4 << 2)), [13,14,15,16]);
+-    }
+-
+-    function testLoad1() {
+-        assertEqX4(SIMD.Float32x4.load1(f64, 0),      [1,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(f32, 1),      [2,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(i32, 2),      [3,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(i16, 3 << 1), [4,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(u16, 4 << 1), [5,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(i8 , 5 << 2), [6,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(u8 , 6 << 2), [7,0,0,0]);
+-
+-        assertEqX4(SIMD.Float32x4.load1(f64, (16 >> 1) - (4 >> 1)), [13,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(f32, 16 - 4),               [13,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(i32, 16 - 4),               [13,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(i16, (16 << 1) - (4 << 1)), [13,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(u16, (16 << 1) - (4 << 1)), [13,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(i8,  (16 << 2) - (4 << 2)), [13,0,0,0]);
+-        assertEqX4(SIMD.Float32x4.load1(u8,  (16 << 2) - (4 << 2)), [13,0,0,0]);
+-    }
+-
+-    function testLoad2() {
+-        assertEqX4(SIMD.Float32x4.load2(f64, 0),      [1,2,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(f32, 1),      [2,3,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(i32, 2),      [3,4,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(i16, 3 << 1), [4,5,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(u16, 4 << 1), [5,6,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(i8 , 5 << 2), [6,7,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(u8 , 6 << 2), [7,8,0,0]);
+-
+-        assertEqX4(SIMD.Float32x4.load2(f64, (16 >> 1) - (4 >> 1)), [13,14,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(f32, 16 - 4),               [13,14,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(i32, 16 - 4),               [13,14,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(i16, (16 << 1) - (4 << 1)), [13,14,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(u16, (16 << 1) - (4 << 1)), [13,14,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(i8,  (16 << 2) - (4 << 2)), [13,14,0,0]);
+-        assertEqX4(SIMD.Float32x4.load2(u8,  (16 << 2) - (4 << 2)), [13,14,0,0]);
+-    }
+-
+-    function testLoad3() {
+-        assertEqX4(SIMD.Float32x4.load3(f64, 0),      [1,2,3,0]);
+-        assertEqX4(SIMD.Float32x4.load3(f32, 1),      [2,3,4,0]);
+-        assertEqX4(SIMD.Float32x4.load3(i32, 2),      [3,4,5,0]);
+-        assertEqX4(SIMD.Float32x4.load3(i16, 3 << 1), [4,5,6,0]);
+-        assertEqX4(SIMD.Float32x4.load3(u16, 4 << 1), [5,6,7,0]);
+-        assertEqX4(SIMD.Float32x4.load3(i8 , 5 << 2), [6,7,8,0]);
+-        assertEqX4(SIMD.Float32x4.load3(u8 , 6 << 2), [7,8,9,0]);
+-
+-        assertEqX4(SIMD.Float32x4.load3(f64, (16 >> 1) - (4 >> 1)), [13,14,15,0]);
+-        assertEqX4(SIMD.Float32x4.load3(f32, 16 - 4),               [13,14,15,0]);
+-        assertEqX4(SIMD.Float32x4.load3(i32, 16 - 4),               [13,14,15,0]);
+-        assertEqX4(SIMD.Float32x4.load3(i16, (16 << 1) - (4 << 1)), [13,14,15,0]);
+-        assertEqX4(SIMD.Float32x4.load3(u16, (16 << 1) - (4 << 1)), [13,14,15,0]);
+-        assertEqX4(SIMD.Float32x4.load3(i8,  (16 << 2) - (4 << 2)), [13,14,15,0]);
+-        assertEqX4(SIMD.Float32x4.load3(u8,  (16 << 2) - (4 << 2)), [13,14,15,0]);
+-    }
+-
+-    for (var i = 0; i < 150; i++) {
+-        testLoad();
+-        testLoad1();
+-        testLoad2();
+-        testLoad3();
+-    }
+-}
+-
+-f();
+-
+-function testBailout(uglyDuckling) {
+-    var f32 = new Float32Array(16);
+-    for (var i = 0; i < 16; i++)
+-        f32[i] = i + 1;
+-
+-    var i8  = new Int8Array(f32.buffer);
+-
+-    for (var i = 0; i < 150; i++) {
+-        var caught = false;
+-        try {
+-            SIMD.Float32x4.load(i8, (i < 149) ? 0 : uglyDuckling);
+-        } catch (e) {
+-            print(e);
+-            assertEq(e instanceof RangeError, true);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-}
+-
+-print('Testing range checks...');
+-testBailout(-1);
+-testBailout(-15);
+-testBailout(12 * 4 + 1);
+diff --git a/js/src/jit-test/tests/SIMD/nursery-overflow.js b/js/src/jit-test/tests/SIMD/nursery-overflow.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/nursery-overflow.js
++++ /dev/null
+@@ -1,29 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-if (typeof SIMD === "undefined")
+-    quit();
+-
+-setJitCompilerOption("baseline.warmup.trigger", 10);
+-setJitCompilerOption("ion.warmup.trigger", 30);
+-
+-var i4 = SIMD.Int32x4;
+-var i4sub = SIMD.Int32x4.sub;
+-
+-function simdbox(i) {
+-  return i4(i, i, i, i);
+-}
+-
+-function test() {
+-  var arr = [];
+-
+-  // overflow the nursery with live SIMD objects.
+-  for (var i = 0; i < 100000; i++) {
+-    arr.push(simdbox(i));
+-  }
+-
+-  return arr;
+-}
+-
+-var arr = test();
+-for (var i = 0; i < arr.length; i++)
+-  assertEqX4(arr[i], [i, i, i, i]);
+diff --git a/js/src/jit-test/tests/SIMD/recover.js b/js/src/jit-test/tests/SIMD/recover.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/recover.js
++++ /dev/null
+@@ -1,70 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-if (!this.hasOwnProperty("SIMD"))
+-  quit();
+-
+-// This test case ensure that if we are able to optimize SIMD, then we can use
+-// recover instructions to get rid of the allocations. So, there is no value
+-// (and the test case would fail) if we are not able to inline SIMD
+-// constructors.
+-if (!isSimdAvailable())
+-  quit();
+-
+-setJitCompilerOption("baseline.warmup.trigger", 10);
+-setJitCompilerOption("ion.warmup.trigger", 20);
+-
+-// This function is used to cause an invalidation after having removed a branch
+-// after DCE.  This is made to check if we correctly recover an array
+-// allocation.
+-var uceFault = function (i) {
+-    if (i > 98)
+-        uceFault = function (i) { return true; };
+-    return false;
+-};
+-
+-// Check that we can correctly recover a boxed value.
+-var uceFault_simdBox_i4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_i4'));
+-function simdBox_i4(i) {
+-    var a = SIMD.Int32x4(i, i, i, i);
+-    if (uceFault_simdBox_i4(i) || uceFault_simdBox_i4(i))
+-        assertEqX4(a, [i, i, i, i]);
+-    assertRecoveredOnBailout(a, true);
+-    return 0;
+-}
+-
+-var uceFault_simdBox_u4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_u4'));
+-function simdBox_u4(i) {
+-    var a = SIMD.Uint32x4(i, 98 - i, i + 0x7ffffff0, i + 0xffffff00);
+-    if (uceFault_simdBox_u4(i) || uceFault_simdBox_u4(i))
+-        assertEqX4(a, [i, 98 - i, i + 0x7ffffff0, i + 0xffffff00].map(x => x >>> 0));
+-    assertRecoveredOnBailout(a, true);
+-    return 0;
+-}
+-
+-var uceFault_simdBox_f4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_f4'));
+-function simdBox_f4(i) {
+-    var a = SIMD.Float32x4(i, i + 0.1, i + 0.2, i + 0.3);
+-    if (uceFault_simdBox_f4(i) || uceFault_simdBox_f4(i))
+-        assertEqX4(a, [i, i + 0.1, i + 0.2, i + 0.3].map(Math.fround));
+-    assertRecoveredOnBailout(a, true);
+-    return 0;
+-}
+-
+-var uceFault_simdBox_b4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_b4'));
+-function simdBox_b4(i) {
+-    var val1 = i%2 === 0,
+-        val2 = !val1;
+-
+-    var a = SIMD.Bool32x4(val1, val2, val1, val2);
+-    if (uceFault_simdBox_b4(i) || uceFault_simdBox_b4(i))
+-        assertEqX4(a, [val1, val2, val1, val2]);
+-    assertRecoveredOnBailout(a, true);
+-    return 0;
+-}
+-
+-for (var i = 0; i < 100; i++) {
+-    simdBox_i4(i);
+-    simdBox_u4(i);
+-    simdBox_f4(i);
+-    simdBox_b4(i);
+-}
+diff --git a/js/src/jit-test/tests/SIMD/replacelane.js b/js/src/jit-test/tests/SIMD/replacelane.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/replacelane.js
++++ /dev/null
+@@ -1,181 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function f() {
+-    var f4 = SIMD.Float32x4(1, 2, 3, 4);
+-    var i4 = SIMD.Int32x4(1, 2, 3, 4);
+-    var b4 = SIMD.Bool32x4(true, false, true, false);
+-
+-    for (var i = 0; i < 150; i++) {
+-        assertEqX4(SIMD.Int32x4.replaceLane(i4, 0, 42), [42, 2, 3, 4]);
+-        assertEqX4(SIMD.Int32x4.replaceLane(i4, 1, 42), [1, 42, 3, 4]);
+-        assertEqX4(SIMD.Int32x4.replaceLane(i4, 2, 42), [1, 2, 42, 4]);
+-        assertEqX4(SIMD.Int32x4.replaceLane(i4, 3, 42), [1, 2, 3, 42]);
+-
+-        assertEqX4(SIMD.Float32x4.replaceLane(f4, 0, 42), [42, 2, 3, 4]);
+-        assertEqX4(SIMD.Float32x4.replaceLane(f4, 1, 42), [1, 42, 3, 4]);
+-        assertEqX4(SIMD.Float32x4.replaceLane(f4, 2, 42), [1, 2, 42, 4]);
+-        assertEqX4(SIMD.Float32x4.replaceLane(f4, 3, 42), [1, 2, 3, 42]);
+-
+-        assertEqX4(SIMD.Bool32x4.replaceLane(b4, 0, false), [false, false, true, false]);
+-        assertEqX4(SIMD.Bool32x4.replaceLane(b4, 1, true), [true, true, true, false]);
+-        assertEqX4(SIMD.Bool32x4.replaceLane(b4, 2, false), [true, false, false, false]);
+-        assertEqX4(SIMD.Bool32x4.replaceLane(b4, 3, true), [true, false, true, true]);
+-    }
+-}
+-
+-f();
+-
+-function e() {
+-    var f4 = SIMD.Float32x4(1, 2, 3, 4);
+-    var i4 = SIMD.Int32x4(1, 2, 3, 4);
+-    var b4 = SIMD.Bool32x4(true, false, true, false);
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Int32x4.replaceLane(i < 149 ? i4 : f4, 0, 42);
+-        } catch(e) {
+-            assertEq(e instanceof TypeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Int32x4.replaceLane(i < 149 ? i4 : b4, 0, 42);
+-        } catch(e) {
+-            assertEq(e instanceof TypeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Int32x4.replaceLane(i4, i < 149 ? 0 : 4, 42);
+-        } catch(e) {
+-            assertEq(e instanceof RangeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Int32x4.replaceLane(i4, i < 149 ? 0 : 1.1, 42);
+-        } catch(e) {
+-            assertEq(e instanceof RangeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Float32x4.replaceLane(i < 149 ? f4 : i4, 0, 42);
+-        } catch(e) {
+-            assertEq(e instanceof TypeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Float32x4.replaceLane(i < 149 ? f4 : b4, 0, 42);
+-        } catch(e) {
+-            assertEq(e instanceof TypeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Float32x4.replaceLane(f4, i < 149 ? 0 : 4, 42);
+-        } catch(e) {
+-            assertEq(e instanceof RangeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Float32x4.replaceLane(f4, i < 149 ? 0 : 1.1, 42);
+-        } catch(e) {
+-            assertEq(e instanceof RangeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Bool32x4.replaceLane(i < 149 ? b4 : i4, 0, true);
+-        } catch(e) {
+-            assertEq(e instanceof TypeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Bool32x4.replaceLane(i < 149 ? b4 : f4, 0, true);
+-        } catch(e) {
+-            assertEq(e instanceof TypeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Bool32x4.replaceLane(b4, i < 149 ? 0 : 4, true);
+-        } catch(e) {
+-            assertEq(e instanceof RangeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-    for (let i = 0; i < 150; i++) {
+-        let caught = false;
+-        try {
+-            let x = SIMD.Bool32x4.replaceLane(b4, i < 149 ? 0 : 1.1, true);
+-        } catch(e) {
+-            assertEq(e instanceof RangeError, true);
+-            assertEq(i, 149);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-
+-}
+-
+-e();
+diff --git a/js/src/jit-test/tests/SIMD/saturate.js b/js/src/jit-test/tests/SIMD/saturate.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/saturate.js
++++ /dev/null
+@@ -1,37 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-const INT8_MIN = -128;
+-const INT8_MAX = 127;
+-const UINT8_MAX = 255;
+-
+-function sat8(x) {
+-    if (x < INT8_MIN) return INT8_MIN;
+-    if (x > INT8_MAX) return INT8_MAX;
+-    return x;
+-}
+-
+-function usat8(x) {
+-    if (x < 0) return 0;
+-    if (x > UINT8_MAX) return UINT8_MAX;
+-    return x;
+-}
+-
+-function f() {
+-    var i1 = SIMD.Int8x16(1, 100, 3, 4);
+-    var i2 = SIMD.Int8x16(4, 30, 2, 1);
+-
+-    var u1 = SIMD.Uint8x16(1, 2, 3, 4);
+-    var u2 = SIMD.Uint8x16(4, 3, 2, 1);
+-
+-    for (var i = 0; i < 150; i++) {
+-        assertEqX4(SIMD.Int8x16.addSaturate(i1, i2), binaryX((x, y) => sat8(x + y), i1, i2));
+-        assertEqX4(SIMD.Int8x16.subSaturate(i1, i2), binaryX((x, y) => sat8(x - y), i1, i2));
+-
+-        assertEqX4(SIMD.Uint8x16.addSaturate(u1, u2), binaryX((x, y) => usat8(x + y), u1, u2));
+-        assertEqX4(SIMD.Uint8x16.subSaturate(u1, u2), binaryX((x, y) => usat8(x - y), u1, u2));
+-    }
+-}
+-
+-f();
+diff --git a/js/src/jit-test/tests/SIMD/select.js b/js/src/jit-test/tests/SIMD/select.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/select.js
++++ /dev/null
+@@ -1,35 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function select(type, mask, ifTrue, ifFalse) {
+-    var arr = [];
+-    for (var i = 0; i < 4; i++) {
+-        var selector = SIMD.Bool32x4.extractLane(mask, i);
+-        arr.push(type.extractLane(selector ? ifTrue : ifFalse, i));
+-    }
+-    return arr;
+-}
+-
+-function f() {
+-    var f1 = SIMD.Float32x4(1, 2, 3, 4);
+-    var f2 = SIMD.Float32x4(NaN, Infinity, 3.14, -0);
+-
+-    var i1 = SIMD.Int32x4(2, 3, 5, 8);
+-    var i2 = SIMD.Int32x4(13, 37, 24, 42);
+-
+-    var TTFT = SIMD.Bool32x4(true, true, false, true);
+-    var TFTF = SIMD.Bool32x4(true, false, true, false);
+-
+-    var mask = SIMD.Int32x4(0xdeadbeef, 0xbaadf00d, 0x00ff1ce, 0xdeadc0de);
+-
+-    for (var i = 0; i < 150; i++) {
+-        assertEqX4(SIMD.Float32x4.select(TTFT, f1, f2), select(SIMD.Float32x4, TTFT, f1, f2));
+-        assertEqX4(SIMD.Float32x4.select(TFTF, f1, f2), select(SIMD.Float32x4, TFTF, f1, f2));
+-
+-        assertEqX4(SIMD.Int32x4.select(TFTF, i1, i2), select(SIMD.Int32x4, TFTF, i1, i2));
+-        assertEqX4(SIMD.Int32x4.select(TTFT, i1, i2), select(SIMD.Int32x4, TTFT, i1, i2));
+-    }
+-}
+-
+-f();
+diff --git a/js/src/jit-test/tests/SIMD/shift.js b/js/src/jit-test/tests/SIMD/shift.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/shift.js
++++ /dev/null
+@@ -1,75 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function curry(f, arg) { return f.bind(null, arg); }
+-
+-function binaryLsh(count, v) { count &= 31; return (v << count) | 0; }
+-function lsh(count) { return curry(binaryLsh, count); }
+-
+-function binaryRsh(count, v) { count &= 31; return (v >> count) | 0; }
+-function rsh(count) { return curry(binaryRsh, count); }
+-
+-function binaryUlsh(count, v) { count &= 31; return (v << count) >>> 0; }
+-function ulsh(count) { return curry(binaryUlsh, count); }
+-
+-function binaryUrsh(count, v) { count &= 31; return v >>> count; }
+-function ursh(count) { return curry(binaryUrsh, count); }
+-
+-function f() {
+-    var v = SIMD.Int32x4(1, 2, -3, 4);
+-    var u = SIMD.Uint32x4(1, 0x55005500, -3, 0xaa00aa00);
+-    var a = [1, 2, -3, 4];
+-    var b = [1, 0x55005500, -3, 0xaa00aa00];
+-
+-    var shifts = [-2, -1, 0, 1, 31, 32, 33];
+-
+-    var r;
+-    for (var i = 0; i < 150; i++) {
+-        // Constant shift counts
+-        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, -1), a.map(lsh(-1)));
+-        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 0),  a.map(lsh(0)));
+-        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 1),  a.map(lsh(1)));
+-        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 2),  a.map(lsh(2)));
+-        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 31), a.map(lsh(31)));
+-        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 32), a.map(lsh(32)));
+-        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 33), a.map(lsh(33)));
+-
+-        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, -1), a.map(rsh(31)));
+-        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 0),  a.map(rsh(0)));
+-        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 1),  a.map(rsh(1)));
+-        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 2),  a.map(rsh(2)));
+-        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 31), a.map(rsh(31)));
+-        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 32), a.map(rsh(32)));
+-        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 33), a.map(rsh(33)));
+-
+-        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, -1), b.map(ulsh(-1)));
+-        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 0),  b.map(ulsh(0)));
+-        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 1),  b.map(ulsh(1)));
+-        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 2),  b.map(ulsh(2)));
+-        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 31), b.map(ulsh(31)));
+-        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 32), b.map(ulsh(32)));
+-        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 33), b.map(ulsh(33)));
+-
+-        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, -1), b.map(ursh(-1)));
+-        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 0),  b.map(ursh(0)));
+-        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 1),  b.map(ursh(1)));
+-        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 2),  b.map(ursh(2)));
+-        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 31), b.map(ursh(31)));
+-        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 32), b.map(ursh(32)));
+-        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 33), b.map(ursh(33)));
+-
+-        // Non constant shift counts
+-        var c = shifts[i % shifts.length];
+-
+-        assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, c), a.map(lsh(c)));
+-        assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, c), a.map(rsh(c)));
+-
+-        assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, c), b.map(ulsh(c)));
+-        assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, c), b.map(ursh(c)));
+-    }
+-    return r;
+-}
+-
+-f();
+-
+diff --git a/js/src/jit-test/tests/SIMD/shuffle.js b/js/src/jit-test/tests/SIMD/shuffle.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/shuffle.js
++++ /dev/null
+@@ -1,86 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function f() {
+-    var i1 = SIMD.Int32x4(1, 2, 3, 4);
+-    var i2 = SIMD.Int32x4(5, 6, 7, 8);
+-
+-    var leet = Math.fround(13.37);
+-    var f1 = SIMD.Float32x4(-.5, -0, Infinity, leet);
+-    var f2 = SIMD.Float32x4(42, .5, 23, -10);
+-
+-    // computes all rotations of a given array
+-    function *gen(arr) {
+-        var previous = arr.slice().splice(0, 4);
+-        var i = 4;
+-        for (var j = 0; j < 8; j++) {
+-            yield previous.slice();
+-            previous = previous.splice(1, previous.length - 1);
+-            previous.push(arr[i]);
+-            i = (i + 1) % arr.length;
+-        }
+-    }
+-
+-    var compI = [];
+-    var baseI = [];
+-    for (var i = 0; i < 8; i++)
+-        baseI.push(SIMD.Int32x4.extractLane(i < 4 ? i1 : i2, i % 4));
+-    for (var k of gen(baseI))
+-        compI.push(k);
+-
+-    var compF = [];
+-    var baseF = [];
+-    for (var i = 0; i < 8; i++)
+-        baseF.push(SIMD.Float32x4.extractLane(i < 4 ? f1 : f2, i % 4));
+-    for (var k of gen(baseF))
+-        compF.push(k);
+-
+-    for (var i = 0; i < 150; i++) {
+-        // Variable lanes
+-        var r = SIMD.Float32x4.shuffle(f1, f2, i % 8, (i + 1) % 8, (i + 2) % 8, (i + 3) % 8);
+-        assertEqX4(r, compF[i % 8]);
+-
+-        // Constant lanes
+-        assertEqX4(SIMD.Float32x4.shuffle(f1, f2, 3, 2, 4, 5), [leet, Infinity, 42, .5]);
+-
+-        // Variable lanes
+-        var r = SIMD.Int32x4.shuffle(i1, i2, i % 8, (i + 1) % 8, (i + 2) % 8, (i + 3) % 8);
+-        assertEqX4(r, compI[i % 8]);
+-
+-        // Constant lanes
+-        assertEqX4(SIMD.Int32x4.shuffle(i1, i2, 3, 2, 4, 5), [4, 3, 5, 6]);
+-    }
+-}
+-
+-function testBailouts(expectException, uglyDuckling) {
+-    var i1 = SIMD.Int32x4(1, 2, 3, 4);
+-    var i2 = SIMD.Int32x4(5, 6, 7, 8);
+-
+-    for (var i = 0; i < 150; i++) {
+-        // Test bailouts
+-        var value = i == 149 ? uglyDuckling : 0;
+-        var caught = false;
+-        try {
+-            assertEqX4(SIMD.Int32x4.shuffle(i1, i2, value, 2, 4, 5), [1, 3, 5, 6]);
+-        } catch(e) {
+-            print(e);
+-            caught = true;
+-            assertEq(i, 149);
+-            assertEq(e instanceof TypeError || e instanceof RangeError, true);
+-        }
+-        if (i == 149)
+-            assertEq(caught, expectException);
+-    }
+-}
+-
+-f();
+-testBailouts(true, -1);
+-testBailouts(true, 8);
+-testBailouts(true, 2.5);
+-testBailouts(true, undefined);
+-testBailouts(true, {});
+-testBailouts(true, 'one');
+-testBailouts(false, false);
+-testBailouts(false, null);
+-testBailouts(false, " 0.0 ");
+diff --git a/js/src/jit-test/tests/SIMD/splat.js b/js/src/jit-test/tests/SIMD/splat.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/splat.js
++++ /dev/null
+@@ -1,15 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function f() {
+-    for (var i = 0; i < 150; i++) {
+-        assertEqX4(SIMD.Int32x4.splat(42),   [42, 42, 42, 42]);
+-        assertEqX4(SIMD.Float32x4.splat(42), [42, 42, 42, 42]);
+-        assertEqX4(SIMD.Bool32x4.splat(true), [true, true, true, true]);
+-        assertEqX4(SIMD.Bool32x4.splat(false), [false, false, false, false]);
+-    }
+-}
+-
+-f();
+-
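splat replicates one scalar across all four lanes; a trivial scalar sketch (Float32x4 presumably also rounds each lane through Math.fround):

function splat4(x) {
    return [x, x, x, x];                          // splat4(42) gives [42, 42, 42, 42]
}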
+diff --git a/js/src/jit-test/tests/SIMD/store.js b/js/src/jit-test/tests/SIMD/store.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/store.js
++++ /dev/null
+@@ -1,143 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 40);
+-
+-function f() {
+-    var f32 = new Float32Array(16);
+-    for (var i = 0; i < 16; i++)
+-        f32[i] = i + 1;
+-
+-    var f64 = new Float64Array(f32.buffer);
+-    var i32 = new Int32Array(f32.buffer);
+-    var u32 = new Uint32Array(f32.buffer);
+-    var i16 = new Int16Array(f32.buffer);
+-    var u16 = new Uint16Array(f32.buffer);
+-    var i8  = new Int8Array(f32.buffer);
+-    var u8  = new Uint8Array(f32.buffer);
+-
+-    var f4 = SIMD.Float32x4(42, 43, 44, 45);
+-
+-    function check(n) {
+-        assertEq(f32[0], 42);
+-        assertEq(f32[1], n > 1 ? 43 : 2);
+-        assertEq(f32[2], n > 2 ? 44 : 3);
+-        assertEq(f32[3], n > 3 ? 45 : 4);
+-
+-        f32[0] = 1;
+-        f32[1] = 2;
+-        f32[2] = 3;
+-        f32[3] = 4;
+-    }
+-
+-    function testStore() {
+-        SIMD.Float32x4.store(f64, 0, f4);
+-        check(4);
+-        SIMD.Float32x4.store(f32, 0, f4);
+-        check(4);
+-        SIMD.Float32x4.store(i32, 0, f4);
+-        check(4);
+-        SIMD.Float32x4.store(u32, 0, f4);
+-        check(4);
+-        SIMD.Float32x4.store(i16, 0, f4);
+-        check(4);
+-        SIMD.Float32x4.store(u16, 0, f4);
+-        check(4);
+-        SIMD.Float32x4.store(i8, 0, f4);
+-        check(4);
+-        SIMD.Float32x4.store(u8, 0, f4);
+-        check(4);
+-    }
+-
+-    function testStore1() {
+-        SIMD.Float32x4.store1(f64, 0, f4);
+-        check(1);
+-        SIMD.Float32x4.store1(f32, 0, f4);
+-        check(1);
+-        SIMD.Float32x4.store1(i32, 0, f4);
+-        check(1);
+-        SIMD.Float32x4.store1(u32, 0, f4);
+-        check(1);
+-        SIMD.Float32x4.store1(i16, 0, f4);
+-        check(1);
+-        SIMD.Float32x4.store1(u16, 0, f4);
+-        check(1);
+-        SIMD.Float32x4.store1(i8, 0, f4);
+-        check(1);
+-        SIMD.Float32x4.store1(u8, 0, f4);
+-        check(1);
+-    }
+-
+-    function testStore2() {
+-        SIMD.Float32x4.store2(f64, 0, f4);
+-        check(2);
+-        SIMD.Float32x4.store2(f32, 0, f4);
+-        check(2);
+-        SIMD.Float32x4.store2(i32, 0, f4);
+-        check(2);
+-        SIMD.Float32x4.store2(u32, 0, f4);
+-        check(2);
+-        SIMD.Float32x4.store2(i16, 0, f4);
+-        check(2);
+-        SIMD.Float32x4.store2(u16, 0, f4);
+-        check(2);
+-        SIMD.Float32x4.store2(i8, 0, f4);
+-        check(2);
+-        SIMD.Float32x4.store2(u8, 0, f4);
+-        check(2);
+-    }
+-
+-    function testStore3() {
+-        SIMD.Float32x4.store3(f64, 0, f4);
+-        check(3);
+-        SIMD.Float32x4.store3(f32, 0, f4);
+-        check(3);
+-        SIMD.Float32x4.store3(i32, 0, f4);
+-        check(3);
+-        SIMD.Float32x4.store3(u32, 0, f4);
+-        check(3);
+-        SIMD.Float32x4.store3(i16, 0, f4);
+-        check(3);
+-        SIMD.Float32x4.store3(u16, 0, f4);
+-        check(3);
+-        SIMD.Float32x4.store3(i8, 0, f4);
+-        check(3);
+-        SIMD.Float32x4.store3(u8, 0, f4);
+-        check(3);
+-    }
+-
+-    for (var i = 0; i < 150; i++) {
+-        testStore();
+-        testStore1();
+-        testStore2();
+-        testStore3();
+-    }
+-}
+-
+-f();
+-
+-function testBailout(uglyDuckling) {
+-    var f32 = new Float32Array(16);
+-    for (var i = 0; i < 16; i++)
+-        f32[i] = i + 1;
+-
+-    var i8  = new Int8Array(f32.buffer);
+-
+-    var f4 = SIMD.Float32x4(42, 43, 44, 45);
+-
+-    for (var i = 0; i < 150; i++) {
+-        var caught = false;
+-        try {
+-            SIMD.Float32x4.store(i8, (i < 149) ? 0 : (16 << 2) - (4 << 2) + 1, f4);
+-        } catch (e) {
+-            print(e);
+-            assertEq(e instanceof RangeError, true);
+-            caught = true;
+-        }
+-        assertEq(i < 149 || caught, true);
+-    }
+-}
+-
+-print('Testing range checks...');
+-testBailout(-1);
+-testBailout(-15);
+-testBailout(12 * 4 + 1);
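A scalar sketch of the checked store the range test above exercises, assuming the index is measured in the destination array's own element units (the boundary (16 << 2) - (4 << 2) + 1 above is in bytes because the destination is an Int8Array):

function storeFloat32x4(dst, index, lanes) {
    var byteOffset = dst.byteOffset + index * dst.BYTES_PER_ELEMENT;
    if (index < 0 || byteOffset + 16 > dst.byteOffset + dst.byteLength)
        throw new RangeError("SIMD store out of bounds");
    // Copy the 16 bytes of lane data; a byte copy sidesteps alignment constraints.
    new Uint8Array(dst.buffer).set(new Uint8Array(new Float32Array(lanes).buffer), byteOffset);
}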
+diff --git a/js/src/jit-test/tests/SIMD/swizzle.js b/js/src/jit-test/tests/SIMD/swizzle.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/swizzle.js
++++ /dev/null
+@@ -1,104 +0,0 @@
+-if (!this.hasOwnProperty("SIMD"))
+-  quit();
+-
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-function f() {
+-    var i4 = SIMD.Int32x4(1, 2, 3, 4);
+-
+-    var leet = Math.fround(13.37);
+-    var f4 = SIMD.Float32x4(-.5, -0, Infinity, leet);
+-
+-    var compI = [
+-        [1,2,3,4],
+-        [2,3,4,1],
+-        [3,4,1,2],
+-        [4,1,2,3]
+-    ];
+-
+-    var compF = [
+-        [-.5, -0, Infinity, leet],
+-        [-0, Infinity, leet, -.5],
+-        [Infinity, leet, -.5, -0],
+-        [leet, -.5, -0, Infinity]
+-    ];
+-
+-    for (var i = 0; i < 150; i++) {
+-        // Variable lanes
+-        var r = SIMD.Float32x4.swizzle(f4, i % 4, (i + 1) % 4, (i + 2) % 4, (i + 3) % 4);
+-        assertEqX4(r, compF[i % 4]);
+-
+-        // Constant lanes
+-        assertEqX4(SIMD.Float32x4.swizzle(f4, 3, 2, 1, 0), [leet, Infinity, -0, -.5]);
+-
+-        // Variable lanes
+-        var r = SIMD.Int32x4.swizzle(i4, i % 4, (i + 1) % 4, (i + 2) % 4, (i + 3) % 4);
+-        assertEqX4(r, compI[i % 4]);
+-
+-        // Constant lanes
+-        assertEqX4(SIMD.Int32x4.swizzle(i4, 3, 2, 1, 0), [4, 3, 2, 1]);
+-    }
+-}
+-
+-function testBailouts(expectException, uglyDuckling) {
+-    var i4 = SIMD.Int32x4(1, 2, 3, 4);
+-    for (var i = 0; i < 150; i++) {
+-        // Test bailouts
+-        var value = i == 149 ? uglyDuckling : 0;
+-        var caught = false;
+-        try {
+-            assertEqX4(SIMD.Int32x4.swizzle(i4, value, 3, 2, 0), [1, 4, 3, 1]);
+-        } catch(e) {
+-            print(e);
+-            caught = true;
+-            assertEq(i, 149);
+-            assertEq(e instanceof TypeError || e instanceof RangeError, true);
+-        }
+-        if (i == 149)
+-            assertEq(caught, expectException);
+-    }
+-}
+-
+-function testInt32x4SwizzleBailout() {
+-    // Test out-of-bounds non-constant indices. This is expected to throw.
+-    var i4 = SIMD.Int32x4(1, 2, 3, 4);
+-    for (var i = 0; i < 150; i++) {
+-        assertEqX4(SIMD.Int32x4.swizzle(i4, i, 3, 2, 0), [i + 1, 4, 3, 1]);
+-    }
+-}
+-
+-f();
+-testBailouts(true, -1);
+-testBailouts(true, 4);
+-testBailouts(true, 2.5);
+-testBailouts(true, undefined);
+-testBailouts(true, {});
+-testBailouts(true, 'one');
+-testBailouts(false, false);
+-testBailouts(false, null);
+-testBailouts(false, " 0.0 ");
+-
+-try {
+-    testInt32x4SwizzleBailout();
+-    throw 'not caught';
+-} catch(e) {
+-    assertEq(e instanceof RangeError, true);
+-}
+-
+-(function() {
+-    var zappa = 0;
+-
+-    function testBailouts() {
+-        var i4 = SIMD.Int32x4(1, 2, 3, 4);
+-        for (var i = 0; i < 300; i++) {
+-            var value = i == 299 ? 2.5 : 1;
+-            SIMD.Int32x4.swizzle(i4, value, 3, 2, 0);
+-            zappa = i;
+-        }
+-    }
+-
+-    try { testBailouts(); } catch (e) {}
+-    assertEq(zappa, 298);
+-})();
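swizzle is the single-vector analogue of shuffle, reordering the four lanes of one operand; a scalar sketch:

function swizzle4(v, x, y, z, w) {
    return [v[x], v[y], v[z], v[w]];              // each index must be in 0..3
}
// swizzle4([1,2,3,4], 3, 2, 1, 0) gives [4, 3, 2, 1], as asserted above.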
+diff --git a/js/src/jit-test/tests/SIMD/uconvert.js b/js/src/jit-test/tests/SIMD/uconvert.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/uconvert.js
++++ /dev/null
+@@ -1,86 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 30);
+-
+-// Testing Uint32 <-> Float32 conversions.
+-// These conversions deserve special attention because SSE doesn't provide
+-// simple conversion instructions.
+-
+-// Convert a Uint32Array to a Float32Array using scalar conversions.
+-function cvt_utof_scalar(u32s, f32s) {
+-    assertEq(u32s.length, f32s.length);
+-    for (var i = 0; i < u32s.length; i++) {
+-        f32s[i] = u32s[i];
+-    }
+-}
+-
+-// Convert a Uint32Array to a Float32Array using simd conversions.
+-function cvt_utof_simd(u32s, f32s) {
+-    assertEq(u32s.length, f32s.length);
+-    for (var i = 0; i < u32s.length; i += 4) {
+-        SIMD.Float32x4.store(f32s, i, SIMD.Float32x4.fromUint32x4(SIMD.Uint32x4.load(u32s, i)));
+-    }
+-}
+-
+-// Convert a Float32Array to a Uint32Array using scalar conversions.
+-function cvt_ftou_scalar(f32s, u32s) {
+-    assertEq(f32s.length, u32s.length);
+-    for (var i = 0; i < f32s.length; i++) {
+-        u32s[i] = f32s[i];
+-    }
+-}
+-
+-// Convert a Float32Array to a Uint32Array using simd conversions.
+-function cvt_ftou_simd(f32s, u32s) {
+-    assertEq(f32s.length, u32s.length);
+-    for (var i = 0; i < f32s.length; i += 4) {
+-        SIMD.Uint32x4.store(u32s, i, SIMD.Uint32x4.fromFloat32x4(SIMD.Float32x4.load(f32s, i)));
+-    }
+-}
+-
+-function check(a, b) {
+-    assertEq(a.length, b.length);
+-    for (var i = 0; i < a.length; i++) {
+-        assertEq(a[i], b[i]);
+-    }
+-}
+-
+-// Uint32x4 --> Float32x4 tests.
+-var src = new Uint32Array(8000);
+-var dst1 = new Float32Array(8000);
+-var dst2 = new Float32Array(8000);
+-
+-for (var i = 0; i < 2000; i++) {
+-    src[i] = i;
+-    src[i + 2000] = 0x7fffffff - i;
+-    src[i + 4000] = 0x80000000 + i;
+-    src[i + 6000] = 0xffffffff - i;
+-}
+-
+-for (var n = 0; n < 10; n++) {
+-    cvt_utof_scalar(src, dst1);
+-    cvt_utof_simd(src, dst2);
+-    check(dst1, dst2);
+-}
+-
+-// Float32x4 --> Uint32x4 tests.
+-var fsrc = dst1;
+-var fdst1 = new Uint32Array(8000);
+-var fdst2 = new Uint32Array(8000);
+-
+-// The 0xffffffff entries in fsrc round to 0x1.0p32f which throws.
+-// Go as high as 0x0.ffffffp32f.
+-for (var i = 0; i < 2000; i++) {
+-    fsrc[i + 6000] = 0xffffff7f - i;
+-}
+-
+-// Truncation towards 0.
+-fsrc[1990] = -0.9;
+-fsrc[1991] = 0.9;
+-fsrc[1992] = 1.9;
+-
+-for (var n = 0; n < 10; n++) {
+-    cvt_ftou_scalar(fsrc, fdst1);
+-    cvt_ftou_simd(fsrc, fdst2);
+-    check(fdst1, fdst2);
+-}
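A scalar sketch of the checked float32-to-uint32 conversion this test relies on, inferred from the comments above (truncation toward zero, and a throw once a lane rounds up to 0x1.0p32f):

function toUint32Checked(x) {
    var t = Math.trunc(x);                        // -0.9 -> -0, 0.9 -> 0, 1.9 -> 1
    if (!(t >= 0 && t <= 0xffffffff))             // rejects NaN and out-of-range lanes
        throw new RangeError("value cannot be converted to uint32");
    return t >>> 0;
}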
+diff --git a/js/src/jit-test/tests/SIMD/unary.js b/js/src/jit-test/tests/SIMD/unary.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/unary.js
++++ /dev/null
+@@ -1,35 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("ion.warmup.trigger", 50);
+-
+-var notf = (function() {
+-    var i32 = new Int32Array(1);
+-    var f32 = new Float32Array(i32.buffer);
+-    return function(x) {
+-        f32[0] = x;
+-        i32[0] = ~i32[0];
+-        return f32[0];
+-    }
+-})();
+-
+-function f() {
+-    var f4 = SIMD.Float32x4(1, 2, 3, 4);
+-    var i4 = SIMD.Int32x4(1, 2, 3, 4);
+-    var b4 = SIMD.Bool32x4(true, false, true, false);
+-    var BitOrZero = (x) => x | 0;
+-    for (var i = 0; i < 150; i++) {
+-        assertEqX4(SIMD.Float32x4.neg(f4), unaryX4((x) => -x, f4, Math.fround));
+-        assertEqX4(SIMD.Float32x4.abs(f4), unaryX4(Math.abs, f4, Math.fround));
+-        assertEqX4(SIMD.Float32x4.sqrt(f4), unaryX4(Math.sqrt, f4, Math.fround));
+-
+-        assertEqX4(SIMD.Float32x4.reciprocalApproximation(f4), unaryX4((x) => 1 / x, f4, Math.fround), assertNear);
+-        assertEqX4(SIMD.Float32x4.reciprocalSqrtApproximation(f4), unaryX4((x) => 1 / Math.sqrt(x), f4, Math.fround), assertNear);
+-
+-        assertEqX4(SIMD.Int32x4.not(i4), unaryX4((x) => ~x, i4, BitOrZero));
+-        assertEqX4(SIMD.Int32x4.neg(i4), unaryX4((x) => -x, i4, BitOrZero));
+-
+-        assertEqX4(SIMD.Bool32x4.not(b4), unaryX4((x) => !x, b4, (x) => x ));
+-    }
+-}
+-
+-f();
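unaryX4 comes from the simd.js harness loaded at the top and is not shown here; presumably it applies the scalar function to each lane and coerces the result to the lane type. A minimal sketch over a plain lane array:

function unaryX4Sketch(scalarFn, lanes, coerce) {
    return lanes.map(function (x) { return coerce(scalarFn(x)); });
}
// unaryX4Sketch(Math.sqrt, [1, 2, 3, 4], Math.fround) yields the lanes expected
// of SIMD.Float32x4.sqrt above.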
+diff --git a/js/src/jit-test/tests/SIMD/unbox.js b/js/src/jit-test/tests/SIMD/unbox.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/SIMD/unbox.js
++++ /dev/null
+@@ -1,144 +0,0 @@
+-load(libdir + 'simd.js');
+-
+-setJitCompilerOption("baseline.warmup.trigger", 10);
+-setJitCompilerOption("ion.warmup.trigger", 30);
+-
+-var max = 40, pivot = 35;
+-
+-var i32x4 = SIMD.Int32x4;
+-var f32x4 = SIMD.Float32x4;
+-var i32x4Add = SIMD.Int32x4.add;
+-
+-var FakeSIMDType = function (o) { this.x = o.x; this.y = o.y; this.z = o.z; this.w = o.w; };
+-if (this.hasOwnProperty("TypedObject")) {
+-  var TO = TypedObject;
+-  FakeSIMDType = new TO.StructType({ x: TO.int32, y: TO.int32, z: TO.int32, w: TO.int32 });
+-}
+-
+-function simdunbox_bail_undef(i, lhs, rhs) {
+-  return i32x4Add(lhs, rhs);
+-}
+-
+-function simdunbox_bail_object(i, lhs, rhs) {
+-  return i32x4Add(lhs, rhs);
+-}
+-
+-function simdunbox_bail_typeobj(i, lhs, rhs) {
+-  return i32x4Add(lhs, rhs);
+-}
+-
+-function simdunbox_bail_badsimd(i, lhs, rhs) {
+-  return i32x4Add(lhs, rhs);
+-}
+-
+-var arr_undef = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
+-var fail_undef = 0;
+-var arr_object = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
+-var fail_object = 0;
+-var arr_typeobj = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
+-var fail_typeobj = 0;
+-var arr_badsimd = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
+-var fail_badsimd = 0;
+-for (var i = 0; i < max; i++) {
+-  try {
+-    arr_undef[i + 2] = simdunbox_bail_undef(i, arr_undef[i], arr_undef[i + 1]);
+-  } catch (x) {
+-    arr_undef[i + 2] = arr_undef[i - 1];
+-    fail_undef++;
+-  }
+-
+-  try {
+-    arr_object[i + 2] = simdunbox_bail_object(i, arr_object[i], arr_object[i + 1]);
+-  } catch (x) {
+-    arr_object[i + 2] = arr_object[i - 1];
+-    fail_object++;
+-  }
+-
+-  try {
+-    arr_typeobj[i + 2] = simdunbox_bail_typeobj(i, arr_typeobj[i], arr_typeobj[i + 1]);
+-  } catch (x) {
+-    arr_typeobj[i + 2] = arr_typeobj[i - 1];
+-    fail_typeobj++;
+-  }
+-
+-  try {
+-    arr_badsimd[i + 2] = simdunbox_bail_badsimd(i, arr_badsimd[i], arr_badsimd[i + 1]);
+-  } catch (x) {
+-    arr_badsimd[i + 2] = arr_badsimd[i - 1];
+-    fail_badsimd++;
+-  }
+-
+-  if (i + 2 == pivot) {
+-    arr_undef[pivot] = undefined;
+-    arr_object[pivot] = { x: 0, y: 1, z: 2, w: 3 };
+-    arr_typeobj[pivot] = new FakeSIMDType({ x: 0, y: 1, z: 2, w: 3 });
+-    arr_badsimd[pivot] = f32x4(0, 1, 2, 3);
+-  }
+-}
+-
+-assertEq(fail_undef, 2);
+-assertEq(fail_object, 2);
+-assertEq(fail_typeobj, 2);
+-assertEq(fail_badsimd, 2);
+-
+-// Assert that all SIMD values are correct.
+-function assertEqX4(real, expected, assertFunc) {
+-    if (typeof assertFunc === 'undefined')
+-        assertFunc = assertEq;
+-
+-    assertFunc(real.x, expected[0]);
+-    assertFunc(real.y, expected[1]);
+-    assertFunc(real.z, expected[2]);
+-    assertFunc(real.w, expected[3]);
+-}
+-
+-var fib = [0, 1];
+-for (i = 0; i < max + 5; i++)
+-  fib[i+2] = (fib[i] + fib[i+1]) | 0;
+-
+-for (i = 0; i < max; i++) {
+-  if (i == pivot)
+-    continue;
+-  var ref = fib.slice(i < pivot ? i : i - 3);
+-  assertEqX4(arr_undef[i], ref);
+-  assertEqX4(arr_object[i], ref);
+-  assertEqX4(arr_typeobj[i], ref);
+-  assertEqX4(arr_badsimd[i], ref);
+-}
+-
+-// Check that unbox operations aren't removed
+-(function() {
+-
+-    function add(i, v, w) {
+-        if (i % 2 == 0) {
+-            SIMD.Int32x4.add(v, w);
+-        } else {
+-            SIMD.Float32x4.add(v, w);
+-        }
+-    }
+-
+-    var i = 0;
+-    var caught = false;
+-    var f4 = SIMD.Float32x4(1,2,3,4);
+-    var i4 = SIMD.Int32x4(1,2,3,4);
+-    try {
+-        for (; i < 200; i++) {
+-            if (i % 2 == 0) {
+-                add(i, i4, i4);
+-            } else if (i == 199) {
+-                add(i, i4, f4);
+-            } else {
+-                add(i, f4, f4);
+-            }
+-        }
+-    } catch(e) {
+-        print(e);
+-        assertEq(e instanceof TypeError, true);
+-        assertEq(i, 199);
+-        caught = true;
+-    }
+-
+-    assertEq(i < 199 || caught, true);
+-
+-})();
+-
+diff --git a/js/src/jit-test/tests/asm.js/bug1126251.js b/js/src/jit-test/tests/asm.js/bug1126251.js
+--- a/js/src/jit-test/tests/asm.js/bug1126251.js
++++ b/js/src/jit-test/tests/asm.js/bug1126251.js
+@@ -8,46 +8,8 @@ var v = asmLink(asmCompile('global', `
+         var x = frd(.1e+71);
+         x = frd(x / x);
+         return +x;
+     }
+     return e;
+ `), this)();
+ 
+ assertEq(v, NaN);
+-
+-if (!isSimdAvailable() || typeof SIMD === 'undefined') {
+-    quit(0);
+-}
+-
+-var v = asmLink(asmCompile('global', `
+-    "use asm";
+-    var frd = global.Math.fround;
+-    var Float32x4 = global.SIMD.Float32x4;
+-    var splat = Float32x4.splat;
+-    var ext = Float32x4.extractLane;
+-    function e() {
+-        var v = Float32x4(0,0,0,0);
+-        var x = frd(0.);
+-        v = splat(.1e+71);
+-        x = ext(v,0);
+-        x = frd(x / x);
+-        return +x;
+-    }
+-    return e;
+-`), this)();
+-
+-assertEq(v, NaN);
+-
+-// Bug 1130618: without GVN
+-setJitCompilerOption("ion.gvn.enable", 0);
+-var v = asmLink(asmCompile('global', `
+-    "use asm";
+-    var Float32x4 = global.SIMD.Float32x4;
+-    var splat = Float32x4.splat;
+-    var ext = Float32x4.extractLane;
+-    function e() {
+-        return +ext(splat(.1e+71),0);
+-    }
+-    return e;
+-`), this)();
+-
+-assertEq(v, Infinity);
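The expected values follow from float32 overflow: .1e+71 is far above the float32 maximum (about 3.4e38), so frd(.1e+71) is Infinity and x / x is then NaN, while the GVN-disabled variant extracts the splatted lane directly and returns Infinity. A quick check in the same harness style (assertEq compares with SameValue, under which NaN matches NaN, as in the retained assert above):

assertEq(Math.fround(.1e+71), Infinity);
assertEq(Math.fround(Infinity / Infinity), NaN);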
+diff --git a/js/src/jit-test/tests/asm.js/bug1201124-simd-proxy.js b/js/src/jit-test/tests/asm.js/bug1201124-simd-proxy.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/asm.js/bug1201124-simd-proxy.js
++++ /dev/null
+@@ -1,28 +0,0 @@
+-load(libdir + "asm.js");
+-load(libdir + "asserts.js");
+-
+-if (typeof newGlobal !== 'function' ||
+-    !isSimdAvailable() ||
+-    typeof SIMD === 'undefined')
+-{
+-    quit();
+-}
+-
+-var stdlib = new (newGlobal().Proxy)(this, new Proxy({
+-    simdGet: 0,
+-    getOwnPropertyDescriptor(t, pk) {
+-        if (pk === "SIMD" && this.simdGet++ === 1) {
+-            return {};
+-        }
+-        return Reflect.getOwnPropertyDescriptor(t, pk);
+-    }
+-}, {
+-    get(t, pk, r) {
+-        print("trap", pk);
+-        return Reflect.get(t, pk, r);
+-    }
+-}));
+-
+-var m = asmCompile('stdlib', '"use asm"; var i4=stdlib.SIMD.Int32x4; var i4add=i4.add; return {}');
+-
+-assertAsmLinkFail(m, stdlib);
+diff --git a/js/src/jit-test/tests/asm.js/simd-fbirds.js b/js/src/jit-test/tests/asm.js/simd-fbirds.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/asm.js/simd-fbirds.js
++++ /dev/null
+@@ -1,198 +0,0 @@
+-/* -*- Mode: javascript; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 ; js-indent-level : 2 ; js-curly-indent-offset: 0 -*- */
+-/* vim: set ts=4 et sw=4 tw=80: */
+-
+-// Author: Peter Jensen
+-
+-load(libdir + "asm.js");
+-if (!isSimdAvailable() || typeof SIMD === 'undefined') {
+-    print("won't run tests as simd extensions aren't activated yet");
+-    quit(0);
+-}
+-
+-const NUM_BIRDS = 30;
+-const NUM_UPDATES = 20;
+-const ACCEL_DATA_STEPS = 30;
+-
+-var buffer = new ArrayBuffer(0x200000);
+-var bufferF32 = new Float32Array(buffer);
+-
+-var actualBirds = 0;
+-
+-function init() {
+-    actualBirds = 0;
+-    // Make it a power of two, for quick modulo wrapping.
+-    var accelDataValues = [10.0, 9.5, 9.0, 8.0, 7.0, 6.0, 5.5, 5.0, 5.0, 5.0, 5.5, 6.0, 7.0, 8.0, 9.0, 10.0];
+-    accelDataValues = accelDataValues.map(function(v) { return 50*v; });
+-    var accelDataValuesLength = accelDataValues.length;
+-    assertEq(accelDataValuesLength, 16); // Hard coded in the asm.js module
+-    for (i = 0; i < accelDataValuesLength; i++)
+-        bufferF32[i + NUM_BIRDS * 2] = accelDataValues[i];
+-}
+-
+-function addBird(pos, vel) {
+-    bufferF32[actualBirds] = pos;
+-    bufferF32[actualBirds + NUM_BIRDS] = vel;
+-    actualBirds++;
+-    return actualBirds - 1;
+-}
+-
+-function getActualBirds() {
+-    return actualBirds;
+-}
+-
+-var code = `
+-    "use asm";
+-    var toF = global.Math.fround;
+-    var u8 = new global.Uint8Array(buffer);
+-    var f32 = new global.Float32Array(buffer);
+-    const maxBirds = 100000;
+-    const maxBirdsx4 = 400000;
+-    const maxBirdsx8 = 800000;
+-    const accelMask = 0x3c;
+-    const mk4 = 0x000ffff0;
+-
+-    const getMaxPos = 1000.0;
+-    const getAccelDataSteps = imp.accelDataSteps | 0;
+-    var getActualBirds = imp.getActualBirds;
+-
+-    var i4 = global.SIMD.Int32x4;
+-    var f4 = global.SIMD.Float32x4;
+-    var b4 = global.SIMD.Bool32x4;
+-    var i4add = i4.add;
+-    var i4and = i4.and;
+-    var f4select = f4.select;
+-    var f4add = f4.add;
+-    var f4sub = f4.sub;
+-    var f4mul = f4.mul;
+-    var f4greaterThan = f4.greaterThan;
+-    var f4splat = f4.splat;
+-    var f4load = f4.load;
+-    var f4store = f4.store;
+-    var b4any = b4.anyTrue;
+-
+-    const zerox4 = f4(0.0,0.0,0.0,0.0);
+-
+-    function declareHeapSize() {
+-        f32[0x0007ffff] = toF(0.0);
+-    }
+-
+-    function update(timeDelta) {
+-        timeDelta = toF(timeDelta);
+-        //      var steps               = Math.ceil(timeDelta/accelData.interval);
+-        var steps = 0;
+-        var subTimeDelta = toF(0.0);
+-        var actualBirds = 0;
+-        var maxPos = toF(0.0);
+-        var maxPosx4 = f4(0.0,0.0,0.0,0.0);
+-        var subTimeDeltax4  = f4(0.0,0.0,0.0,0.0);
+-        var subTimeDeltaSquaredx4 = f4(0.0,0.0,0.0,0.0);
+-        var point5x4 = f4(0.5, 0.5, 0.5, 0.5);
+-        var i = 0;
+-        var len = 0;
+-        var accelIndex = 0;
+-        var newPosx4 = f4(0.0,0.0,0.0,0.0);
+-        var newVelx4 = f4(0.0,0.0,0.0,0.0);
+-        var accel = toF(0.0);
+-        var accelx4 = f4(0.0,0.0,0.0,0.0);
+-        var a = 0;
+-        var posDeltax4 = f4(0.0,0.0,0.0,0.0);
+-        var cmpx4 = b4(0,0,0,0);
+-        var newVelTruex4 = f4(0.0,0.0,0.0,0.0);
+-
+-        steps = getAccelDataSteps | 0;
+-        subTimeDelta = toF(toF(timeDelta / toF(steps | 0)) / toF(1000.0));
+-        actualBirds = getActualBirds() | 0;
+-        maxPos = toF(+getMaxPos);
+-        maxPosx4 = f4splat(maxPos);
+-        subTimeDeltax4 = f4splat(subTimeDelta);
+-        subTimeDeltaSquaredx4 = f4mul(subTimeDeltax4, subTimeDeltax4);
+-
+-        len = ((actualBirds + 3) >> 2) << 4;
+-
+-        for (i = 0; (i | 0) < (len | 0); i = (i + 16) | 0) {
+-            accelIndex = 0;
+-            newPosx4 = f4load(u8, i & mk4);
+-            newVelx4 = f4load(u8, (i & mk4) + maxBirdsx4);
+-            for (a = 0; (a | 0) < (steps | 0); a = (a + 1) | 0) {
+-                accel = toF(f32[(accelIndex & accelMask) + maxBirdsx8 >> 2]);
+-                accelx4 = f4splat(accel);
+-                accelIndex = (accelIndex + 4) | 0;
+-                posDeltax4 = f4mul(point5x4, f4mul(accelx4, subTimeDeltaSquaredx4));
+-                posDeltax4 = f4add(posDeltax4, f4mul(newVelx4, subTimeDeltax4));
+-                newPosx4 = f4add(newPosx4, posDeltax4);
+-                newVelx4 = f4add(newVelx4, f4mul(accelx4, subTimeDeltax4));
+-                cmpx4 = f4greaterThan(newPosx4, maxPosx4);
+-
+-                if (b4any(cmpx4)) {
+-                    // Work around unimplemented 'neg' operation, using 0 - x.
+-                    newVelTruex4 = f4sub(zerox4, newVelx4);
+-                    newVelx4 = f4select(cmpx4, newVelTruex4, newVelx4);
+-                }
+-            }
+-            f4store(u8, i & mk4, newPosx4);
+-            f4store(u8, (i & mk4) + maxBirdsx4, newVelx4);
+-        }
+-    }
+-
+-    return update;
+-`
+-
+-var ffi = {
+-    getActualBirds,
+-    accelDataSteps: ACCEL_DATA_STEPS
+-};
+-
+-var fbirds = asmLink(asmCompile('global', 'imp', 'buffer', code), this, ffi, buffer);
+-
+-init();
+-for (var i = 0; i < NUM_BIRDS; i++) {
+-    addBird(i / 10, Math.exp(2, NUM_BIRDS - i));
+-}
+-
+-var b = dateNow();
+-for (var j = 0; j < NUM_UPDATES; j++) {
+-    fbirds(16);
+-}
+-print(dateNow() - b);
+-
+-assertEq(bufferF32[0], 0);
+-assertEq(bufferF32[1], 0.10000000149011612);
+-assertEq(bufferF32[2], 0.20000000298023224);
+-assertEq(bufferF32[3], 0.30000001192092896);
+-assertEq(bufferF32[4], 0.4000000059604645);
+-assertEq(bufferF32[5], 0.5);
+-assertEq(bufferF32[6], 0.6000000238418579);
+-assertEq(bufferF32[7], 0.699999988079071);
+-assertEq(bufferF32[8], 0.800000011920929);
+-assertEq(bufferF32[9], 0.8999999761581421);
+-assertEq(bufferF32[10], 1);
+-assertEq(bufferF32[11], 1.100000023841858);
+-assertEq(bufferF32[12], 1.2000000476837158);
+-assertEq(bufferF32[13], 1.2999999523162842);
+-assertEq(bufferF32[14], 1.399999976158142);
+-assertEq(bufferF32[15], 1.5);
+-assertEq(bufferF32[16], 1.600000023841858);
+-assertEq(bufferF32[17], 1.7000000476837158);
+-assertEq(bufferF32[18], 1.7999999523162842);
+-assertEq(bufferF32[19], 1.899999976158142);
+-assertEq(bufferF32[20], 2);
+-assertEq(bufferF32[21], 2.0999999046325684);
+-assertEq(bufferF32[22], 2.200000047683716);
+-assertEq(bufferF32[23], 2.299999952316284);
+-assertEq(bufferF32[24], 2.4000000953674316);
+-assertEq(bufferF32[25], 2.5);
+-assertEq(bufferF32[26], 2.5999999046325684);
+-assertEq(bufferF32[27], 2.700000047683716);
+-assertEq(bufferF32[28], 2.799999952316284);
+-assertEq(bufferF32[29], 2.9000000953674316);
+-
+-
+-// Code used to generate the assertEq list above.
+-function generateAssertList() {
+-    var buf = '';
+-    for (var k = 0; k < NUM_BIRDS; k++) {
+-        buf += 'assertEq(bufferF32['+ k + '], ' + bufferF32[k] + ');\n';
+-    }
+-    print(buf);
+-}
+-//generateAssertList();
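The kernel above integrates each bird with constant acceleration per sub-step and reflects the velocity once the position passes maxPos; a scalar sketch of one sub-step (names hypothetical):

function stepBird(pos, vel, accel, dt, maxPos) {
    pos += 0.5 * accel * dt * dt + vel * dt;      // posDeltax4 in the kernel
    vel += accel * dt;
    if (pos > maxPos)
        vel = -vel;                               // the kernel computes 0 - x, lacking 'neg'
    return { pos: pos, vel: vel };
}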
+diff --git a/js/src/jit-test/tests/asm.js/simd-mandelbrot.js b/js/src/jit-test/tests/asm.js/simd-mandelbrot.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/asm.js/simd-mandelbrot.js
++++ /dev/null
+@@ -1,1819 +0,0 @@
+-/* -*- Mode: javascript; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 ; js-indent-level : 2 ; js-curly-indent-offset: 0 -*- */
+-/* vim: set ts=2 et sw=2 tw=80: */
+-
+-// Mandelbrot using SIMD
+-// Author: Peter Jensen, Intel Corporation
+-
+-load(libdir + "asm.js");
+-if (!isSimdAvailable() || typeof SIMD === 'undefined') {
+-    print("won't run tests as simd extensions aren't activated yet");
+-    quit(0);
+-}
+-
+-// global variables
+-const MAX_ITERATIONS = 10;
+-const DRAW_ITERATIONS = 10;
+-
+-const CANVAS_WIDTH = 20;
+-const CANVAS_HEIGHT = 20;
+-
+-const LIMIT_SHOW = 20 * 20 * 4;
+-
+-// Asm.js module buffer.
+-var buffer = new ArrayBuffer(16 * 1024 * 1024);
+-var view = new Uint8Array(buffer);
+-
+-var moduleCode = `
+-  "use asm"
+-  var b8 = new global.Uint8Array(buffer);
+-  var toF = global.Math.fround;
+-  var i4 = global.SIMD.Int32x4;
+-  var ci4 = i4.check;
+-  var f4 = global.SIMD.Float32x4;
+-  var i4add = i4.add;
+-  var i4and = i4.and;
+-  var i4ext = i4.extractLane;
+-  var i4sel = i4.select;
+-  var f4add = f4.add;
+-  var f4sub = f4.sub;
+-  var f4mul = f4.mul;
+-  var f4lessThanOrEqual = f4.lessThanOrEqual;
+-  var f4splat = f4.splat;
+-  var imul = global.Math.imul;
+-  var b4 = global.SIMD.Bool32x4;
+-  var b4any = b4.anyTrue;
+-  const zero4 = i4(0,0,0,0), one4 = i4(1,1,1,1), two4 = f4(2,2,2,2), four4 = f4(4,4,4,4);
+-
+-  const mk0 = 0x007fffff;
+-
+-  function declareHeapLength() {
+-    b8[0x00ffffff] = 0;
+-  }
+-
+-  function mapColorAndSetPixel (x, y, width, value, max_iterations) {
+-    x = x | 0;
+-    y = y | 0;
+-    width = width | 0;
+-    value = value | 0;
+-    max_iterations = max_iterations | 0;
+-
+-    var rgb = 0, r = 0, g = 0, b = 0, index = 0;
+-
+-    index = (((imul((width >>> 0), (y >>> 0)) + x) | 0) * 4) | 0;
+-    if ((value | 0) == (max_iterations | 0)) {
+-      r = 0;
+-      g = 0;
+-      b = 0;
+-    } else {
+-      rgb = ~~toF(toF(toF(toF(value >>> 0) * toF(0xffff)) / toF(max_iterations >>> 0)) * toF(0xff));
+-      r = rgb & 0xff;
+-      g = (rgb >>> 8) & 0xff;
+-      b = (rgb >>> 16) & 0xff;
+-    }
+-    b8[(index & mk0) >> 0] = r;
+-    b8[(index & mk0) + 1 >> 0] = g;
+-    b8[(index & mk0) + 2 >> 0] = b;
+-    b8[(index & mk0) + 3 >> 0] = 255;
+-  }
+-
+-  function mandelPixelX4 (xf, yf, yd, max_iterations) {
+-    xf = toF(xf);
+-    yf = toF(yf);
+-    yd = toF(yd);
+-    max_iterations = max_iterations | 0;
+-    var c_re4  = f4(0,0,0,0), c_im4  = f4(0,0,0,0);
+-    var z_re4  = f4(0,0,0,0), z_im4  = f4(0,0,0,0);
+-    var count4 = i4(0,0,0,0);
+-    var z_re24 = f4(0,0,0,0), z_im24 = f4(0,0,0,0);
+-    var new_re4 = f4(0,0,0,0), new_im4 = f4(0,0,0,0);
+-    var i = 0;
+-    var mb4 = b4(0,0,0,0);
+-
+-    c_re4 = f4splat(xf);
+-    c_im4 = f4(yf, toF(yd + yf), toF(yd + toF(yd + yf)), toF(yd + toF(yd + toF(yd + yf))));
+-
+-    z_re4  = c_re4;
+-    z_im4  = c_im4;
+-
+-    for (i = 0; (i | 0) < (max_iterations | 0); i = (i + 1) | 0) {
+-      z_re24 = f4mul(z_re4, z_re4);
+-      z_im24 = f4mul(z_im4, z_im4);
+-      mb4 = f4lessThanOrEqual(f4add(z_re24, z_im24), four4);
+-      // If all 4 values are greater than 4.0, there's no reason to continue.
+-      if (!b4any(mb4))
+-        break;
+-
+-      new_re4 = f4sub(z_re24, z_im24);
+-      new_im4 = f4mul(f4mul(two4, z_re4), z_im4);
+-      z_re4   = f4add(c_re4, new_re4);
+-      z_im4   = f4add(c_im4, new_im4);
+-      count4  = i4add(count4, i4sel(mb4, one4, zero4));
+-    }
+-    return ci4(count4);
+-  }
+-
+-  function mandelColumnX4 (x, width, height, xf, yf, yd, max_iterations) {
+-    x = x | 0;
+-    width = width | 0;
+-    height = height | 0;
+-    xf = toF(xf);
+-    yf = toF(yf);
+-    yd = toF(yd);
+-    max_iterations = max_iterations | 0;
+-
+-    var y = 0;
+-    var ydx4 = toF(0);
+-    var m4 = i4(0,0,0,0);
+-
+-    ydx4 = toF(yd * toF(4));
+-    for (y = 0; (y | 0) < (height | 0); y = (y + 4) | 0) {
+-      m4   = ci4(mandelPixelX4(toF(xf), toF(yf), toF(yd), max_iterations));
+-      mapColorAndSetPixel(x | 0, y | 0,   width, i4ext(m4,0), max_iterations);
+-      mapColorAndSetPixel(x | 0, (y + 1) | 0, width, i4ext(m4,1), max_iterations);
+-      mapColorAndSetPixel(x | 0, (y + 2) | 0, width, i4ext(m4,2), max_iterations);
+-      mapColorAndSetPixel(x | 0, (y + 3) | 0, width, i4ext(m4,3), max_iterations);
+-      yf = toF(yf + ydx4);
+-    }
+-  }
+-
+-  function mandel (width, height, xc, yc, scale, max_iterations) {
+-    width = width | 0;
+-    height = height | 0;
+-    xc = toF(xc);
+-    yc = toF(yc);
+-    scale = toF(scale);
+-    max_iterations = max_iterations | 0;
+-
+-    var x0 = toF(0), y0 = toF(0);
+-    var xd = toF(0), yd = toF(0);
+-    var xf = toF(0);
+-    var x = 0;
+-
+-    x0 = toF(xc - toF(scale * toF(1.5)));
+-    y0 = toF(yc - scale);
+-    xd = toF(toF(scale * toF(3)) / toF(width >>> 0));
+-    yd = toF(toF(scale * toF(2)) / toF(height >>> 0));
+-    xf = x0;
+-
+-    for (x = 0; (x | 0) < (width | 0); x = (x + 1) | 0) {
+-      mandelColumnX4(x, width, height, xf, y0, yd, max_iterations);
+-      xf = toF(xf + xd);
+-    }
+-  }
+-
+-  return mandel;
+-`;
+-
+-var FFI = {};
+-var mandelbro = asmLink(asmCompile('global', 'ffi', 'buffer', moduleCode), this, FFI, buffer);
+-
+-function animateMandelbrot () {
+-  var scale_start = 1.0;
+-  var scale_end   = 0.0005;
+-  var xc_start    = -0.5;
+-  var yc_start    = 0.0;
+-  var xc_end      = 0.0;
+-  var yc_end      = 0.75;
+-  var steps       = 200.0;
+-  var scale_step  = (scale_end - scale_start)/steps;
+-  var xc_step     = (xc_end - xc_start)/steps;
+-  var yc_step     = (yc_end - yc_start)/steps;
+-  var scale       = scale_start;
+-  var xc          = xc_start;
+-  var yc          = yc_start;
+-  var i           = 0;
+-  var now         = dateNow();
+-
+-  function draw1 () {
+-    mandelbro(CANVAS_WIDTH, CANVAS_HEIGHT, xc, yc, scale, MAX_ITERATIONS);
+-    if (scale < scale_end || scale > scale_start) {
+-      scale_step = -scale_step;
+-      xc_step = -xc_step;
+-      yc_step = -yc_step;
+-    }
+-    scale += scale_step;
+-    xc += xc_step;
+-    yc += yc_step;
+-    i++;
+-  }
+-
+-  var b = dateNow();
+-  for (var j = DRAW_ITERATIONS; j --> 0;)
+-    draw1();
+-  print(dateNow() - b);
+-}
+-
+-animateMandelbrot();
+-
+-assertEq(view[0], 0, "0th value should be 0");
+-assertEq(view[1], 0, "1th value should be 0");
+-assertEq(view[2], 0, "2th value should be 0");
+-assertEq(view[3], 255, "3th value should be 255");
+-assertEq(view[4], 230, "4th value should be 230");
+-assertEq(view[5], 127, "5th value should be 127");
+-assertEq(view[6], 25, "6th value should be 25");
+-assertEq(view[7], 255, "7th value should be 255");
+-assertEq(view[8], 230, "8th value should be 230");
+-assertEq(view[9], 127, "9th value should be 127");
+-assertEq(view[10], 25, "10th value should be 25");
+-assertEq(view[11], 255, "11th value should be 255");
+-assertEq(view[12], 205, "12th value should be 205");
+-assertEq(view[13], 255, "13th value should be 255");
+-assertEq(view[14], 50, "14th value should be 50");
+-assertEq(view[15], 255, "15th value should be 255");
+-assertEq(view[16], 205, "16th value should be 205");
+-assertEq(view[17], 255, "17th value should be 255");
+-assertEq(view[18], 50, "18th value should be 50");
+-assertEq(view[19], 255, "19th value should be 255");
+-assertEq(view[20], 205, "20th value should be 205");
+-assertEq(view[21], 255, "21th value should be 255");
+-assertEq(view[22], 50, "22th value should be 50");
+-assertEq(view[23], 255, "23th value should be 255");
+-assertEq(view[24], 205, "24th value should be 205");
+-assertEq(view[25], 255, "25th value should be 255");
+-assertEq(view[26], 50, "26th value should be 50");
+-assertEq(view[27], 255, "27th value should be 255");
+-assertEq(view[28], 205, "28th value should be 205");
+-assertEq(view[29], 255, "29th value should be 255");
+-assertEq(view[30], 50, "30th value should be 50");
+-assertEq(view[31], 255, "31th value should be 255");
+-assertEq(view[32], 179, "32th value should be 179");
+-assertEq(view[33], 127, "33th value should be 127");
+-assertEq(view[34], 76, "34th value should be 76");
+-assertEq(view[35], 255, "35th value should be 255");
+-assertEq(view[36], 179, "36th value should be 179");
+-assertEq(view[37], 127, "37th value should be 127");
+-assertEq(view[38], 76, "38th value should be 76");
+-assertEq(view[39], 255, "39th value should be 255");
+-assertEq(view[40], 179, "40th value should be 179");
+-assertEq(view[41], 127, "41th value should be 127");
+-assertEq(view[42], 76, "42th value should be 76");
+-assertEq(view[43], 255, "43th value should be 255");
+-assertEq(view[44], 154, "44th value should be 154");
+-assertEq(view[45], 255, "45th value should be 255");
+-assertEq(view[46], 101, "46th value should be 101");
+-assertEq(view[47], 255, "47th value should be 255");
+-assertEq(view[48], 78, "48th value should be 78");
+-assertEq(view[49], 127, "49th value should be 127");
+-assertEq(view[50], 178, "50th value should be 178");
+-assertEq(view[51], 255, "51th value should be 255");
+-assertEq(view[52], 52, "52th value should be 52");
+-assertEq(view[53], 255, "53th value should be 255");
+-assertEq(view[54], 203, "54th value should be 203");
+-assertEq(view[55], 255, "55th value should be 255");
+-assertEq(view[56], 154, "56th value should be 154");
+-assertEq(view[57], 255, "57th value should be 255");
+-assertEq(view[58], 101, "58th value should be 101");
+-assertEq(view[59], 255, "59th value should be 255");
+-assertEq(view[60], 179, "60th value should be 179");
+-assertEq(view[61], 127, "61th value should be 127");
+-assertEq(view[62], 76, "62th value should be 76");
+-assertEq(view[63], 255, "63th value should be 255");
+-assertEq(view[64], 205, "64th value should be 205");
+-assertEq(view[65], 255, "65th value should be 255");
+-assertEq(view[66], 50, "66th value should be 50");
+-assertEq(view[67], 255, "67th value should be 255");
+-assertEq(view[68], 205, "68th value should be 205");
+-assertEq(view[69], 255, "69th value should be 255");
+-assertEq(view[70], 50, "70th value should be 50");
+-assertEq(view[71], 255, "71th value should be 255");
+-assertEq(view[72], 230, "72th value should be 230");
+-assertEq(view[73], 127, "73th value should be 127");
+-assertEq(view[74], 25, "74th value should be 25");
+-assertEq(view[75], 255, "75th value should be 255");
+-assertEq(view[76], 230, "76th value should be 230");
+-assertEq(view[77], 127, "77th value should be 127");
+-assertEq(view[78], 25, "78th value should be 25");
+-assertEq(view[79], 255, "79th value should be 255");
+-assertEq(view[80], 0, "80th value should be 0");
+-assertEq(view[81], 0, "81th value should be 0");
+-assertEq(view[82], 0, "82th value should be 0");
+-assertEq(view[83], 255, "83th value should be 255");
+-assertEq(view[84], 230, "84th value should be 230");
+-assertEq(view[85], 127, "85th value should be 127");
+-assertEq(view[86], 25, "86th value should be 25");
+-assertEq(view[87], 255, "87th value should be 255");
+-assertEq(view[88], 205, "88th value should be 205");
+-assertEq(view[89], 255, "89th value should be 255");
+-assertEq(view[90], 50, "90th value should be 50");
+-assertEq(view[91], 255, "91th value should be 255");
+-assertEq(view[92], 205, "92th value should be 205");
+-assertEq(view[93], 255, "93th value should be 255");
+-assertEq(view[94], 50, "94th value should be 50");
+-assertEq(view[95], 255, "95th value should be 255");
+-assertEq(view[96], 205, "96th value should be 205");
+-assertEq(view[97], 255, "97th value should be 255");
+-assertEq(view[98], 50, "98th value should be 50");
+-assertEq(view[99], 255, "99th value should be 255");
+-assertEq(view[100], 205, "100th value should be 205");
+-assertEq(view[101], 255, "101th value should be 255");
+-assertEq(view[102], 50, "102th value should be 50");
+-assertEq(view[103], 255, "103th value should be 255");
+-assertEq(view[104], 205, "104th value should be 205");
+-assertEq(view[105], 255, "105th value should be 255");
+-assertEq(view[106], 50, "106th value should be 50");
+-assertEq(view[107], 255, "107th value should be 255");
+-assertEq(view[108], 205, "108th value should be 205");
+-assertEq(view[109], 255, "109th value should be 255");
+-assertEq(view[110], 50, "110th value should be 50");
+-assertEq(view[111], 255, "111th value should be 255");
+-assertEq(view[112], 179, "112th value should be 179");
+-assertEq(view[113], 127, "113th value should be 127");
+-assertEq(view[114], 76, "114th value should be 76");
+-assertEq(view[115], 255, "115th value should be 255");
+-assertEq(view[116], 179, "116th value should be 179");
+-assertEq(view[117], 127, "117th value should be 127");
+-assertEq(view[118], 76, "118th value should be 76");
+-assertEq(view[119], 255, "119th value should be 255");
+-assertEq(view[120], 154, "120th value should be 154");
+-assertEq(view[121], 255, "121th value should be 255");
+-assertEq(view[122], 101, "122th value should be 101");
+-assertEq(view[123], 255, "123th value should be 255");
+-assertEq(view[124], 103, "124th value should be 103");
+-assertEq(view[125], 255, "125th value should be 255");
+-assertEq(view[126], 152, "126th value should be 152");
+-assertEq(view[127], 255, "127th value should be 255");
+-assertEq(view[128], 0, "128th value should be 0");
+-assertEq(view[129], 0, "129th value should be 0");
+-assertEq(view[130], 0, "130th value should be 0");
+-assertEq(view[131], 255, "131th value should be 255");
+-assertEq(view[132], 0, "132th value should be 0");
+-assertEq(view[133], 0, "133th value should be 0");
+-assertEq(view[134], 0, "134th value should be 0");
+-assertEq(view[135], 255, "135th value should be 255");
+-assertEq(view[136], 128, "136th value should be 128");
+-assertEq(view[137], 127, "137th value should be 127");
+-assertEq(view[138], 127, "138th value should be 127");
+-assertEq(view[139], 255, "139th value should be 255");
+-assertEq(view[140], 154, "140th value should be 154");
+-assertEq(view[141], 255, "141th value should be 255");
+-assertEq(view[142], 101, "142th value should be 101");
+-assertEq(view[143], 255, "143th value should be 255");
+-assertEq(view[144], 179, "144th value should be 179");
+-assertEq(view[145], 127, "145th value should be 127");
+-assertEq(view[146], 76, "146th value should be 76");
+-assertEq(view[147], 255, "147th value should be 255");
+-assertEq(view[148], 205, "148th value should be 205");
+-assertEq(view[149], 255, "149th value should be 255");
+-assertEq(view[150], 50, "150th value should be 50");
+-assertEq(view[151], 255, "151th value should be 255");
+-assertEq(view[152], 205, "152th value should be 205");
+-assertEq(view[153], 255, "153th value should be 255");
+-assertEq(view[154], 50, "154th value should be 50");
+-assertEq(view[155], 255, "155th value should be 255");
+-assertEq(view[156], 230, "156th value should be 230");
+-assertEq(view[157], 127, "157th value should be 127");
+-assertEq(view[158], 25, "158th value should be 25");
+-assertEq(view[159], 255, "159th value should be 255");
+-assertEq(view[160], 0, "160th value should be 0");
+-assertEq(view[161], 0, "161th value should be 0");
+-assertEq(view[162], 0, "162th value should be 0");
+-assertEq(view[163], 255, "163th value should be 255");
+-assertEq(view[164], 230, "164th value should be 230");
+-assertEq(view[165], 127, "165th value should be 127");
+-assertEq(view[166], 25, "166th value should be 25");
+-assertEq(view[167], 255, "167th value should be 255");
+-assertEq(view[168], 205, "168th value should be 205");
+-assertEq(view[169], 255, "169th value should be 255");
+-assertEq(view[170], 50, "170th value should be 50");
+-assertEq(view[171], 255, "171th value should be 255");
+-assertEq(view[172], 205, "172th value should be 205");
+-assertEq(view[173], 255, "173th value should be 255");
+-assertEq(view[174], 50, "174th value should be 50");
+-assertEq(view[175], 255, "175th value should be 255");
+-assertEq(view[176], 205, "176th value should be 205");
+-assertEq(view[177], 255, "177th value should be 255");
+-assertEq(view[178], 50, "178th value should be 50");
+-assertEq(view[179], 255, "179th value should be 255");
+-assertEq(view[180], 205, "180th value should be 205");
+-assertEq(view[181], 255, "181th value should be 255");
+-assertEq(view[182], 50, "182th value should be 50");
+-assertEq(view[183], 255, "183th value should be 255");
+-assertEq(view[184], 205, "184th value should be 205");
+-assertEq(view[185], 255, "185th value should be 255");
+-assertEq(view[186], 50, "186th value should be 50");
+-assertEq(view[187], 255, "187th value should be 255");
+-assertEq(view[188], 179, "188th value should be 179");
+-assertEq(view[189], 127, "189th value should be 127");
+-assertEq(view[190], 76, "190th value should be 76");
+-assertEq(view[191], 255, "191th value should be 255");
+-assertEq(view[192], 179, "192th value should be 179");
+-assertEq(view[193], 127, "193th value should be 127");
+-assertEq(view[194], 76, "194th value should be 76");
+-assertEq(view[195], 255, "195th value should be 255");
+-assertEq(view[196], 154, "196th value should be 154");
+-assertEq(view[197], 255, "197th value should be 255");
+-assertEq(view[198], 101, "198th value should be 101");
+-assertEq(view[199], 255, "199th value should be 255");
+-assertEq(view[200], 103, "200th value should be 103");
+-assertEq(view[201], 255, "201th value should be 255");
+-assertEq(view[202], 152, "202th value should be 152");
+-assertEq(view[203], 255, "203th value should be 255");
+-assertEq(view[204], 78, "204th value should be 78");
+-assertEq(view[205], 127, "205th value should be 127");
+-assertEq(view[206], 178, "206th value should be 178");
+-assertEq(view[207], 255, "207th value should be 255");
+-assertEq(view[208], 0, "208th value should be 0");
+-assertEq(view[209], 0, "209th value should be 0");
+-assertEq(view[210], 0, "210th value should be 0");
+-assertEq(view[211], 255, "211th value should be 255");
+-assertEq(view[212], 0, "212th value should be 0");
+-assertEq(view[213], 0, "213th value should be 0");
+-assertEq(view[214], 0, "214th value should be 0");
+-assertEq(view[215], 255, "215th value should be 255");
+-assertEq(view[216], 78, "216th value should be 78");
+-assertEq(view[217], 127, "217th value should be 127");
+-assertEq(view[218], 178, "218th value should be 178");
+-assertEq(view[219], 255, "219th value should be 255");
+-assertEq(view[220], 128, "220th value should be 128");
+-assertEq(view[221], 127, "221th value should be 127");
+-assertEq(view[222], 127, "222th value should be 127");
+-assertEq(view[223], 255, "223th value should be 255");
+-assertEq(view[224], 154, "224th value should be 154");
+-assertEq(view[225], 255, "225th value should be 255");
+-assertEq(view[226], 101, "226th value should be 101");
+-assertEq(view[227], 255, "227th value should be 255");
+-assertEq(view[228], 205, "228th value should be 205");
+-assertEq(view[229], 255, "229th value should be 255");
+-assertEq(view[230], 50, "230th value should be 50");
+-assertEq(view[231], 255, "231th value should be 255");
+-assertEq(view[232], 205, "232th value should be 205");
+-assertEq(view[233], 255, "233th value should be 255");
+-assertEq(view[234], 50, "234th value should be 50");
+-assertEq(view[235], 255, "235th value should be 255");
+-assertEq(view[236], 230, "236th value should be 230");
+-assertEq(view[237], 127, "237th value should be 127");
+-assertEq(view[238], 25, "238th value should be 25");
+-assertEq(view[239], 255, "239th value should be 255");
+-assertEq(view[240], 0, "240th value should be 0");
+-assertEq(view[241], 0, "241th value should be 0");
+-assertEq(view[242], 0, "242th value should be 0");
+-assertEq(view[243], 255, "243th value should be 255");
+-assertEq(view[244], 205, "244th value should be 205");
+-assertEq(view[245], 255, "245th value should be 255");
+-assertEq(view[246], 50, "246th value should be 50");
+-assertEq(view[247], 255, "247th value should be 255");
+-assertEq(view[248], 205, "248th value should be 205");
+-assertEq(view[249], 255, "249th value should be 255");
+-assertEq(view[250], 50, "250th value should be 50");
+-assertEq(view[251], 255, "251th value should be 255");
+-assertEq(view[252], 205, "252th value should be 205");
+-assertEq(view[253], 255, "253th value should be 255");
+-assertEq(view[254], 50, "254th value should be 50");
+-assertEq(view[255], 255, "255th value should be 255");
+-assertEq(view[256], 205, "256th value should be 205");
+-assertEq(view[257], 255, "257th value should be 255");
+-assertEq(view[258], 50, "258th value should be 50");
+-assertEq(view[259], 255, "259th value should be 255");
+-assertEq(view[260], 205, "260th value should be 205");
+-assertEq(view[261], 255, "261th value should be 255");
+-assertEq(view[262], 50, "262th value should be 50");
+-assertEq(view[263], 255, "263th value should be 255");
+-assertEq(view[264], 179, "264th value should be 179");
+-assertEq(view[265], 127, "265th value should be 127");
+-assertEq(view[266], 76, "266th value should be 76");
+-assertEq(view[267], 255, "267th value should be 255");
+-assertEq(view[268], 179, "268th value should be 179");
+-assertEq(view[269], 127, "269th value should be 127");
+-assertEq(view[270], 76, "270th value should be 76");
+-assertEq(view[271], 255, "271th value should be 255");
+-assertEq(view[272], 154, "272th value should be 154");
+-assertEq(view[273], 255, "273th value should be 255");
+-assertEq(view[274], 101, "274th value should be 101");
+-assertEq(view[275], 255, "275th value should be 255");
+-assertEq(view[276], 52, "276th value should be 52");
+-assertEq(view[277], 255, "277th value should be 255");
+-assertEq(view[278], 203, "278th value should be 203");
+-assertEq(view[279], 255, "279th value should be 255");
+-assertEq(view[280], 0, "280th value should be 0");
+-assertEq(view[281], 0, "281th value should be 0");
+-assertEq(view[282], 0, "282th value should be 0");
+-assertEq(view[283], 255, "283th value should be 255");
+-assertEq(view[284], 0, "284th value should be 0");
+-assertEq(view[285], 0, "285th value should be 0");
+-assertEq(view[286], 0, "286th value should be 0");
+-assertEq(view[287], 255, "287th value should be 255");
+-assertEq(view[288], 0, "288th value should be 0");
+-assertEq(view[289], 0, "289th value should be 0");
+-assertEq(view[290], 0, "290th value should be 0");
+-assertEq(view[291], 255, "291th value should be 255");
+-assertEq(view[292], 0, "292th value should be 0");
+-assertEq(view[293], 0, "293th value should be 0");
+-assertEq(view[294], 0, "294th value should be 0");
+-assertEq(view[295], 255, "295th value should be 255");
+-assertEq(view[296], 0, "296th value should be 0");
+-assertEq(view[297], 0, "297th value should be 0");
+-assertEq(view[298], 0, "298th value should be 0");
+-assertEq(view[299], 255, "299th value should be 255");
+-assertEq(view[300], 52, "300th value should be 52");
+-assertEq(view[301], 255, "301th value should be 255");
+-assertEq(view[302], 203, "302th value should be 203");
+-assertEq(view[303], 255, "303th value should be 255");
+-assertEq(view[304], 52, "304th value should be 52");
+-assertEq(view[305], 255, "305th value should be 255");
+-assertEq(view[306], 203, "306th value should be 203");
+-assertEq(view[307], 255, "307th value should be 255");
+-assertEq(view[308], 179, "308th value should be 179");
+-assertEq(view[309], 127, "309th value should be 127");
+-assertEq(view[310], 76, "310th value should be 76");
+-assertEq(view[311], 255, "311th value should be 255");
+-assertEq(view[312], 205, "312th value should be 205");
+-assertEq(view[313], 255, "313th value should be 255");
+-assertEq(view[314], 50, "314th value should be 50");
+-assertEq(view[315], 255, "315th value should be 255");
+-assertEq(view[316], 205, "316th value should be 205");
+-assertEq(view[317], 255, "317th value should be 255");
+-assertEq(view[318], 50, "318th value should be 50");
+-assertEq(view[319], 255, "319th value should be 255");
+-assertEq(view[320], 230, "320th value should be 230");
+-assertEq(view[321], 127, "321th value should be 127");
+-assertEq(view[322], 25, "322th value should be 25");
+-assertEq(view[323], 255, "323th value should be 255");
+-assertEq(view[324], 205, "324th value should be 205");
+-assertEq(view[325], 255, "325th value should be 255");
+-assertEq(view[326], 50, "326th value should be 50");
+-assertEq(view[327], 255, "327th value should be 255");
+-assertEq(view[328], 205, "328th value should be 205");
+-assertEq(view[329], 255, "329th value should be 255");
+-assertEq(view[330], 50, "330th value should be 50");
+-assertEq(view[331], 255, "331th value should be 255");
+-assertEq(view[332], 205, "332th value should be 205");
+-assertEq(view[333], 255, "333th value should be 255");
+-assertEq(view[334], 50, "334th value should be 50");
+-assertEq(view[335], 255, "335th value should be 255");
+-assertEq(view[336], 205, "336th value should be 205");
+-assertEq(view[337], 255, "337th value should be 255");
+-assertEq(view[338], 50, "338th value should be 50");
+-assertEq(view[339], 255, "339th value should be 255");
+-assertEq(view[340], 179, "340th value should be 179");
+-assertEq(view[341], 127, "341th value should be 127");
+-assertEq(view[342], 76, "342th value should be 76");
+-assertEq(view[343], 255, "343th value should be 255");
+-assertEq(view[344], 154, "344th value should be 154");
+-assertEq(view[345], 255, "345th value should be 255");
+-assertEq(view[346], 101, "346th value should be 101");
+-assertEq(view[347], 255, "347th value should be 255");
+-assertEq(view[348], 154, "348th value should be 154");
+-assertEq(view[349], 255, "349th value should be 255");
+-assertEq(view[350], 101, "350th value should be 101");
+-assertEq(view[351], 255, "351th value should be 255");
+-assertEq(view[352], 128, "352th value should be 128");
+-assertEq(view[353], 127, "353th value should be 127");
+-assertEq(view[354], 127, "354th value should be 127");
+-assertEq(view[355], 255, "355th value should be 255");
+-assertEq(view[356], 52, "356th value should be 52");
+-assertEq(view[357], 255, "357th value should be 255");
+-assertEq(view[358], 203, "358th value should be 203");
+-assertEq(view[359], 255, "359th value should be 255");
+-assertEq(view[360], 0, "360th value should be 0");
+-assertEq(view[361], 0, "361th value should be 0");
+-assertEq(view[362], 0, "362th value should be 0");
+-assertEq(view[363], 255, "363th value should be 255");
+-assertEq(view[364], 0, "364th value should be 0");
+-assertEq(view[365], 0, "365th value should be 0");
+-assertEq(view[366], 0, "366th value should be 0");
+-assertEq(view[367], 255, "367th value should be 255");
+-assertEq(view[368], 0, "368th value should be 0");
+-assertEq(view[369], 0, "369th value should be 0");
+-assertEq(view[370], 0, "370th value should be 0");
+-assertEq(view[371], 255, "371th value should be 255");
+-assertEq(view[372], 0, "372th value should be 0");
+-assertEq(view[373], 0, "373th value should be 0");
+-assertEq(view[374], 0, "374th value should be 0");
+-assertEq(view[375], 255, "375th value should be 255");
+-assertEq(view[376], 0, "376th value should be 0");
+-assertEq(view[377], 0, "377th value should be 0");
+-assertEq(view[378], 0, "378th value should be 0");
+-assertEq(view[379], 255, "379th value should be 255");
+-assertEq(view[380], 0, "380th value should be 0");
+-assertEq(view[381], 0, "381th value should be 0");
+-assertEq(view[382], 0, "382th value should be 0");
+-assertEq(view[383], 255, "383th value should be 255");
+-assertEq(view[384], 52, "384th value should be 52");
+-assertEq(view[385], 255, "385th value should be 255");
+-assertEq(view[386], 203, "386th value should be 203");
+-assertEq(view[387], 255, "387th value should be 255");
+-assertEq(view[388], 179, "388th value should be 179");
+-assertEq(view[389], 127, "389th value should be 127");
+-assertEq(view[390], 76, "390th value should be 76");
+-assertEq(view[391], 255, "391th value should be 255");
+-assertEq(view[392], 205, "392th value should be 205");
+-assertEq(view[393], 255, "393th value should be 255");
+-assertEq(view[394], 50, "394th value should be 50");
+-assertEq(view[395], 255, "395th value should be 255");
+-assertEq(view[396], 205, "396th value should be 205");
+-assertEq(view[397], 255, "397th value should be 255");
+-assertEq(view[398], 50, "398th value should be 50");
+-assertEq(view[399], 255, "399th value should be 255");
+-assertEq(view[400], 205, "400th value should be 205");
+-assertEq(view[401], 255, "401th value should be 255");
+-assertEq(view[402], 50, "402th value should be 50");
+-assertEq(view[403], 255, "403th value should be 255");
+-assertEq(view[404], 205, "404th value should be 205");
+-assertEq(view[405], 255, "405th value should be 255");
+-assertEq(view[406], 50, "406th value should be 50");
+-assertEq(view[407], 255, "407th value should be 255");
+-assertEq(view[408], 205, "408th value should be 205");
+-assertEq(view[409], 255, "409th value should be 255");
+-assertEq(view[410], 50, "410th value should be 50");
+-assertEq(view[411], 255, "411th value should be 255");
+-assertEq(view[412], 205, "412th value should be 205");
+-assertEq(view[413], 255, "413th value should be 255");
+-assertEq(view[414], 50, "414th value should be 50");
+-assertEq(view[415], 255, "415th value should be 255");
+-assertEq(view[416], 154, "416th value should be 154");
+-assertEq(view[417], 255, "417th value should be 255");
+-assertEq(view[418], 101, "418th value should be 101");
+-assertEq(view[419], 255, "419th value should be 255");
+-assertEq(view[420], 128, "420th value should be 128");
+-assertEq(view[421], 127, "421th value should be 127");
+-assertEq(view[422], 127, "422th value should be 127");
+-assertEq(view[423], 255, "423th value should be 255");
+-assertEq(view[424], 154, "424th value should be 154");
+-assertEq(view[425], 255, "425th value should be 255");
+-assertEq(view[426], 101, "426th value should be 101");
+-assertEq(view[427], 255, "427th value should be 255");
+-assertEq(view[428], 128, "428th value should be 128");
+-assertEq(view[429], 127, "429th value should be 127");
+-assertEq(view[430], 127, "430th value should be 127");
+-assertEq(view[431], 255, "431th value should be 255");
+-assertEq(view[432], 103, "432th value should be 103");
+-assertEq(view[433], 255, "433th value should be 255");
+-assertEq(view[434], 152, "434th value should be 152");
+-assertEq(view[435], 255, "435th value should be 255");
+-assertEq(view[436], 0, "436th value should be 0");
+-assertEq(view[437], 0, "437th value should be 0");
+-assertEq(view[438], 0, "438th value should be 0");
+-assertEq(view[439], 255, "439th value should be 255");
+-assertEq(view[440], 0, "440th value should be 0");
+-assertEq(view[441], 0, "441th value should be 0");
+-assertEq(view[442], 0, "442th value should be 0");
+-assertEq(view[443], 255, "443th value should be 255");
+-assertEq(view[444], 0, "444th value should be 0");
+-assertEq(view[445], 0, "445th value should be 0");
+-assertEq(view[446], 0, "446th value should be 0");
+-assertEq(view[447], 255, "447th value should be 255");
+-assertEq(view[448], 0, "448th value should be 0");
+-assertEq(view[449], 0, "449th value should be 0");
+-assertEq(view[450], 0, "450th value should be 0");
+-assertEq(view[451], 255, "451th value should be 255");
+-assertEq(view[452], 0, "452th value should be 0");
+-assertEq(view[453], 0, "453th value should be 0");
+-assertEq(view[454], 0, "454th value should be 0");
+-assertEq(view[455], 255, "455th value should be 255");
+-assertEq(view[456], 0, "456th value should be 0");
+-assertEq(view[457], 0, "457th value should be 0");
+-assertEq(view[458], 0, "458th value should be 0");
+-assertEq(view[459], 255, "459th value should be 255");
+-assertEq(view[460], 0, "460th value should be 0");
+-assertEq(view[461], 0, "461th value should be 0");
+-assertEq(view[462], 0, "462th value should be 0");
+-assertEq(view[463], 255, "463th value should be 255");
+-assertEq(view[464], 78, "464th value should be 78");
+-assertEq(view[465], 127, "465th value should be 127");
+-assertEq(view[466], 178, "466th value should be 178");
+-assertEq(view[467], 255, "467th value should be 255");
+-assertEq(view[468], 154, "468th value should be 154");
+-assertEq(view[469], 255, "469th value should be 255");
+-assertEq(view[470], 101, "470th value should be 101");
+-assertEq(view[471], 255, "471th value should be 255");
+-assertEq(view[472], 205, "472th value should be 205");
+-assertEq(view[473], 255, "473th value should be 255");
+-assertEq(view[474], 50, "474th value should be 50");
+-assertEq(view[475], 255, "475th value should be 255");
+-assertEq(view[476], 205, "476th value should be 205");
+-assertEq(view[477], 255, "477th value should be 255");
+-assertEq(view[478], 50, "478th value should be 50");
+-assertEq(view[479], 255, "479th value should be 255");
+-assertEq(view[480], 205, "480th value should be 205");
+-assertEq(view[481], 255, "481th value should be 255");
+-assertEq(view[482], 50, "482th value should be 50");
+-assertEq(view[483], 255, "483th value should be 255");
+-assertEq(view[484], 205, "484th value should be 205");
+-assertEq(view[485], 255, "485th value should be 255");
+-assertEq(view[486], 50, "486th value should be 50");
+-assertEq(view[487], 255, "487th value should be 255");
+-assertEq(view[488], 179, "488th value should be 179");
+-assertEq(view[489], 127, "489th value should be 127");
+-assertEq(view[490], 76, "490th value should be 76");
+-assertEq(view[491], 255, "491th value should be 255");
+-assertEq(view[492], 179, "492th value should be 179");
+-assertEq(view[493], 127, "493th value should be 127");
+-assertEq(view[494], 76, "494th value should be 76");
+-assertEq(view[495], 255, "495th value should be 255");
+-assertEq(view[496], 128, "496th value should be 128");
+-assertEq(view[497], 127, "497th value should be 127");
+-assertEq(view[498], 127, "498th value should be 127");
+-assertEq(view[499], 255, "499th value should be 255");
+-assertEq(view[500], 52, "500th value should be 52");
+-assertEq(view[501], 255, "501th value should be 255");
+-assertEq(view[502], 203, "502th value should be 203");
+-assertEq(view[503], 255, "503th value should be 255");
+-assertEq(view[504], 0, "504th value should be 0");
+-assertEq(view[505], 0, "505th value should be 0");
+-assertEq(view[506], 0, "506th value should be 0");
+-assertEq(view[507], 255, "507th value should be 255");
+-assertEq(view[508], 78, "508th value should be 78");
+-assertEq(view[509], 127, "509th value should be 127");
+-assertEq(view[510], 178, "510th value should be 178");
+-assertEq(view[511], 255, "511th value should be 255");
+-assertEq(view[512], 52, "512th value should be 52");
+-assertEq(view[513], 255, "513th value should be 255");
+-assertEq(view[514], 203, "514th value should be 203");
+-assertEq(view[515], 255, "515th value should be 255");
+-assertEq(view[516], 0, "516th value should be 0");
+-assertEq(view[517], 0, "517th value should be 0");
+-assertEq(view[518], 0, "518th value should be 0");
+-assertEq(view[519], 255, "519th value should be 255");
+-assertEq(view[520], 0, "520th value should be 0");
+-assertEq(view[521], 0, "521th value should be 0");
+-assertEq(view[522], 0, "522th value should be 0");
+-assertEq(view[523], 255, "523th value should be 255");
+-assertEq(view[524], 0, "524th value should be 0");
+-assertEq(view[525], 0, "525th value should be 0");
+-assertEq(view[526], 0, "526th value should be 0");
+-assertEq(view[527], 255, "527th value should be 255");
+-assertEq(view[528], 0, "528th value should be 0");
+-assertEq(view[529], 0, "529th value should be 0");
+-assertEq(view[530], 0, "530th value should be 0");
+-assertEq(view[531], 255, "531th value should be 255");
+-assertEq(view[532], 0, "532th value should be 0");
+-assertEq(view[533], 0, "533th value should be 0");
+-assertEq(view[534], 0, "534th value should be 0");
+-assertEq(view[535], 255, "535th value should be 255");
+-assertEq(view[536], 0, "536th value should be 0");
+-assertEq(view[537], 0, "537th value should be 0");
+-assertEq(view[538], 0, "538th value should be 0");
+-assertEq(view[539], 255, "539th value should be 255");
+-assertEq(view[540], 0, "540th value should be 0");
+-assertEq(view[541], 0, "541th value should be 0");
+-assertEq(view[542], 0, "542th value should be 0");
+-assertEq(view[543], 255, "543th value should be 255");
+-assertEq(view[544], 0, "544th value should be 0");
+-assertEq(view[545], 0, "545th value should be 0");
+-assertEq(view[546], 0, "546th value should be 0");
+-assertEq(view[547], 255, "547th value should be 255");
+-assertEq(view[548], 154, "548th value should be 154");
+-assertEq(view[549], 255, "549th value should be 255");
+-assertEq(view[550], 101, "550th value should be 101");
+-assertEq(view[551], 255, "551th value should be 255");
+-assertEq(view[552], 205, "552th value should be 205");
+-assertEq(view[553], 255, "553th value should be 255");
+-assertEq(view[554], 50, "554th value should be 50");
+-assertEq(view[555], 255, "555th value should be 255");
+-assertEq(view[556], 205, "556th value should be 205");
+-assertEq(view[557], 255, "557th value should be 255");
+-assertEq(view[558], 50, "558th value should be 50");
+-assertEq(view[559], 255, "559th value should be 255");
+-assertEq(view[560], 205, "560th value should be 205");
+-assertEq(view[561], 255, "561th value should be 255");
+-assertEq(view[562], 50, "562th value should be 50");
+-assertEq(view[563], 255, "563th value should be 255");
+-assertEq(view[564], 179, "564th value should be 179");
+-assertEq(view[565], 127, "565th value should be 127");
+-assertEq(view[566], 76, "566th value should be 76");
+-assertEq(view[567], 255, "567th value should be 255");
+-assertEq(view[568], 179, "568th value should be 179");
+-assertEq(view[569], 127, "569th value should be 127");
+-assertEq(view[570], 76, "570th value should be 76");
+-assertEq(view[571], 255, "571th value should be 255");
+-assertEq(view[572], 154, "572th value should be 154");
+-assertEq(view[573], 255, "573th value should be 255");
+-assertEq(view[574], 101, "574th value should be 101");
+-assertEq(view[575], 255, "575th value should be 255");
+-assertEq(view[576], 103, "576th value should be 103");
+-assertEq(view[577], 255, "577th value should be 255");
+-assertEq(view[578], 152, "578th value should be 152");
+-assertEq(view[579], 255, "579th value should be 255");
+-assertEq(view[580], 0, "580th value should be 0");
+-assertEq(view[581], 0, "581th value should be 0");
+-assertEq(view[582], 0, "582th value should be 0");
+-assertEq(view[583], 255, "583th value should be 255");
+-assertEq(view[584], 0, "584th value should be 0");
+-assertEq(view[585], 0, "585th value should be 0");
+-assertEq(view[586], 0, "586th value should be 0");
+-assertEq(view[587], 255, "587th value should be 255");
+-assertEq(view[588], 0, "588th value should be 0");
+-assertEq(view[589], 0, "589th value should be 0");
+-assertEq(view[590], 0, "590th value should be 0");
+-assertEq(view[591], 255, "591th value should be 255");
+-assertEq(view[592], 0, "592th value should be 0");
+-assertEq(view[593], 0, "593th value should be 0");
+-assertEq(view[594], 0, "594th value should be 0");
+-assertEq(view[595], 255, "595th value should be 255");
+-assertEq(view[596], 0, "596th value should be 0");
+-assertEq(view[597], 0, "597th value should be 0");
+-assertEq(view[598], 0, "598th value should be 0");
+-assertEq(view[599], 255, "599th value should be 255");
+-assertEq(view[600], 0, "600th value should be 0");
+-assertEq(view[601], 0, "601th value should be 0");
+-assertEq(view[602], 0, "602th value should be 0");
+-assertEq(view[603], 255, "603th value should be 255");
+-assertEq(view[604], 0, "604th value should be 0");
+-assertEq(view[605], 0, "605th value should be 0");
+-assertEq(view[606], 0, "606th value should be 0");
+-assertEq(view[607], 255, "607th value should be 255");
+-assertEq(view[608], 0, "608th value should be 0");
+-assertEq(view[609], 0, "609th value should be 0");
+-assertEq(view[610], 0, "610th value should be 0");
+-assertEq(view[611], 255, "611th value should be 255");
+-assertEq(view[612], 0, "612th value should be 0");
+-assertEq(view[613], 0, "613th value should be 0");
+-assertEq(view[614], 0, "614th value should be 0");
+-assertEq(view[615], 255, "615th value should be 255");
+-assertEq(view[616], 0, "616th value should be 0");
+-assertEq(view[617], 0, "617th value should be 0");
+-assertEq(view[618], 0, "618th value should be 0");
+-assertEq(view[619], 255, "619th value should be 255");
+-assertEq(view[620], 0, "620th value should be 0");
+-assertEq(view[621], 0, "621th value should be 0");
+-assertEq(view[622], 0, "622th value should be 0");
+-assertEq(view[623], 255, "623th value should be 255");
+-assertEq(view[624], 0, "624th value should be 0");
+-assertEq(view[625], 0, "625th value should be 0");
+-assertEq(view[626], 0, "626th value should be 0");
+-assertEq(view[627], 255, "627th value should be 255");
+-assertEq(view[628], 154, "628th value should be 154");
+-assertEq(view[629], 255, "629th value should be 255");
+-assertEq(view[630], 101, "630th value should be 101");
+-assertEq(view[631], 255, "631th value should be 255");
+-assertEq(view[632], 205, "632th value should be 205");
+-assertEq(view[633], 255, "633th value should be 255");
+-assertEq(view[634], 50, "634th value should be 50");
+-assertEq(view[635], 255, "635th value should be 255");
+-assertEq(view[636], 205, "636th value should be 205");
+-assertEq(view[637], 255, "637th value should be 255");
+-assertEq(view[638], 50, "638th value should be 50");
+-assertEq(view[639], 255, "639th value should be 255");
+-assertEq(view[640], 179, "640th value should be 179");
+-assertEq(view[641], 127, "641th value should be 127");
+-assertEq(view[642], 76, "642th value should be 76");
+-assertEq(view[643], 255, "643th value should be 255");
+-assertEq(view[644], 179, "644th value should be 179");
+-assertEq(view[645], 127, "645th value should be 127");
+-assertEq(view[646], 76, "646th value should be 76");
+-assertEq(view[647], 255, "647th value should be 255");
+-assertEq(view[648], 154, "648th value should be 154");
+-assertEq(view[649], 255, "649th value should be 255");
+-assertEq(view[650], 101, "650th value should be 101");
+-assertEq(view[651], 255, "651th value should be 255");
+-assertEq(view[652], 128, "652th value should be 128");
+-assertEq(view[653], 127, "653th value should be 127");
+-assertEq(view[654], 127, "654th value should be 127");
+-assertEq(view[655], 255, "655th value should be 255");
+-assertEq(view[656], 52, "656th value should be 52");
+-assertEq(view[657], 255, "657th value should be 255");
+-assertEq(view[658], 203, "658th value should be 203");
+-assertEq(view[659], 255, "659th value should be 255");
+-assertEq(view[660], 0, "660th value should be 0");
+-assertEq(view[661], 0, "661th value should be 0");
+-assertEq(view[662], 0, "662th value should be 0");
+-assertEq(view[663], 255, "663th value should be 255");
+-assertEq(view[664], 0, "664th value should be 0");
+-assertEq(view[665], 0, "665th value should be 0");
+-assertEq(view[666], 0, "666th value should be 0");
+-assertEq(view[667], 255, "667th value should be 255");
+-assertEq(view[668], 0, "668th value should be 0");
+-assertEq(view[669], 0, "669th value should be 0");
+-assertEq(view[670], 0, "670th value should be 0");
+-assertEq(view[671], 255, "671th value should be 255");
+-assertEq(view[672], 0, "672th value should be 0");
+-assertEq(view[673], 0, "673th value should be 0");
+-assertEq(view[674], 0, "674th value should be 0");
+-assertEq(view[675], 255, "675th value should be 255");
+-assertEq(view[676], 0, "676th value should be 0");
+-assertEq(view[677], 0, "677th value should be 0");
+-assertEq(view[678], 0, "678th value should be 0");
+-assertEq(view[679], 255, "679th value should be 255");
+-assertEq(view[680], 0, "680th value should be 0");
+-assertEq(view[681], 0, "681th value should be 0");
+-assertEq(view[682], 0, "682th value should be 0");
+-assertEq(view[683], 255, "683th value should be 255");
+-assertEq(view[684], 0, "684th value should be 0");
+-assertEq(view[685], 0, "685th value should be 0");
+-assertEq(view[686], 0, "686th value should be 0");
+-assertEq(view[687], 255, "687th value should be 255");
+-assertEq(view[688], 0, "688th value should be 0");
+-assertEq(view[689], 0, "689th value should be 0");
+-assertEq(view[690], 0, "690th value should be 0");
+-assertEq(view[691], 255, "691th value should be 255");
+-assertEq(view[692], 0, "692th value should be 0");
+-assertEq(view[693], 0, "693th value should be 0");
+-assertEq(view[694], 0, "694th value should be 0");
+-assertEq(view[695], 255, "695th value should be 255");
+-assertEq(view[696], 0, "696th value should be 0");
+-assertEq(view[697], 0, "697th value should be 0");
+-assertEq(view[698], 0, "698th value should be 0");
+-assertEq(view[699], 255, "699th value should be 255");
+-assertEq(view[700], 0, "700th value should be 0");
+-assertEq(view[701], 0, "701th value should be 0");
+-assertEq(view[702], 0, "702th value should be 0");
+-assertEq(view[703], 255, "703th value should be 255");
+-assertEq(view[704], 0, "704th value should be 0");
+-assertEq(view[705], 0, "705th value should be 0");
+-assertEq(view[706], 0, "706th value should be 0");
+-assertEq(view[707], 255, "707th value should be 255");
+-assertEq(view[708], 154, "708th value should be 154");
+-assertEq(view[709], 255, "709th value should be 255");
+-assertEq(view[710], 101, "710th value should be 101");
+-assertEq(view[711], 255, "711th value should be 255");
+-assertEq(view[712], 179, "712th value should be 179");
+-assertEq(view[713], 127, "713th value should be 127");
+-assertEq(view[714], 76, "714th value should be 76");
+-assertEq(view[715], 255, "715th value should be 255");
+-assertEq(view[716], 205, "716th value should be 205");
+-assertEq(view[717], 255, "717th value should be 255");
+-assertEq(view[718], 50, "718th value should be 50");
+-assertEq(view[719], 255, "719th value should be 255");
+-assertEq(view[720], 154, "720th value should be 154");
+-assertEq(view[721], 255, "721th value should be 255");
+-assertEq(view[722], 101, "722th value should be 101");
+-assertEq(view[723], 255, "723th value should be 255");
+-assertEq(view[724], 52, "724th value should be 52");
+-assertEq(view[725], 255, "725th value should be 255");
+-assertEq(view[726], 203, "726th value should be 203");
+-assertEq(view[727], 255, "727th value should be 255");
+-assertEq(view[728], 128, "728th value should be 128");
+-assertEq(view[729], 127, "729th value should be 127");
+-assertEq(view[730], 127, "730th value should be 127");
+-assertEq(view[731], 255, "731th value should be 255");
+-assertEq(view[732], 78, "732th value should be 78");
+-assertEq(view[733], 127, "733th value should be 127");
+-assertEq(view[734], 178, "734th value should be 178");
+-assertEq(view[735], 255, "735th value should be 255");
+-assertEq(view[736], 0, "736th value should be 0");
+-assertEq(view[737], 0, "737th value should be 0");
+-assertEq(view[738], 0, "738th value should be 0");
+-assertEq(view[739], 255, "739th value should be 255");
+-assertEq(view[740], 0, "740th value should be 0");
+-assertEq(view[741], 0, "741th value should be 0");
+-assertEq(view[742], 0, "742th value should be 0");
+-assertEq(view[743], 255, "743th value should be 255");
+-assertEq(view[744], 0, "744th value should be 0");
+-assertEq(view[745], 0, "745th value should be 0");
+-assertEq(view[746], 0, "746th value should be 0");
+-assertEq(view[747], 255, "747th value should be 255");
+-assertEq(view[748], 0, "748th value should be 0");
+-assertEq(view[749], 0, "749th value should be 0");
+-assertEq(view[750], 0, "750th value should be 0");
+-assertEq(view[751], 255, "751th value should be 255");
+-assertEq(view[752], 0, "752th value should be 0");
+-assertEq(view[753], 0, "753th value should be 0");
+-assertEq(view[754], 0, "754th value should be 0");
+-assertEq(view[755], 255, "755th value should be 255");
+-assertEq(view[756], 0, "756th value should be 0");
+-assertEq(view[757], 0, "757th value should be 0");
+-assertEq(view[758], 0, "758th value should be 0");
+-assertEq(view[759], 255, "759th value should be 255");
+-assertEq(view[760], 0, "760th value should be 0");
+-assertEq(view[761], 0, "761th value should be 0");
+-assertEq(view[762], 0, "762th value should be 0");
+-assertEq(view[763], 255, "763th value should be 255");
+-assertEq(view[764], 0, "764th value should be 0");
+-assertEq(view[765], 0, "765th value should be 0");
+-assertEq(view[766], 0, "766th value should be 0");
+-assertEq(view[767], 255, "767th value should be 255");
+-assertEq(view[768], 0, "768th value should be 0");
+-assertEq(view[769], 0, "769th value should be 0");
+-assertEq(view[770], 0, "770th value should be 0");
+-assertEq(view[771], 255, "771th value should be 255");
+-assertEq(view[772], 0, "772th value should be 0");
+-assertEq(view[773], 0, "773th value should be 0");
+-assertEq(view[774], 0, "774th value should be 0");
+-assertEq(view[775], 255, "775th value should be 255");
+-assertEq(view[776], 0, "776th value should be 0");
+-assertEq(view[777], 0, "777th value should be 0");
+-assertEq(view[778], 0, "778th value should be 0");
+-assertEq(view[779], 255, "779th value should be 255");
+-assertEq(view[780], 0, "780th value should be 0");
+-assertEq(view[781], 0, "781th value should be 0");
+-assertEq(view[782], 0, "782th value should be 0");
+-assertEq(view[783], 255, "783th value should be 255");
+-assertEq(view[784], 78, "784th value should be 78");
+-assertEq(view[785], 127, "785th value should be 127");
+-assertEq(view[786], 178, "786th value should be 178");
+-assertEq(view[787], 255, "787th value should be 255");
+-assertEq(view[788], 154, "788th value should be 154");
+-assertEq(view[789], 255, "789th value should be 255");
+-assertEq(view[790], 101, "790th value should be 101");
+-assertEq(view[791], 255, "791th value should be 255");
+-assertEq(view[792], 179, "792th value should be 179");
+-assertEq(view[793], 127, "793th value should be 127");
+-assertEq(view[794], 76, "794th value should be 76");
+-assertEq(view[795], 255, "795th value should be 255");
+-assertEq(view[796], 205, "796th value should be 205");
+-assertEq(view[797], 255, "797th value should be 255");
+-assertEq(view[798], 50, "798th value should be 50");
+-assertEq(view[799], 255, "799th value should be 255");
+-assertEq(view[800], 128, "800th value should be 128");
+-assertEq(view[801], 127, "801th value should be 127");
+-assertEq(view[802], 127, "802th value should be 127");
+-assertEq(view[803], 255, "803th value should be 255");
+-assertEq(view[804], 0, "804th value should be 0");
+-assertEq(view[805], 0, "805th value should be 0");
+-assertEq(view[806], 0, "806th value should be 0");
+-assertEq(view[807], 255, "807th value should be 255");
+-assertEq(view[808], 26, "808th value should be 26");
+-assertEq(view[809], 127, "809th value should be 127");
+-assertEq(view[810], 229, "810th value should be 229");
+-assertEq(view[811], 255, "811th value should be 255");
+-assertEq(view[812], 0, "812th value should be 0");
+-assertEq(view[813], 0, "813th value should be 0");
+-assertEq(view[814], 0, "814th value should be 0");
+-assertEq(view[815], 255, "815th value should be 255");
+-assertEq(view[816], 0, "816th value should be 0");
+-assertEq(view[817], 0, "817th value should be 0");
+-assertEq(view[818], 0, "818th value should be 0");
+-assertEq(view[819], 255, "819th value should be 255");
+-assertEq(view[820], 0, "820th value should be 0");
+-assertEq(view[821], 0, "821th value should be 0");
+-assertEq(view[822], 0, "822th value should be 0");
+-assertEq(view[823], 255, "823th value should be 255");
+-assertEq(view[824], 0, "824th value should be 0");
+-assertEq(view[825], 0, "825th value should be 0");
+-assertEq(view[826], 0, "826th value should be 0");
+-assertEq(view[827], 255, "827th value should be 255");
+-assertEq(view[828], 0, "828th value should be 0");
+-assertEq(view[829], 0, "829th value should be 0");
+-assertEq(view[830], 0, "830th value should be 0");
+-assertEq(view[831], 255, "831th value should be 255");
+-assertEq(view[832], 0, "832th value should be 0");
+-assertEq(view[833], 0, "833th value should be 0");
+-assertEq(view[834], 0, "834th value should be 0");
+-assertEq(view[835], 255, "835th value should be 255");
+-assertEq(view[836], 0, "836th value should be 0");
+-assertEq(view[837], 0, "837th value should be 0");
+-assertEq(view[838], 0, "838th value should be 0");
+-assertEq(view[839], 255, "839th value should be 255");
+-assertEq(view[840], 0, "840th value should be 0");
+-assertEq(view[841], 0, "841th value should be 0");
+-assertEq(view[842], 0, "842th value should be 0");
+-assertEq(view[843], 255, "843th value should be 255");
+-assertEq(view[844], 0, "844th value should be 0");
+-assertEq(view[845], 0, "845th value should be 0");
+-assertEq(view[846], 0, "846th value should be 0");
+-assertEq(view[847], 255, "847th value should be 255");
+-assertEq(view[848], 0, "848th value should be 0");
+-assertEq(view[849], 0, "849th value should be 0");
+-assertEq(view[850], 0, "850th value should be 0");
+-assertEq(view[851], 255, "851th value should be 255");
+-assertEq(view[852], 0, "852th value should be 0");
+-assertEq(view[853], 0, "853th value should be 0");
+-assertEq(view[854], 0, "854th value should be 0");
+-assertEq(view[855], 255, "855th value should be 255");
+-assertEq(view[856], 0, "856th value should be 0");
+-assertEq(view[857], 0, "857th value should be 0");
+-assertEq(view[858], 0, "858th value should be 0");
+-assertEq(view[859], 255, "859th value should be 255");
+-assertEq(view[860], 0, "860th value should be 0");
+-assertEq(view[861], 0, "861th value should be 0");
+-assertEq(view[862], 0, "862th value should be 0");
+-assertEq(view[863], 255, "863th value should be 255");
+-assertEq(view[864], 103, "864th value should be 103");
+-assertEq(view[865], 255, "865th value should be 255");
+-assertEq(view[866], 152, "866th value should be 152");
+-assertEq(view[867], 255, "867th value should be 255");
+-assertEq(view[868], 154, "868th value should be 154");
+-assertEq(view[869], 255, "869th value should be 255");
+-assertEq(view[870], 101, "870th value should be 101");
+-assertEq(view[871], 255, "871th value should be 255");
+-assertEq(view[872], 179, "872th value should be 179");
+-assertEq(view[873], 127, "873th value should be 127");
+-assertEq(view[874], 76, "874th value should be 76");
+-assertEq(view[875], 255, "875th value should be 255");
+-assertEq(view[876], 205, "876th value should be 205");
+-assertEq(view[877], 255, "877th value should be 255");
+-assertEq(view[878], 50, "878th value should be 50");
+-assertEq(view[879], 255, "879th value should be 255");
+-assertEq(view[880], 179, "880th value should be 179");
+-assertEq(view[881], 127, "881th value should be 127");
+-assertEq(view[882], 76, "882th value should be 76");
+-assertEq(view[883], 255, "883th value should be 255");
+-assertEq(view[884], 179, "884th value should be 179");
+-assertEq(view[885], 127, "885th value should be 127");
+-assertEq(view[886], 76, "886th value should be 76");
+-assertEq(view[887], 255, "887th value should be 255");
+-assertEq(view[888], 128, "888th value should be 128");
+-assertEq(view[889], 127, "889th value should be 127");
+-assertEq(view[890], 127, "890th value should be 127");
+-assertEq(view[891], 255, "891th value should be 255");
+-assertEq(view[892], 103, "892th value should be 103");
+-assertEq(view[893], 255, "893th value should be 255");
+-assertEq(view[894], 152, "894th value should be 152");
+-assertEq(view[895], 255, "895th value should be 255");
+-assertEq(view[896], 26, "896th value should be 26");
+-assertEq(view[897], 127, "897th value should be 127");
+-assertEq(view[898], 229, "898th value should be 229");
+-assertEq(view[899], 255, "899th value should be 255");
+-assertEq(view[900], 0, "900th value should be 0");
+-assertEq(view[901], 0, "901th value should be 0");
+-assertEq(view[902], 0, "902th value should be 0");
+-assertEq(view[903], 255, "903th value should be 255");
+-assertEq(view[904], 0, "904th value should be 0");
+-assertEq(view[905], 0, "905th value should be 0");
+-assertEq(view[906], 0, "906th value should be 0");
+-assertEq(view[907], 255, "907th value should be 255");
+-assertEq(view[908], 0, "908th value should be 0");
+-assertEq(view[909], 0, "909th value should be 0");
+-assertEq(view[910], 0, "910th value should be 0");
+-assertEq(view[911], 255, "911th value should be 255");
+-assertEq(view[912], 0, "912th value should be 0");
+-assertEq(view[913], 0, "913th value should be 0");
+-assertEq(view[914], 0, "914th value should be 0");
+-assertEq(view[915], 255, "915th value should be 255");
+-assertEq(view[916], 0, "916th value should be 0");
+-assertEq(view[917], 0, "917th value should be 0");
+-assertEq(view[918], 0, "918th value should be 0");
+-assertEq(view[919], 255, "919th value should be 255");
+-assertEq(view[920], 0, "920th value should be 0");
+-assertEq(view[921], 0, "921th value should be 0");
+-assertEq(view[922], 0, "922th value should be 0");
+-assertEq(view[923], 255, "923th value should be 255");
+-assertEq(view[924], 0, "924th value should be 0");
+-assertEq(view[925], 0, "925th value should be 0");
+-assertEq(view[926], 0, "926th value should be 0");
+-assertEq(view[927], 255, "927th value should be 255");
+-assertEq(view[928], 0, "928th value should be 0");
+-assertEq(view[929], 0, "929th value should be 0");
+-assertEq(view[930], 0, "930th value should be 0");
+-assertEq(view[931], 255, "931th value should be 255");
+-assertEq(view[932], 0, "932th value should be 0");
+-assertEq(view[933], 0, "933th value should be 0");
+-assertEq(view[934], 0, "934th value should be 0");
+-assertEq(view[935], 255, "935th value should be 255");
+-assertEq(view[936], 0, "936th value should be 0");
+-assertEq(view[937], 0, "937th value should be 0");
+-assertEq(view[938], 0, "938th value should be 0");
+-assertEq(view[939], 255, "939th value should be 255");
+-assertEq(view[940], 0, "940th value should be 0");
+-assertEq(view[941], 0, "941th value should be 0");
+-assertEq(view[942], 0, "942th value should be 0");
+-assertEq(view[943], 255, "943th value should be 255");
+-assertEq(view[944], 0, "944th value should be 0");
+-assertEq(view[945], 0, "945th value should be 0");
+-assertEq(view[946], 0, "946th value should be 0");
+-assertEq(view[947], 255, "947th value should be 255");
+-assertEq(view[948], 154, "948th value should be 154");
+-assertEq(view[949], 255, "949th value should be 255");
+-assertEq(view[950], 101, "950th value should be 101");
+-assertEq(view[951], 255, "951th value should be 255");
+-assertEq(view[952], 179, "952th value should be 179");
+-assertEq(view[953], 127, "953th value should be 127");
+-assertEq(view[954], 76, "954th value should be 76");
+-assertEq(view[955], 255, "955th value should be 255");
+-assertEq(view[956], 205, "956th value should be 205");
+-assertEq(view[957], 255, "957th value should be 255");
+-assertEq(view[958], 50, "958th value should be 50");
+-assertEq(view[959], 255, "959th value should be 255");
+-assertEq(view[960], 179, "960th value should be 179");
+-assertEq(view[961], 127, "961th value should be 127");
+-assertEq(view[962], 76, "962th value should be 76");
+-assertEq(view[963], 255, "963th value should be 255");
+-assertEq(view[964], 179, "964th value should be 179");
+-assertEq(view[965], 127, "965th value should be 127");
+-assertEq(view[966], 76, "966th value should be 76");
+-assertEq(view[967], 255, "967th value should be 255");
+-assertEq(view[968], 179, "968th value should be 179");
+-assertEq(view[969], 127, "969th value should be 127");
+-assertEq(view[970], 76, "970th value should be 76");
+-assertEq(view[971], 255, "971th value should be 255");
+-assertEq(view[972], 154, "972th value should be 154");
+-assertEq(view[973], 255, "973th value should be 255");
+-assertEq(view[974], 101, "974th value should be 101");
+-assertEq(view[975], 255, "975th value should be 255");
+-assertEq(view[976], 103, "976th value should be 103");
+-assertEq(view[977], 255, "977th value should be 255");
+-assertEq(view[978], 152, "978th value should be 152");
+-assertEq(view[979], 255, "979th value should be 255");
+-assertEq(view[980], 0, "980th value should be 0");
+-assertEq(view[981], 0, "981th value should be 0");
+-assertEq(view[982], 0, "982th value should be 0");
+-assertEq(view[983], 255, "983th value should be 255");
+-assertEq(view[984], 0, "984th value should be 0");
+-assertEq(view[985], 0, "985th value should be 0");
+-assertEq(view[986], 0, "986th value should be 0");
+-assertEq(view[987], 255, "987th value should be 255");
+-assertEq(view[988], 0, "988th value should be 0");
+-assertEq(view[989], 0, "989th value should be 0");
+-assertEq(view[990], 0, "990th value should be 0");
+-assertEq(view[991], 255, "991th value should be 255");
+-assertEq(view[992], 0, "992th value should be 0");
+-assertEq(view[993], 0, "993th value should be 0");
+-assertEq(view[994], 0, "994th value should be 0");
+-assertEq(view[995], 255, "995th value should be 255");
+-assertEq(view[996], 0, "996th value should be 0");
+-assertEq(view[997], 0, "997th value should be 0");
+-assertEq(view[998], 0, "998th value should be 0");
+-assertEq(view[999], 255, "999th value should be 255");
+-assertEq(view[1000], 0, "1000th value should be 0");
+-assertEq(view[1001], 0, "1001th value should be 0");
+-assertEq(view[1002], 0, "1002th value should be 0");
+-assertEq(view[1003], 255, "1003th value should be 255");
+-assertEq(view[1004], 0, "1004th value should be 0");
+-assertEq(view[1005], 0, "1005th value should be 0");
+-assertEq(view[1006], 0, "1006th value should be 0");
+-assertEq(view[1007], 255, "1007th value should be 255");
+-assertEq(view[1008], 0, "1008th value should be 0");
+-assertEq(view[1009], 0, "1009th value should be 0");
+-assertEq(view[1010], 0, "1010th value should be 0");
+-assertEq(view[1011], 255, "1011th value should be 255");
+-assertEq(view[1012], 0, "1012th value should be 0");
+-assertEq(view[1013], 0, "1013th value should be 0");
+-assertEq(view[1014], 0, "1014th value should be 0");
+-assertEq(view[1015], 255, "1015th value should be 255");
+-assertEq(view[1016], 0, "1016th value should be 0");
+-assertEq(view[1017], 0, "1017th value should be 0");
+-assertEq(view[1018], 0, "1018th value should be 0");
+-assertEq(view[1019], 255, "1019th value should be 255");
+-assertEq(view[1020], 0, "1020th value should be 0");
+-assertEq(view[1021], 0, "1021th value should be 0");
+-assertEq(view[1022], 0, "1022th value should be 0");
+-assertEq(view[1023], 255, "1023th value should be 255");
+-assertEq(view[1024], 0, "1024th value should be 0");
+-assertEq(view[1025], 0, "1025th value should be 0");
+-assertEq(view[1026], 0, "1026th value should be 0");
+-assertEq(view[1027], 255, "1027th value should be 255");
+-assertEq(view[1028], 154, "1028th value should be 154");
+-assertEq(view[1029], 255, "1029th value should be 255");
+-assertEq(view[1030], 101, "1030th value should be 101");
+-assertEq(view[1031], 255, "1031th value should be 255");
+-assertEq(view[1032], 205, "1032th value should be 205");
+-assertEq(view[1033], 255, "1033th value should be 255");
+-assertEq(view[1034], 50, "1034th value should be 50");
+-assertEq(view[1035], 255, "1035th value should be 255");
+-assertEq(view[1036], 205, "1036th value should be 205");
+-assertEq(view[1037], 255, "1037th value should be 255");
+-assertEq(view[1038], 50, "1038th value should be 50");
+-assertEq(view[1039], 255, "1039th value should be 255");
+-assertEq(view[1040], 205, "1040th value should be 205");
+-assertEq(view[1041], 255, "1041th value should be 255");
+-assertEq(view[1042], 50, "1042th value should be 50");
+-assertEq(view[1043], 255, "1043th value should be 255");
+-assertEq(view[1044], 179, "1044th value should be 179");
+-assertEq(view[1045], 127, "1045th value should be 127");
+-assertEq(view[1046], 76, "1046th value should be 76");
+-assertEq(view[1047], 255, "1047th value should be 255");
+-assertEq(view[1048], 179, "1048th value should be 179");
+-assertEq(view[1049], 127, "1049th value should be 127");
+-assertEq(view[1050], 76, "1050th value should be 76");
+-assertEq(view[1051], 255, "1051th value should be 255");
+-assertEq(view[1052], 154, "1052th value should be 154");
+-assertEq(view[1053], 255, "1053th value should be 255");
+-assertEq(view[1054], 101, "1054th value should be 101");
+-assertEq(view[1055], 255, "1055th value should be 255");
+-assertEq(view[1056], 128, "1056th value should be 128");
+-assertEq(view[1057], 127, "1057th value should be 127");
+-assertEq(view[1058], 127, "1058th value should be 127");
+-assertEq(view[1059], 255, "1059th value should be 255");
+-assertEq(view[1060], 0, "1060th value should be 0");
+-assertEq(view[1061], 0, "1061th value should be 0");
+-assertEq(view[1062], 0, "1062th value should be 0");
+-assertEq(view[1063], 255, "1063th value should be 255");
+-assertEq(view[1064], 0, "1064th value should be 0");
+-assertEq(view[1065], 0, "1065th value should be 0");
+-assertEq(view[1066], 0, "1066th value should be 0");
+-assertEq(view[1067], 255, "1067th value should be 255");
+-assertEq(view[1068], 26, "1068th value should be 26");
+-assertEq(view[1069], 127, "1069th value should be 127");
+-assertEq(view[1070], 229, "1070th value should be 229");
+-assertEq(view[1071], 255, "1071th value should be 255");
+-assertEq(view[1072], 26, "1072th value should be 26");
+-assertEq(view[1073], 127, "1073th value should be 127");
+-assertEq(view[1074], 229, "1074th value should be 229");
+-assertEq(view[1075], 255, "1075th value should be 255");
+-assertEq(view[1076], 0, "1076th value should be 0");
+-assertEq(view[1077], 0, "1077th value should be 0");
+-assertEq(view[1078], 0, "1078th value should be 0");
+-assertEq(view[1079], 255, "1079th value should be 255");
+-assertEq(view[1080], 0, "1080th value should be 0");
+-assertEq(view[1081], 0, "1081th value should be 0");
+-assertEq(view[1082], 0, "1082th value should be 0");
+-assertEq(view[1083], 255, "1083th value should be 255");
+-assertEq(view[1084], 0, "1084th value should be 0");
+-assertEq(view[1085], 0, "1085th value should be 0");
+-assertEq(view[1086], 0, "1086th value should be 0");
+-assertEq(view[1087], 255, "1087th value should be 255");
+-assertEq(view[1088], 0, "1088th value should be 0");
+-assertEq(view[1089], 0, "1089th value should be 0");
+-assertEq(view[1090], 0, "1090th value should be 0");
+-assertEq(view[1091], 255, "1091th value should be 255");
+-assertEq(view[1092], 0, "1092th value should be 0");
+-assertEq(view[1093], 0, "1093th value should be 0");
+-assertEq(view[1094], 0, "1094th value should be 0");
+-assertEq(view[1095], 255, "1095th value should be 255");
+-assertEq(view[1096], 0, "1096th value should be 0");
+-assertEq(view[1097], 0, "1097th value should be 0");
+-assertEq(view[1098], 0, "1098th value should be 0");
+-assertEq(view[1099], 255, "1099th value should be 255");
+-assertEq(view[1100], 0, "1100th value should be 0");
+-assertEq(view[1101], 0, "1101th value should be 0");
+-assertEq(view[1102], 0, "1102th value should be 0");
+-assertEq(view[1103], 255, "1103th value should be 255");
+-assertEq(view[1104], 0, "1104th value should be 0");
+-assertEq(view[1105], 0, "1105th value should be 0");
+-assertEq(view[1106], 0, "1106th value should be 0");
+-assertEq(view[1107], 255, "1107th value should be 255");
+-assertEq(view[1108], 154, "1108th value should be 154");
+-assertEq(view[1109], 255, "1109th value should be 255");
+-assertEq(view[1110], 101, "1110th value should be 101");
+-assertEq(view[1111], 255, "1111th value should be 255");
+-assertEq(view[1112], 205, "1112th value should be 205");
+-assertEq(view[1113], 255, "1113th value should be 255");
+-assertEq(view[1114], 50, "1114th value should be 50");
+-assertEq(view[1115], 255, "1115th value should be 255");
+-assertEq(view[1116], 205, "1116th value should be 205");
+-assertEq(view[1117], 255, "1117th value should be 255");
+-assertEq(view[1118], 50, "1118th value should be 50");
+-assertEq(view[1119], 255, "1119th value should be 255");
+-assertEq(view[1120], 205, "1120th value should be 205");
+-assertEq(view[1121], 255, "1121th value should be 255");
+-assertEq(view[1122], 50, "1122th value should be 50");
+-assertEq(view[1123], 255, "1123th value should be 255");
+-assertEq(view[1124], 205, "1124th value should be 205");
+-assertEq(view[1125], 255, "1125th value should be 255");
+-assertEq(view[1126], 50, "1126th value should be 50");
+-assertEq(view[1127], 255, "1127th value should be 255");
+-assertEq(view[1128], 205, "1128th value should be 205");
+-assertEq(view[1129], 255, "1129th value should be 255");
+-assertEq(view[1130], 50, "1130th value should be 50");
+-assertEq(view[1131], 255, "1131th value should be 255");
+-assertEq(view[1132], 179, "1132th value should be 179");
+-assertEq(view[1133], 127, "1133th value should be 127");
+-assertEq(view[1134], 76, "1134th value should be 76");
+-assertEq(view[1135], 255, "1135th value should be 255");
+-assertEq(view[1136], 154, "1136th value should be 154");
+-assertEq(view[1137], 255, "1137th value should be 255");
+-assertEq(view[1138], 101, "1138th value should be 101");
+-assertEq(view[1139], 255, "1139th value should be 255");
+-assertEq(view[1140], 128, "1140th value should be 128");
+-assertEq(view[1141], 127, "1141th value should be 127");
+-assertEq(view[1142], 127, "1142th value should be 127");
+-assertEq(view[1143], 255, "1143th value should be 255");
+-assertEq(view[1144], 128, "1144th value should be 128");
+-assertEq(view[1145], 127, "1145th value should be 127");
+-assertEq(view[1146], 127, "1146th value should be 127");
+-assertEq(view[1147], 255, "1147th value should be 255");
+-assertEq(view[1148], 103, "1148th value should be 103");
+-assertEq(view[1149], 255, "1149th value should be 255");
+-assertEq(view[1150], 152, "1150th value should be 152");
+-assertEq(view[1151], 255, "1151th value should be 255");
+-assertEq(view[1152], 78, "1152th value should be 78");
+-assertEq(view[1153], 127, "1153th value should be 127");
+-assertEq(view[1154], 178, "1154th value should be 178");
+-assertEq(view[1155], 255, "1155th value should be 255");
+-assertEq(view[1156], 0, "1156th value should be 0");
+-assertEq(view[1157], 0, "1157th value should be 0");
+-assertEq(view[1158], 0, "1158th value should be 0");
+-assertEq(view[1159], 255, "1159th value should be 255");
+-assertEq(view[1160], 0, "1160th value should be 0");
+-assertEq(view[1161], 0, "1161th value should be 0");
+-assertEq(view[1162], 0, "1162th value should be 0");
+-assertEq(view[1163], 255, "1163th value should be 255");
+-assertEq(view[1164], 0, "1164th value should be 0");
+-assertEq(view[1165], 0, "1165th value should be 0");
+-assertEq(view[1166], 0, "1166th value should be 0");
+-assertEq(view[1167], 255, "1167th value should be 255");
+-assertEq(view[1168], 0, "1168th value should be 0");
+-assertEq(view[1169], 0, "1169th value should be 0");
+-assertEq(view[1170], 0, "1170th value should be 0");
+-assertEq(view[1171], 255, "1171th value should be 255");
+-assertEq(view[1172], 0, "1172th value should be 0");
+-assertEq(view[1173], 0, "1173th value should be 0");
+-assertEq(view[1174], 0, "1174th value should be 0");
+-assertEq(view[1175], 255, "1175th value should be 255");
+-assertEq(view[1176], 0, "1176th value should be 0");
+-assertEq(view[1177], 0, "1177th value should be 0");
+-assertEq(view[1178], 0, "1178th value should be 0");
+-assertEq(view[1179], 255, "1179th value should be 255");
+-assertEq(view[1180], 0, "1180th value should be 0");
+-assertEq(view[1181], 0, "1181th value should be 0");
+-assertEq(view[1182], 0, "1182th value should be 0");
+-assertEq(view[1183], 255, "1183th value should be 255");
+-assertEq(view[1184], 26, "1184th value should be 26");
+-assertEq(view[1185], 127, "1185th value should be 127");
+-assertEq(view[1186], 229, "1186th value should be 229");
+-assertEq(view[1187], 255, "1187th value should be 255");
+-assertEq(view[1188], 154, "1188th value should be 154");
+-assertEq(view[1189], 255, "1189th value should be 255");
+-assertEq(view[1190], 101, "1190th value should be 101");
+-assertEq(view[1191], 255, "1191th value should be 255");
+-assertEq(view[1192], 205, "1192th value should be 205");
+-assertEq(view[1193], 255, "1193th value should be 255");
+-assertEq(view[1194], 50, "1194th value should be 50");
+-assertEq(view[1195], 255, "1195th value should be 255");
+-assertEq(view[1196], 205, "1196th value should be 205");
+-assertEq(view[1197], 255, "1197th value should be 255");
+-assertEq(view[1198], 50, "1198th value should be 50");
+-assertEq(view[1199], 255, "1199th value should be 255");
+-assertEq(view[1200], 230, "1200th value should be 230");
+-assertEq(view[1201], 127, "1201th value should be 127");
+-assertEq(view[1202], 25, "1202th value should be 25");
+-assertEq(view[1203], 255, "1203th value should be 255");
+-assertEq(view[1204], 205, "1204th value should be 205");
+-assertEq(view[1205], 255, "1205th value should be 255");
+-assertEq(view[1206], 50, "1206th value should be 50");
+-assertEq(view[1207], 255, "1207th value should be 255");
+-assertEq(view[1208], 205, "1208th value should be 205");
+-assertEq(view[1209], 255, "1209th value should be 255");
+-assertEq(view[1210], 50, "1210th value should be 50");
+-assertEq(view[1211], 255, "1211th value should be 255");
+-assertEq(view[1212], 205, "1212th value should be 205");
+-assertEq(view[1213], 255, "1213th value should be 255");
+-assertEq(view[1214], 50, "1214th value should be 50");
+-assertEq(view[1215], 255, "1215th value should be 255");
+-assertEq(view[1216], 205, "1216th value should be 205");
+-assertEq(view[1217], 255, "1217th value should be 255");
+-assertEq(view[1218], 50, "1218th value should be 50");
+-assertEq(view[1219], 255, "1219th value should be 255");
+-assertEq(view[1220], 154, "1220th value should be 154");
+-assertEq(view[1221], 255, "1221th value should be 255");
+-assertEq(view[1222], 101, "1222th value should be 101");
+-assertEq(view[1223], 255, "1223th value should be 255");
+-assertEq(view[1224], 154, "1224th value should be 154");
+-assertEq(view[1225], 255, "1225th value should be 255");
+-assertEq(view[1226], 101, "1226th value should be 101");
+-assertEq(view[1227], 255, "1227th value should be 255");
+-assertEq(view[1228], 154, "1228th value should be 154");
+-assertEq(view[1229], 255, "1229th value should be 255");
+-assertEq(view[1230], 101, "1230th value should be 101");
+-assertEq(view[1231], 255, "1231th value should be 255");
+-assertEq(view[1232], 128, "1232th value should be 128");
+-assertEq(view[1233], 127, "1233th value should be 127");
+-assertEq(view[1234], 127, "1234th value should be 127");
+-assertEq(view[1235], 255, "1235th value should be 255");
+-assertEq(view[1236], 26, "1236th value should be 26");
+-assertEq(view[1237], 127, "1237th value should be 127");
+-assertEq(view[1238], 229, "1238th value should be 229");
+-assertEq(view[1239], 255, "1239th value should be 255");
+-assertEq(view[1240], 0, "1240th value should be 0");
+-assertEq(view[1241], 0, "1241th value should be 0");
+-assertEq(view[1242], 0, "1242th value should be 0");
+-assertEq(view[1243], 255, "1243th value should be 255");
+-assertEq(view[1244], 0, "1244th value should be 0");
+-assertEq(view[1245], 0, "1245th value should be 0");
+-assertEq(view[1246], 0, "1246th value should be 0");
+-assertEq(view[1247], 255, "1247th value should be 255");
+-assertEq(view[1248], 0, "1248th value should be 0");
+-assertEq(view[1249], 0, "1249th value should be 0");
+-assertEq(view[1250], 0, "1250th value should be 0");
+-assertEq(view[1251], 255, "1251th value should be 255");
+-assertEq(view[1252], 0, "1252th value should be 0");
+-assertEq(view[1253], 0, "1253th value should be 0");
+-assertEq(view[1254], 0, "1254th value should be 0");
+-assertEq(view[1255], 255, "1255th value should be 255");
+-assertEq(view[1256], 0, "1256th value should be 0");
+-assertEq(view[1257], 0, "1257th value should be 0");
+-assertEq(view[1258], 0, "1258th value should be 0");
+-assertEq(view[1259], 255, "1259th value should be 255");
+-assertEq(view[1260], 0, "1260th value should be 0");
+-assertEq(view[1261], 0, "1261th value should be 0");
+-assertEq(view[1262], 0, "1262th value should be 0");
+-assertEq(view[1263], 255, "1263th value should be 255");
+-assertEq(view[1264], 78, "1264th value should be 78");
+-assertEq(view[1265], 127, "1265th value should be 127");
+-assertEq(view[1266], 178, "1266th value should be 178");
+-assertEq(view[1267], 255, "1267th value should be 255");
+-assertEq(view[1268], 179, "1268th value should be 179");
+-assertEq(view[1269], 127, "1269th value should be 127");
+-assertEq(view[1270], 76, "1270th value should be 76");
+-assertEq(view[1271], 255, "1271th value should be 255");
+-assertEq(view[1272], 205, "1272th value should be 205");
+-assertEq(view[1273], 255, "1273th value should be 255");
+-assertEq(view[1274], 50, "1274th value should be 50");
+-assertEq(view[1275], 255, "1275th value should be 255");
+-assertEq(view[1276], 205, "1276th value should be 205");
+-assertEq(view[1277], 255, "1277th value should be 255");
+-assertEq(view[1278], 50, "1278th value should be 50");
+-assertEq(view[1279], 255, "1279th value should be 255");
+-assertEq(view[1280], 0, "1280th value should be 0");
+-assertEq(view[1281], 0, "1281th value should be 0");
+-assertEq(view[1282], 0, "1282th value should be 0");
+-assertEq(view[1283], 255, "1283th value should be 255");
+-assertEq(view[1284], 205, "1284th value should be 205");
+-assertEq(view[1285], 255, "1285th value should be 255");
+-assertEq(view[1286], 50, "1286th value should be 50");
+-assertEq(view[1287], 255, "1287th value should be 255");
+-assertEq(view[1288], 205, "1288th value should be 205");
+-assertEq(view[1289], 255, "1289th value should be 255");
+-assertEq(view[1290], 50, "1290th value should be 50");
+-assertEq(view[1291], 255, "1291th value should be 255");
+-assertEq(view[1292], 205, "1292th value should be 205");
+-assertEq(view[1293], 255, "1293th value should be 255");
+-assertEq(view[1294], 50, "1294th value should be 50");
+-assertEq(view[1295], 255, "1295th value should be 255");
+-assertEq(view[1296], 205, "1296th value should be 205");
+-assertEq(view[1297], 255, "1297th value should be 255");
+-assertEq(view[1298], 50, "1298th value should be 50");
+-assertEq(view[1299], 255, "1299th value should be 255");
+-assertEq(view[1300], 205, "1300th value should be 205");
+-assertEq(view[1301], 255, "1301th value should be 255");
+-assertEq(view[1302], 50, "1302th value should be 50");
+-assertEq(view[1303], 255, "1303th value should be 255");
+-assertEq(view[1304], 179, "1304th value should be 179");
+-assertEq(view[1305], 127, "1305th value should be 127");
+-assertEq(view[1306], 76, "1306th value should be 76");
+-assertEq(view[1307], 255, "1307th value should be 255");
+-assertEq(view[1308], 154, "1308th value should be 154");
+-assertEq(view[1309], 255, "1309th value should be 255");
+-assertEq(view[1310], 101, "1310th value should be 101");
+-assertEq(view[1311], 255, "1311th value should be 255");
+-assertEq(view[1312], 154, "1312th value should be 154");
+-assertEq(view[1313], 255, "1313th value should be 255");
+-assertEq(view[1314], 101, "1314th value should be 101");
+-assertEq(view[1315], 255, "1315th value should be 255");
+-assertEq(view[1316], 0, "1316th value should be 0");
+-assertEq(view[1317], 0, "1317th value should be 0");
+-assertEq(view[1318], 0, "1318th value should be 0");
+-assertEq(view[1319], 255, "1319th value should be 255");
+-assertEq(view[1320], 0, "1320th value should be 0");
+-assertEq(view[1321], 0, "1321th value should be 0");
+-assertEq(view[1322], 0, "1322th value should be 0");
+-assertEq(view[1323], 255, "1323th value should be 255");
+-assertEq(view[1324], 0, "1324th value should be 0");
+-assertEq(view[1325], 0, "1325th value should be 0");
+-assertEq(view[1326], 0, "1326th value should be 0");
+-assertEq(view[1327], 255, "1327th value should be 255");
+-assertEq(view[1328], 0, "1328th value should be 0");
+-assertEq(view[1329], 0, "1329th value should be 0");
+-assertEq(view[1330], 0, "1330th value should be 0");
+-assertEq(view[1331], 255, "1331th value should be 255");
+-assertEq(view[1332], 0, "1332th value should be 0");
+-assertEq(view[1333], 0, "1333th value should be 0");
+-assertEq(view[1334], 0, "1334th value should be 0");
+-assertEq(view[1335], 255, "1335th value should be 255");
+-assertEq(view[1336], 0, "1336th value should be 0");
+-assertEq(view[1337], 0, "1337th value should be 0");
+-assertEq(view[1338], 0, "1338th value should be 0");
+-assertEq(view[1339], 255, "1339th value should be 255");
+-assertEq(view[1340], 0, "1340th value should be 0");
+-assertEq(view[1341], 0, "1341th value should be 0");
+-assertEq(view[1342], 0, "1342th value should be 0");
+-assertEq(view[1343], 255, "1343th value should be 255");
+-assertEq(view[1344], 0, "1344th value should be 0");
+-assertEq(view[1345], 0, "1345th value should be 0");
+-assertEq(view[1346], 0, "1346th value should be 0");
+-assertEq(view[1347], 255, "1347th value should be 255");
+-assertEq(view[1348], 179, "1348th value should be 179");
+-assertEq(view[1349], 127, "1349th value should be 127");
+-assertEq(view[1350], 76, "1350th value should be 76");
+-assertEq(view[1351], 255, "1351th value should be 255");
+-assertEq(view[1352], 205, "1352th value should be 205");
+-assertEq(view[1353], 255, "1353th value should be 255");
+-assertEq(view[1354], 50, "1354th value should be 50");
+-assertEq(view[1355], 255, "1355th value should be 255");
+-assertEq(view[1356], 205, "1356th value should be 205");
+-assertEq(view[1357], 255, "1357th value should be 255");
+-assertEq(view[1358], 50, "1358th value should be 50");
+-assertEq(view[1359], 255, "1359th value should be 255");
+-assertEq(view[1360], 0, "1360th value should be 0");
+-assertEq(view[1361], 0, "1361th value should be 0");
+-assertEq(view[1362], 0, "1362th value should be 0");
+-assertEq(view[1363], 255, "1363th value should be 255");
+-assertEq(view[1364], 205, "1364th value should be 205");
+-assertEq(view[1365], 255, "1365th value should be 255");
+-assertEq(view[1366], 50, "1366th value should be 50");
+-assertEq(view[1367], 255, "1367th value should be 255");
+-assertEq(view[1368], 205, "1368th value should be 205");
+-assertEq(view[1369], 255, "1369th value should be 255");
+-assertEq(view[1370], 50, "1370th value should be 50");
+-assertEq(view[1371], 255, "1371th value should be 255");
+-assertEq(view[1372], 205, "1372th value should be 205");
+-assertEq(view[1373], 255, "1373th value should be 255");
+-assertEq(view[1374], 50, "1374th value should be 50");
+-assertEq(view[1375], 255, "1375th value should be 255");
+-assertEq(view[1376], 205, "1376th value should be 205");
+-assertEq(view[1377], 255, "1377th value should be 255");
+-assertEq(view[1378], 50, "1378th value should be 50");
+-assertEq(view[1379], 255, "1379th value should be 255");
+-assertEq(view[1380], 205, "1380th value should be 205");
+-assertEq(view[1381], 255, "1381th value should be 255");
+-assertEq(view[1382], 50, "1382th value should be 50");
+-assertEq(view[1383], 255, "1383th value should be 255");
+-assertEq(view[1384], 205, "1384th value should be 205");
+-assertEq(view[1385], 255, "1385th value should be 255");
+-assertEq(view[1386], 50, "1386th value should be 50");
+-assertEq(view[1387], 255, "1387th value should be 255");
+-assertEq(view[1388], 179, "1388th value should be 179");
+-assertEq(view[1389], 127, "1389th value should be 127");
+-assertEq(view[1390], 76, "1390th value should be 76");
+-assertEq(view[1391], 255, "1391th value should be 255");
+-assertEq(view[1392], 179, "1392th value should be 179");
+-assertEq(view[1393], 127, "1393th value should be 127");
+-assertEq(view[1394], 76, "1394th value should be 76");
+-assertEq(view[1395], 255, "1395th value should be 255");
+-assertEq(view[1396], 103, "1396th value should be 103");
+-assertEq(view[1397], 255, "1397th value should be 255");
+-assertEq(view[1398], 152, "1398th value should be 152");
+-assertEq(view[1399], 255, "1399th value should be 255");
+-assertEq(view[1400], 78, "1400th value should be 78");
+-assertEq(view[1401], 127, "1401th value should be 127");
+-assertEq(view[1402], 178, "1402th value should be 178");
+-assertEq(view[1403], 255, "1403th value should be 255");
+-assertEq(view[1404], 52, "1404th value should be 52");
+-assertEq(view[1405], 255, "1405th value should be 255");
+-assertEq(view[1406], 203, "1406th value should be 203");
+-assertEq(view[1407], 255, "1407th value should be 255");
+-assertEq(view[1408], 0, "1408th value should be 0");
+-assertEq(view[1409], 0, "1409th value should be 0");
+-assertEq(view[1410], 0, "1410th value should be 0");
+-assertEq(view[1411], 255, "1411th value should be 255");
+-assertEq(view[1412], 0, "1412th value should be 0");
+-assertEq(view[1413], 0, "1413th value should be 0");
+-assertEq(view[1414], 0, "1414th value should be 0");
+-assertEq(view[1415], 255, "1415th value should be 255");
+-assertEq(view[1416], 52, "1416th value should be 52");
+-assertEq(view[1417], 255, "1417th value should be 255");
+-assertEq(view[1418], 203, "1418th value should be 203");
+-assertEq(view[1419], 255, "1419th value should be 255");
+-assertEq(view[1420], 128, "1420th value should be 128");
+-assertEq(view[1421], 127, "1421th value should be 127");
+-assertEq(view[1422], 127, "1422th value should be 127");
+-assertEq(view[1423], 255, "1423th value should be 255");
+-assertEq(view[1424], 128, "1424th value should be 128");
+-assertEq(view[1425], 127, "1425th value should be 127");
+-assertEq(view[1426], 127, "1426th value should be 127");
+-assertEq(view[1427], 255, "1427th value should be 255");
+-assertEq(view[1428], 205, "1428th value should be 205");
+-assertEq(view[1429], 255, "1429th value should be 255");
+-assertEq(view[1430], 50, "1430th value should be 50");
+-assertEq(view[1431], 255, "1431th value should be 255");
+-assertEq(view[1432], 205, "1432th value should be 205");
+-assertEq(view[1433], 255, "1433th value should be 255");
+-assertEq(view[1434], 50, "1434th value should be 50");
+-assertEq(view[1435], 255, "1435th value should be 255");
+-assertEq(view[1436], 230, "1436th value should be 230");
+-assertEq(view[1437], 127, "1437th value should be 127");
+-assertEq(view[1438], 25, "1438th value should be 25");
+-assertEq(view[1439], 255, "1439th value should be 255");
+-assertEq(view[1440], 0, "1440th value should be 0");
+-assertEq(view[1441], 0, "1441th value should be 0");
+-assertEq(view[1442], 0, "1442th value should be 0");
+-assertEq(view[1443], 255, "1443th value should be 255");
+-assertEq(view[1444], 230, "1444th value should be 230");
+-assertEq(view[1445], 127, "1445th value should be 127");
+-assertEq(view[1446], 25, "1446th value should be 25");
+-assertEq(view[1447], 255, "1447th value should be 255");
+-assertEq(view[1448], 205, "1448th value should be 205");
+-assertEq(view[1449], 255, "1449th value should be 255");
+-assertEq(view[1450], 50, "1450th value should be 50");
+-assertEq(view[1451], 255, "1451th value should be 255");
+-assertEq(view[1452], 205, "1452th value should be 205");
+-assertEq(view[1453], 255, "1453th value should be 255");
+-assertEq(view[1454], 50, "1454th value should be 50");
+-assertEq(view[1455], 255, "1455th value should be 255");
+-assertEq(view[1456], 205, "1456th value should be 205");
+-assertEq(view[1457], 255, "1457th value should be 255");
+-assertEq(view[1458], 50, "1458th value should be 50");
+-assertEq(view[1459], 255, "1459th value should be 255");
+-assertEq(view[1460], 205, "1460th value should be 205");
+-assertEq(view[1461], 255, "1461th value should be 255");
+-assertEq(view[1462], 50, "1462th value should be 50");
+-assertEq(view[1463], 255, "1463th value should be 255");
+-assertEq(view[1464], 205, "1464th value should be 205");
+-assertEq(view[1465], 255, "1465th value should be 255");
+-assertEq(view[1466], 50, "1466th value should be 50");
+-assertEq(view[1467], 255, "1467th value should be 255");
+-assertEq(view[1468], 179, "1468th value should be 179");
+-assertEq(view[1469], 127, "1469th value should be 127");
+-assertEq(view[1470], 76, "1470th value should be 76");
+-assertEq(view[1471], 255, "1471th value should be 255");
+-assertEq(view[1472], 179, "1472th value should be 179");
+-assertEq(view[1473], 127, "1473th value should be 127");
+-assertEq(view[1474], 76, "1474th value should be 76");
+-assertEq(view[1475], 255, "1475th value should be 255");
+-assertEq(view[1476], 179, "1476th value should be 179");
+-assertEq(view[1477], 127, "1477th value should be 127");
+-assertEq(view[1478], 76, "1478th value should be 76");
+-assertEq(view[1479], 255, "1479th value should be 255");
+-assertEq(view[1480], 128, "1480th value should be 128");
+-assertEq(view[1481], 127, "1481th value should be 127");
+-assertEq(view[1482], 127, "1482th value should be 127");
+-assertEq(view[1483], 255, "1483th value should be 255");
+-assertEq(view[1484], 103, "1484th value should be 103");
+-assertEq(view[1485], 255, "1485th value should be 255");
+-assertEq(view[1486], 152, "1486th value should be 152");
+-assertEq(view[1487], 255, "1487th value should be 255");
+-assertEq(view[1488], 0, "1488th value should be 0");
+-assertEq(view[1489], 0, "1489th value should be 0");
+-assertEq(view[1490], 0, "1490th value should be 0");
+-assertEq(view[1491], 255, "1491th value should be 255");
+-assertEq(view[1492], 0, "1492th value should be 0");
+-assertEq(view[1493], 0, "1493th value should be 0");
+-assertEq(view[1494], 0, "1494th value should be 0");
+-assertEq(view[1495], 255, "1495th value should be 255");
+-assertEq(view[1496], 128, "1496th value should be 128");
+-assertEq(view[1497], 127, "1497th value should be 127");
+-assertEq(view[1498], 127, "1498th value should be 127");
+-assertEq(view[1499], 255, "1499th value should be 255");
+-assertEq(view[1500], 154, "1500th value should be 154");
+-assertEq(view[1501], 255, "1501th value should be 255");
+-assertEq(view[1502], 101, "1502th value should be 101");
+-assertEq(view[1503], 255, "1503th value should be 255");
+-assertEq(view[1504], 179, "1504th value should be 179");
+-assertEq(view[1505], 127, "1505th value should be 127");
+-assertEq(view[1506], 76, "1506th value should be 76");
+-assertEq(view[1507], 255, "1507th value should be 255");
+-assertEq(view[1508], 205, "1508th value should be 205");
+-assertEq(view[1509], 255, "1509th value should be 255");
+-assertEq(view[1510], 50, "1510th value should be 50");
+-assertEq(view[1511], 255, "1511th value should be 255");
+-assertEq(view[1512], 205, "1512th value should be 205");
+-assertEq(view[1513], 255, "1513th value should be 255");
+-assertEq(view[1514], 50, "1514th value should be 50");
+-assertEq(view[1515], 255, "1515th value should be 255");
+-assertEq(view[1516], 230, "1516th value should be 230");
+-assertEq(view[1517], 127, "1517th value should be 127");
+-assertEq(view[1518], 25, "1518th value should be 25");
+-assertEq(view[1519], 255, "1519th value should be 255");
+-assertEq(view[1520], 0, "1520th value should be 0");
+-assertEq(view[1521], 0, "1521th value should be 0");
+-assertEq(view[1522], 0, "1522th value should be 0");
+-assertEq(view[1523], 255, "1523th value should be 255");
+-assertEq(view[1524], 230, "1524th value should be 230");
+-assertEq(view[1525], 127, "1525th value should be 127");
+-assertEq(view[1526], 25, "1526th value should be 25");
+-assertEq(view[1527], 255, "1527th value should be 255");
+-assertEq(view[1528], 230, "1528th value should be 230");
+-assertEq(view[1529], 127, "1529th value should be 127");
+-assertEq(view[1530], 25, "1530th value should be 25");
+-assertEq(view[1531], 255, "1531th value should be 255");
+-assertEq(view[1532], 205, "1532th value should be 205");
+-assertEq(view[1533], 255, "1533th value should be 255");
+-assertEq(view[1534], 50, "1534th value should be 50");
+-assertEq(view[1535], 255, "1535th value should be 255");
+-assertEq(view[1536], 205, "1536th value should be 205");
+-assertEq(view[1537], 255, "1537th value should be 255");
+-assertEq(view[1538], 50, "1538th value should be 50");
+-assertEq(view[1539], 255, "1539th value should be 255");
+-assertEq(view[1540], 205, "1540th value should be 205");
+-assertEq(view[1541], 255, "1541th value should be 255");
+-assertEq(view[1542], 50, "1542th value should be 50");
+-assertEq(view[1543], 255, "1543th value should be 255");
+-assertEq(view[1544], 205, "1544th value should be 205");
+-assertEq(view[1545], 255, "1545th value should be 255");
+-assertEq(view[1546], 50, "1546th value should be 50");
+-assertEq(view[1547], 255, "1547th value should be 255");
+-assertEq(view[1548], 205, "1548th value should be 205");
+-assertEq(view[1549], 255, "1549th value should be 255");
+-assertEq(view[1550], 50, "1550th value should be 50");
+-assertEq(view[1551], 255, "1551th value should be 255");
+-assertEq(view[1552], 179, "1552th value should be 179");
+-assertEq(view[1553], 127, "1553th value should be 127");
+-assertEq(view[1554], 76, "1554th value should be 76");
+-assertEq(view[1555], 255, "1555th value should be 255");
+-assertEq(view[1556], 179, "1556th value should be 179");
+-assertEq(view[1557], 127, "1557th value should be 127");
+-assertEq(view[1558], 76, "1558th value should be 76");
+-assertEq(view[1559], 255, "1559th value should be 255");
+-assertEq(view[1560], 179, "1560th value should be 179");
+-assertEq(view[1561], 127, "1561th value should be 127");
+-assertEq(view[1562], 76, "1562th value should be 76");
+-assertEq(view[1563], 255, "1563th value should be 255");
+-assertEq(view[1564], 154, "1564th value should be 154");
+-assertEq(view[1565], 255, "1565th value should be 255");
+-assertEq(view[1566], 101, "1566th value should be 101");
+-assertEq(view[1567], 255, "1567th value should be 255");
+-assertEq(view[1568], 26, "1568th value should be 26");
+-assertEq(view[1569], 127, "1569th value should be 127");
+-assertEq(view[1570], 229, "1570th value should be 229");
+-assertEq(view[1571], 255, "1571th value should be 255");
+-assertEq(view[1572], 0, "1572th value should be 0");
+-assertEq(view[1573], 0, "1573th value should be 0");
+-assertEq(view[1574], 0, "1574th value should be 0");
+-assertEq(view[1575], 255, "1575th value should be 255");
+-assertEq(view[1576], 154, "1576th value should be 154");
+-assertEq(view[1577], 255, "1577th value should be 255");
+-assertEq(view[1578], 101, "1578th value should be 101");
+-assertEq(view[1579], 255, "1579th value should be 255");
+-assertEq(view[1580], 179, "1580th value should be 179");
+-assertEq(view[1581], 127, "1581th value should be 127");
+-assertEq(view[1582], 76, "1582th value should be 76");
+-assertEq(view[1583], 255, "1583th value should be 255");
+-assertEq(view[1584], 205, "1584th value should be 205");
+-assertEq(view[1585], 255, "1585th value should be 255");
+-assertEq(view[1586], 50, "1586th value should be 50");
+-assertEq(view[1587], 255, "1587th value should be 255");
+-assertEq(view[1588], 205, "1588th value should be 205");
+-assertEq(view[1589], 255, "1589th value should be 255");
+-assertEq(view[1590], 50, "1590th value should be 50");
+-assertEq(view[1591], 255, "1591th value should be 255");
+-assertEq(view[1592], 230, "1592th value should be 230");
+-assertEq(view[1593], 127, "1593th value should be 127");
+-assertEq(view[1594], 25, "1594th value should be 25");
+-assertEq(view[1595], 255, "1595th value should be 255");
+-assertEq(view[1596], 230, "1596th value should be 230");
+-assertEq(view[1597], 127, "1597th value should be 127");
+-assertEq(view[1598], 25, "1598th value should be 25");
+-assertEq(view[1599], 255, "1599th value should be 255");
+-
+-// Code used to generate the assertEq list above.
+-function generateAssertList() {
+-  function template(i, x) {
+-    return 'assertEq(view[' + i + '], ' + x + ', "' + i + 'th value should be ' + x + '");\n';
+-  }
+-  var buf = ''
+-  for (var i = 0; i < LIMIT_SHOW; i++)
+-      buf += template(i, view[i]);
+-  print(buf);
+-}
+-//generateAssertList();
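The deleted test above ends with the generator that produced its own assertEq wall: golden values are printed once from a known-good run, then pasted back in as assertions. A minimal sketch of that self-generating pattern in plain JS (console.log standing in for the shell's print; names here are illustrative):

    function generateAsserts(view, limit) {
        let buf = "";
        for (let i = 0; i < limit; i++)
            buf += `assertEq(view[${i}], ${view[i]}, "${i}th value should be ${view[i]}");\n`;
        return buf;
    }
    // Example: console.log(generateAsserts(new Uint8Array([205, 255, 50]), 3));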
+diff --git a/js/src/jit-test/tests/asm.js/testAsmJSWasmMixing.js b/js/src/jit-test/tests/asm.js/testAsmJSWasmMixing.js
+--- a/js/src/jit-test/tests/asm.js/testAsmJSWasmMixing.js
++++ b/js/src/jit-test/tests/asm.js/testAsmJSWasmMixing.js
+@@ -8,27 +8,16 @@ const Memory = WebAssembly.Memory;
+ var asmJS = asmCompile('stdlib', 'ffis', 'buf', USE_ASM + 'var i32 = new stdlib.Int32Array(buf); return {}');
+ 
+ var asmJSBuf = new ArrayBuffer(BUF_MIN);
+ asmLink(asmJS, this, null, asmJSBuf);
+ 
+ var wasmMem = wasmEvalText('(module (memory 1 1) (export "mem" memory))').exports.mem;
+ assertAsmLinkFail(asmJS, this, null, wasmMem.buffer);
+ 
+-if (!getBuildConfiguration().x64 && isSimdAvailable() && this["SIMD"]) {
+-    var simdJS = asmCompile('stdlib', 'ffis', 'buf', USE_ASM + 'var i32 = new stdlib.Int32Array(buf); var i32x4 = stdlib.SIMD.Int32x4; return {}');
+-    assertAsmLinkFail(simdJS, this, null, asmJSBuf);
+-    assertAsmLinkFail(simdJS, this, null, wasmMem.buffer);
+-
+-    var simdJSBuf = new ArrayBuffer(BUF_MIN);
+-    asmLink(simdJS, this, null, simdJSBuf);
+-    asmLink(simdJS, this, null, simdJSBuf);  // multiple SIMD.js instantiations succeed
+-    assertAsmLinkFail(asmJS, this, null, simdJSBuf);  // but not asm.js
+-}
+-
+ setJitCompilerOption('asmjs.atomics.enable', 1);
+ 
+ var sharedAsmJS = asmCompile('stdlib', 'ffis', 'buf',
+ 			     USE_ASM +
+ 			     'var i32 = new stdlib.Int32Array(buf);' +
+ 			     'var aload = stdlib.Atomics.load;' + // Declare shared memory
+ 			     'return {}');
+ 
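The surviving assertions above pin down that an asm.js module links only against a plain ArrayBuffer heap, never a WebAssembly.Memory's buffer; the SIMD.js linking cases are dropped entirely. A standalone sketch of the two buffer kinds (standard APIs only; the link failure itself is engine behavior exercised by assertAsmLinkFail above):

    const wasmMem = new WebAssembly.Memory({ initial: 1, maximum: 1 });
    const plainBuf = new ArrayBuffer(64 * 1024);
    console.log(wasmMem.buffer instanceof ArrayBuffer); // true, yet rejected as an asm.js heap
    console.log(wasmMem.buffer.byteLength);             // 65536, same size as plainBuf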
+diff --git a/js/src/jit-test/tests/asm.js/testBug1099216.js b/js/src/jit-test/tests/asm.js/testBug1099216.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/asm.js/testBug1099216.js
++++ /dev/null
+@@ -1,61 +0,0 @@
+-if (typeof SIMD === 'undefined' || !isSimdAvailable()) {
+-    print("won't run tests as simd extensions aren't activated yet");
+-    quit(0);
+-}
+-
+-(function(global) {
+-    "use asm";
+-    var frd = global.Math.fround;
+-    var fx4 = global.SIMD.Float32x4;
+-    var fc4 = fx4.check;
+-    var fsp = fx4.splat;
+-    function s(){}
+-    function d(x){x=fc4(x);}
+-    function e() {
+-        var x = frd(0);
+-        x = frd(x / x);
+-        s();
+-        d(fsp(x));
+-    }
+-    return e;
+-})(this)();
+-
+-(function(m) {
+-    "use asm"
+-    var k = m.SIMD.Bool32x4
+-    var g = m.SIMD.Int32x4
+-    var gc = g.check;
+-    var h = g.select
+-    function f() {
+-        var x = k(0, 0, 0, 0)
+-        var y = g(1, 2, 3, 4)
+-        return gc(h(x, y, y))
+-    }
+-    return f;
+-})(this)();
+-
+-t = (function(global) {
+-    "use asm"
+-    var toF = global.Math.fround
+-    var f4 = global.SIMD.Float32x4
+-    var f4c = f4.check
+-    function p(x, y, width, value, max_iterations) {
+-        x = x | 0
+-        y = y | 0
+-        width = width | 0
+-        value = value | 0
+-        max_iterations = max_iterations | 0
+-    }
+-    function m(xf, yf, yd, max_iterations) {
+-        xf = toF(xf)
+-        yf = toF(yf)
+-        yd = toF(yd)
+-        max_iterations = max_iterations | 0
+-        var _ = f4(0, 0, 0, 0), c_im4 = f4(0, 0, 0, 0)
+-        c_im4 = f4(yf, yd, yd, yf)
+-        return f4c(c_im4);
+-    }
+-    return {p:p,m:m};
+-})(this)
+-t.p();
+-t.m();
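testBug1099216.js is deleted wholesale because every module in it pulls a SIMD type out of the stdlib object. The non-SIMD skeleton those modules share is the standard asm.js module shape; a minimal sketch under that assumption (plain JS, no SIMD, runs with or without asm.js validation):

    function Mod(global) {
        "use asm";
        var toF = global.Math.fround;
        function m(x) {
            x = toF(x);                 // float parameter coercion
            return toF(x + toF(1.0));   // floatish results are re-coerced on return
        }
        return { m: m };
    }
    console.log(Mod(globalThis).m(2.5)); // 3.5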
+diff --git a/js/src/jit-test/tests/asm.js/testJumpRange.js b/js/src/jit-test/tests/asm.js/testJumpRange.js
+--- a/js/src/jit-test/tests/asm.js/testJumpRange.js
++++ b/js/src/jit-test/tests/asm.js/testJumpRange.js
+@@ -13,34 +13,14 @@ for (let threshold of [0, 50, 100, 5000,
+     assertEq(asmCompile(
+         USE_ASM + `
+             function h() { return ((g()|0)+2)|0 }
+             function g() { return ((f()|0)+1)|0 }
+             function f() { return 42 }
+             return h
+         `)()(), 45);
+ 
+-    if (isSimdAvailable() && this.SIMD) {
+-        var buf = new ArrayBuffer(BUF_MIN);
+-        new Int32Array(buf)[0] = 10;
+-        new Float32Array(buf)[1] = 42;
+-        assertEq(asmCompile('stdlib', 'ffis', 'buf',
+-            USE_ASM + `
+-                var H = new stdlib.Uint8Array(buf);
+-                var i4 = stdlib.SIMD.Int32x4;
+-                var f4 = stdlib.SIMD.Float32x4;
+-                var i4load = i4.load;
+-                var f4load = f4.load;
+-                var toi4 = i4.fromFloat32x4;
+-                var i4ext = i4.extractLane;
+-                function f(i) { i=i|0; return i4ext(i4load(H, i), 0)|0 }
+-                function g(i) { i=i|0; return (i4ext(toi4(f4load(H, i)),1) + (f(i)|0))|0 }
+-                function h(i) { i=i|0; return g(i)|0 }
+-                return h
+-            `)(this, null, buf)(0), 52);
+-    }
+-
+     enableGeckoProfiling();
+     asmLink(asmCompile(USE_ASM + 'function f() {} function g() { f() } function h() { g() } return h'))();
+     disableGeckoProfiling();
+ 
+     assertEq(asmCompile(fatFunc)()(), 142);
+ }
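With the SIMD branch gone, the jump-range test keeps only the scalar call chain, where each `|0` coerces the callee's result to int32 before the addition. The arithmetic it pins down, restated as plain JS:

    function f() { return 42; }
    function g() { return ((f() | 0) + 1) | 0; }
    function h() { return ((g() | 0) + 2) | 0; }
    console.log(h() === 45); // matches the assertEq above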
+diff --git a/js/src/jit-test/tests/asm.js/testProfiling.js b/js/src/jit-test/tests/asm.js/testProfiling.js
+--- a/js/src/jit-test/tests/asm.js/testProfiling.js
++++ b/js/src/jit-test/tests/asm.js/testProfiling.js
+@@ -205,30 +205,16 @@ if (jitOptions['baseline.enable']) {
+     for (var i = 0; i < 20; i++)
+         assertEq(f1(), 32);
+     enableSingleStepProfiling();
+     assertEq(f1(), 32);
+     var stacks = disableSingleStepProfiling();
+     assertStackContainsSeq(stacks, ">,f1,>,<,f1,>,>,<,f1,>,f2,>,<,f1,>,<,f2,>,<,f1,>,f2,>,<,f1,>,>,<,f1,>,<,f1,>,f1,>,>");
+ }
+ 
+-
+-if (isSimdAvailable() && typeof SIMD !== 'undefined') {
+-    // SIMD out-of-bounds exit
+-    var buf = new ArrayBuffer(0x10000);
+-    var f = asmLink(asmCompile('g','ffi','buf', USE_ASM + 'var f4=g.SIMD.float32x4; var f4l=f4.load; var u8=new g.Uint8Array(buf); function f(i) { i=i|0; return f4l(u8, 0xFFFF + i | 0); } return f'), this, {}, buf);
+-    enableSingleStepProfiling();
+-    assertThrowsInstanceOf(() => f(4), RangeError);
+-    var stacks = disableSingleStepProfiling();
+-    // TODO check that expected is actually the correctly expected string, when
+-    // SIMD is implemented on ARM.
+-    assertStackContainsSeq(stacks, ">,f,>,inline stub,f,>");
+-}
+-
+-
+ // Thunks
+ setJitCompilerOption("jump-threshold", 0);
+ var h = asmLink(asmCompile(USE_ASM + 'function f() {} function g() { f() } function h() { g() } return h'));
+ enableSingleStepProfiling();
+ h();
+ var stacks = disableSingleStepProfiling();
+ assertStackContainsSeq(stacks, ">,h,>,g,h,>,f,g,h,>,g,h,>,h,>,>");
+ setJitCompilerOption("jump-threshold", -1);
+diff --git a/js/src/jit-test/tests/asm.js/testSIMD-16x8.js b/js/src/jit-test/tests/asm.js/testSIMD-16x8.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/asm.js/testSIMD-16x8.js
++++ /dev/null
+@@ -1,525 +0,0 @@
+-load(libdir + "asm.js");
+-load(libdir + "simd.js");
+-load(libdir + "asserts.js");
+-
+-// Set to true to see more JS debugging spew.
+-const DEBUG = false;
+-
+-if (!isSimdAvailable()) {
+-    DEBUG && print("won't run tests as simd extensions aren't activated yet");
+-    quit(0);
+-}
+-
+-// Tests for 16x8 SIMD types: Int16x8, Uint16x8, Bool16x8.
+-
+-const I16x8 = 'var i16x8 = glob.SIMD.Int16x8;'
+-const I16x8CHK = 'var i16x8chk = i16x8.check;'
+-const I16x8EXT = 'var i16x8ext = i16x8.extractLane;'
+-const I16x8REP = 'var i16x8rep = i16x8.replaceLane;'
+-const I16x8U16x8 = 'var i16x8u16x8 = i16x8.fromUint16x8Bits;'
+-
+-const U16x8 = 'var u16x8 = glob.SIMD.Uint16x8;'
+-const U16x8CHK = 'var u16x8chk = u16x8.check;'
+-const U16x8EXT = 'var u16x8ext = u16x8.extractLane;'
+-const U16x8REP = 'var u16x8rep = u16x8.replaceLane;'
+-const U16x8I16x8 = 'var u16x8i16x8 = u16x8.fromInt16x8Bits;'
+-
+-const B16x8 = 'var b16x8 = glob.SIMD.Bool16x8;'
+-const B16x8CHK = 'var b16x8chk = b16x8.check;'
+-const B16x8EXT = 'var b16x8ext = b16x8.extractLane;'
+-const B16x8REP = 'var b16x8rep = b16x8.replaceLane;'
+-
+-const INT16_MAX = 0x7fff
+-const INT16_MIN = -0x10000
+-const UINT16_MAX = 0xffff
+-
+-// Linking
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {} return f"), {SIMD:{Int16x8: SIMD.Int16x8}})(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + U16x8 + "function f() {} return f"), {SIMD:{Uint16x8: SIMD.Uint16x8}})(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + B16x8 + "function f() {} return f"), {SIMD:{Bool16x8: SIMD.Bool16x8}})(), undefined);
+-
+-// Local variable of Int16x8 type.
+-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Int16x8(1,2,3,4,5,6,7,8);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8();} return f");
+-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8.0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8,9);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8|0);} return f");
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8);} return f"), this)(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7," + (INT16_MAX + 1) + ");} return f"), this)(), undefined);
+-
+-// Local variable of Uint16x8 type.
+-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Uint16x8(1,2,3,4,5,6,7,8);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8;} return f");
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8();} return f");
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8.0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8,9);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8|0);} return f");
+-assertEq(asmLink(asmCompile('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8);} return f"), this)(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7," + (UINT16_MAX + 1) + ");} return f"), this)(), undefined);
+-
+-// Local variable of Bool16x8 type.
+-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Bool16x8(1,0,0,0, 0,0,0,0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8;} return f");
+-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8();} return f");
+-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,0,0,1.0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,0,0,0|0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,0,0,0, 1);} return f");
+-assertEq(asmLink(asmCompile('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,-1,-2,0);} return f"), this)(), undefined);
+-
+-// Global variable of Int16x8 type.
+-assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + I16x8 + I16x8CHK + "var g=i16x8chk(ffi.g); function f() { return i16x8chk(g); } return f"), this,
+-                       {g: SIMD.Int16x8(1,2,3,4,5,6,7,8)})(), [1,2,3,4,5,6,7,8]);
+-assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + I16x8 + I16x8CHK + "var g=i16x8chk(ffi.g); function f() { g=i16x8(5,6,7,8,9,10,11,12); return i16x8chk(g); } return f"), this,
+-                       {g: SIMD.Int16x8(1,2,3,4,5,6,7,8)})(), [5,6,7,8,9,10,11,12]);
+-
+-// Global variable of Bool16x8 type.
+-assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + B16x8 + B16x8CHK + "var g=b16x8chk(ffi.g); function f() { return b16x8chk(g); } return f"), this,
+-                       {g: SIMD.Bool16x8(1,1,0,1,0,0,1,0)})(), [true,true,false,true,false,false,true,false]);
+-assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + B16x8 + B16x8CHK + "var g=b16x8chk(ffi.g); function f() { g=b16x8(1,1,0,1,0,1,1,1); return b16x8chk(g); } return f"), this,
+-                       {g: SIMD.Bool16x8(1,1,0,1,0,0,1,0)})(), [true,true,false,true,false,true,true,true]);
+-
+-// Unsigned SIMD globals are not allowed.
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + U16x8 + U16x8CHK + "var g=u16x8chk(ffi.g); function f() { } return f");
+-
+-// Only signed Int16x8 allowed as return value.
+-assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {return i16x8(1,2,3,4,5,6,7,8);} return f"), this)(),
+-           [1, 2, 3, 4, 5, 6, 7, 8]);
+-assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + "function f() {return i16x8chk(i16x8(1,2,3,32771,5,6,7,8));} return f"), this)(),
+-           [1, 2, 3, -32765, 5, 6, 7, 8]);
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {return u16x8(1,2,3,4,5,6,7,8);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + U16x8CHK + "function f() {return u16x8chk(u16x8(1,2,3,4,5,6,7,8));} return f");
+-
+-// Test splat.
+-function splat(x) {
+-    let r = []
+-    for (let i = 0; i < 8; i++)
+-        r.push(x);
+-    return r
+-}
+-
+-splatB = asmLink(asmCompile('glob', USE_ASM + B16x8 +
+-                            'var splat = b16x8.splat;' +
+-                            'function f(x) { x = x|0; return splat(x); } return f'), this);
+-assertEqVecArr(splatB(true), splat(true));
+-assertEqVecArr(splatB(false), splat(false));
+-
+-
+-splatB0 = asmLink(asmCompile('glob', USE_ASM + B16x8 +
+-                             'var splat = b16x8.splat;' +
+-                             'function f() { var x = 0; return splat(x); } return f'), this);
+-assertEqVecArr(splatB0(), splat(false));
+-splatB1 = asmLink(asmCompile('glob', USE_ASM + B16x8 +
+-                             'var splat = b16x8.splat;' +
+-                             'function f() { var x = 1; return splat(x); } return f'), this);
+-assertEqVecArr(splatB1(), splat(true));
+-
+-splatI = asmLink(asmCompile('glob', USE_ASM + I16x8 +
+-                            'var splat = i16x8.splat;' +
+-                            'function f(x) { x = x|0; return splat(x); } return f'), this);
+-for (let x of [0, 1, -1, 0x12345, 0x1234, -1000, -1000000]) {
+-    assertEqVecArr(splatI(x), splat(x << 16 >> 16));
+-}
+-
+-splatIc = asmLink(asmCompile('glob', USE_ASM + I16x8 +
+-                             'var splat = i16x8.splat;' +
+-                             'function f() { var x = 100; return splat(x); } return f'), this);
+-assertEqVecArr(splatIc(), splat(100))
+-
+-splatU = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8U16x8 +
+-                            'var splat = u16x8.splat;' +
+-                            'function f(x) { x = x|0; return i16x8u16x8(splat(x)); } return f'), this);
+-for (let x of [0, 1, -1, 0x12345, 0x1234, -1000, -1000000]) {
+-    assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(splatI(x)), splat(x << 16 >>> 16));
+-}
+-
+-splatUc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8U16x8 +
+-                             'var splat = u16x8.splat;' +
+-                             'function f() { var x = 200; return i16x8u16x8(splat(x)); } return f'), this);
+-assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(splatUc()), splat(200))
+-
+-
+-// Test extractLane.
+-//
+-// The lane index must be a literal int, and we generate different code for
+-// different lanes.
+-function extractI(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8EXT +
+-                            `function f() {var x=i16x8(${a.join(',')}); return i16x8ext(x, ${i})|0; } return f`), this)();
+-}
+-a = [-1,2,-3,4,-5,6,-7,-8];
+-for (var i = 0; i < 8; i++)
+-  assertEq(extractI(a, i), a[i]);
+-a = a.map(x => -x);
+-for (var i = 0; i < 8; i++)
+-  assertEq(extractI(a, i), a[i]);
+-
+-function extractU(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + U16x8 + U16x8EXT +
+-                            `function f() {var x=u16x8(${a.join(',')}); return u16x8ext(x, ${i})|0; } return f`), this)();
+-}
+-a = [1,255,12,13,14,150,200,3];
+-for (var i = 0; i < 8; i++)
+-  assertEq(extractU(a, i), a[i]);
+-a = a.map(x => UINT16_MAX-x);
+-for (var i = 0; i < 8; i++)
+-  assertEq(extractU(a, i), a[i]);
+-
+-function extractB(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8EXT +
+-                            `function f() {var x=b16x8(${a.join(',')}); return b16x8ext(x, ${i})|0; } return f`), this)();
+-}
+-a = [1,1,0,1, 1,0,0,0];
+-for (var i = 0; i < 8; i++)
+-  assertEq(extractB(a, i), a[i]);
+-a = a.map(x => 1-x);
+-for (var i = 0; i < 8; i++)
+-  assertEq(extractB(a, i), a[i]);
+-
+-// Test replaceLane.
+-function replaceI(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8REP +
+-                            `function f(v) {v=v|0; var x=i16x8(${a.join(',')}); return i16x8rep(x,${i},v); } return f`), this);
+-}
+-a = [-1,2,-3,4,-5,6,-7,-9];
+-for (var i = 0; i < 8; i++) {
+-    var f = replaceI(a, i);
+-    var b = a.slice(0);
+-    b[i] = -20;
+-    assertEqVecArr(f(-20), b);
+-}
+-
+-function replaceU(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + U16x8 + U16x8REP + I16x8 + I16x8U16x8 +
+-                            `function f(v) {v=v|0; var x=u16x8(${a.join(',')}); return i16x8u16x8(u16x8rep(x,${i},v)); } return f`), this);
+-}
+-a = [65000-1,2,65000-3,4,65000-5,6,65000-7,65000-9];
+-for (var i = 0; i < 8; i++) {
+-    var rawf = replaceU(a, i);
+-    var f = x => SIMD.Uint16x8.fromInt16x8Bits(rawf(x))
+-    var b = a.slice(0);
+-    b[i] = 1000;
+-    assertEqVecArr(f(1000), b);
+-}
+-
+-function replaceB(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8REP +
+-                            `function f(v) {v=v|0; var x=b16x8(${a.join(',')}); return b16x8rep(x,${i},v); } return f`), this);
+-}
+-a = [1,1,0,1,1,0,0,0];
+-for (var i = 0; i < 8; i++) {
+-    var f = replaceB(a, i);
+-    var b = a.slice(0);
+-    let v = 1 - a[i];
+-    b[i] = v;
+-    assertEqVecArr(f(v), b.map(x => !!x));
+-}
+-
+-
+-// Test select.
+-selectI = asmLink(asmCompile('glob', USE_ASM + I16x8 + B16x8 + B16x8CHK +
+-                             'var select = i16x8.select;' +
+-                             'var a = i16x8(-1,2,-3,4,-5, 6,-7, 8);' +
+-                             'var b = i16x8( 5,6, 7,8, 9,10,11,12);' +
+-                             'function f(x) { x = b16x8chk(x); return select(x, a, b); } return f'), this);
+-assertEqVecArr(selectI(SIMD.Bool16x8( 0,0, 1,0, 1,1, 1, 0)),
+-                                    [ 5,6,-3,8,-5,6,-7,12]);
+-
+-selectU = asmLink(asmCompile('glob', USE_ASM + I16x8 + B16x8 + B16x8CHK + U16x8 + I16x8U16x8 + U16x8I16x8 +
+-                             'var select = u16x8.select;' +
+-                             'var a = i16x8(-1,2,-3,4,-5, 6,-7, 8);' +
+-                             'var b = i16x8( 5,6, 7,8, 9,10,11,12);' +
+-                             'function f(x) { x = b16x8chk(x); return i16x8u16x8(select(x, u16x8i16x8(a), u16x8i16x8(b))); } return f'), this);
+-assertEqVecArr(selectU(SIMD.Bool16x8( 0,0, 1,0, 1,1, 1, 0)),
+-                                    [ 5,6,-3,8,-5,6,-7,12]);
+-
+-// Test swizzle.
+-function swizzle(vec, lanes) {
+-    let r = [];
+-    for (let i = 0; i < 8; i++)
+-        r.push(vec[lanes[i]]);
+-    return r;
+-}
+-
+-function swizzleI(lanes) {
+-    let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
+-                                 'var swz = i16x8.swizzle;' +
+-                                 `function f(a) { a = i16x8chk(a); return swz(a, ${lanes.join()}); } return f`), this);
+-    let a1 = [    -1,2,    -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
+-    let a2 = [0x8000,2,0x8000,0x7fff,   0,0, 8,-9].map(x => x << 16 >> 16);
+-    let v1 = SIMD.Int16x8(...a1);
+-    let v2 = SIMD.Int16x8(...a2);
+-    assertEqVecArr(asm(v1), swizzle(a1, lanes));
+-    assertEqVecArr(asm(v2), swizzle(a2, lanes));
+-}
+-
+-swizzleI([3, 4, 7, 1, 4, 3, 1, 2]);
+-swizzleI([0, 0, 0, 0, 0, 0, 0, 0]);
+-swizzleI([7, 7, 7, 7, 7, 7, 7, 7]);
+-
+-function swizzleU(lanes) {
+-    let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + U16x8 + U16x8I16x8 + I16x8U16x8 +
+-                                 'var swz = u16x8.swizzle;' +
+-                                 `function f(a) { a = i16x8chk(a); return i16x8u16x8(swz(u16x8i16x8(a), ${lanes.join()})); } return f`), this);
+-    let a1 = [    -1,2,    -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
+-    let a2 = [0x8000,2,0x8000,0x7fff,   0,0, 8,-9].map(x => x << 16 >> 16);
+-    let v1 = SIMD.Int16x8(...a1);
+-    let v2 = SIMD.Int16x8(...a2);
+-    assertEqVecArr(asm(v1), swizzle(a1, lanes));
+-    assertEqVecArr(asm(v2), swizzle(a2, lanes));
+-}
+-
+-swizzleU([3, 4, 7, 1, 4, 3, 1, 2]);
+-swizzleU([0, 0, 0, 0, 0, 0, 0, 0]);
+-swizzleU([7, 7, 7, 7, 7, 7, 7, 7]);
+-
+-// Out-of-range lane indexes.
+-assertAsmTypeFail('glob', USE_ASM + I16x8 + 'var swz = i16x8.swizzle; ' +
+-                  'function f() { var x=i16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8); } return f');
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + 'var swz = u16x8.swizzle; ' +
+-                  'function f() { var x=u16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8); } return f');
+-// Missing lane indexes.
+-assertAsmTypeFail('glob', USE_ASM + I16x8 + 'var swz = i16x8.swizzle; ' +
+-                  'function f() { var x=i16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7); } return f');
+-assertAsmTypeFail('glob', USE_ASM + U16x8 + 'var swz = u16x8.swizzle; ' +
+-                  'function f() { var x=u16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7); } return f');
+-
+-
+-// Test shuffle.
+-function shuffle(vec1, vec2, lanes) {
+-    let r = [];
+-    let vec = vec1.concat(vec2)
+-    for (let i = 0; i < 8; i++)
+-        r.push(vec[lanes[i]]);
+-    return r;
+-}
+-
+-function shuffleI(lanes) {
+-    let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
+-                                 'var shuf = i16x8.shuffle;' +
+-                                 `function f(a1, a2) { a1 = i16x8chk(a1); a2 = i16x8chk(a2); return shuf(a1, a2, ${lanes.join()}); } return f`), this);
+-    let a1 = [    -1,2,    -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
+-    let a2 = [0x8000,2,0x8000,0x7fff,   0,0, 8,-9].map(x => x << 16 >> 16);
+-    let v1 = SIMD.Int16x8(...a1);
+-    let v2 = SIMD.Int16x8(...a2);
+-    assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
+-}
+-
+-function shuffleU(lanes) {
+-    let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + U16x8 + U16x8I16x8 + I16x8U16x8 +
+-                                 'var shuf = u16x8.shuffle;' +
+-                                 'function f(a1, a2) { a1 = i16x8chk(a1); a2 = i16x8chk(a2); ' +
+-                                 `return i16x8u16x8(shuf(u16x8i16x8(a1), u16x8i16x8(a2), ${lanes.join()})); } return f`), this);
+-    let a1 = [    -1,2,    -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
+-    let a2 = [0x8000,2,0x8000,0x7fff,   0,0, 8,-9].map(x => x << 16 >> 16);
+-    let v1 = SIMD.Int16x8(...a1);
+-    let v2 = SIMD.Int16x8(...a2);
+-    assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
+-}
+-
+-shuffleI([0, 0, 0, 0, 0, 0, 0, 0])
+-shuffleI([15, 15, 15, 15, 15, 15, 15, 15])
+-shuffleI([6, 2, 0, 14, 6, 10, 11, 1])
+-
+-shuffleU([7, 7, 7, 7, 7, 7, 7, 7])
+-shuffleU([8, 15, 15, 15, 15, 15, 15, 15])
+-shuffleU([6, 2, 0, 14, 6, 10, 11, 1])
+-
+-// Test unary operators.
+-function unaryI(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
+-                                      `var fut = i16x8.${opname};` +
+-                                      'function f(v) { v = i16x8chk(v); return fut(v); } return f'), this);
+-    let a = [65000-1,2,65000-3,4,65000-5,6,65000-7,65000-9];
+-    let v = SIMD.Int16x8(...a);
+-    assertEqVecArr(simdfunc(v), a.map(lanefunc));
+-}
+-
+-function unaryU(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
+-                                      `var fut = u16x8.${opname};` +
+-                                      'function f(v) { v = i16x8chk(v); return i16x8u16x8(fut(u16x8i16x8(v))); } return f'), this);
+-    let a = [65000-1,2,65000-3,4,65000-5,6,65000-7,65000-9];
+-    let v = SIMD.Int16x8(...a);
+-    assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(simdfunc(v)), a.map(lanefunc));
+-}
+-
+-function unaryB(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8CHK +
+-                                      `var fut = b16x8.${opname};` +
+-                                      'function f(v) { v = b16x8chk(v); return fut(v); } return f'), this);
+-    let a = [1,1,0,1,1,0,0,0];
+-    let v = SIMD.Bool16x8(...a);
+-    assertEqVecArr(simdfunc(v), a.map(lanefunc));
+-}
+-
+-unaryI('not', x => ~x << 16 >> 16);
+-unaryU('not', x => ~x << 16 >>> 16);
+-unaryB('not', x => !x);
+-unaryI('neg', x => -x << 16 >> 16);
+-unaryU('neg', x => -x << 16 >>> 16);
+-
+-
+-// Test binary operators.
+-function zipmap(a1, a2, f) {
+-    assertEq(a1.length, a2.length);
+-    let r = [];
+-    for (var i = 0; i < a1.length; i++)
+-        r.push(f(a1[i], a2[i]));
+-    return r
+-}
+-
+-function binaryI(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
+-                                      `var fut = i16x8.${opname};` +
+-                                      'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return fut(v1, v2); } return f'), this);
+-    let a1 = [    -1,2,    -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
+-    let a2 = [0x8000,2,0x8000,0x7fff,   0,0, 8,-9].map(x => x << 16 >> 16);
+-    let ref = zipmap(a1, a2, lanefunc);
+-    let v1 = SIMD.Int16x8(...a1);
+-    let v2 = SIMD.Int16x8(...a2);
+-    assertEqVecArr(simdfunc(v1, v2), ref);
+-}
+-
+-function binaryU(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
+-                                      `var fut = u16x8.${opname};` +
+-                                      'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return i16x8u16x8(fut(u16x8i16x8(v1), u16x8i16x8(v2))); } return f'), this);
+-    let a1 = [    -1,2,    -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >>> 16);
+-    let a2 = [0x8000,2,0x8000,0x7fff,   0,0, 8,-9].map(x => x << 16 >>> 16);
+-    let ref = zipmap(a1, a2, lanefunc);
+-    let v1 = SIMD.Int16x8(...a1);
+-    let v2 = SIMD.Int16x8(...a2);
+-    let res = SIMD.Uint16x8.fromInt16x8Bits(simdfunc(v1, v2));
+-    assertEqVecArr(res, ref);
+-}
+-
+-function binaryB(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8CHK +
+-                                      `var fut = b16x8.${opname};` +
+-                                      'function f(v1, v2) { v1 = b16x8chk(v1); v2 = b16x8chk(v2); return fut(v1, v2); } return f'), this);
+-    let a = [1,1,0,1,1,0,0,0];
+-    let v = SIMD.Bool16x8(...a);
+-    assertEqVecArr(simdfunc(v), a.map(lanefunc));
+-}
+-
+-binaryI('add', (x, y) => (x + y) << 16 >> 16);
+-binaryI('sub', (x, y) => (x - y) << 16 >> 16);
+-binaryI('mul', (x, y) => (x * y) << 16 >> 16);
+-binaryU('add', (x, y) => (x + y) << 16 >>> 16);
+-binaryU('sub', (x, y) => (x - y) << 16 >>> 16);
+-binaryU('mul', (x, y) => (x * y) << 16 >>> 16);
+-
+-binaryI('and', (x, y) => (x & y) << 16 >> 16);
+-binaryI('or',  (x, y) => (x | y) << 16 >> 16);
+-binaryI('xor', (x, y) => (x ^ y) << 16 >> 16);
+-binaryU('and', (x, y) => (x & y) << 16 >>> 16);
+-binaryU('or',  (x, y) => (x | y) << 16 >>> 16);
+-binaryU('xor', (x, y) => (x ^ y) << 16 >>> 16);
+-
+-function sat(x, lo, hi) {
+-    if (x < lo) return lo;
+-    if (x > hi) return hi;
+-    return x
+-}
+-function isat(x) { return sat(x, -32768, 32767); }
+-function usat(x) { return sat(x, 0, 0xffff); }
+-
+-binaryI('addSaturate', (x, y) => isat(x + y))
+-binaryI('subSaturate', (x, y) => isat(x - y))
+-binaryU('addSaturate', (x, y) => usat(x + y))
+-binaryU('subSaturate', (x, y) => usat(x - y))
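isat and usat above clamp to the int16 and uint16 ranges, so addSaturate/subSaturate lanes pin at the rails instead of wrapping. A quick check of the clamp semantics in plain JS:

    const isat = x => Math.min(32767, Math.max(-32768, x));
    console.log(isat(0x7fff + 1)); // 32767, not the wrapped -32768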
+-
+-
+-// Test shift operators.
+-function zip1map(a, s, f) {
+-    return a.map(x => f(x, s));
+-}
+-
+-function shiftI(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
+-                                      `var fut = i16x8.${opname};` +
+-                                      'function f(v, s) { v = i16x8chk(v); s = s|0; return fut(v, s); } return f'), this);
+-    let a = [-1,2,-3,0x80,0x7f,6,0x8000,0x7fff];
+-    let v = SIMD.Int16x8(...a);
+-    for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
+-        let ref = zip1map(a, s, lanefunc);
+-        // 1. Test dynamic shift amount.
+-        assertEqVecArr(simdfunc(v, s), ref);
+-
+-        // 2. Test constant shift amount.
+-        let cstf = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
+-                                      `var fut = i16x8.${opname};` +
+-                                      `function f(v) { v = i16x8chk(v); return fut(v, ${s}); } return f`), this);
+-        assertEqVecArr(cstf(v, s), ref);
+-    }
+-}
+-
+-function shiftU(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
+-                                      `var fut = u16x8.${opname};` +
+-                                      'function f(v, s) { v = i16x8chk(v); s = s|0; return i16x8u16x8(fut(u16x8i16x8(v), s)); } return f'), this);
+-    let a = [-1,2,-3,0x80,0x7f,6,0x8000,0x7fff];
+-    let v = SIMD.Int16x8(...a);
+-    for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
+-        let ref = zip1map(a, s, lanefunc);
+-        // 1. Test dynamic shift amount.
+-        assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(simdfunc(v, s)), ref);
+-
+-        // 2. Test constant shift amount.
+-        let cstf = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
+-                                      `var fut = u16x8.${opname};` +
+-                                      `function f(v) { v = i16x8chk(v); return i16x8u16x8(fut(u16x8i16x8(v), ${s})); } return f`), this);
+-        assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(cstf(v, s)), ref);
+-    }
+-}
+-
+-shiftI('shiftLeftByScalar', (x,s) => (x << (s & 15)) << 16 >> 16);
+-shiftU('shiftLeftByScalar', (x,s) => (x << (s & 15)) << 16 >>> 16);
+-shiftI('shiftRightByScalar', (x,s) => ((x << 16 >> 16) >> (s & 15)) << 16 >> 16);
+-shiftU('shiftRightByScalar', (x,s) => ((x << 16 >>> 16) >>> (s & 15)) << 16 >>> 16);
+-
+-
+-// Comparisons.
+-function compareI(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
+-                                      `var fut = i16x8.${opname};` +
+-                                      'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return fut(v1, v2); } return f'), this);
+-    let a1 = [    -1,2,    -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
+-    let a2 = [0x8000,2,0x8000,0x7fff,   0,0, 8,-9].map(x => x << 16 >> 16);
+-    let ref = zipmap(a1, a2, lanefunc);
+-    let v1 = SIMD.Int16x8(...a1);
+-    let v2 = SIMD.Int16x8(...a2);
+-    assertEqVecArr(simdfunc(v1, v2), ref);
+-}
+-
+-function compareU(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + U16x8 + U16x8I16x8 +
+-                                      `var fut = u16x8.${opname};` +
+-                                      'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return fut(u16x8i16x8(v1), u16x8i16x8(v2)); } return f'), this);
+-    let a1 = [    -1,2,    -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >>> 16);
+-    let a2 = [0x8000,2,0x8000,0x7fff,   0,0, 8,-9].map(x => x << 16 >>> 16);
+-    let ref = zipmap(a1, a2, lanefunc);
+-    let v1 = SIMD.Int16x8(...a1);
+-    let v2 = SIMD.Int16x8(...a2);
+-    assertEqVecArr(simdfunc(v1, v2), ref);
+-}
+-
+-compareI("equal", (x,y) => x == y);
+-compareU("equal", (x,y) => x == y);
+-compareI("notEqual", (x,y) => x != y);
+-compareU("notEqual", (x,y) => x != y);
+-compareI("lessThan", (x,y) => x < y);
+-compareU("lessThan", (x,y) => x < y);
+-compareI("lessThanOrEqual", (x,y) => x <= y);
+-compareU("lessThanOrEqual", (x,y) => x <= y);
+-compareI("greaterThan", (x,y) => x > y);
+-compareU("greaterThan", (x,y) => x > y);
+-compareI("greaterThanOrEqual", (x,y) => x >= y);
+-compareU("greaterThanOrEqual", (x,y) => x >= y);
+diff --git a/js/src/jit-test/tests/asm.js/testSIMD-8x16.js b/js/src/jit-test/tests/asm.js/testSIMD-8x16.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/asm.js/testSIMD-8x16.js
++++ /dev/null
+@@ -1,539 +0,0 @@
+-load(libdir + "asm.js");
+-load(libdir + "simd.js");
+-load(libdir + "asserts.js");
+-
+-// Set to true to see more JS debugging spew.
+-const DEBUG = false;
+-
+-if (!isSimdAvailable()) {
+-    DEBUG && print("won't run tests as simd extensions aren't activated yet");
+-    quit(0);
+-}
+-
+-// Tests for 8x16 SIMD types: Int8x16, Uint8x16, Bool8x16.
+-
+-const I8x16 = 'var i8x16 = glob.SIMD.Int8x16;'
+-const I8x16CHK = 'var i8x16chk = i8x16.check;'
+-const I8x16EXT = 'var i8x16ext = i8x16.extractLane;'
+-const I8x16REP = 'var i8x16rep = i8x16.replaceLane;'
+-const I8x16U8x16 = 'var i8x16u8x16 = i8x16.fromUint8x16Bits;'
+-
+-const U8x16 = 'var u8x16 = glob.SIMD.Uint8x16;'
+-const U8x16CHK = 'var u8x16chk = u8x16.check;'
+-const U8x16EXT = 'var u8x16ext = u8x16.extractLane;'
+-const U8x16REP = 'var u8x16rep = u8x16.replaceLane;'
+-const U8x16I8x16 = 'var u8x16i8x16 = u8x16.fromInt8x16Bits;'
+-
+-const B8x16 = 'var b8x16 = glob.SIMD.Bool8x16;'
+-const B8x16CHK = 'var b8x16chk = b8x16.check;'
+-const B8x16EXT = 'var b8x16ext = b8x16.extractLane;'
+-const B8x16REP = 'var b8x16rep = b8x16.replaceLane;'
+-
+-const INT8_MAX = 127
+-const INT8_MIN = -128
+-const UINT8_MAX = 255
+-
+-// Linking
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {} return f"), {SIMD:{Int8x16: SIMD.Int8x16}})(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + U8x16 + "function f() {} return f"), {SIMD:{Uint8x16: SIMD.Uint8x16}})(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + B8x16 + "function f() {} return f"), {SIMD:{Bool8x16: SIMD.Bool8x16}})(), undefined);
+-
+-// Local variable of Int8x16 type.
+-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Int8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16();} return f");
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16|0);} return f");
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f"), this)(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15," + (INT8_MAX + 1) + ");} return f"), this)(), undefined);
+-
+-// Local variable of Uint8x16 type.
+-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Uint8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16;} return f");
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16();} return f");
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16|0);} return f");
+-assertEq(asmLink(asmCompile('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f"), this)(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15," + (UINT8_MAX + 1) + ");} return f"), this)(), undefined);
+-
+-// Local variable of Bool8x16 type.
+-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Bool8x16(1,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16;} return f");
+-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16();} return f");
+-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1.0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1|0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1);} return f");
+-assertEq(asmLink(asmCompile('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,-1,2,-2,1,1,1);} return f"), this)(), undefined);
+-
+-// Global variable of Int8x16 type.
+-assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + I8x16 + I8x16CHK + "var g=i8x16chk(ffi.g); function f() { return i8x16chk(g); } return f"), this,
+-                       {g: SIMD.Int8x16(1,2,3,4,5,6,7,8,10,11,12,13,14,15,16,17)})(), [1,2,3,4,5,6,7,8,10,11,12,13,14,15,16,17]);
+-assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + I8x16 + I8x16CHK + "var g=i8x16chk(ffi.g); function f() { g=i8x16(5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8); return i8x16chk(g); } return f"), this,
+-                       {g: SIMD.Int8x16(1,2,3,4,5,6,7,8,10,11,12,13,14,15,16,17)})(), [5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8]);
+-
+-// Global variable of Bool8x16 type.
+-assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + B8x16 + B8x16CHK + "var g=b8x16chk(ffi.g); function f() { return b8x16chk(g); } return f"), this,
+-                       {g: SIMD.Bool8x16(1,1,0,1,0,0,1,0,0,1,0,1,0,0,1,0)})(), [true,true,false,true,false,false,true,false,false,true,false,true,false,false,true,false]);
+-assertEqVecArr(asmLink(asmCompile('glob', 'ffi', USE_ASM + B8x16 + B8x16CHK + "var g=b8x16chk(ffi.g); function f() { g=b8x16(1,1,0,1,0,1,1,1,0,1,0,1,1,1,0,0); return b8x16chk(g); } return f"), this,
+-                       {g: SIMD.Bool8x16(1,1,0,1,0,0,1,0)})(), [true,true,false,true,false,true,true,true,false,true,false,true,true,true,false,false]);
+-
+-// Unsigned SIMD globals are not allowed.
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + U8x16 + U8x16CHK + "var g=u8x16chk(ffi.g); function f() { } return f");
+-
+-// Only signed Int8x16 allowed as return value.
+-assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {return i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f"), this)(),
+-           [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
+-assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + "function f() {return i8x16chk(i8x16(1,2,3,132,5,6,7,8,9,10,11,12,13,14,15,16));} return f"), this)(),
+-           [1, 2, 3, -124, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {return u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + U8x16CHK + "function f() {return u8x16chk(u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16));} return f");
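The -124 expected above is the 8-bit analogue of the 16x8 idiom: lane values are reinterpreted through `<< 24 >> 24`. A one-line check:

    const wrapI8 = x => (x << 24) >> 24;
    console.log(wrapI8(132)); // -124, matching the expected lane value above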
+-
+-// Test splat.
+-function splat(x) {
+-    let r = []
+-    for (let i = 0; i < 16; i++)
+-        r.push(x);
+-    return r
+-}
+-
+-splatB = asmLink(asmCompile('glob', USE_ASM + B8x16 +
+-                            'var splat = b8x16.splat;' +
+-                            'function f(x) { x = x|0; return splat(x); } return f'), this);
+-assertEqVecArr(splatB(true), splat(true));
+-assertEqVecArr(splatB(false), splat(false));
+-
+-
+-splatB0 = asmLink(asmCompile('glob', USE_ASM + B8x16 +
+-                             'var splat = b8x16.splat;' +
+-                             'function f() { var x = 0; return splat(x); } return f'), this);
+-assertEqVecArr(splatB0(), splat(false));
+-splatB1 = asmLink(asmCompile('glob', USE_ASM + B8x16 +
+-                             'var splat = b8x16.splat;' +
+-                             'function f() { var x = 1; return splat(x); } return f'), this);
+-assertEqVecArr(splatB1(), splat(true));
+-
+-splatI = asmLink(asmCompile('glob', USE_ASM + I8x16 +
+-                            'var splat = i8x16.splat;' +
+-                            'function f(x) { x = x|0; return splat(x); } return f'), this);
+-for (let x of [0, 1, -1, 0x1234, 0x12, 1000, -1000000]) {
+-    assertEqVecArr(splatI(x), splat(x << 24 >> 24));
+-}
+-
+-splatIc = asmLink(asmCompile('glob', USE_ASM + I8x16 +
+-                             'var splat = i8x16.splat;' +
+-                             'function f() { var x = 100; return splat(x); } return f'), this);
+-assertEqVecArr(splatIc(), splat(100))
+-
+-splatU = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16U8x16 +
+-                            'var splat = u8x16.splat;' +
+-                            'function f(x) { x = x|0; return i8x16u8x16(splat(x)); } return f'), this);
+-for (let x of [0, 1, -1, 0x1234, 0x12, 1000, -1000000]) {
+-    assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(splatI(x)), splat(x << 24 >>> 24));
+-}
+-
+-splatUc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16U8x16 +
+-                            'var splat = u8x16.splat;' +
+-                            'function f() { var x = 200; return i8x16u8x16(splat(x)); } return f'), this);
+-assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(splatUc()), splat(200))
+-
+-
+-// Test extractLane.
+-//
+-// The lane index must be a literal int, and we generate different code for
+-// different lanes.
+-function extractI(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16EXT +
+-                            `function f() {var x=i8x16(${a.join(',')}); return i8x16ext(x, ${i})|0; } return f`), this)();
+-}
+-a = [-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,-14,-15,-16];
+-for (var i = 0; i < 16; i++)
+-  assertEq(extractI(a, i), a[i]);
+-a = a.map(x => -x);
+-for (var i = 0; i < 16; i++)
+-  assertEq(extractI(a, i), a[i]);
+-
+-function extractU(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + U8x16 + U8x16EXT +
+-                            `function f() {var x=u8x16(${a.join(',')}); return u8x16ext(x, ${i})|0; } return f`), this)();
+-}
+-a = [1,255,12,13,14,150,200,3,4,5,6,7,8,9,10,16];
+-for (var i = 0; i < 16; i++)
+-  assertEq(extractU(a, i), a[i]);
+-a = a.map(x => 255-x);
+-for (var i = 0; i < 16; i++)
+-  assertEq(extractU(a, i), a[i]);
+-
+-function extractB(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16EXT +
+-                            `function f() {var x=b8x16(${a.join(',')}); return b8x16ext(x, ${i})|0; } return f`), this)();
+-}
+-a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
+-for (var i = 0; i < 16; i++)
+-  assertEq(extractB(a, i), a[i]);
+-a = a.map(x => 1-x);
+-for (var i = 0; i < 16; i++)
+-  assertEq(extractB(a, i), a[i]);
+-
+-// Test replaceLane.
+-function replaceI(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16REP +
+-                            `function f(v) {v=v|0; var x=i8x16(${a.join(',')}); return i8x16rep(x,${i},v); } return f`), this);
+-}
+-a = [-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,-14,-15,-16];
+-for (var i = 0; i < 16; i++) {
+-    var f = replaceI(a, i);
+-    var b = a.slice(0);
+-    b[i] = -20;
+-    assertEqVecArr(f(-20), b);
+-}
+-
+-function replaceU(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + U8x16 + U8x16REP + I8x16 + I8x16U8x16 +
+-                            `function f(v) {v=v|0; var x=u8x16(${a.join(',')}); x=u8x16rep(x,${i},v); return i8x16u8x16(x); } return f`), this);
+-}
+-a = [256-1,2,256-3,4,256-5,6,256-7,8,256-9,10,256-11,12,256-13,256-14,256-15,256-16];
+-for (var i = 0; i < 16; i++) {
+-    // Result returned as Int8x16, convert back.
+-    var rawf = replaceU(a, i);
+-    var f = x => SIMD.Uint8x16.fromInt8x16Bits(rawf(x));
+-    var b = a.slice(0);
+-    b[i] = 100;
+-    assertEqVecArr(f(100), b);
+-}
+-
+-function replaceB(a, i) {
+-  return asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16REP +
+-                            `function f(v) {v=v|0; var x=b8x16(${a.join(',')}); return b8x16rep(x,${i},v); } return f`), this);
+-}
+-a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
+-for (var i = 0; i < 16; i++) {
+-    var f = replaceB(a, i);
+-    var b = a.slice(0);
+-    v = 1 - a[i];
+-    b[i] = v;
+-    assertEqVecArr(f(v), b.map(x => !!x));
+-}
+-
+-
+-// Test select.
+-selectI = asmLink(asmCompile('glob', USE_ASM + I8x16 + B8x16 + B8x16CHK +
+-                             'var select = i8x16.select;' +
+-                             'var a = i8x16(-1,2,-3,4,-5, 6,-7, 8,-9,10,-11,12,-13,-14,-15,-16);' +
+-                             'var b = i8x16( 5,6, 7,8, 9,10,11,12,13,14, 15,16,-77, 45, 32,  0);' +
+-                             'function f(x) { x = b8x16chk(x); return select(x, a, b); } return f'), this);
+-assertEqVecArr(selectI(SIMD.Bool8x16( 0,0, 1,0, 1,1, 1, 0, 1, 1, 0, 0,  1,  1, 0,  1)),
+-                                    [ 5,6,-3,8,-5,6,-7,12,-9,10,15,16,-13,-14,32,-16]);
+-
+-selectU = asmLink(asmCompile('glob', USE_ASM + I8x16 + B8x16 + B8x16CHK + U8x16 + I8x16U8x16 + U8x16I8x16 +
+-                             'var select = u8x16.select;' +
+-                             'var a = i8x16(-1,2,-3,4,-5, 6,-7, 8,-9,10,-11,12,-13,-14,-15,-16);' +
+-                             'var b = i8x16( 5,6, 7,8, 9,10,11,12,13,14, 15,16,-77, 45, 32,  0);' +
+-                             'function f(x) { x = b8x16chk(x); return i8x16u8x16(select(x, u8x16i8x16(a), u8x16i8x16(b))); } return f'), this);
+-assertEqVecArr(selectU(SIMD.Bool8x16( 0,0, 1,0, 1,1, 1, 0, 1, 1, 0, 0,  1,  1, 0,  1)),
+-                                    [ 5,6,-3,8,-5,6,-7,12,-9,10,15,16,-13,-14,32,-16]);
+-
+-
+-// Test swizzle.
+-function swizzle(vec, lanes) {
+-    let r = [];
+-    for (let i = 0; i < 16; i++)
+-        r.push(vec[lanes[i]]);
+-    return r;
+-}
+-
+-function swizzleI(lanes) {
+-    let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
+-                                 'var swz = i8x16.swizzle;' +
+-                                 `function f(a) { a = i8x16chk(a); return swz(a, ${lanes.join()}); } return f`), this);
+-    let a1 = [  -1,2,  -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
+-    let a2 = [-128,2,-128,0x7f,   0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,  -1];
+-    let v1 = SIMD.Int8x16(...a1);
+-    let v2 = SIMD.Int8x16(...a2);
+-    assertEqVecArr(asm(v1), swizzle(a1, lanes));
+-    assertEqVecArr(asm(v2), swizzle(a2, lanes));
+-}
+-
+-swizzleI([10, 1, 7, 5, 1, 2, 6, 8, 5, 13, 0, 6, 2, 8, 0, 9]);
+-swizzleI([ 0, 0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0]);
+-swizzleI([15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15]);
+-
+-function swizzleU(lanes) {
+-    let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + U8x16 + U8x16I8x16 + I8x16U8x16 +
+-                                 'var swz = u8x16.swizzle;' +
+-                                 `function f(a) { a = i8x16chk(a); return i8x16u8x16(swz(u8x16i8x16(a), ${lanes.join()})); } return f`), this);
+-    let a1 = [  -1,2,  -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
+-    let a2 = [-128,2,-128,0x7f,   0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,  -1];
+-    let v1 = SIMD.Int8x16(...a1);
+-    let v2 = SIMD.Int8x16(...a2);
+-    assertEqVecArr(asm(v1), swizzle(a1, lanes));
+-    assertEqVecArr(asm(v2), swizzle(a2, lanes));
+-}
+-
+-swizzleU([10, 1, 7, 5, 1, 2, 6, 8, 5, 13, 0, 6, 2, 8, 0, 9]);
+-swizzleU([ 0, 0, 0, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0]);
+-swizzleU([15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15]);
+-
+-// Out-of-range lane indexes.
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var swz = i8x16.swizzle; ' +
+-                  'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16); } return f');
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var swz = u8x16.swizzle; ' +
+-                  'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16); } return f');
+-// Missing lane indexes.
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var swz = i8x16.swizzle; ' +
+-                  'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var swz = u8x16.swizzle; ' +
+-                  'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
+-
+-
+-// Test shuffle.
+-function shuffle(vec1, vec2, lanes) {
+-    let r = [];
+-    let vec = vec1.concat(vec2);
+-    for (let i = 0; i < 16; i++)
+-        r.push(vec[lanes[i]]);
+-    return r;
+-}
+-
+-function shuffleI(lanes) {
+-    let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
+-                                 'var shuf = i8x16.shuffle;' +
+-                                 `function f(a1, a2) { a1 = i8x16chk(a1); a2 = i8x16chk(a2); return shuf(a1, a2, ${lanes.join()}); } return f`), this);
+-    let a1 = [  -1,2,  -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
+-    let a2 = [-128,2,-128,0x7f,   0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,  -1];
+-    let v1 = SIMD.Int8x16(...a1);
+-    let v2 = SIMD.Int8x16(...a2);
+-    assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
+-}
+-
+-shuffleI([31, 9, 5, 4, 29, 12, 19, 10, 16, 22, 10, 9, 6, 18, 9, 8]);
+-shuffleI([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-shuffleI([31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31]);
+-
+-function shuffleU(lanes) {
+-    let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + U8x16 + U8x16I8x16 + I8x16U8x16 +
+-                                 'var shuf = u8x16.shuffle;' +
+-                                 'function f(a1, a2) { a1 = i8x16chk(a1); a2 = i8x16chk(a2); ' +
+-                                 `return i8x16u8x16(shuf(u8x16i8x16(a1), u8x16i8x16(a2), ${lanes.join()})); } return f`), this);
+-    let a1 = [  -1,2,  -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
+-    let a2 = [-128,2,-128,0x7f,   0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,  -1];
+-    let v1 = SIMD.Int8x16(...a1);
+-    let v2 = SIMD.Int8x16(...a2);
+-    assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
+-}
+-
+-shuffleU([31, 9, 5, 4, 29, 12, 19, 10, 16, 22, 10, 9, 6, 18, 9, 8]);
+-shuffleU([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-shuffleU([31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31]);
+-
+-
+-// Out-of-range lane indexes.
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var shuf = i8x16.shuffle; ' +
+-                  'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,32); } return f');
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var shuf = u8x16.shuffle; ' +
+-                  'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,32); } return f');
+-// Missing lane indexes.
+-assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var shuf = i8x16.shuffle; ' +
+-                  'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
+-assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var shuf = u8x16.shuffle; ' +
+-                  'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
+-
+-
+-// Test unary operators.
+-function unaryI(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
+-                                      `var fut = i8x16.${opname};` +
+-                                      'function f(v) { v = i8x16chk(v); return fut(v); } return f'), this);
+-    let a = [-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,-14,-15,-16];
+-    let v = SIMD.Int8x16(...a);
+-    assertEqVecArr(simdfunc(v), a.map(lanefunc));
+-}
+-
+-function unaryU(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
+-                                      `var fut = u8x16.${opname};` +
+-                                      'function f(v) { v = i8x16chk(v); return i8x16u8x16(fut(u8x16i8x16(v))); } return f'), this);
+-    let a = [256-1,2,256-3,4,256-5,6,256-7,8,256-9,10,256-11,12,256-13,256-14,256-15,256-16];
+-    let v = SIMD.Int8x16(...a);
+-    assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(simdfunc(v)), a.map(lanefunc));
+-}
+-
+-function unaryB(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16CHK +
+-                                      `var fut = b8x16.${opname};` +
+-                                      'function f(v) { v = b8x16chk(v); return fut(v); } return f'), this);
+-    let a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
+-    let v = SIMD.Bool8x16(...a);
+-    assertEqVecArr(simdfunc(v), a.map(lanefunc));
+-}
+-
+-unaryI('not', x => ~x << 24 >> 24);
+-unaryU('not', x => ~x << 24 >>> 24);
+-unaryB('not', x => !x);
+-unaryI('neg', x => -x << 24 >> 24);
+-unaryU('neg', x => -x << 24 >>> 24);
+-
+-
+-// Test binary operators.
+-function zipmap(a1, a2, f) {
+-    assertEq(a1.length, a2.length);
+-    let r = [];
+-    for (var i = 0; i < a1.length; i++)
+-        r.push(f(a1[i], a2[i]));
+-    return r
+-}
+-
+-function binaryI(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
+-                                      `var fut = i8x16.${opname};` +
+-                                      'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return fut(v1, v2); } return f'), this);
+-    let a1 = [  -1,2,  -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
+-    let a2 = [-128,2,-128,0x7f,   0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,  -1];
+-    let ref = zipmap(a1, a2, lanefunc);
+-    let v1 = SIMD.Int8x16(...a1);
+-    let v2 = SIMD.Int8x16(...a2);
+-    assertEqVecArr(simdfunc(v1, v2), ref);
+-}
+-
+-function binaryU(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
+-                                      `var fut = u8x16.${opname};` +
+-                                      'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return i8x16u8x16(fut(u8x16i8x16(v1), u8x16i8x16(v2))); } return f'), this);
+-    let a1 = [  -1,2,  -3,0x80,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16].map(x => x & 0xff);
+-    let a2 = [0x80,2,0x80,0x7f,   0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,0xff].map(x => x & 0xff);
+-    let ref = zipmap(a1, a2, lanefunc);
+-    let v1 = SIMD.Int8x16(...a1);
+-    let v2 = SIMD.Int8x16(...a2);
+-    let res = SIMD.Uint8x16.fromInt8x16Bits(simdfunc(v1, v2));
+-    assertEqVecArr(res, ref);
+-}
+-
+-function binaryB(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16CHK +
+-                                      `var fut = b8x16.${opname};` +
+-                                      'function f(v1, v2) { v1 = b8x16chk(v1); v2 = b8x16chk(v2); return fut(v1, v2); } return f'), this);
+-    let a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
+-    let v = SIMD.Bool8x16(...a);
+-    assertEqVecArr(simdfunc(v), a.map(lanefunc));
+-}
+-
+-binaryI('add', (x, y) => (x + y) << 24 >> 24);
+-binaryI('sub', (x, y) => (x - y) << 24 >> 24);
+-binaryI('mul', (x, y) => (x * y) << 24 >> 24);
+-binaryU('add', (x, y) => (x + y) << 24 >>> 24);
+-binaryU('sub', (x, y) => (x - y) << 24 >>> 24);
+-binaryU('mul', (x, y) => (x * y) << 24 >>> 24);
+-
+-binaryI('and', (x, y) => (x & y) << 24 >> 24);
+-binaryI('or',  (x, y) => (x | y) << 24 >> 24);
+-binaryI('xor', (x, y) => (x ^ y) << 24 >> 24);
+-binaryU('and', (x, y) => (x & y) << 24 >>> 24);
+-binaryU('or',  (x, y) => (x | y) << 24 >>> 24);
+-binaryU('xor', (x, y) => (x ^ y) << 24 >>> 24);
+-
+-function sat(x, lo, hi) {
+-    if (x < lo) return lo;
+-    if (x > hi) return hi;
+-    return x
+-}
+-function isat(x) { return sat(x, -128, 127); }
+-function usat(x) { return sat(x, 0, 255); }
+-
+-binaryI('addSaturate', (x, y) => isat(x + y))
+-binaryI('subSaturate', (x, y) => isat(x - y))
+-binaryU('addSaturate', (x, y) => usat(x + y))
+-binaryU('subSaturate', (x, y) => usat(x - y))
+-
+-// Test shift operators.
+-function zip1map(a, s, f) {
+-    return a.map(x => f(x, s));
+-}
+-
+-function shiftI(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
+-                                      `var fut = i8x16.${opname};` +
+-                                      'function f(v, s) { v = i8x16chk(v); s = s|0; return fut(v, s); } return f'), this);
+-    let a = [0x80,2,0x80,0x7f,   0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,0xff];
+-    let v = SIMD.Int8x16(...a);
+-    for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
+-        let ref = zip1map(a, s, lanefunc);
+-        // 1. Test dynamic shift amount.
+-        assertEqVecArr(simdfunc(v, s), ref);
+-
+-        // 2. Test constant shift amount.
+-        let cstf = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
+-                                      `var fut = i8x16.${opname};` +
+-                                      `function f(v) { v = i8x16chk(v); return fut(v, ${s}); } return f`), this);
+-        assertEqVecArr(cstf(v, s), ref);
+-    }
+-}
+-
+-function shiftU(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
+-                                      `var fut = u8x16.${opname};` +
+-                                      'function f(v, s) { v = i8x16chk(v); s = s|0; return i8x16u8x16(fut(u8x16i8x16(v), s)); } return f'), this);
+-    let a = [0x80,2,0x80,0x7f,   0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,0xff];
+-    let v = SIMD.Int8x16(...a);
+-    for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
+-        let ref = zip1map(a, s, lanefunc);
+-        // 1. Test dynamic shift amount.
+-        assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(simdfunc(v, s)), ref);
+-
+-        // 2. Test constant shift amount.
+-        let cstf = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
+-                                      `var fut = u8x16.${opname};` +
+-                                      `function f(v) { v = i8x16chk(v); return i8x16u8x16(fut(u8x16i8x16(v), ${s})); } return f`), this);
+-        assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(cstf(v, s)), ref);
+-    }
+-}
+-
+-shiftI('shiftLeftByScalar', (x,s) => (x << (s & 7)) << 24 >> 24);
+-shiftU('shiftLeftByScalar', (x,s) => (x << (s & 7)) << 24 >>> 24);
+-shiftI('shiftRightByScalar', (x,s) => ((x << 24 >> 24) >> (s & 7)) << 24 >> 24);
+-shiftU('shiftRightByScalar', (x,s) => ((x << 24 >>> 24) >>> (s & 7)) << 24 >>> 24);
+-
+-
+-// Comparisons.
+-function compareI(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
+-                                      `var fut = i8x16.${opname};` +
+-                                      'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return fut(v1, v2); } return f'), this);
+-    let a1 = [  -1,2,  -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
+-    let a2 = [-128,2,-128,0x7f,   0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,  -1];
+-    let ref = zipmap(a1, a2, lanefunc);
+-    let v1 = SIMD.Int8x16(...a1);
+-    let v2 = SIMD.Int8x16(...a2);
+-    assertEqVecArr(simdfunc(v1, v2), ref);
+-}
+-
+-function compareU(opname, lanefunc) {
+-    let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + U8x16 + U8x16I8x16 +
+-                                      `var fut = u8x16.${opname};` +
+-                                      'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return fut(u8x16i8x16(v1), u8x16i8x16(v2)); } return f'), this);
+-    let a1 = [  -1,2,  -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16].map(x => x << 24 >>> 24);
+-    let a2 = [-128,2,-128,0x7f,   0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,  -1].map(x => x << 24 >>> 24);
+-    let ref = zipmap(a1, a2, lanefunc);
+-    let v1 = SIMD.Int8x16(...a1);
+-    let v2 = SIMD.Int8x16(...a2);
+-    assertEqVecArr(simdfunc(v1, v2), ref);
+-}
+-
+-compareI("equal", (x,y) => x == y);
+-compareU("equal", (x,y) => x == y);
+-compareI("notEqual", (x,y) => x != y);
+-compareU("notEqual", (x,y) => x != y);
+-compareI("lessThan", (x,y) => x < y);
+-compareU("lessThan", (x,y) => x < y);
+-compareI("lessThanOrEqual", (x,y) => x <= y);
+-compareU("lessThanOrEqual", (x,y) => x <= y);
+-compareI("greaterThan", (x,y) => x > y);
+-compareU("greaterThan", (x,y) => x > y);
+-compareI("greaterThanOrEqual", (x,y) => x >= y);
+-compareU("greaterThanOrEqual", (x,y) => x >= y);
+diff --git a/js/src/jit-test/tests/asm.js/testSIMD-bitcasts.js b/js/src/jit-test/tests/asm.js/testSIMD-bitcasts.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/asm.js/testSIMD-bitcasts.js
++++ /dev/null
+@@ -1,84 +0,0 @@
+-load(libdir + "asm.js");
+-load(libdir + "simd.js");
+-load(libdir + "asserts.js");
+-
+-// Set to true to see more JS debugging spew.
+-const DEBUG = false;
+-
+-if (!isSimdAvailable()) {
+-    DEBUG && print("won't run tests as simd extensions aren't activated yet");
+-    quit(0);
+-}
+-
+-// Test all bit-casts and normal loads and stores.
+-var heap = new ArrayBuffer(BUF_MIN);
+-var asU8 = new Uint8Array(heap);
+-var allTypes = [
+-    "Int8x16",
+-    "Int16x8",
+-    "Int32x4",
+-    "Uint8x16",
+-    "Uint16x8",
+-    "Uint32x4",
+-    "Float32x4"
+-];
+-
+-// Generate a load bit-cast store test function that performs:
+-//
+-// function f(a, b) {
+-//     vec = src.load(H, a);
+-//     cast = dst.from«src»Bits(vec);
+-//     store(H, b, cast);
+-// }
+-//
+-// Here, `H` is the heap provided by `heap`.
+-function test_func(src, dst) {
+-    text = `
+-        "use asm";
+-        var src = glob.SIMD.${src};
+-        var dst = glob.SIMD.${dst};
+-        var ld = src.load;
+-        var st = dst.store;
+-        var bc = dst.from${src}Bits;
+-
+-        var H = new glob.Uint8Array(heap);
+-
+-        function f(a, b) {
+-            a = a|0;
+-            b = b|0;
+-
+-            st(H, b, bc(ld(H, a)));
+-        }
+-
+-        return f;
+-    `;
+-    return asmLink(asmCompile('glob', 'ffi', 'heap', text), this, null, heap);
+-}
+-
+-function assertBuf16(a, b) {
+-    for (let i=0; i < 16; i++) {
+-        assertEq(asU8[a+i], asU8[b+i]);
+-    }
+-}
+-
+-for (let src of allTypes) {
+-    for (let dst of allTypes) {
+-        // Skip identity conversions.
+-        if (src == dst) continue;
+-
+-        print(src, dst);
+-        let f = test_func(src, dst);
+-        // Initialize with pseudo-random data.
+-        for (let i = 0; i < 64; i++) {
+-            asU8[i] = (i + 17) * 97;
+-        }
+-
+-        // Aligned load/store.
+-        f(0, 16);
+-        assertBuf16(0, 16);
+-
+-        // Unaligned access.
+-        f(1, 27);
+-        assertBuf16(1, 27);
+-    }
+-}
+diff --git a/js/src/jit-test/tests/asm.js/testSIMD-load-store.js b/js/src/jit-test/tests/asm.js/testSIMD-load-store.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/asm.js/testSIMD-load-store.js
++++ /dev/null
+@@ -1,457 +0,0 @@
+-// |jit-test|
+-load(libdir + "asm.js");
+-load(libdir + "simd.js");
+-load(libdir + "asserts.js");
+-
+-// Avoid pathological --ion-eager compile times due to bails in loops
+-setJitCompilerOption('ion.warmup.trigger', 1000000);
+-
+-// Set to true to see more JS debugging spew
+-const DEBUG = false;
+-
+-if (!isSimdAvailable() || typeof SIMD === 'undefined' || !isAsmJSCompilationAvailable()) {
+-    DEBUG && print("won't run tests as simd extensions aren't activated yet");
+-    quit(0);
+-}
+-
+-const RuntimeError = WebAssembly.RuntimeError;
+-
+-const INT32_MAX = Math.pow(2, 31) - 1;
+-const INT32_MIN = INT32_MAX + 1 | 0;
+-
+-try {
+-
+-// Load / Store
+-var IMPORTS = USE_ASM + 'var H=new glob.Uint8Array(heap); var i4=glob.SIMD.Int32x4; var ci4=i4.check; var load=i4.load; var store=i4.store;';
+-
+-//      Bad number of args
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load();} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3);} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3, 4, 5);} return f");
+-
+-//      Bad type of args
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3, 5);} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, 5.0);} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0.;load(H, i);} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var H2=new glob.Int32Array(heap); function f(){var i=0;load(H2, i)} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var H2=42; function f(){var i=0;load(H2, i)} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;load(H2, i)} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var f4=glob.SIMD.Float32x4; function f(){var i=0;var vec=f4(1,2,3,4); store(H, i, vec)} return f");
+-
+-//      Bad coercions of returned values
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;return load(H, i)|0;} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;return +load(H, i);} return f");
+-
+-//      Literal index constants
+-var buf = new ArrayBuffer(BUF_MIN);
+-var SIZE_TA = BUF_MIN >> 2
+-var asI32 = new Int32Array(buf);
+-asI32[SIZE_TA - 4] = 4;
+-asI32[SIZE_TA - 3] = 3;
+-asI32[SIZE_TA - 2] = 2;
+-asI32[SIZE_TA - 1] = 1;
+-
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, -1);} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1) + ");} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1 - 15) + ");} return f");
+-asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1 - 16) + ");} return f");
+-
+-assertAsmLinkFail(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return ci4(load(H, " + (BUF_MIN - 15) + "));} return f"), this, {}, buf);
+-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return ci4(load(H, " + (BUF_MIN - 16) + "));} return f"), this, {}, buf)(), [4, 3, 2, 1]);
+-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return ci4(load(H, " + BUF_MIN + " - 16 | 0));} return f"), this, {}, buf)(), [4, 3, 2, 1]);
+-
+-var CONSTANT_INDEX = 42;
+-var CONSTANT_BYTE_INDEX = CONSTANT_INDEX << 2;
+-
+-var loadStoreCode = `
+-    "use asm";
+-
+-    var H = new glob.Uint8Array(heap);
+-
+-    var i4 = glob.SIMD.Int32x4;
+-    var i4load = i4.load;
+-    var i4store = i4.store;
+-    var ci4 = i4.check;
+-
+-    var f4 = glob.SIMD.Float32x4;
+-    var f4load = f4.load;
+-    var f4store = f4.store;
+-    var cf4 = f4.check;
+-
+-    function f32l(i) { i=i|0; return cf4(f4load(H, i|0)); }
+-    function f32lcst() { return cf4(f4load(H, ${CONSTANT_BYTE_INDEX})); }
+-    function f32s(i, vec) { i=i|0; vec=cf4(vec); f4store(H, i|0, vec); }
+-    function f32scst(vec) { vec=cf4(vec); f4store(H, ${CONSTANT_BYTE_INDEX}, vec); }
+-
+-    function i32l(i) { i=i|0; return ci4(i4load(H, i|0)); }
+-    function i32lcst() { return ci4(i4load(H, ${CONSTANT_BYTE_INDEX})); }
+-    function i32s(i, vec) { i=i|0; vec=ci4(vec); i4store(H, i|0, vec); }
+-    function i32scst(vec) { vec=ci4(vec); i4store(H, ${CONSTANT_BYTE_INDEX}, vec); }
+-
+-    function f32lbndcheck(i) {
+-        i=i|0;
+-        if ((i|0) > ${CONSTANT_BYTE_INDEX}) i=${CONSTANT_BYTE_INDEX};
+-        if ((i|0) < 0) i = 0;
+-        return cf4(f4load(H, i|0));
+-    }
+-    function f32sbndcheck(i, vec) {
+-        i=i|0;
+-        vec=cf4(vec);
+-        if ((i|0) > ${CONSTANT_BYTE_INDEX}) i=${CONSTANT_BYTE_INDEX};
+-        if ((i|0) < 0) i = 0;
+-        return cf4(f4store(H, i|0, vec));
+-    }
+-
+-    return {
+-        f32l: f32l,
+-        f32lcst: f32lcst,
+-        f32s: f32s,
+-        f32scst: f32scst,
+-        f32lbndcheck: f32lbndcheck,
+-        f32sbndcheck: f32sbndcheck,
+-        i32l: i32l,
+-        i32lcst: i32lcst,
+-        i32s: i32s,
+-        i32scst: i32scst
+-    }
+-`;
+-
+-const SIZE = 0x8000;
+-
+-var F32 = new Float32Array(SIZE);
+-var reset = function() {
+-    for (var i = 0; i < SIZE; i++)
+-        F32[i] = i + 1;
+-};
+-reset();
+-
+-var buf = F32.buffer;
+-var m = asmLink(asmCompile('glob', 'ffi', 'heap', loadStoreCode), this, null, buf);
+-
+-function slice(TA, i, n) { return Array.prototype.slice.call(TA, i, i + n); }
+-
+-// Float32x4.load
+-function f32l(n) { return m.f32l((n|0) << 2 | 0); };
+-
+-//      Correct accesses
+-assertEqX4(f32l(0), slice(F32, 0, 4));
+-assertEqX4(f32l(1), slice(F32, 1, 4));
+-assertEqX4(f32l(SIZE - 4), slice(F32, SIZE - 4, 4));
+-
+-assertEqX4(m.f32lcst(), slice(F32, CONSTANT_INDEX, 4));
+-assertEqX4(m.f32lbndcheck(CONSTANT_BYTE_INDEX), slice(F32, CONSTANT_INDEX, 4));
+-
+-//      OOB
+-assertThrowsInstanceOf(() => f32l(-1), RuntimeError);
+-assertThrowsInstanceOf(() => f32l(SIZE), RuntimeError);
+-assertThrowsInstanceOf(() => f32l(SIZE - 1), RuntimeError);
+-assertThrowsInstanceOf(() => f32l(SIZE - 2), RuntimeError);
+-assertThrowsInstanceOf(() => f32l(SIZE - 3), RuntimeError);
+-
+-var code = `
+-    "use asm";
+-    var f4 = glob.SIMD.Float32x4;
+-    var f4l = f4.load;
+-    var u8 = new glob.Uint8Array(heap);
+-
+-    function g(x) {
+-        x = x|0;
+-        // set a constraint on the size of the heap
+-        var ptr = 0;
+-        ptr = u8[0xFFFF] | 0;
+-        // give a precise range to x
+-        x = (x>>0) > 5 ? 5 : x;
+-        x = (x>>0) < 0 ? 0 : x;
+-        // ptr value gets a precise range but the bounds check shouldn't get
+-        // eliminated.
+-        return f4l(u8, 0xFFFA + x | 0);
+-    }
+-
+-    return g;
+-`;
+-assertThrowsInstanceOf(() => asmLink(asmCompile('glob', 'ffi', 'heap', code), this, {}, new ArrayBuffer(0x10000))(0), RuntimeError);
+-
+-// Float32x4.store
+-function f32s(n, v) { return m.f32s((n|0) << 2 | 0, v); };
+-
+-var vec  = SIMD.Float32x4(5,6,7,8);
+-var vec2 = SIMD.Float32x4(0,1,2,3);
+-var vecWithNaN = SIMD.Float32x4(NaN, 2, NaN, 4);
+-
+-reset();
+-f32s(0, vec);
+-assertEqX4(vec, slice(F32, 0, 4));
+-
+-reset();
+-f32s(0, vec2);
+-assertEqX4(vec2, slice(F32, 0, 4));
+-
+-reset();
+-f32s(4, vec);
+-assertEqX4(vec, slice(F32, 4, 4));
+-
+-reset();
+-f32s(4, vecWithNaN);
+-assertEqX4(vecWithNaN, slice(F32, 4, 4));
+-
+-reset();
+-m.f32scst(vec2);
+-assertEqX4(vec2, slice(F32, CONSTANT_INDEX, 4));
+-
+-reset();
+-m.f32sbndcheck(CONSTANT_BYTE_INDEX, vec);
+-assertEqX4(vec, slice(F32, CONSTANT_INDEX, 4));
+-
+-//      OOB
+-reset();
+-assertThrowsInstanceOf(() => f32s(SIZE - 3, vec), RuntimeError);
+-assertThrowsInstanceOf(() => f32s(SIZE - 2, vec), RuntimeError);
+-assertThrowsInstanceOf(() => f32s(SIZE - 1, vec), RuntimeError);
+-assertThrowsInstanceOf(() => f32s(SIZE, vec), RuntimeError);
+-for (var i = 0; i < SIZE; i++)
+-    assertEq(F32[i], i + 1);
+-
+-// Int32x4.load
+-var I32 = new Int32Array(buf);
+-reset = function () {
+-    for (var i = 0; i < SIZE; i++)
+-        I32[i] = i + 1;
+-};
+-reset();
+-
+-function i32(n) { return m.i32l((n|0) << 2 | 0); };
+-
+-//      Correct accesses
+-assertEqX4(i32(0), slice(I32, 0, 4));
+-assertEqX4(i32(1), slice(I32, 1, 4));
+-assertEqX4(i32(SIZE - 4), slice(I32, SIZE - 4, 4));
+-
+-assertEqX4(m.i32lcst(), slice(I32, CONSTANT_INDEX, 4));
+-
+-//      OOB
+-assertThrowsInstanceOf(() => i32(-1), RuntimeError);
+-assertThrowsInstanceOf(() => i32(SIZE), RuntimeError);
+-assertThrowsInstanceOf(() => i32(SIZE - 1), RuntimeError);
+-assertThrowsInstanceOf(() => i32(SIZE - 2), RuntimeError);
+-assertThrowsInstanceOf(() => i32(SIZE - 3), RuntimeError);
+-
+-// Int32x4.store
+-function i32s(n, v) { return m.i32s((n|0) << 2 | 0, v); };
+-
+-var vec  = SIMD.Int32x4(5,6,7,8);
+-var vec2 = SIMD.Int32x4(0,1,2,3);
+-
+-reset();
+-i32s(0, vec);
+-assertEqX4(vec, slice(I32, 0, 4));
+-
+-reset();
+-i32s(0, vec2);
+-assertEqX4(vec2, slice(I32, 0, 4));
+-
+-reset();
+-i32s(4, vec);
+-assertEqX4(vec, slice(I32, 4, 4));
+-
+-reset();
+-m.i32scst(vec2);
+-assertEqX4(vec2, slice(I32, CONSTANT_INDEX, 4));
+-
+-//      OOB
+-reset();
+-assertThrowsInstanceOf(() => i32s(SIZE - 3, vec), RuntimeError);
+-assertThrowsInstanceOf(() => i32s(SIZE - 2, vec), RuntimeError);
+-assertThrowsInstanceOf(() => i32s(SIZE - 1, vec), RuntimeError);
+-assertThrowsInstanceOf(() => i32s(SIZE - 0, vec), RuntimeError);
+-for (var i = 0; i < SIZE; i++)
+-    assertEq(I32[i], i + 1);
+-
+-// Partial loads and stores
+-(function() {
+-
+-//      Variable indexes
+-function MakeCodeFor(typeName) {
+-    return `
+-    "use asm";
+-    var type = glob.SIMD.${typeName};
+-    var c = type.check;
+-
+-    var l1 = type.load1;
+-    var l2 = type.load2;
+-
+-    var s1 = type.store1;
+-    var s2 = type.store2;
+-
+-    var u8 = new glob.Uint8Array(heap);
+-
+-    function load1(i) { i=i|0; return l1(u8, i); }
+-    function load2(i) { i=i|0; return l2(u8, i); }
+-
+-    function loadCst1() { return l1(u8, 41 << 2); }
+-    function loadCst2() { return l2(u8, 41 << 2); }
+-
+-    function store1(i, x) { i=i|0; x=c(x); return s1(u8, i, x); }
+-    function store2(i, x) { i=i|0; x=c(x); return s2(u8, i, x); }
+-
+-    function storeCst1(x) { x=c(x); return s1(u8, 41 << 2, x); }
+-    function storeCst2(x) { x=c(x); return s2(u8, 41 << 2, x); }
+-
+-    return {
+-        load1: load1,
+-        load2: load2,
+-        loadCst1: loadCst1,
+-        loadCst2: loadCst2,
+-        store1: store1,
+-        store2: store2,
+-        storeCst1: storeCst1,
+-        storeCst2: storeCst2,
+-    }
+-`;
+-}
+-
+-var SIZE = 0x10000;
+-
+-function TestPartialLoads(m, typedArray, x, y, z, w) {
+-    // Fill array with predictable values
+-    for (var i = 0; i < SIZE; i += 4) {
+-        typedArray[i] =     x(i);
+-        typedArray[i + 1] = y(i);
+-        typedArray[i + 2] = z(i);
+-        typedArray[i + 3] = w(i);
+-    }
+-
+-    // Test correct loads
+-    var i = 0, j = 0; // i in elems, j in bytes
+-    assertEqX4(m.load1(j), [x(i), 0, 0, 0]);
+-    assertEqX4(m.load2(j), [x(i), y(i), 0, 0]);
+-
+-    j += 4;
+-    assertEqX4(m.load1(j), [y(i), 0, 0, 0]);
+-    assertEqX4(m.load2(j), [y(i), z(i), 0, 0]);
+-
+-    j += 4;
+-    assertEqX4(m.load1(j), [z(i), 0, 0, 0]);
+-    assertEqX4(m.load2(j), [z(i), w(i), 0, 0]);
+-
+-    j += 4;
+-    assertEqX4(m.load1(j), [w(i), 0, 0, 0]);
+-    assertEqX4(m.load2(j), [w(i), x(i+4), 0, 0]);
+-
+-    j += 4;
+-    i += 4;
+-    assertEqX4(m.load1(j), [x(i), 0, 0, 0]);
+-    assertEqX4(m.load2(j), [x(i), y(i), 0, 0]);
+-
+-    // Test loads with constant indexes (41)
+-    assertEqX4(m.loadCst1(), [y(40), 0, 0, 0]);
+-    assertEqX4(m.loadCst2(), [y(40), z(40), 0, 0]);
+-
+-    // Test limit and OOB accesses
+-    assertEqX4(m.load1((SIZE - 1) << 2), [w(SIZE - 4), 0, 0, 0]);
+-    assertThrowsInstanceOf(() => m.load1(((SIZE - 1) << 2) + 1), RuntimeError);
+-
+-    assertEqX4(m.load2((SIZE - 2) << 2), [z(SIZE - 4), w(SIZE - 4), 0, 0]);
+-    assertThrowsInstanceOf(() => m.load2(((SIZE - 2) << 2) + 1), RuntimeError);
+-}
+-
+-// Partial stores
+-function TestPartialStores(m, typedArray, typeName, x, y, z, w) {
+-    var val = SIMD[typeName](x, y, z, w);
+-
+-    function Reset() {
+-        for (var i = 0; i < SIZE; i++)
+-            typedArray[i] = i + 1;
+-    }
+-    function CheckNotModified(low, high) {
+-        for (var i = low; i < high; i++)
+-            assertEq(typedArray[i], i + 1);
+-    }
+-
+-    function TestStore1(i) {
+-        m.store1(i, val);
+-        CheckNotModified(0, i >> 2);
+-        assertEq(typedArray[i >> 2], x);
+-        CheckNotModified((i >> 2) + 1, SIZE);
+-        typedArray[i >> 2] = (i >> 2) + 1;
+-    }
+-
+-    function TestStore2(i) {
+-        m.store2(i, val);
+-        CheckNotModified(0, i >> 2);
+-        assertEq(typedArray[i >> 2], x);
+-        assertEq(typedArray[(i >> 2) + 1], y);
+-        CheckNotModified((i >> 2) + 2, SIZE);
+-        typedArray[i >> 2] = (i >> 2) + 1;
+-        typedArray[(i >> 2) + 1] = (i >> 2) + 2;
+-    }
+-
+-    function TestOOBStore(f) {
+-        assertThrowsInstanceOf(f, RuntimeError);
+-        CheckNotModified(0, SIZE);
+-    }
+-
+-    Reset();
+-
+-    TestStore1(0);
+-    TestStore1(1 << 2);
+-    TestStore1(2 << 2);
+-    TestStore1(3 << 2);
+-    TestStore1(1337 << 2);
+-
+-    var i = (SIZE - 1) << 2;
+-    TestStore1(i);
+-    TestOOBStore(() => m.store1(i + 1, val));
+-    TestOOBStore(() => m.store1(-1, val));
+-
+-    TestStore2(0);
+-    TestStore2(1 << 2);
+-    TestStore2(2 << 2);
+-    TestStore2(3 << 2);
+-    TestStore2(1337 << 2);
+-
+-    var i = (SIZE - 2) << 2;
+-    TestStore2(i);
+-    TestOOBStore(() => m.store2(i + 1, val));
+-    TestOOBStore(() => m.store2(-1, val));
+-
+-    // Constant indexes (41)
+-    m.storeCst1(val);
+-    CheckNotModified(0, 41);
+-    assertEq(typedArray[41], x);
+-    CheckNotModified(42, SIZE);
+-    typedArray[41] = 42;
+-
+-    m.storeCst2(val);
+-    CheckNotModified(0, 41);
+-    assertEq(typedArray[41], x);
+-    assertEq(typedArray[42], y);
+-    CheckNotModified(43, SIZE);
+-    typedArray[41] = 42;
+-    typedArray[42] = 43;
+-}
+-
+-var f32 = new Float32Array(SIZE);
+-var mFloat32x4 = asmLink(asmCompile('glob', 'ffi', 'heap', MakeCodeFor('Float32x4')), this, null, f32.buffer);
+-
+-TestPartialLoads(mFloat32x4, f32,
+-            (i) => i + 1,
+-            (i) => Math.fround(13.37),
+-            (i) => Math.fround(1/i),
+-            (i) => Math.fround(Math.sqrt(0x2000 - i)));
+-
+-TestPartialStores(mFloat32x4, f32, 'Float32x4', 42, -0, NaN, 0.1337);
+-
+-var i32 = new Int32Array(f32.buffer);
+-var mInt32x4 = asmLink(asmCompile('glob', 'ffi', 'heap', MakeCodeFor('Int32x4')), this, null, i32.buffer);
+-
+-TestPartialLoads(mInt32x4, i32,
+-            (i) => i + 1 | 0,
+-            (i) => -i | 0,
+-            (i) => i * 2 | 0,
+-            (i) => 42);
+-
+-TestPartialStores(mInt32x4, i32, 'Int32x4', 42, -3, 13, 37);
+-
+-})();
+-
+-} catch (e) { print('stack: ', e.stack); throw e }
+diff --git a/js/src/jit-test/tests/asm.js/testSIMD.js b/js/src/jit-test/tests/asm.js/testSIMD.js
+deleted file mode 100644
+--- a/js/src/jit-test/tests/asm.js/testSIMD.js
++++ /dev/null
+@@ -1,1575 +0,0 @@
+-load(libdir + "asm.js");
+-load(libdir + "simd.js");
+-load(libdir + "asserts.js");
+-var heap = new ArrayBuffer(0x10000);
+-
+-// Avoid pathological --ion-eager compile times due to bails in loops
+-setJitCompilerOption('ion.warmup.trigger', 1000000);
+-
+-// Set to true to see more JS debugging spew
+-const DEBUG = false;
+-
+-if (!isSimdAvailable() || typeof SIMD === 'undefined' || !isAsmJSCompilationAvailable()) {
+-    DEBUG && print("won't run tests as simd extensions aren't activated yet");
+-    quit(0);
+-}
+-
+-const I32 = 'var i4 = glob.SIMD.Int32x4;'
+-const CI32 = 'var ci4 = i4.check;'
+-const I32A = 'var i4a = i4.add;'
+-const I32S = 'var i4s = i4.sub;'
+-const I32M = 'var i4m = i4.mul;'
+-const I32U32 = 'var i4u4 = i4.fromUint32x4Bits;'
+-
+-const U32 = 'var u4 = glob.SIMD.Uint32x4;'
+-const CU32 = 'var cu4 = u4.check;'
+-const U32A = 'var u4a = u4.add;'
+-const U32S = 'var u4s = u4.sub;'
+-const U32M = 'var u4m = u4.mul;'
+-const U32I32 = 'var u4i4 = u4.fromInt32x4Bits;'
+-
+-const F32 = 'var f4 = glob.SIMD.Float32x4;'
+-const CF32 = 'var cf4 = f4.check;'
+-const F32A = 'var f4a = f4.add;'
+-const F32S = 'var f4s = f4.sub;'
+-const F32M = 'var f4m = f4.mul;'
+-const F32D = 'var f4d = f4.div;'
+-const FROUND = 'var f32=glob.Math.fround;'
+-const B32 = 'var b4 = glob.SIMD.Bool32x4;'
+-const CB32 = 'var cb4 = b4.check;'
+-
+-const EXTI4 = 'var e = i4.extractLane;'
+-const EXTU4 = 'var e = u4.extractLane;'
+-const EXTF4 = 'var e = f4.extractLane;'
+-const EXTB4 = 'var e = b4.extractLane;'
+-
+-// anyTrue / allTrue on boolean vectors.
+-const ANYB4 = 'var anyt=b4.anyTrue;'
+-const ALLB4 = 'var allt=b4.allTrue;'
+-
+-const INT32_MAX = Math.pow(2, 31) - 1;
+-const INT32_MIN = INT32_MAX + 1 | 0;
+-const UINT32_MAX = Math.pow(2, 32) - 1;
+-
+-const assertEqFFI = {assertEq:assertEq};
+-
+-function CheckI4(header, code, expected) {
+-    // code needs to contain a local called x
+-    header = USE_ASM + I32 + CI32 + EXTI4 + F32 + header;
+-    var observed = asmLink(asmCompile('glob', header + ';function f() {' + code + ';return ci4(x)} return f'), this)();
+-    assertEqX4(observed, expected);
+-}
+-
+-function CheckU4(header, code, expected) {
+-    // code needs to contain a local called x.
+-    header = USE_ASM + U32 + CU32 + EXTU4 + I32 + CI32 + I32U32 + header;
+-    var observed = asmLink(asmCompile('glob', header + ';function f() {' + code + ';return ci4(i4u4(x))} return f'), this)();
+-    // We can't return an unsigned SIMD type. Return Int32x4, convert to unsigned here.
+-    observed = SIMD.Uint32x4.fromInt32x4Bits(observed)
+-    assertEqX4(observed, expected);
+-}
+-
+-function CheckF4(header, code, expected) {
+-    // code needs to contain a local called x
+-    header = USE_ASM + F32 + CF32 + EXTF4 + header;
+-    var observed = asmLink(asmCompile('glob', header + ';function f() {' + code + ';return cf4(x)} return f'), this)();
+-    assertEqX4(observed, expected.map(Math.fround));
+-}
+-
+-function CheckB4(header, code, expected) {
+-    // code needs to contain a local called x
+-    header = USE_ASM + B32 + CB32 + header;
+-    var observed = asmLink(asmCompile('glob', header + ';function f() {' + code + ';return cb4(x)} return f'), this)();
+-    assertEqX4(observed, expected);
+-}
+-
+-try {
+-
+-// 1. Constructors
+-
+-// 1.1 Compilation
+-assertAsmTypeFail('glob', USE_ASM + "var i4 = Int32x4               ; return {}") ;
+-assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.Int32x4          ; return {}") ;
+-assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.globglob.Int32x4 ; return {}") ;
+-assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.Math.Int32x4     ; return {}") ;
+-assertAsmTypeFail('glob', USE_ASM + "var herd = glob.SIMD.ponyX4    ; return {}") ;
+-
+-// 1.2 Linking
+-assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: 42});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: Math.fround});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {}});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: 42}});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: Math.fround}});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: new Array}});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: SIMD.Float32x4}});
+-
+-var [Type, int32] = [TypedObject.StructType, TypedObject.int32];
+-var MyStruct = new Type({'x': int32, 'y': int32, 'z': int32, 'w': int32});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: MyStruct}});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: new MyStruct}});
+-
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {} return f"), {SIMD:{Int32x4: SIMD.Int32x4}})(), undefined);
+-
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {Float32x4: 42}});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {Float32x4: Math.fround}});
+-assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {Float32x4: new Array}});
+-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {} return f"), {SIMD:{Float32x4: SIMD.Float32x4}})(), undefined);
+-
+-// 1.3 Correctness
+-// 1.3.1 Local variables declarations
+-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Int32x4(1,2,3,4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4();} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2, 3);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2, 3, 4.0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2.0, 3, 4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4a(1,2,3,4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,2+2|0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3," + (INT32_MIN - 1) + ");} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(i4(1,2,3,4));} return f");
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,4);} return f"), this)(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3," + (INT32_MAX + 1) + ");} return f"), this)(), undefined);
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4();} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,3.);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,f32(3.),4.);} return f");
+-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,3.,4.);} return f"), this)(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4);} return f"), this)(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3," + (INT32_MIN - 1) + ");} return f"), this)(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3," + (INT32_MAX + 1) + ");} return f"), this)(), undefined);
+-
+-// Places where NumLit can creep in
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {i=i|0; var z=0; switch(i|0) {case i4(1,2,3,4): z=1; break; default: z=2; break;}} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {i=i|0; var z=0; return i * i4(1,2,3,4) | 0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {var x=i4(1,2,3,i4(4,5,6,7))} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "function f(i) {var x=i4(1,2,3,f4(4,5,6,7))} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "function f(i) {var x=f4(1,2,3,i4(4,5,6,7))} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {return +i4(1,2,3,4)} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {return i4(1,2,3,4)|0} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + FROUND + "function f() {return f32(i4(1,2,3,4))} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + "function f() {return cf4(i4(1,2,3,4))} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {return +f4(1,2,3,4)} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {return f4(1,2,3,4)|0} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + FROUND + "function f() {return f32(f4(1,2,3,4))} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + F32 + "function f() {return ci4(f4(1,2,3,4))} return f");
+-
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {return i4(1,2,3,4);} return f"), this)(), [1, 2, 3, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f() {return ci4(i4(1,2,3,4));} return f"), this)(), [1, 2, 3, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {return f4(1,2,3,4);} return f"), this)(), [1, 2, 3, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f() {return cf4(f4(1,2,3,4));} return f"), this)(), [1, 2, 3, 4]);
+-
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {i4(1,2,3,4);} return f"), this)(), undefined);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {f4(1,2,3,4);} return f"), this)(), undefined);
+-
+-// Int32x4 ctor should accept int?
+-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + I32 + CI32 + "var i32=new glob.Int32Array(heap); function f(i) {i=i|0; return ci4(i4(i32[i>>2], 2, 3, 4))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [0, 2, 3, 4]);
+-// Float32x4 ctor should accept floatish (i.e. float || float? || floatish) and doublit
+-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + F32 + CF32 + FROUND + "var h=new glob.Float32Array(heap); function f(i) {i=i|0; return cf4(f4(h[i>>2], f32(2), f32(3), f32(4)))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [NaN, 2, 3, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + FROUND + "function f(i) {i=i|0; return cf4(f4(f32(1) + f32(2), f32(2), f32(3), f32(4)))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [3, 2, 3, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + FROUND + "function f(i) {i=i|0; return cf4(f4(f32(1) + f32(2), 2.0, 3.0, 4.0))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [3, 2, 3, 4]);
+-// Bool32x4 ctor should accept int?
+-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + B32 + CB32 + "var i32=new glob.Int32Array(heap); function f(i) {i=i|0; return cb4(b4(i32[i>>2], 2, 0, 4))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [false, true, false, true]);
+-
+-// 1.3.2 Getters - Reading values out of lanes
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=1; return e(x,1) | 0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=1; return e(x + x, 1) | 0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=1.; return e(x, 1) | 0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + EXTF4 + "var f32=glob.Math.fround;" + I32 + "function f() {var x=f32(1); return e(x, 1) | 0;} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return x.length|0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=e(i4(1,2,3,4),1); return x|0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return (e(x,0) > (1>>>0)) | 0;} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return e(x,-1) | 0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return e(x,4) | 0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return e(x,.5) | 0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return e(x,x) | 0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTF4 + "function f() {var x=i4(1,2,3,4); return e(x,0) | 0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); var i=0; return e(x,i) | 0;} return f");
+-
+-// The signMask property is no longer supported. Replaced by allTrue / anyTrue.
+-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=42; return x.signMask;} return f");
+-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=42.; return x.signMask;} return f");
+-assertAsmTypeFail('glob', USE_ASM + FROUND + "function f() {var x=f32(42.); return x.signMask;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + 'function f() { var x=i4(1,2,3,4); return x.signMask | 0 } return f');
+-assertAsmTypeFail('glob', USE_ASM + U32 + 'function f() { var x=u4(1,2,3,4); return x.signMask | 0 } return f');
+-assertAsmTypeFail('glob', USE_ASM + F32 + FROUND + 'var Infinity = glob.Infinity; function f() { var x=f4(0,0,0,0); x=f4(f32(1), f32(-13.37), f32(42), f32(-Infinity)); return x.signMask | 0 } return f');
+-
+-// Check lane extraction.
+-function CheckLanes(innerBody, type, expected) {
+-    var coerceBefore, coerceAfter, extractLane;
+-
+-    if (type === SIMD.Int32x4) {
+-        coerceBefore = '';
+-        coerceAfter = '|0';
+-        extractLane = 'ei';
+-    } else if (type === SIMD.Uint32x4) {
+-        // Coerce Uint32 lanes to double so they can be legally returned.
+-        coerceBefore = '+';
+-        coerceAfter = '';
+-        extractLane = 'eu';
+-    } else if (type === SIMD.Float32x4) {
+-        coerceBefore = '+';
+-        coerceAfter = '';
+-        extractLane = 'ef';
+-        expected = expected.map(Math.fround);
+-    } else if (type === SIMD.Bool32x4) {
+-        coerceBefore = '';
+-        coerceAfter = '|0';
+-        extractLane = 'eb';
+-    } else throw "unexpected type in CheckLanes";
+-
+-    for (var i = 0; i < 4; i++) {
+-        var lane = i;
+-        var laneCheckCode = `"use asm";
+-            var i4=glob.SIMD.Int32x4;
+-            var u4=glob.SIMD.Uint32x4;
+-            var f4=glob.SIMD.Float32x4;
+-            var b4=glob.SIMD.Bool32x4;
+-            var ei=i4.extractLane;
+-            var eu=u4.extractLane;
+-            var ef=f4.extractLane;
+-            var eb=b4.extractLane;
+-            function f() {${innerBody}; return ${coerceBefore}${extractLane}(x, ${lane})${coerceAfter} }
+-            return f;`;
+-        assertEq(asmLink(asmCompile('glob', laneCheckCode), this)(), expected[i]);
+-    }
+-}
+-function CheckLanesI4(innerBody, expected) { return CheckLanes(innerBody, SIMD.Int32x4, expected); }
+-function CheckLanesU4(innerBody, expected) { return CheckLanes(innerBody, SIMD.Uint32x4, expected); }
+-function CheckLanesF4(innerBody, expected) { return CheckLanes(innerBody, SIMD.Float32x4, expected); }
+-function CheckLanesB4(innerBody, expected) { return CheckLanes(innerBody, SIMD.Bool32x4, expected); }
+-
+-CheckLanesI4('var x=i4(0,0,0,0);', [0,0,0,0]);
+-CheckLanesI4('var x=i4(1,2,3,4);', [1,2,3,4]);
+-CheckLanesI4('var x=i4(' + INT32_MIN + ',2,3,' + INT32_MAX + ')', [INT32_MIN,2,3,INT32_MAX]);
+-CheckLanesI4('var x=i4(1,2,3,4); var y=i4(5,6,7,8)', [1,2,3,4]);
+-CheckLanesI4('var a=1; var b=i4(9,8,7,6); var c=13.37; var x=i4(1,2,3,4); var y=i4(5,6,7,8)', [1,2,3,4]);
+-CheckLanesI4('var y=i4(5,6,7,8); var x=i4(1,2,3,4)', [1,2,3,4]);
+-
+-CheckLanesU4('var x=u4(0,0,0,0);', [0,0,0,0]);
+-CheckLanesU4('var x=u4(1,2,3,4000000000);', [1,2,3,4000000000]);
+-CheckLanesU4('var x=u4(' + INT32_MIN + ',2,3,' + UINT32_MAX + ')', [INT32_MIN>>>0,2,3,UINT32_MAX]);
+-CheckLanesU4('var x=u4(1,2,3,4); var y=u4(5,6,7,8)', [1,2,3,4]);
+-CheckLanesU4('var a=1; var b=u4(9,8,7,6); var c=13.37; var x=u4(1,2,3,4); var y=u4(5,6,7,8)', [1,2,3,4]);
+-CheckLanesU4('var y=u4(5,6,7,8); var x=u4(1,2,3,4)', [1,2,3,4]);
+-
+-CheckLanesF4('var x=f4(' + INT32_MAX + ', 2, 3, ' + INT32_MIN + ')', [INT32_MAX, 2, 3, INT32_MIN]);
+-CheckLanesF4('var x=f4(' + (INT32_MAX + 1) + ', 2, 3, 4)', [INT32_MAX + 1, 2, 3, 4]);
+-CheckLanesF4('var x=f4(1.3, 2.4, 3.5, 98.76)', [1.3, 2.4, 3.5, 98.76]);
+-CheckLanesF4('var x=f4(13.37, 2., 3., -0)', [13.37, 2, 3, -0]);
+-
+-CheckLanesB4('var x=b4(0,0,0,0);', [0,0,0,0]);
+-CheckLanesB4('var x=b4(0,1,0,0);', [0,1,0,0]);
+-CheckLanesB4('var x=b4(0,2,0,0);', [0,1,0,0]);
+-CheckLanesB4('var x=b4(-1,0,1,-1);', [1,0,1,1]);
+-
+-// 1.3.3. Variable assignments
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4();} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1.0, 2, 3, 4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2.0, 3, 4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3.0, 4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3, 4.0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3, x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); var c=4.0; x=i4(1, 2, 3, +c);} return f");
+-
+-assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + I32 + "var i32=new glob.Int32Array(heap); function f() {var x=i4(1,2,3,4); i32[0] = x;} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + I32 + "var i32=new glob.Int32Array(heap); function f() {var x=i4(1,2,3,4); x = i32[0];} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + F32 + "var f32=new glob.Float32Array(heap); function f() {var x=f4(1,2,3,4); f32[0] = x;} return f");
+-assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + F32 + "var f32=new glob.Int32Array(heap); function f() {var x=f4(1,2,3,4); x = f32[0];} return f");
+-
+-CheckI4('', 'var x=i4(1,2,3,4); x=i4(5,6,7,8)', [5, 6, 7, 8]);
+-CheckI4('', 'var x=i4(1,2,3,4); var c=6; x=i4(5,c|0,7,8)', [5, 6, 7, 8]);
+-CheckI4('', 'var x=i4(8,7,6,5); x=i4(e(x,3)|0,e(x,2)|0,e(x,1)|0,e(x,0)|0)', [5, 6, 7, 8]);
+-
+-CheckU4('', 'var x=u4(1,2,3,4); x=u4(5,6,7,4000000000)', [5, 6, 7, 4000000000]);
+-CheckU4('', 'var x=u4(1,2,3,4); var c=6; x=u4(5,c|0,7,8)', [5, 6, 7, 8]);
+-CheckU4('', 'var x=u4(8,7,6,5); x=u4(e(x,3)|0,e(x,2)|0,e(x,1)|0,e(x,0)|0)', [5, 6, 7, 8]);
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4; x=f4(1,2,3,c);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4; x=f4(1.,2.,3.,c);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4.; x=f4(1,2,3,c);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4.; x=f4(1.,2.,3.,c);} return f");
+-
+-CheckF4(FROUND, 'var x=f4(1,2,3,4); var y=f32(7.); x=f4(f32(5),f32(6),y,f32(8))', [5, 6, 7, 8]);
+-CheckF4(FROUND, 'var x=f4(1,2,3,4); x=f4(f32(5),f32(6),f32(7),f32(8))', [5, 6, 7, 8]);
+-CheckF4(FROUND, 'var x=f4(1,2,3,4); x=f4(f32(5.),f32(6.),f32(7.),f32(8.))', [5, 6, 7, 8]);
+-CheckF4('', 'var x=f4(1.,2.,3.,4.); x=f4(5.,6.,7.,8.)', [5, 6, 7, 8]);
+-CheckF4('', 'var x=f4(1.,2.,3.,4.); x=f4(1,2,3,4)', [1, 2, 3, 4]);
+-CheckF4(FROUND, 'var x=f4(1.,2.,3.,4.); var y=f32(7.); x=f4(9, 4, 2, 1)', [9, 4, 2, 1]);
+-CheckF4('', 'var x=f4(8.,7.,6.,5.); x=f4(e(x,3),e(x,2),e(x,1),e(x,0))', [5, 6, 7, 8]);
+-
+-// Optimization for all lanes from the same definition.
+-CheckI4('', 'var x=i4(1,2,3,4); var c=6; x=i4(c|0,c|0,c|0,c|0)', [6, 6, 6, 6]);
+-CheckF4(FROUND, 'var x=f4(1,2,3,4); var y=f32(7.); x=f4(y,y,y,y)', [7, 7, 7, 7]);
+-CheckI4('', 'var x=i4(1,2,3,4); var c=0; c=e(x,3)|0; x=i4(c,c,c,c)', [4, 4, 4, 4]);
+-CheckF4(FROUND, 'var x=f4(1,2,3,4); var y=f32(0); y=e(x,2); x=f4(y,y,y,y)', [3, 3, 3, 3]);
+-CheckI4('', 'var x=i4(1,2,3,4); var c=0; var d=0; c=e(x,3)|0; d=e(x,3)|0; x=i4(c,d,d,c)', [4, 4, 4, 4]);
+-CheckF4(FROUND, 'var x=f4(1,2,3,4); var y=f32(0); var z=f32(0); y=e(x,2); z=e(x,2); x=f4(y,z,y,z)', [3, 3, 3, 3]);
+-
+-// Uses in ternary conditionals
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4; c=x?c:c;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4; x=1?x:c;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4; x=1?c:x;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + I32 + "function f() {var x=f4(1,2,3,4); var y=i4(1,2,3,4); x=1?x:y;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + I32 + "function f() {var x=f4(1,2,3,4); var y=i4(1,2,3,4); x=1?y:y;} return f");
+-assertAsmTypeFail('glob', USE_ASM + B32 + I32 + "function f() {var x=b4(1,2,3,4); var y=i4(1,2,3,4); x=1?y:y;} return f");
+-assertAsmTypeFail('glob', USE_ASM + U32 + I32 + "function f() {var x=u4(1,2,3,4); var y=i4(1,2,3,4); x=1?y:y;} return f");
+-assertAsmTypeFail('glob', USE_ASM + U32 + I32 + "function f() {var x=i4(1,2,3,4); var y=u4(1,2,3,4); x=1?y:y;} return f");
+-
+-CheckF4('', 'var x=f4(1,2,3,4); var y=f4(4,3,2,1); x=3?y:x', [4, 3, 2, 1]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(x) {x=x|0; var v=f4(1,2,3,4); var w=f4(5,6,7,8); return cf4(x?w:v);} return f"), this)(1), [5,6,7,8]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(v) {v=cf4(v); var w=f4(5,6,7,8); return cf4(4?w:v);} return f"), this)(SIMD.Float32x4(1,2,3,4)), [5,6,7,8]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(v, x) {v=cf4(v); x=x|0; var w=f4(5,6,7,8); return cf4(x?w:v);} return f"), this)(SIMD.Float32x4(1,2,3,4), 0), [1,2,3,4]);
+-
+-CheckI4('', 'var x=i4(1,2,3,4); var y=i4(4,3,2,1); x=e(x,0)?y:x', [4, 3, 2, 1]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(x) {x=x|0; var v=i4(1,2,3,4); var w=i4(5,6,7,8); return ci4(x?w:v);} return f"), this)(1), [5,6,7,8]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(v) {v=ci4(v); var w=i4(5,6,7,8); return ci4(4?w:v);} return f"), this)(SIMD.Int32x4(1,2,3,4)), [5,6,7,8]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(v, x) {v=ci4(v); x=x|0; var w=i4(5,6,7,8); return ci4(x?w:v);} return f"), this)(SIMD.Int32x4(1,2,3,4), 0), [1,2,3,4]);
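+-// [Editorial note, not in the original test] The pattern the failures above
+-// check: in a SIMD-typed ternary the condition must be an int and both arms
+-// must have the same SIMD type. With hypothetical locals cond, w, v, c:
+-//   x = cond ? w : v;   // ok when cond is int and w, v are both Float32x4
+-//   x = cond ? w : c;   // type error when the arms disagree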
+-
+-// Unsigned SIMD types can't be function arguments or return values.
+-assertAsmTypeFail('glob', USE_ASM + U32 + CU32 + "function f(x) {x=cu4(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U32 + CU32 + "function f() {x=u4(0,0,0,0); return cu4(x);} return f");
+-
+-// 1.3.4 Return values
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {var x=1; return ci4(x)} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {var x=1; return ci4(x + x)} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {var x=1.; return ci4(x)} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + FROUND + "function f() {var x=f32(1.); return ci4(x)} return f");
+-
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f() {var x=i4(1,2,3,4); return ci4(x)} return f"), this)(), [1,2,3,4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f() {var x=f4(1,2,3,4); return cf4(x)} return f"), this)(), [1,2,3,4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + "function f() {var x=b4(1,2,0,4); return cb4(x)} return f"), this)(), [true,true,false,true]);
+-
+-// 1.3.5 Coerce and pass arguments
+-// Via check
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {ci4();} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x); ci4(x, x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {ci4(1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {ci4(1.);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + FROUND + "function f() {ci4(f32(1.));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + F32 + CF32 + "function f(x) {x=cf4(x); ci4(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x); return 1 + ci4(x) | 0;} return f");
+-
+-var i32x4 = SIMD.Int32x4(1, 3, 3, 7);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x)} return f"), this)(i32x4), undefined);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x); return ci4(x);} return f"), this)(i32x4), [1,3,3,7]);
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f() {cf4();} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x); cf4(x, x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f() {cf4(1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f() {cf4(1.);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + FROUND + "function f() {cf4(f32(1.));} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + F32 + CF32 + "function f(x) {x=cf4(x); cf4(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x); return 1 + cf4(x) | 0;} return f");
+-
+-var f32x4 = SIMD.Float32x4(13.37, 42.42, -0, NaN);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x)} return f"), this)(f32x4), undefined);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x); return cf4(x);} return f"), this)(f32x4), [13.37, 42.42, -0, NaN].map(Math.fround));
+-
+-var b32x4 = SIMD.Bool32x4(true, false, false, true);
+-assertEq(asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + "function f(x) {x=cb4(x)} return f"), this)(b32x4), undefined);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + "function f(x) {x=cb4(x); return cb4(x);} return f"), this)(b32x4), [true, false, false, true]);
+-
+-// Legacy coercions
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {x=i4();} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {x=i4(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {x=i4(1,2,3,4);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x,y) {x=i4(y);y=+y} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x); return i4(x);} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {return +i4(1,2,3,4)} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {return 0|i4(1,2,3,4)} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + FROUND + "function f(x) {return f32(i4(1,2,3,4))} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "function f(x) {return f4(i4(1,2,3,4))} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x); return f4(x);} return f");
+-
+-
+-function assertCaught(f) {
+-    var caught = false;
+-    try {
+-        f.apply(null, Array.prototype.slice.call(arguments, 1));
+-    } catch (e) {
+-        DEBUG && print('Assert caught: ', e, '\n', e.stack);
+-        assertEq(e instanceof TypeError, true);
+-        caught = true;
+-    }
+-    assertEq(caught, true);
+-}
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x); return cf4(x);} return f"), this);
+-assertCaught(f);
+-assertCaught(f, 1);
+-assertCaught(f, {});
+-assertCaught(f, "I sincerely am a SIMD typed object.");
+-assertCaught(f, SIMD.Int32x4(1,2,3,4));
+-assertCaught(f, SIMD.Bool32x4(true, true, false, true));
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x); return ci4(x);} return f"), this);
+-assertCaught(f);
+-assertCaught(f, 1);
+-assertCaught(f, {});
+-assertCaught(f, "I sincerely am a SIMD typed object.");
+-assertCaught(f, SIMD.Float32x4(4,3,2,1));
+-assertCaught(f, SIMD.Bool32x4(true, true, false, true));
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + "function f(x) {x=cb4(x); return cb4(x);} return f"), this);
+-assertCaught(f);
+-assertCaught(f, 1);
+-assertCaught(f, {});
+-assertCaught(f, "I sincerely am a SIMD typed object.");
+-assertCaught(f, SIMD.Int32x4(1,2,3,4));
+-assertCaught(f, SIMD.Float32x4(4,3,2,1));
+-
+-// 1.3.6 Globals
+-// 1.3.6.1 Local globals
+-// Read
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4; x=g|0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4.; x=+g;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); var f32=glob.Math.fround; function f() {var x=f32(4.); x=f32(g);} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4; x=g|0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4.; x=+g;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); var f32=glob.Math.fround; function f() {var x=f32(4.); x=f32(g);} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + I32 + CI32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); x=ci4(g);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); x=cf4(g);} return f");
+-assertAsmTypeFail('glob', USE_ASM + U32 + I32 + CI32 + "var g=u4(1,2,3,4); function f() {var x=i4(1,2,3,4); x=ci4(g);} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=0; function f() {var x=i4(1,2,3,4); x=g|0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=0.; function f() {var x=i4(1,2,3,4); x=+g;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var f32=glob.Math.fround; var g=f32(0.); function f() {var x=i4(1,2,3,4); x=f32(g);} return f");
+-
+-// Unsigned SIMD globals are not allowed.
+-assertAsmTypeFail('glob', USE_ASM + U32 + "var g=u4(0,0,0,0); function f() {var x=u4(1,2,3,4); x=g;} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=0; function f() {var x=f4(0.,0.,0.,0.); x=g|0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=0.; function f() {var x=f4(0.,0.,0.,0.); x=+g;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var f32=glob.Math.fround; var g=f32(0.); function f() {var x=f4(0.,0.,0.,0.); x=f32(g);} return f");
+-
+-CheckI4('var x=i4(1,2,3,4)', '', [1, 2, 3, 4]);
+-CheckI4('var _=42; var h=i4(5,5,5,5); var __=13.37; var x=i4(4,7,9,2);', '', [4,7,9,2]);
+-
+-CheckF4('var x=f4(1.,2.,3.,4.)', '', [1, 2, 3, 4]);
+-CheckF4('var _=42; var h=f4(5.,5.,5.,5.); var __=13.37; var x=f4(4.,13.37,9.,-0.);', '', [4, 13.37, 9, -0]);
+-CheckF4('var x=f4(1,2,3,4)', '', [1, 2, 3, 4]);
+-
+-CheckB4('var x=b4(1,0,3,0)', '', [true, false, true, false]);
+-CheckB4('var _=42; var h=b4(5,0,5,5); var __=13.37; var x=b4(0,0,9,2);', '', [false, false, true, true]);
+-
+-// Write
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4; g=x|0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4.; g=+x;} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); var f32=glob.Math.fround; function f() {var x=f32(4.); g=f32(x);} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4; g=x|0;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4.; g=+x;} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); var f32=glob.Math.fround; function f() {var x=f32(4.); g=f32(x);} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + I32 + CI32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); g=ci4(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + I32 + CF32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); g=cf4(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); g=cf4(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CI32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); g=ci4(x);} return f");
+-
+-CheckI4('var x=i4(0,0,0,0);', 'x=i4(1,2,3,4)', [1,2,3,4]);
+-CheckF4('var x=f4(0.,0.,0.,0.);', 'x=f4(5.,3.,4.,2.)', [5,3,4,2]);
+-CheckB4('var x=b4(0,0,0,0);', 'x=b4(0,0,1,1)', [false, false, true, true]);
+-
+-CheckI4('var x=i4(0,0,0,0); var y=42; var z=3.9; var w=13.37', 'x=i4(1,2,3,4); y=24; z=4.9; w=23.10;', [1,2,3,4]);
+-CheckF4('var x=f4(0,0,0,0); var y=42; var z=3.9; var w=13.37', 'x=f4(1,2,3,4); y=24; z=4.9; w=23.10;', [1,2,3,4]);
+-CheckB4('var x=b4(0,0,0,0); var y=42; var z=3.9; var w=13.37', 'x=b4(1,0,0,0); y=24; z=4.9; w=23.10;', [true, false, false, false]);
+-
+-// 1.3.6.2 Imported globals
+-// Read
+-var Int32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + I32 + CI32 + "var g=ci4(ffi.g); function f() {return ci4(g)} return f"), this, {g: SIMD.Int32x4(1,2,3,4)})();
+-assertEq(SIMD.Int32x4.extractLane(Int32x4, 0), 1);
+-assertEq(SIMD.Int32x4.extractLane(Int32x4, 1), 2);
+-assertEq(SIMD.Int32x4.extractLane(Int32x4, 2), 3);
+-assertEq(SIMD.Int32x4.extractLane(Int32x4, 3), 4);
+-
+-for (var v of [1, {}, "totally legit SIMD variable", SIMD.Float32x4(1,2,3,4)])
+-    assertCaught(asmCompile('glob', 'ffi', USE_ASM + I32 + CI32 + "var g=ci4(ffi.g); function f() {return ci4(g)} return f"), this, {g: v});
+-
+-// Unsigned SIMD globals are not allowed.
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + U32 + CU32 + "var g=cu4(ffi.g); function f() {} return f");
+-
+-var Float32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + F32 + CF32 + "var g=cf4(ffi.g); function f() {return cf4(g)} return f"), this, {g: SIMD.Float32x4(1,2,3,4)})();
+-assertEq(SIMD.Float32x4.extractLane(Float32x4, 0), 1);
+-assertEq(SIMD.Float32x4.extractLane(Float32x4, 1), 2);
+-assertEq(SIMD.Float32x4.extractLane(Float32x4, 2), 3);
+-assertEq(SIMD.Float32x4.extractLane(Float32x4, 3), 4);
+-
+-for (var v of [1, {}, "totally legit SIMD variable", SIMD.Int32x4(1,2,3,4)])
+-    assertCaught(asmCompile('glob', 'ffi', USE_ASM + F32 + CF32 + "var g=cf4(ffi.g); function f() {return cf4(g)} return f"), this, {g: v});
+-
+-var Bool32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + "var g=cb4(ffi.g); function f() {return cb4(g)} return f"), this, {g: SIMD.Bool32x4(false, false, false, true)})();
+-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 0), false);
+-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 1), false);
+-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 2), false);
+-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 3), true);
+-
+-for (var v of [1, {}, "totally legit SIMD variable", SIMD.Int32x4(1,2,3,4)])
+-    assertCaught(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + "var g=cb4(ffi.g); function f() {return cb4(g)} return f"), this, {g: v});
+-
+-// Write
+-var Int32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + I32 + CI32 + "var g=ci4(ffi.g); function f() {g=i4(4,5,6,7); return ci4(g)} return f"), this, {g: SIMD.Int32x4(1,2,3,4)})();
+-assertEq(SIMD.Int32x4.extractLane(Int32x4, 0), 4);
+-assertEq(SIMD.Int32x4.extractLane(Int32x4, 1), 5);
+-assertEq(SIMD.Int32x4.extractLane(Int32x4, 2), 6);
+-assertEq(SIMD.Int32x4.extractLane(Int32x4, 3), 7);
+-
+-var Float32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + F32 + CF32 + "var g=cf4(ffi.g); function f() {g=f4(4.,5.,6.,7.); return cf4(g)} return f"), this, {g: SIMD.Float32x4(1,2,3,4)})();
+-assertEq(SIMD.Float32x4.extractLane(Float32x4, 0), 4);
+-assertEq(SIMD.Float32x4.extractLane(Float32x4, 1), 5);
+-assertEq(SIMD.Float32x4.extractLane(Float32x4, 2), 6);
+-assertEq(SIMD.Float32x4.extractLane(Float32x4, 3), 7);
+-
+-var Bool32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + "var g=cb4(ffi.g); function f() {g=b4(1,1,0,0); return cb4(g)} return f"), this, {g: SIMD.Bool32x4(1,1,1,0)})();
+-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 0), true);
+-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 1), true);
+-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 2), false);
+-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 3), false);
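+-// [Editorial sketch, not in the original test] The import convention
+-// exercised above, spelled out once (hypothetical local mk):
+-//   var mk = asmCompile('glob', 'ffi', USE_ASM + I32 + CI32 +
+-//       "var g=ci4(ffi.g); function f() {return ci4(g)} return f");
+-//   var f = asmLink(mk, this, {g: SIMD.Int32x4(1,2,3,4)});  // coerced at link time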
+-
+-// 2. SIMD operations
+-// 2.1 Compilation
+-assertAsmTypeFail('glob', USE_ASM + "var add = Int32x4.add; return {}");
+-assertAsmTypeFail('glob', USE_ASM + I32A + I32 + "return {}");
+-assertAsmTypeFail('glob', USE_ASM + "var g = 3; var add = g.add; return {}");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var func = i4.doTheHarlemShake; return {}");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var div = i4.div; return {}");
+-assertAsmTypeFail('glob', USE_ASM + "var f32 = glob.Math.fround; var i4a = f32.add; return {}");
+-// Operation exists, but on a different SIMD type.
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var func = i4.fromUint32x4; return {}");
+-
+-// 2.2 Linking
+-assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {});
+-assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {SIMD: Math.fround});
+-
+-var oldInt32x4Add = SIMD.Int32x4.add;
+-var code = asmCompile('glob', USE_ASM + I32 + I32A + "return {}");
+-for (var v of [42, Math.fround, SIMD.Float32x4.add, function(){}, SIMD.Int32x4.mul]) {
+-    SIMD.Int32x4.add = v;
+-    assertAsmLinkFail(code, {SIMD: {Int32x4: SIMD.Int32x4}});
+-}
+-SIMD.Int32x4.add = oldInt32x4Add; // finally restore the original add function
+-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {SIMD: {Int32x4: SIMD.Int32x4}})(), undefined);
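+-// [Editorial note, not in the original test] The loop above shows that linking
+-// checks identity, not shape: any value other than the original native
+-// SIMD.Int32x4.add, even SIMD.Int32x4.mul, makes asmLink fail.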
+-
+-// 2.3. Binary arithmetic operations
+-// 2.3.1 Additions
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a();} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, x, x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(13, 37);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(23.10, 19.89);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, 42);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, 13.37);} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); var y=4; x=i4a(x, y);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(0,0,0,0); var y=4; x=i4a(y, y);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(0,0,0,0); var y=4; y=i4a(x, x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + U32 + "function f() {var x=i4(0,0,0,0); var y=u4(1,2,3,4); y=i4a(x, y);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); x=i4a(x, y);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=i4a(x, y);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=i4a(x, x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=f4a(x, x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=f4a(x, y);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); x=f4a(y, y);} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + 'function f() {var x=i4(1,2,3,4); var y=0; y=i4a(x,x)|0} return f');
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + 'function f() {var x=i4(1,2,3,4); var y=0.; y=+i4a(x,x)} return f');
+-
+-CheckI4(I32A, 'var z=i4(1,2,3,4); var y=i4(0,1,0,3); var x=i4(0,0,0,0); x=i4a(z,y)', [1,3,3,7]);
+-CheckI4(I32A, 'var x=i4(2,3,4,5); var y=i4(0,1,0,3); x=i4a(x,y)', [2,4,4,8]);
+-CheckI4(I32A, 'var x=i4(1,2,3,4); x=i4a(x,x)', [2,4,6,8]);
+-CheckI4(I32A, 'var x=i4(' + INT32_MAX + ',2,3,4); var y=i4(1,1,0,3); x=i4a(x,y)', [INT32_MIN,3,3,7]);
+-CheckI4(I32A, 'var x=i4(' + INT32_MAX + ',2,3,4); var y=i4(1,1,0,3); x=ci4(i4a(x,y))', [INT32_MIN,3,3,7]);
+-
+-CheckU4(U32A, 'var z=u4(1,2,3,4); var y=u4(0,1,0,3); var x=u4(0,0,0,0); x=u4a(z,y)', [1,3,3,7]);
+-CheckU4(U32A, 'var x=u4(2,3,4,5); var y=u4(0,1,0,3); x=u4a(x,y)', [2,4,4,8]);
+-CheckU4(U32A, 'var x=u4(1,2,3,4); x=u4a(x,x)', [2,4,6,8]);
+-
+-CheckF4(F32A, 'var x=f4(1,2,3,4); x=f4a(x,x)', [2,4,6,8]);
+-CheckF4(F32A, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4a(x,y)', [5,5,8,6]);
+-CheckF4(F32A, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4a(x,y)', [Math.fround(13.37) + 4,5,8,6]);
+-CheckF4(F32A, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=cf4(f4a(x,y))', [Math.fround(13.37) + 4,5,8,6]);
+-
+-// 2.3.2 Subtractions
+-CheckI4(I32S, 'var x=i4(1,2,3,4); var y=i4(-1,1,0,2); x=i4s(x,y)', [2,1,3,2]);
+-CheckI4(I32S, 'var x=i4(5,4,3,2); var y=i4(1,2,3,4); x=i4s(x,y)', [4,2,0,-2]);
+-CheckI4(I32S, 'var x=i4(1,2,3,4); x=i4s(x,x)', [0,0,0,0]);
+-CheckI4(I32S, 'var x=i4(' + INT32_MIN + ',2,3,4); var y=i4(1,1,0,3); x=i4s(x,y)', [INT32_MAX,1,3,1]);
+-CheckI4(I32S, 'var x=i4(' + INT32_MIN + ',2,3,4); var y=i4(1,1,0,3); x=ci4(i4s(x,y))', [INT32_MAX,1,3,1]);
+-
+-CheckU4(U32S, 'var x=u4(1,2,3,4); var y=u4(-1,1,0,2); x=u4s(x,y)', [2,1,3,2]);
+-CheckU4(U32S, 'var x=u4(5,4,3,2); var y=u4(1,2,3,4); x=u4s(x,y)', [4,2,0,-2>>>0]);
+-CheckU4(U32S, 'var x=u4(1,2,3,4); x=u4s(x,x)', [0,0,0,0]);
+-CheckU4(U32S, 'var x=u4(' + INT32_MIN + ',2,3,4); var y=u4(1,1,0,3); x=u4s(x,y)', [INT32_MAX,1,3,1]);
+-CheckU4(U32S, 'var x=u4(' + INT32_MIN + ',2,3,4); var y=u4(1,1,0,3); x=cu4(u4s(x,y))', [INT32_MAX,1,3,1]);
+-
+-CheckF4(F32S, 'var x=f4(1,2,3,4); x=f4s(x,x)', [0,0,0,0]);
+-CheckF4(F32S, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4s(x,y)', [-3,-1,-2,2]);
+-CheckF4(F32S, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4s(x,y)', [Math.fround(13.37) - 4,-1,-2,2]);
+-CheckF4(F32S, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=cf4(f4s(x,y))', [Math.fround(13.37) - 4,-1,-2,2]);
+-
+-{
+-    // Bug 1216099
+-    let code = `
+-        "use asm";
+-        var f4 = global.SIMD.Float32x4;
+-        var f4sub = f4.sub;
+-        const zerox4 = f4(0.0, 0.0, 0.0, 0.0);
+-        function f() {
+-            var newVelx4 = f4(0.0, 0.0, 0.0, 0.0);
+-            var newVelTruex4 = f4(0.0,0.0,0.0,0.0);
+-            newVelTruex4 = f4sub(zerox4, newVelx4);
+-        }
+-        // return statement intentionally missing
+-    `;
+-    assertAsmTypeFail('global', code);
+-}
+-
+-// 2.3.3 Multiplications / Divisions
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var f4d=i4.div; function f() {} return f");
+-
+-CheckI4(I32M, 'var x=i4(1,2,3,4); var y=i4(-1,1,0,2); x=i4m(x,y)', [-1,2,0,8]);
+-CheckI4(I32M, 'var x=i4(5,4,3,2); var y=i4(1,2,3,4); x=i4m(x,y)', [5,8,9,8]);
+-CheckI4(I32M, 'var x=i4(1,2,3,4); x=i4m(x,x)', [1,4,9,16]);
+-(function() {
+-    var m = INT32_MIN, M = INT32_MAX, imul = Math.imul;
+-    CheckI4(I32M, `var x=i4(${m},${m}, ${M}, ${M}); var y=i4(2,-3,4,-5); x=i4m(x,y)`,
+-            [imul(m, 2), imul(m, -3), imul(M, 4), imul(M, -5)]);
+-    CheckI4(I32M, `var x=i4(${m},${m}, ${M}, ${M}); var y=i4(${m}, ${M}, ${m}, ${M}); x=i4m(x,y)`,
+-            [imul(m, m), imul(m, M), imul(M, m), imul(M, M)]);
+-})();
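+-// [Editorial note, not in the original test] Math.imul models i4.mul's
+-// wrapping int32 semantics: for example Math.imul(INT32_MAX, 2) === -2,
+-// whereas INT32_MAX * 2 would instead produce 4294967294 as a double.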
+-
+-CheckF4(F32M, 'var x=f4(1,2,3,4); x=f4m(x,x)', [1,4,9,16]);
+-CheckF4(F32M, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4m(x,y)', [4,6,15,8]);
+-CheckF4(F32M, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4m(x,y)', [Math.fround(13.37) * 4,6,15,8]);
+-CheckF4(F32M, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=cf4(f4m(x,y))', [Math.fround(13.37) * 4,6,15,8]);
+-
+-var f32x4 = SIMD.Float32x4(0, NaN, -0, NaN);
+-var another = SIMD.Float32x4(NaN, -1, -0, NaN);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + F32M + CF32 + "function f(x, y) {x=cf4(x); y=cf4(y); x=f4m(x,y); return cf4(x);} return f"), this)(f32x4, another), [NaN, NaN, 0, NaN]);
+-
+-CheckF4(F32D, 'var x=f4(1,2,3,4); x=f4d(x,x)', [1,1,1,1]);
+-CheckF4(F32D, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4d(x,y)', [1/4,2/3,3/5,2]);
+-CheckF4(F32D, 'var x=f4(13.37,1,1,4); var y=f4(4,0,-0.,2); x=f4d(x,y)', [Math.fround(13.37) / 4,+Infinity,-Infinity,2]);
+-
+-var f32x4 = SIMD.Float32x4(0, 0, -0, NaN);
+-var another = SIMD.Float32x4(0, -0, 0, 0);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + F32D + CF32 + "function f(x,y) {x=cf4(x); y=cf4(y); x=f4d(x,y); return cf4(x);} return f"), this)(f32x4, another), [NaN, NaN, NaN, NaN]);
+-
+-// Unary arithmetic operators
+-function CheckUnaryF4(op, checkFunc, assertFunc) {
+-    var _ = asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + 'var op=f4.' + op + '; function f(x){x=cf4(x); return cf4(op(x)); } return f'), this);
+-    return function(input) {
+-        var simd = SIMD.Float32x4(input[0], input[1], input[2], input[3]);
+-
+-        var exp = input.map(Math.fround).map(checkFunc).map(Math.fround);
+-        var obs = _(simd);
+-        assertEqX4(obs, exp, assertFunc);
+-    }
+-}
+-
+-function CheckUnaryI4(op, checkFunc) {
+-    var _ = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + 'var op=i4.' + op + '; function f(x){x=ci4(x); return ci4(op(x)); } return f'), this);
+-    return function(input) {
+-        var simd = SIMD.Int32x4(input[0], input[1], input[2], input[3]);
+-        assertEqX4(_(simd), input.map(checkFunc).map(function(x) { return x | 0}));
+-    }
+-}
+-
+-function CheckUnaryU4(op, checkFunc) {
+-    var _ = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + I32U32 + U32 + U32I32 +
+-                               'var op=u4.' + op + '; function f(x){x=ci4(x); return ci4(i4u4(op(u4i4(x)))); } return f'), this);
+-    return function(input) {
+-        var simd = SIMD.Int32x4(input[0], input[1], input[2], input[3]);
+-        var res = SIMD.Uint32x4.fromInt32x4Bits(_(simd));
+-        assertEqX4(res, input.map(checkFunc).map(function(x) { return x >>> 0 }));
+-    }
+-}
+-
+-function CheckUnaryB4(op, checkFunc) {
+-    var _ = asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + 'var op=b4.' + op + '; function f(x){x=cb4(x); return cb4(op(x)); } return f'), this);
+-    return function(input) {
+-        var simd = SIMD.Bool32x4(input[0], input[1], input[2], input[3]);
+-        assertEqX4(_(simd), input.map(checkFunc).map(function(x) { return !!x}));
+-    }
+-}
+-
+-CheckUnaryI4('neg', function(x) { return -x })([1, -2, INT32_MIN, INT32_MAX]);
+-CheckUnaryI4('not', function(x) { return ~x })([1, -2, INT32_MIN, INT32_MAX]);
+-
+-CheckUnaryU4('neg', function(x) { return -x })([1, -2, INT32_MIN, INT32_MAX]);
+-CheckUnaryU4('not', function(x) { return ~x })([1, -2, INT32_MIN, INT32_MAX]);
+-
+-var CheckNotB = CheckUnaryB4('not', function(x) { return !x });
+-CheckNotB([true, false, true, true]);
+-CheckNotB([true, true, true, true]);
+-CheckNotB([false, false, false, false]);
+-
+-var CheckAbs = CheckUnaryF4('abs', Math.abs);
+-CheckAbs([1, 42.42, 0.63, 13.37]);
+-CheckAbs([NaN, -Infinity, Infinity, 0]);
+-
+-var CheckNegF = CheckUnaryF4('neg', function(x) { return -x });
+-CheckNegF([1, 42.42, 0.63, 13.37]);
+-CheckNegF([NaN, -Infinity, Infinity, 0]);
+-
+-var CheckSqrt = CheckUnaryF4('sqrt', function(x) { return Math.sqrt(x); });
+-CheckSqrt([1, 42.42, 0.63, 13.37]);
+-CheckSqrt([NaN, -Infinity, Infinity, 0]);
+-
+-// reciprocalApproximation and reciprocalSqrtApproximation give approximate results
+-function assertNear(a, b) {
+-    if (a !== a && b === b)
+-        throw 'Observed NaN, expected ' + b;
+-    if (Math.abs(a - b) > 1e-3)
+-        throw 'More than 1e-3 between ' + a + ' and ' + b;
+-}
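+-// [Editorial note, not in the original test] assertNear intentionally passes
+-// when both values are NaN (NaN > 1e-3 is false) and when both are the same
+-// infinity, since Math.abs(Infinity - Infinity) is NaN; it only throws for an
+-// observed NaN where a number was expected, or an absolute error above 1e-3.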
+-var CheckRecp = CheckUnaryF4('reciprocalApproximation', function(x) { return 1 / x; }, assertNear);
+-CheckRecp([1, 42.42, 0.63, 13.37]);
+-CheckRecp([NaN, -Infinity, Infinity, 0]);
+-
+-var CheckRecp = CheckUnaryF4('reciprocalSqrtApproximation', function(x) { return 1 / Math.sqrt(x); }, assertNear);
+-CheckRecp([1, 42.42, 0.63, 13.37]);
+-CheckRecp([NaN, -Infinity, Infinity, 0]);
+-
+-// Min/Max
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var f4m=i4.min; function f() {} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var f4d=i4.max; function f() {} return f");
+-
+-const F32MIN = 'var min = f4.min;'
+-const F32MAX = 'var max = f4.max;'
+-
+-CheckF4(F32MIN, 'var x=f4(1,2,3,4); x=min(x,x)', [1,2,3,4]);
+-CheckF4(F32MIN, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=min(x,y)', [4,2,3,2]);
+-CheckF4(F32MIN + FROUND + 'var Infinity = glob.Infinity;', 'var x=f4(0,0,0,0); var y=f4(2310,3,5,0); x=f4(f32(+Infinity),f32(-Infinity),f32(3),f32(-0.)); x=min(x,y)', [2310,-Infinity,3,-0]);
+-
+-CheckF4(F32MIN, 'var x=f4(0,0,-0,-0); var y=f4(0,-0,0,-0); x=min(x,y)', [0,-0,-0,-0]);
+-CheckF4(F32MIN + FROUND + 'var NaN = glob.NaN;', 'var x=f4(0,0,0,0); var y=f4(0,0,0,0); var n=f32(0); n=f32(NaN); x=f4(n,0.,n,0.); y=f4(n,n,0.,0.); x=min(x,y)', [NaN, NaN, NaN, 0]);
+-
+-CheckF4(F32MAX, 'var x=f4(1,2,3,4); x=max(x,x)', [1,2,3,4]);
+-CheckF4(F32MAX, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=max(x,y)', [13.37, 3, 5, 4]);
+-CheckF4(F32MAX + FROUND + 'var Infinity = glob.Infinity;', 'var x=f4(0,0,0,0); var y=f4(2310,3,5,0); x=f4(f32(+Infinity),f32(-Infinity),f32(3),f32(-0.)); x=max(x,y)', [+Infinity,3,5,0]);
+-
+-CheckF4(F32MAX, 'var x=f4(0,0,-0,-0); var y=f4(0,-0,0,-0); x=max(x,y)', [0,0,0,-0]);
+-CheckF4(F32MAX + FROUND + 'var NaN = glob.NaN;', 'var x=f4(0,0,0,0); var y=f4(0,0,0,0); var n=f32(0); n=f32(NaN); x=f4(n,0.,n,0.); y=f4(n,n,0.,0.); x=max(x,y)', [NaN, NaN, NaN, 0]);
+-
+-const F32MINNUM = 'var min = f4.minNum;'
+-const F32MAXNUM = 'var max = f4.maxNum;'
+-
+-CheckF4(F32MINNUM, 'var x=f4(1,2,3,4); x=min(x,x)', [1,2,3,4]);
+-CheckF4(F32MINNUM, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=min(x,y)', [4,2,3,2]);
+-CheckF4(F32MINNUM + FROUND + 'var Infinity = glob.Infinity;', 'var x=f4(0,0,0,0); var y=f4(2310,3,5,0); x=f4(f32(+Infinity),f32(-Infinity),f32(3),f32(-0.)); x=min(x,y)', [2310,-Infinity,3,-0]);
+-
+-CheckF4(F32MINNUM, 'var x=f4(0,0,-0,-0); var y=f4(0,-0,0,-0); x=min(x,y)', [0,-0,-0,-0]);
+-CheckF4(F32MINNUM + FROUND + 'var NaN = glob.NaN;', 'var x=f4(0,0,0,0); var y=f4(0,0,0,0); var n=f32(0); n=f32(NaN); x=f4(n,0.,n,0.); y=f4(n,n,0.,0.); x=min(x,y)', [NaN, 0, 0, 0]);
+-
+-CheckF4(F32MAXNUM, 'var x=f4(1,2,3,4); x=max(x,x)', [1,2,3,4]);
+-CheckF4(F32MAXNUM, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=max(x,y)', [13.37, 3, 5, 4]);
+-CheckF4(F32MAXNUM + FROUND + 'var Infinity = glob.Infinity;', 'var x=f4(0,0,0,0); var y=f4(2310,3,5,0); x=f4(f32(+Infinity),f32(-Infinity),f32(3),f32(-0.)); x=max(x,y)', [+Infinity,3,5,0]);
+-
+-CheckF4(F32MAXNUM, 'var x=f4(0,0,-0,-0); var y=f4(0,-0,0,-0); x=max(x,y)', [0,0,0,-0]);
+-CheckF4(F32MAXNUM + FROUND + 'var NaN = glob.NaN;', 'var x=f4(0,0,0,0); var y=f4(0,0,0,0); var n=f32(0); n=f32(NaN); x=f4(n,0.,n,0.); y=f4(n,n,0.,0.); x=max(x,y)', [NaN, 0, 0, 0]);
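+-// [Editorial note, not in the original test] The NaN lanes above show the
+-// contrast with min/max: min/max propagate NaN whenever either input lane is
+-// NaN, while minNum/maxNum return the numeric lane and yield NaN only when
+-// both lanes are NaN.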
+-
+-// ReplaceLane
+-const RLF = 'var r = f4.replaceLane;';
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + "function f() {var x = f4(1,2,3,4); x = r(x, 0, 1);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + "function f() {var x = f4(1,2,3,4); x = r(x, 0, x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); x = r(x, 4, f32(1));} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); x = r(x, f32(0), f32(1));} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); x = r(1, 0, f32(1));} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); x = r(1, 0., f32(1));} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); x = r(f32(1), 0, f32(1));} return f");
+-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); var l = 0; x = r(x, l, f32(1));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); var y = i4(1,2,3,4); x = r(y, 0, f32(1));} return f");
+-
+-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 0, f32(13.37));', [Math.fround(13.37), 2, 3, 4]);
+-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 1, f32(13.37));', [1, Math.fround(13.37), 3, 4]);
+-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 2, f32(13.37));', [1, 2, Math.fround(13.37), 4]);
+-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 3, f32(13.37));', [1, 2, 3, Math.fround(13.37)]);
+-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 3, f32(13.37) + f32(6.63));', [1, 2, 3, Math.fround(Math.fround(13.37) + Math.fround(6.63))]);
+-
+-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 0, 13.37);', [Math.fround(13.37), 2, 3, 4]);
+-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 1, 13.37);', [1, Math.fround(13.37), 3, 4]);
+-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 2, 13.37);', [1, 2, Math.fround(13.37), 4]);
+-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 3, 13.37);', [1, 2, 3, Math.fround(13.37)]);
+-
+-const RLI = 'var r = i4.replaceLane;';
+-CheckI4(RLI, 'var x = i4(1,2,3,4); x = r(x, 0, 42);', [42, 2, 3, 4]);
+-CheckI4(RLI, 'var x = i4(1,2,3,4); x = r(x, 1, 42);', [1, 42, 3, 4]);
+-CheckI4(RLI, 'var x = i4(1,2,3,4); x = r(x, 2, 42);', [1, 2, 42, 4]);
+-CheckI4(RLI, 'var x = i4(1,2,3,4); x = r(x, 3, 42);', [1, 2, 3, 42]);
+-
+-const RLU = 'var r = u4.replaceLane;';
+-CheckU4(RLU, 'var x = u4(1,2,3,4); x = r(x, 0, 42);', [42, 2, 3, 4]);
+-CheckU4(RLU, 'var x = u4(1,2,3,4); x = r(x, 1, 42);', [1, 42, 3, 4]);
+-CheckU4(RLU, 'var x = u4(1,2,3,4); x = r(x, 2, 42);', [1, 2, 42, 4]);
+-CheckU4(RLU, 'var x = u4(1,2,3,4); x = r(x, 3, 42);', [1, 2, 3, 42]);
+-
+-const RLB = 'var r = b4.replaceLane;';
+-CheckB4(RLB, 'var x = b4(1,1,0,0); x = r(x, 0, 0);', [false, true, false, false]);
+-CheckB4(RLB, 'var x = b4(1,1,0,0); x = r(x, 1, 0);', [true, false, false, false]);
+-CheckB4(RLB, 'var x = b4(1,1,0,0); x = r(x, 2, 2);', [true, true, true, false]);
+-CheckB4(RLB, 'var x = b4(1,1,0,0); x = r(x, 3, 1);', [true, true, false, true]);
+-
+-// Comparisons
+-// Comparison operators produce Bool32x4 vectors.
+-const T = true;
+-const F = false;
+-
+-const EQI32 = 'var eq = i4.equal';
+-const NEI32 = 'var ne = i4.notEqual';
+-const LTI32 = 'var lt = i4.lessThan;';
+-const LEI32 = 'var le = i4.lessThanOrEqual';
+-const GTI32 = 'var gt = i4.greaterThan;';
+-const GEI32 = 'var ge = i4.greaterThanOrEqual';
+-
+-CheckB4(I32+EQI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4);  var b=i4(-1,1,0,2); x=eq(a,b)', [F, F, F, F]);
+-CheckB4(I32+EQI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4);  x=eq(a,b)', [F, F, F, F]);
+-CheckB4(I32+EQI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4);  var b=i4(1,1,7,0);  x=eq(a,b)', [T, F, F, F]);
+-
+-CheckB4(I32+NEI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4);  var b=i4(-1,1,0,2); x=ne(a,b)', [T, T, T, T]);
+-CheckB4(I32+NEI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4);  x=ne(a,b)', [T, T, T, T]);
+-CheckB4(I32+NEI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4);  var b=i4(1,1,7,0);  x=ne(a,b)', [F, T, T, T]);
+-
+-CheckB4(I32+LTI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4);  var b=i4(-1,1,0,2); x=lt(a,b)', [F, F, F, F]);
+-CheckB4(I32+LTI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4);  x=lt(a,b)', [T, T, T, T]);
+-CheckB4(I32+LTI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4);  var b=i4(1,1,7,0);  x=lt(a,b)', [F, T, T, F]);
+-
+-CheckB4(I32+LEI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4);  var b=i4(-1,1,0,2); x=le(a,b)', [F, F, F, F]);
+-CheckB4(I32+LEI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4);  x=le(a,b)', [T, T, T, T]);
+-CheckB4(I32+LEI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4);  var b=i4(1,1,7,0);  x=le(a,b)', [T, T, T, F]);
+-
+-CheckB4(I32+GTI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4);  var b=i4(-1,1,0,2); x=gt(a,b)', [T, T, T, T]);
+-CheckB4(I32+GTI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4);  x=gt(a,b)', [F, F, F, F]);
+-CheckB4(I32+GTI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4);  var b=i4(1,1,7,0);  x=gt(a,b)', [F, F, F, T]);
+-
+-CheckB4(I32+GEI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4);  var b=i4(-1,1,0,2); x=ge(a,b)', [T, T, T, T]);
+-CheckB4(I32+GEI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4);  x=ge(a,b)', [F, F, F, F]);
+-CheckB4(I32+GEI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4);  var b=i4(1,1,7,0);  x=ge(a,b)', [T, F, F, T]);
+-
+-const EQU32 = 'var eq = u4.equal';
+-const NEU32 = 'var ne = u4.notEqual';
+-const LTU32 = 'var lt = u4.lessThan;';
+-const LEU32 = 'var le = u4.lessThanOrEqual';
+-const GTU32 = 'var gt = u4.greaterThan;';
+-const GEU32 = 'var ge = u4.greaterThanOrEqual';
+-
+-CheckB4(U32+EQU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4);  var b=u4(-1,1,0,2); x=eq(a,b)', [F, F, F, F]);
+-CheckB4(U32+EQU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4);  x=eq(a,b)', [F, F, F, F]);
+-CheckB4(U32+EQU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4);  var b=u4(1,1,7,0);  x=eq(a,b)', [T, F, F, F]);
+-
+-CheckB4(U32+NEU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4);  var b=u4(-1,1,0,2); x=ne(a,b)', [T, T, T, T]);
+-CheckB4(U32+NEU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4);  x=ne(a,b)', [T, T, T, T]);
+-CheckB4(U32+NEU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4);  var b=u4(1,1,7,0);  x=ne(a,b)', [F, T, T, T]);
+-
+-CheckB4(U32+LTU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4);  var b=u4(-1,1,0,2); x=lt(a,b)', [T, F, F, F]);
+-CheckB4(U32+LTU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4);  x=lt(a,b)', [F, T, T, T]);
+-CheckB4(U32+LTU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4);  var b=u4(1,1,7,0);  x=lt(a,b)', [F, T, T, F]);
+-
+-CheckB4(U32+LEU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4);  var b=u4(-1,1,0,2); x=le(a,b)', [T, F, F, F]);
+-CheckB4(U32+LEU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4);  x=le(a,b)', [F, T, T, T]);
+-CheckB4(U32+LEU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4);  var b=u4(1,1,7,0);  x=le(a,b)', [T, T, T, F]);
+-
+-CheckB4(U32+GTU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4);  var b=u4(-1,1,0,2); x=gt(a,b)', [F, T, T, T]);
+-CheckB4(U32+GTU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4);  x=gt(a,b)', [T, F, F, F]);
+-CheckB4(U32+GTU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4);  var b=u4(1,1,7,0);  x=gt(a,b)', [F, F, F, T]);
+-
+-CheckB4(U32+GEU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4);  var b=u4(-1,1,0,2); x=ge(a,b)', [F, T, T, T]);
+-CheckB4(U32+GEU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4);  x=ge(a,b)', [T, F, F, F]);
+-CheckB4(U32+GEU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4);  var b=u4(1,1,7,0);  x=ge(a,b)', [T, F, F, T]);
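+-// [Editorial note, not in the original test] Comparing with the signed results
+-// above: -1 wraps to UINT32_MAX in a Uint32x4 lane, so lessThan(1, -1) is true
+-// here (lane 0) where the Int32x4 version was false.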
+-
+-const LTF32 = 'var lt=f4.lessThan;';
+-const LEF32 = 'var le=f4.lessThanOrEqual;';
+-const GTF32 = 'var gt=f4.greaterThan;';
+-const GEF32 = 'var ge=f4.greaterThanOrEqual;';
+-const EQF32 = 'var eq=f4.equal;';
+-const NEF32 = 'var ne=f4.notEqual;';
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var lt=f4.lessThan; function f() {var x=f4(1,2,3,4); var y=f4(5,6,7,8); x=lt(x,y);} return f");
+-
+-CheckB4(F32+LTF32, 'var y=f4(1,2,3,4);  var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=lt(y,z)', [F, F, F, F]);
+-CheckB4(F32+LTF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4);  var x=b4(0,0,0,0); x=lt(y,z)', [T, T, T, T]);
+-CheckB4(F32+LTF32, 'var y=f4(1,0,3,4);  var z=f4(1,1,7,0);  var x=b4(0,0,0,0); x=lt(y,z)', [F, T, T, F]);
+-CheckB4(F32+LTF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=lt(y,z);', [F, F, F, F]);
+-
+-CheckB4(F32+LEF32, 'var y=f4(1,2,3,4);  var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=le(y,z)', [F, F, F, F]);
+-CheckB4(F32+LEF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4);  var x=b4(0,0,0,0); x=le(y,z)', [T, T, T, T]);
+-CheckB4(F32+LEF32, 'var y=f4(1,0,3,4);  var z=f4(1,1,7,0);  var x=b4(0,0,0,0); x=le(y,z)', [T, T, T, F]);
+-CheckB4(F32+LEF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=le(y,z);', [T, T, F, F]);
+-
+-CheckB4(F32+EQF32, 'var y=f4(1,2,3,4);  var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=eq(y,z)', [F, F, F, F]);
+-CheckB4(F32+EQF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4);  var x=b4(0,0,0,0); x=eq(y,z)', [F, F, F, F]);
+-CheckB4(F32+EQF32, 'var y=f4(1,0,3,4);  var z=f4(1,1,7,0);  var x=b4(0,0,0,0); x=eq(y,z)', [T, F, F, F]);
+-CheckB4(F32+EQF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=eq(y,z);', [T, T, F, F]);
+-
+-CheckB4(F32+NEF32, 'var y=f4(1,2,3,4);  var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=ne(y,z)', [T, T, T, T]);
+-CheckB4(F32+NEF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4);  var x=b4(0,0,0,0); x=ne(y,z)', [T, T, T, T]);
+-CheckB4(F32+NEF32, 'var y=f4(1,0,3,4);  var z=f4(1,1,7,0);  var x=b4(0,0,0,0); x=ne(y,z)', [F, T, T, T]);
+-CheckB4(F32+NEF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=ne(y,z);', [F, F, T, T]);
+-
+-CheckB4(F32+GTF32, 'var y=f4(1,2,3,4);  var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=gt(y,z)', [T, T, T, T]);
+-CheckB4(F32+GTF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4);  var x=b4(0,0,0,0); x=gt(y,z)', [F, F, F, F]);
+-CheckB4(F32+GTF32, 'var y=f4(1,0,3,4);  var z=f4(1,1,7,0);  var x=b4(0,0,0,0); x=gt(y,z)', [F, F, F, T]);
+-CheckB4(F32+GTF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=gt(y,z);', [F, F, F, F]);
+-
+-CheckB4(F32+GEF32, 'var y=f4(1,2,3,4);  var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=ge(y,z)', [T, T, T, T]);
+-CheckB4(F32+GEF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4);  var x=b4(0,0,0,0); x=ge(y,z)', [F, F, F, F]);
+-CheckB4(F32+GEF32, 'var y=f4(1,0,3,4);  var z=f4(1,1,7,0);  var x=b4(0,0,0,0); x=ge(y,z)', [T, F, F, T]);
+-CheckB4(F32+GEF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=ge(y,z);', [T, T, F, F]);
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + LTI32 + B32 + ANYB4 + 'function f(x){x=ci4(x); var y=i4(-1,0,4,5); var b=b4(0,0,0,0); b=lt(x,y); return anyt(b)|0;} return f'), this);
+-assertEq(f(SIMD.Int32x4(1,2,3,4)), 1);
+-assertEq(f(SIMD.Int32x4(1,2,4,5)), 0);
+-assertEq(f(SIMD.Int32x4(1,2,3,5)), 1);
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + LTI32 + B32 + ALLB4 + 'function f(x){x=ci4(x); var y=i4(-1,0,4,5); var b=b4(0,0,0,0); b=lt(x,y); return allt(b)|0;} return f'), this);
+-assertEq(f(SIMD.Int32x4(-2,-2,3,4)), 1);
+-assertEq(f(SIMD.Int32x4(1,2,4,5)), 0);
+-assertEq(f(SIMD.Int32x4(1,2,3,5)), 0);
+-
+-// Conversion operators
+-const CVTIF = 'var cvt=f4.fromInt32x4;';
+-const CVTFI = 'var cvt=i4.fromFloat32x4;';
+-const CVTUF = 'var cvt=f4.fromUint32x4;';
+-const CVTFU = 'var cvt=u4.fromFloat32x4;';
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var cvt=i4.fromInt32x4; return {}");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var cvt=i4.fromUint32x4; return {}");
+-assertAsmTypeFail('glob', USE_ASM + U32 + "var cvt=u4.fromInt32x4; return {}");
+-assertAsmTypeFail('glob', USE_ASM + U32 + "var cvt=u4.fromUint32x4; return {}");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var cvt=f4.fromFloat32x4; return {}");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CVTIF + "function f() {var x=i4(1,2,3,4); x=cvt(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CVTIF + "function f() {var x=f4(1,2,3,4); x=cvt(x);} return f");
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + F32 + CF32 + CI32 + CVTIF + 'function f(x){x=ci4(x); var y=f4(0,0,0,0); y=cvt(x); return cf4(y);} return f'), this);
+-assertEqX4(f(SIMD.Int32x4(1,2,3,4)), [1, 2, 3, 4]);
+-assertEqX4(f(SIMD.Int32x4(0,INT32_MIN,INT32_MAX,-1)), [0, Math.fround(INT32_MIN), Math.fround(INT32_MAX), -1]);
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + U32 + U32I32 + F32 + CF32 + CI32 + CVTUF +
+-                           'function f(x){x=ci4(x); var y=f4(0,0,0,0); y=cvt(u4i4(x)); return cf4(y);} return f'), this);
+-assertEqX4(f(SIMD.Int32x4(1,2,3,4)), [1, 2, 3, 4]);
+-assertEqX4(f(SIMD.Int32x4(0,INT32_MIN,INT32_MAX,-1)), [0, Math.fround(INT32_MAX+1), Math.fround(INT32_MAX), Math.fround(UINT32_MAX)]);
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + F32 + CF32 + CVTFI + 'function f(x){x=cf4(x); var y=i4(0,0,0,0); y=cvt(x); return ci4(y);} return f'), this);
+-assertEqX4(f(SIMD.Float32x4(1,2,3,4)), [1, 2, 3, 4]);
+-// Test that INT32_MIN (exactly representable as a float32) and the first
+-// integer below INT32_MAX that is representable as a float32 can be converted.
+-assertEqX4(f(SIMD.Float32x4(INT32_MIN, INT32_MAX - 64, -0, 0)), [INT32_MIN, INT32_MAX - 64, 0, 0].map(Math.fround));
+-// Test boundaries: first integer less than INT32_MIN and representable as a float32
+-assertThrowsInstanceOf(() => f(SIMD.Float32x4(INT32_MIN - 129, 0, 0, 0)), RangeError);
+-// INT32_MAX + 1
+-assertThrowsInstanceOf(() => f(SIMD.Float32x4(Math.pow(2, 31), 0, 0, 0)), RangeError);
+-// Special values
+-assertThrowsInstanceOf(() => f(SIMD.Float32x4(NaN, 0, 0, 0)), RangeError);
+-assertThrowsInstanceOf(() => f(SIMD.Float32x4(Infinity, 0, 0, 0)), RangeError);
+-assertThrowsInstanceOf(() => f(SIMD.Float32x4(-Infinity, 0, 0, 0)), RangeError);
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + U32 + I32U32 + F32 + CF32 + CVTFU +
+-                           'function f(x){x=cf4(x); var y=u4(0,0,0,0); y=cvt(x); return ci4(i4u4(y));} return f'), this);
+-assertEqX4(f(SIMD.Float32x4(1,2,3,4)), [1, 2, 3, 4]);
+-// TODO: Test negative numbers > -1. They should truncate to 0. See https://github.com/tc39/ecmascript_simd/issues/315
+-assertEqX4(SIMD.Uint32x4.fromInt32x4Bits(f(SIMD.Float32x4(0xffffff00, INT32_MAX+1, -0, 0))),
+-           [0xffffff00, INT32_MAX+1, 0, 0].map(Math.fround));
+-// Test boundaries: -1 or less.
+-assertThrowsInstanceOf(() => f(SIMD.Float32x4(-1, 0, 0, 0)), RangeError);
+-assertThrowsInstanceOf(() => f(SIMD.Float32x4(Math.pow(2, 32), 0, 0, 0)), RangeError);
+-// Special values
+-assertThrowsInstanceOf(() => f(SIMD.Float32x4(NaN, 0, 0, 0)), RangeError);
+-assertThrowsInstanceOf(() => f(SIMD.Float32x4(Infinity, 0, 0, 0)), RangeError);
+-assertThrowsInstanceOf(() => f(SIMD.Float32x4(-Infinity, 0, 0, 0)), RangeError);
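+-// [Editorial note, not in the original test] Both conversion blocks above show
+-// that fromFloat32x4 is a checked conversion, not a saturating one: any lane
+-// that is NaN, infinite, or outside the destination range throws a RangeError.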
+-
+-// Cast operators
+-const CVTIFB = 'var cvt=f4.fromInt32x4Bits;';
+-const CVTFIB = 'var cvt=i4.fromFloat32x4Bits;';
+-
+-var cast = (function() {
+-    var i32 = new Int32Array(1);
+-    var f32 = new Float32Array(i32.buffer);
+-
+-    function fromInt32Bits(x) {
+-        i32[0] = x;
+-        return f32[0];
+-    }
+-
+-    function fromFloat32Bits(x) {
+-        f32[0] = x;
+-        return i32[0];
+-    }
+-
+-    return {
+-        fromInt32Bits,
+-        fromFloat32Bits
+-    }
+-})();
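+-// [Editorial check, not in the original test; values verifiable in any JS
+-// shell] cast.fromFloat32Bits(1) is 0x3f800000 (1065353216), the IEEE-754
+-// single-precision pattern of 1.0, and cast.fromInt32Bits(0x3f800000) gives 1.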
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var cvt=i4.fromInt32x4; return {}");
+-assertAsmTypeFail('glob', USE_ASM + F32 + "var cvt=f4.fromFloat32x4; return {}");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CVTIFB + "function f() {var x=i4(1,2,3,4); x=cvt(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CVTIFB + "function f() {var x=f4(1,2,3,4); x=cvt(x);} return f");
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + F32 + CVTIFB + CF32 + CI32 + 'function f(x){x=ci4(x); var y=f4(0,0,0,0); y=cvt(x); return cf4(y);} return f'), this);
+-assertEqX4(f(SIMD.Int32x4(1,2,3,4)), [1, 2, 3, 4].map(cast.fromInt32Bits));
+-assertEqX4(f(SIMD.Int32x4(0,INT32_MIN,INT32_MAX,-1)), [0, INT32_MIN, INT32_MAX, -1].map(cast.fromInt32Bits));
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + F32 + F32A + CVTIFB + CF32 + CI32 + 'function f(x){x=ci4(x); var y=f4(0,0,0,0); var z=f4(1,1,1,1); y=cvt(x); y=f4a(y, z); return cf4(y)} return f'), this);
+-assertEqX4(f(SIMD.Int32x4(1,2,3,4)), [1, 2, 3, 4].map(cast.fromInt32Bits).map((x) => x+1));
+-assertEqX4(f(SIMD.Int32x4(0,INT32_MIN,INT32_MAX,-1)), [0, INT32_MIN, INT32_MAX, -1].map(cast.fromInt32Bits).map((x) => x+1));
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + F32 + CF32 + CVTFIB + 'function f(x){x=cf4(x); var y=i4(0,0,0,0); y=cvt(x); return ci4(y);} return f'), this);
+-assertEqX4(f(SIMD.Float32x4(1,2,3,4)), [1, 2, 3, 4].map(cast.fromFloat32Bits));
+-assertEqX4(f(SIMD.Float32x4(-0,NaN,+Infinity,-Infinity)), [-0, NaN, +Infinity, -Infinity].map(cast.fromFloat32Bits));
+-
+-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + F32 + CF32 + I32A + CVTFIB + 'function f(x){x=cf4(x); var y=i4(0,0,0,0); var z=i4(1,1,1,1); y=cvt(x); y=i4a(y,z); return ci4(y);} return f'), this);
+-assertEqX4(f(SIMD.Float32x4(1,2,3,4)), [1, 2, 3, 4].map(cast.fromFloat32Bits).map((x) => x+1));
+-assertEqX4(f(SIMD.Float32x4(-0,NaN,+Infinity,-Infinity)), [-0, NaN, +Infinity, -Infinity].map(cast.fromFloat32Bits).map((x) => x+1));
+-
+-// Bitwise ops
+-const ANDI32 = 'var andd=i4.and;';
+-const ORI32 = 'var orr=i4.or;';
+-const XORI32 = 'var xorr=i4.xor;';
+-
+-CheckI4(ANDI32, 'var x=i4(42,1337,-1,13); var y=i4(2, 4, 7, 15); x=andd(x,y)', [42 & 2, 1337 & 4, -1 & 7, 13 & 15]);
+-CheckI4(ORI32, ' var x=i4(42,1337,-1,13); var y=i4(2, 4, 7, 15); x=orr(x,y)',  [42 | 2, 1337 | 4, -1 | 7, 13 | 15]);
+-CheckI4(XORI32, 'var x=i4(42,1337,-1,13); var y=i4(2, 4, 7, 15); x=xorr(x,y)', [42 ^ 2, 1337 ^ 4, -1 ^ 7, 13 ^ 15]);
+-
+-const ANDU32 = 'var andd=u4.and;';
+-const ORU32 = 'var orr=u4.or;';
+-const XORU32 = 'var xorr=u4.xor;';
+-
+-CheckU4(ANDU32, 'var x=u4(42,1337,-1,13); var y=u4(2, 4, 7, 15); x=andd(x,y)', [42 & 2, 1337 & 4, (-1 & 7) >>> 0, 13 & 15]);
+-CheckU4(ORU32, ' var x=u4(42,1337,-1,13); var y=u4(2, 4, 7, 15); x=orr(x,y)',  [42 | 2, 1337 | 4, (-1 | 7) >>> 0, 13 | 15]);
+-CheckU4(XORU32, 'var x=u4(42,1337,-1,13); var y=u4(2, 4, 7, 15); x=xorr(x,y)', [42 ^ 2, 1337 ^ 4, (-1 ^ 7) >>> 0, 13 ^ 15]);
+-
+-const ANDB32 = 'var andd=b4.and;';
+-const ORB32 = 'var orr=b4.or;';
+-const XORB32 = 'var xorr=b4.xor;';
+-
+-CheckB4(ANDB32, 'var x=b4(1,0,1,0); var y=b4(1,1,0,0); x=andd(x,y)', [true, false, false, false]);
+-CheckB4(ORB32, ' var x=b4(1,0,1,0); var y=b4(1,1,0,0); x=orr(x,y)',  [true, true, true, false]);
+-CheckB4(XORB32, 'var x=b4(1,0,1,0); var y=b4(1,1,0,0); x=xorr(x,y)', [false, true, true, false]);
+-
+-// No bitwise ops on Float32x4.
+-const ANDF32 = 'var andd=f4.and;';
+-const ORF32 = 'var orr=f4.or;';
+-const XORF32 = 'var xorr=f4.xor;';
+-const NOTF32 = 'var nott=f4.not;';
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + ANDF32 + 'function f() {var x=f4(42, 13.37,-1.42, 23.10); var y=f4(19.89, 2.4, 8.15, 16.36); x=andd(x,y);} return f');
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + ORF32 + 'function f() {var x=f4(42, 13.37,-1.42, 23.10); var y=f4(19.89, 2.4, 8.15, 16.36); x=orr(x,y);} return f');
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + XORF32 + 'function f() {var x=f4(42, 13.37,-1.42, 23.10); var y=f4(19.89, 2.4, 8.15, 16.36); x=xorr(x,y);} return f');
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + NOTF32 + 'function f() {var x=f4(42, 13.37,-1.42, 23.10); x=nott(x);} return f');
+-
+-// Shift ops
+-const LSHI = 'var lsh=i4.shiftLeftByScalar;'
+-const RSHI = 'var rsh=i4.shiftRightByScalar;'
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + F32 + FROUND + LSHI + "function f() {var x=f4(1,2,3,4); return ci4(lsh(x,f32(42)));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + F32 + FROUND + LSHI + "function f() {var x=f4(1,2,3,4); return ci4(lsh(x,42));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + FROUND + LSHI + "function f() {var x=i4(1,2,3,4); return ci4(lsh(x,42.0));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + FROUND + LSHI + "function f() {var x=i4(1,2,3,4); return ci4(lsh(x,f32(42)));} return f");
+-
+-var input = 'i4(0, 1, ' + INT32_MIN + ', ' + INT32_MAX + ')';
+-var vinput = [0, 1, INT32_MIN, INT32_MAX];
+-
+-function Lsh(i) { return function(x) { return (x << (i & 31)) | 0 } }
+-function Rsh(i) { return function(x) { return (x >> (i & 31)) | 0 } }
+-
+-var asmLsh = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + LSHI + 'function f(x, y){x=x|0;y=y|0; var v=' + input + ';return ci4(lsh(v, x+y))} return f;'), this)
+-var asmRsh = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + RSHI + 'function f(x, y){x=x|0;y=y|0; var v=' + input + ';return ci4(rsh(v, x+y))} return f;'), this)
+-
+-for (var i = 1; i < 64; i++) {
+-    CheckI4(LSHI,  'var x=' + input + '; x=lsh(x, ' + i + ')',   vinput.map(Lsh(i)));
+-    CheckI4(RSHI,  'var x=' + input + '; x=rsh(x, ' + i + ')',   vinput.map(Rsh(i)));
+-
+-    assertEqX4(asmLsh(i, 3),  vinput.map(Lsh(i + 3)));
+-    assertEqX4(asmRsh(i, 3),  vinput.map(Rsh(i + 3)));
+-}
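+-// [Editorial note, not in the original test] Shift counts are taken mod 32
+-// (hence the `& 31` in Lsh and Rsh above), so counts in [32, 63] must behave
+-// exactly like count - 32; the loop covers both ranges, for constant and
+-// dynamically computed counts alike.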
+-
+-// Same thing for Uint32x4.
+-const LSHU = 'var lsh=u4.shiftLeftByScalar;'
+-const RSHU = 'var rsh=u4.shiftRightByScalar;'
+-
+-input = 'u4(0, 1, 0x80008000, ' + INT32_MAX + ')';
+-vinput = [0, 1, 0x80008000, INT32_MAX];
+-
+-function uLsh(i) { return function(x) { return (x << (i & 31)) >>> 0 } }
+-function uRsh(i) { return function(x) { return (x >>> (i & 31)) } }
+-
+-// Need to bitcast to Int32x4 before returning result.
+-asmLsh = asmLink(asmCompile('glob', USE_ASM + U32 + CU32 + LSHU + I32 + CI32 + I32U32 +
+-                            'function f(x, y){x=x|0;y=y|0; var v=' + input + ';return ci4(i4u4(lsh(v, x+y)));} return f;'), this)
+-asmRsh = asmLink(asmCompile('glob', USE_ASM + U32 + CU32 + RSHU + I32 + CI32 + I32U32 +
+-                            'function f(x, y){x=x|0;y=y|0; var v=' + input + ';return ci4(i4u4(rsh(v, x+y)));} return f;'), this)
+-
+-for (var i = 1; i < 64; i++) {
+-    // Constant shifts.
+-    CheckU4(LSHU,  'var x=' + input + '; x=lsh(x, ' + i + ')', vinput.map(uLsh(i)));
+-    CheckU4(RSHU,  'var x=' + input + '; x=rsh(x, ' + i + ')', vinput.map(uRsh(i)));
+-
+-    // Dynamically computed shifts. The asm function returns an Int32x4.
+-    assertEqX4(SIMD.Uint32x4.fromInt32x4Bits(asmLsh(i, 3)), vinput.map(uLsh(i + 3)));
+-    assertEqX4(SIMD.Uint32x4.fromInt32x4Bits(asmRsh(i, 3)), vinput.map(uRsh(i + 3)));
+-}
+-
+-// Select
+-const I32SEL = 'var i4sel = i4.select;'
+-const U32SEL = 'var u4sel = u4.select;'
+-const F32SEL = 'var f4sel = f4.select;'
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var x=f4(1,2,3,4); return ci4(i4sel(x,x,x));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=f4(1,2,3,4); var x=i4(1,2,3,4); return ci4(i4sel(m,x,x));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=f4(1,2,3,4); var x=f4(1,2,3,4); return ci4(i4sel(m,x,x));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=i4(1,2,3,4); var x=f4(1,2,3,4); return ci4(i4sel(m,x,x));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=i4(1,2,3,4); var x=i4(1,2,3,4); return ci4(i4sel(m,x,x));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,2,3,4); var x=f4(1,2,3,4); return ci4(i4sel(m,x,x));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,2,3,4); var x=f4(1,2,3,4); var y=i4(5,6,7,8); return ci4(i4sel(m,x,y));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,2,3,4); var x=i4(1,2,3,4); var y=f4(5,6,7,8); return ci4(i4sel(m,x,y));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,2,3,4); var x=f4(1,2,3,4); var y=f4(5,6,7,8); return ci4(i4sel(m,x,y));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,2,3,4); var x=i4(1,2,3,4); var y=b4(5,6,7,8); return ci4(i4sel(m,x,y));} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + F32SEL + "function f() {var m=f4(1,2,3,4); return cf4(f4sel(x,x,x));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + F32SEL + "function f() {var m=f4(1,2,3,4); var x=i4(1,2,3,4); return cf4(f4sel(m,x,x));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + F32SEL + "function f() {var m=f4(1,2,3,4); var x=f4(1,2,3,4); return cf4(f4sel(m,x,x));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + F32SEL + "function f() {var m=i4(1,2,3,4); var x=f4(1,2,3,4); var y=i4(5,6,7,8); return cf4(f4sel(m,x,y));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + F32SEL + "function f() {var m=i4(1,2,3,4); var x=i4(1,2,3,4); var y=f4(5,6,7,8); return cf4(f4sel(m,x,y));} return f");
+-
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + CI32 + I32SEL + "function f() {var m=b4(0,0,0,0); var x=i4(1,2,3,4); var y=i4(5,6,7,8); return ci4(i4sel(m,x,y)); } return f"), this)(), [5, 6, 7, 8]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,1,1,1); var x=i4(1,2,3,4); var y=i4(5,6,7,8); return ci4(i4sel(m,x,y)); } return f"), this)(), [1, 2, 3, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + CI32 + I32SEL + "function f() {var m=b4(0,1,0,1); var x=i4(1,2,3,4); var y=i4(5,6,7,8); return ci4(i4sel(m,x,y)); } return f"), this)(), [5, 2, 7, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + CI32 + I32SEL + "function f() {var m=b4(0,0,1,1); var x=i4(1,2,3,4); var y=i4(5,6,7,8); return ci4(i4sel(m,x,y)); } return f"), this)(), [5, 6, 3, 4]);
+-
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + F32 + CF32 + F32SEL + "function f() {var m=b4(0,0,0,0); var x=f4(1,2,3,4); var y=f4(5,6,7,8); return cf4(f4sel(m,x,y)); } return f"), this)(), [5, 6, 7, 8]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + F32 + CF32 + F32SEL + "function f() {var m=b4(1,1,1,1); var x=f4(1,2,3,4); var y=f4(5,6,7,8); return cf4(f4sel(m,x,y)); } return f"), this)(), [1, 2, 3, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + F32 + CF32 + F32SEL + "function f() {var m=b4(0,1,0,1); var x=f4(1,2,3,4); var y=f4(5,6,7,8); return cf4(f4sel(m,x,y)); } return f"), this)(), [5, 2, 7, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + F32 + CF32 + F32SEL + "function f() {var m=b4(0,0,1,1); var x=f4(1,2,3,4); var y=f4(5,6,7,8); return cf4(f4sel(m,x,y)); } return f"), this)(), [5, 6, 3, 4]);
+-
+-CheckU4(B32 + U32SEL, "var m=b4(0,0,0,0); var x=u4(1,2,3,4); var y=u4(5,6,7,8); x=u4sel(m,x,y);", [5, 6, 7, 8]);
+-CheckU4(B32 + U32SEL, "var m=b4(1,1,1,1); var x=u4(1,2,3,4); var y=u4(5,6,7,8); x=u4sel(m,x,y);", [1, 2, 3, 4]);
+-CheckU4(B32 + U32SEL, "var m=b4(0,1,0,1); var x=u4(1,2,3,4); var y=u4(5,6,7,8); x=u4sel(m,x,y);", [5, 2, 7, 4]);
+-CheckU4(B32 + U32SEL, "var m=b4(0,0,1,1); var x=u4(1,2,3,4); var y=u4(5,6,7,8); x=u4sel(m,x,y);", [5, 6, 3, 4]);
+-
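+-// A plain-JS reference for the lane-wise select semantics checked above
+-// (refSelect is a hypothetical helper, not part of the removed tests): a
+-// true mask lane picks from the first operand, a false lane from the second.
+-function refSelect(mask, a, b) {
+-    return a.map((x, i) => mask[i] ? x : b[i]);
+-}
+-assertEq(refSelect([false, true, false, true], [1, 2, 3, 4], [5, 6, 7, 8]).join(), '5,2,7,4');
+-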
+-// Splat
+-const I32SPLAT = 'var splat=i4.splat;'
+-const U32SPLAT = 'var splat=u4.splat;'
+-const F32SPLAT = 'var splat=f4.splat;'
+-const B32SPLAT = 'var splat=b4.splat;'
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32SPLAT + "function f() {var m=i4(1,2,3,4); var p=f4(1.,2.,3.,4.); p=splat(f32(1));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32SPLAT + "function f() {var m=i4(1,2,3,4); m=splat(1, 2)} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32SPLAT + "function f() {var m=i4(1,2,3,4); m=splat()} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32SPLAT + "function f() {var m=i4(1,2,3,4); m=splat(m);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32SPLAT + "function f() {var m=i4(1,2,3,4); m=splat(1.0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + I32SPLAT + FROUND + "function f() {var m=i4(1,2,3,4); m=splat(f32(1.0));} return f");
+-
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + I32SPLAT + 'function f(){return ci4(splat(42));} return f'), this)(), [42, 42, 42, 42]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + B32SPLAT + 'function f(){return cb4(splat(42));} return f'), this)(), [true, true, true, true]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + B32SPLAT + 'function f(){return cb4(splat(0));} return f'), this)(), [false, false, false, false]);
+-CheckU4(B32 + U32SPLAT, "var x=u4(1,2,3,4); x=splat(0);", [0, 0, 0, 0]);
+-CheckU4(B32 + U32SPLAT, "var x=u4(1,2,3,4); x=splat(0xaabbccdd);", [0xaabbccdd, 0xaabbccdd, 0xaabbccdd, 0xaabbccdd]);
+-
+-const l33t = Math.fround(13.37);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(f32(1)));} return f'), this)(), [1, 1, 1, 1]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(1.0));} return f'), this)(), [1, 1, 1, 1]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(f32(1 >>> 0)));} return f'), this)(), [1, 1, 1, 1]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(f32(13.37)));} return f'), this)(), [l33t, l33t, l33t, l33t]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(13.37));} return f'), this)(), [l33t, l33t, l33t, l33t]);
+-
+-var i32view = new Int32Array(heap);
+-var f32view = new Float32Array(heap);
+-i32view[0] = 42;
+-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + I32 + CI32 + I32SPLAT + 'var i32=new glob.Int32Array(heap); function f(){return ci4(splat(i32[0]));} return f'), this, {}, heap)(), [42, 42, 42, 42]);
+-f32view[0] = 42;
+-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + F32 + CF32 + F32SPLAT + 'var f32=new glob.Float32Array(heap); function f(){return cf4(splat(f32[0]));} return f'), this, {}, heap)(), [42, 42, 42, 42]);
+-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(f32(1) + f32(2)));} return f'), this, {}, heap)(), [3, 3, 3, 3]);
+-
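+-// A plain-JS reference for the splat semantics checked above (refSplat is a
+-// hypothetical helper, not part of the removed tests): broadcast one scalar
+-// to every lane. Float32x4 additionally rounds the scalar through
+-// Math.fround first, which is why splat(13.37) compares equal to l33t above.
+-function refSplat(v) {
+-    return [v, v, v, v];
+-}
+-assertEq(refSplat(42).join(), '42,42,42,42');
+-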
+-// Dead code
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + 'function f(){var x=i4(1,2,3,4); return ci4(x); x=i4(5,6,7,8); return ci4(x);} return f'), this)(), [1, 2, 3, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + EXTI4 + 'function f(){var x=i4(1,2,3,4); var c=0; return ci4(x); c=e(x,0)|0; return ci4(x);} return f'), this)(), [1, 2, 3, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + I32A + 'function f(){var x=i4(1,2,3,4); var c=0; return ci4(x); x=i4a(x,x); return ci4(x);} return f'), this)(), [1, 2, 3, 4]);
+-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + I32S + 'function f(){var x=i4(1,2,3,4); var c=0; return ci4(x); x=i4s(x,x); return ci4(x);} return f'), this)(), [1, 2, 3, 4]);
+-
+-// Swizzle
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); x=swizzle(x, -1, 0, 0, 0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); x=swizzle(x, 4, 0, 0, 0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); x=swizzle(x, 0.0, 0, 0, 0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); x=swizzle(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); x=swizzle(x, 0, 0, 0, x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); var y=42; x=swizzle(x, 0, 0, 0, y);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "var swizzle=i4.swizzle; function f() {var x=f4(1,2,3,4); x=swizzle(x, 0, 0, 0, 0);} return f");
+-
+-function swizzle(arr, lanes) {
+-    return [arr[lanes[0]], arr[lanes[1]], arr[lanes[2]], arr[lanes[3]]];
+-}
+-
+-var before = Date.now();
+-for (var i = 0; i < Math.pow(4, 4); i++) {
+-    var lanes = [i & 3, (i >> 2) & 3, (i >> 4) & 3, (i >> 6) & 3];
+-    CheckI4('var swizzle=i4.swizzle;', 'var x=i4(1,2,3,4); x=swizzle(x, ' + lanes.join(',') + ')', swizzle([1,2,3,4], lanes));
+-    CheckU4('var swizzle=u4.swizzle;', 'var x=u4(1,2,3,4); x=swizzle(x, ' + lanes.join(',') + ')', swizzle([1,2,3,4], lanes));
+-    CheckF4('var swizzle=f4.swizzle;', 'var x=f4(1,2,3,4); x=swizzle(x, ' + lanes.join(',') + ')', swizzle([1,2,3,4], lanes));
+-}
+-DEBUG && print('time for checking all swizzles:', Date.now() - before);
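+-
+-// The loop above packs four 2-bit lane indices into i; i = 78 (0b01001110),
+-// for instance, decodes to lanes [2, 3, 0, 1], so swizzling [1,2,3,4] gives
+-// [3, 4, 1, 2].
+-assertEq(swizzle([1, 2, 3, 4], [78 & 3, (78 >> 2) & 3, (78 >> 4) & 3, (78 >> 6) & 3]).join(), '3,4,1,2');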
+-
+-// Shuffle
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x, y, -1, 0, 0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x, y, 8, 0, 0, 0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x, y, 0.0, 0, 0, 0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x, 0, 0, 0, 0, 0);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x, y, 0, 0, 0, x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); var z=42; x=shuffle(x, y, 0, 0, 0, z);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "var shuffle=i4.shuffle; function f() {var x=f4(1,2,3,4); x=shuffle(x, x, 0, 0, 0, 0);} return f");
+-
+-function shuffle(lhs, rhs, lanes) {
+-    return [(lanes[0] < 4 ? lhs : rhs)[lanes[0] % 4],
+-            (lanes[1] < 4 ? lhs : rhs)[lanes[1] % 4],
+-            (lanes[2] < 4 ? lhs : rhs)[lanes[2] % 4],
+-            (lanes[3] < 4 ? lhs : rhs)[lanes[3] % 4]];
+-}
+-
+-before = Date.now();
+-
+-const LANE_SELECTORS = [
+-    // Four of lhs or four of rhs, equivalent to swizzle
+-    [0, 1, 2, 3],
+-    [4, 5, 6, 7],
+-    [0, 2, 3, 1],
+-    [4, 7, 4, 6],
+-    // One of lhs, three of rhs
+-    [0, 4, 5, 6],
+-    [4, 0, 5, 6],
+-    [4, 5, 0, 6],
+-    [4, 5, 6, 0],
+-    // Two of lhs, two of rhs
+-    //      in one shufps
+-    [1, 2, 4, 5],
+-    [4, 5, 1, 2],
+-    //      in two shufps
+-    [7, 0, 5, 2],
+-    [0, 7, 5, 2],
+-    [0, 7, 2, 5],
+-    [7, 0, 2, 5],
+-    // Three of lhs, one of rhs
+-    [7, 0, 1, 2],
+-    [0, 7, 1, 2],
+-    [0, 1, 7, 2],
+-    [0, 1, 2, 7],
+-    // Impl-specific special cases for swizzle
+-    [2, 3, 2, 3],
+-    [0, 1, 0, 1],
+-    [0, 0, 1, 1],
+-    [2, 2, 3, 3],
+-    // Impl-specific special cases for shuffle (case and swapped case)
+-    [2, 3, 6, 7], [6, 7, 2, 3],
+-    [0, 1, 4, 5], [4, 5, 0, 1],
+-    [0, 4, 1, 5], [4, 0, 5, 1],
+-    [2, 6, 3, 7], [6, 2, 7, 3],
+-    [4, 1, 2, 3], [0, 5, 6, 7],
+-    // Insert one element from rhs into lhs keeping other elements unchanged
+-    [7, 1, 2, 3],
+-    [0, 7, 2, 3],
+-    [0, 1, 7, 2],
+-    // These are effectively vector selects
+-    [0, 5, 2, 3],
+-    [0, 1, 6, 3],
+-    [4, 5, 2, 3],
+-    [4, 1, 6, 3]
+-];
+-
+-for (var lanes of LANE_SELECTORS) {
+-    CheckI4('var shuffle=i4.shuffle;', 'var x=i4(1,2,3,4); var y=i4(5,6,7,8); x=shuffle(x, y, ' + lanes.join(',') + ')', shuffle([1,2,3,4], [5,6,7,8], lanes));
+-    CheckU4('var shuffle=u4.shuffle;', 'var x=u4(1,2,3,4); var y=u4(5,6,7,8); x=shuffle(x, y, ' + lanes.join(',') + ')', shuffle([1,2,3,4], [5,6,7,8], lanes));
+-    CheckF4('var shuffle=f4.shuffle;', 'var x=f4(1,2,3,4); var y=f4(5,6,7,8); x=shuffle(x, y, ' + lanes.join(',') + ')', shuffle([1,2,3,4], [5,6,7,8], lanes));
+-}
+-DEBUG && print('time for checking all shuffles:', Date.now() - before);
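+-
+-// Worked example for the reference shuffle above: indices 0-3 select from
+-// lhs and 4-7 from rhs, so lanes [7, 0, 5, 2] over [1,2,3,4] and [5,6,7,8]
+-// give [8, 1, 6, 3].
+-assertEq(shuffle([1, 2, 3, 4], [5, 6, 7, 8], [7, 0, 5, 2]).join(), '8,1,6,3');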
+-
+-// 3. Function calls
+-// 3.1. No math builtins
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var fround=glob.Math.fround; function f() {var x=i4(1,2,3,4); return +fround(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var sin=glob.Math.sin; function f() {var x=i4(1,2,3,4); return +sin(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var ceil=glob.Math.ceil; function f() {var x=i4(1,2,3,4); return +ceil(x);} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var pow=glob.Math.pow; function f() {var x=i4(1,2,3,4); return +pow(1.0, x);} return f");
+-
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var fround=glob.Math.fround; function f() {var x=i4(1,2,3,4); x=i4(fround(3));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var sin=glob.Math.sin; function f() {var x=i4(1,2,3,4); x=i4(sin(3.0));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var ceil=glob.Math.sin; function f() {var x=i4(1,2,3,4); x=i4(ceil(3.0));} return f");
+-assertAsmTypeFail('glob', USE_ASM + I32 + "var pow=glob.Math.pow; function f() {var x=i4(1,2,3,4); x=i4(pow(1.0, 2.0));} return f");
+-
+-// 3.2. FFI calls
+-// Can't pass SIMD arguments to FFI
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + I32 + "var func=ffi.func; function f() {var x=i4(1,2,3,4); func(x);} return f");
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + U32 + "var func=ffi.func; function f() {var x=u4(1,2,3,4); func(x);} return f");
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + F32 + "var func=ffi.func; function f() {var x=f4(1,2,3,4); func(x);} return f");
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + B32 + "var func=ffi.func; function f() {var x=b4(1,2,3,4); func(x);} return f");
+-
+-// Can't have FFI return SIMD values
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + I32 + "var func=ffi.func; function f() {var x=i4(1,2,3,4); x=i4(func());} return f");
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + U32 + "var func=ffi.func; function f() {var x=u4(1,2,3,4); x=i4(func());} return f");
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + F32 + "var func=ffi.func; function f() {var x=f4(1,2,3,4); x=f4(func());} return f");
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + B32 + "var func=ffi.func; function f() {var x=b4(1,2,3,4); x=b4(func());} return f");
+-
+-// 3.3 Internal calls
+-// asm.js -> asm.js
+-// Retrieving values from asm.js
+-var code = USE_ASM + I32 + CI32 + I32A + EXTI4 + `
+-    var check = ffi.check;
+-
+-    function g() {
+-        var i = 0;
+-        var y = i4(0,0,0,0);
+-        var tmp = i4(0,0,0,0); var z = i4(1,1,1,1);
+-        var w = i4(5,5,5,5);
+-        for (; (i|0) < 30; i = i + 1 |0)
+-            y = i4a(z, y);
+-        y = i4a(w, y);
+-        check(e(y,0) | 0, e(y,1) | 0, e(y,2) | 0, e(y,3) | 0);
+-        return ci4(y);
+-    }
+-
+-    function f(x) {
+-        x = ci4(x);
+-        var y = i4(0,0,0,0);
+-        y = ci4(g());
+-        check(e(y,0) | 0, e(y,1) | 0, e(y,2) | 0, e(y,3) | 0);
+-        return ci4(x);
+-    }
+-    return f;
+-`;
+-
+-var v4 = SIMD.Int32x4(1,2,3,4);
+-function check(x, y, z, w) {
+-    assertEq(x, 35);
+-    assertEq(y, 35);
+-    assertEq(z, 35);
+-    assertEq(w, 35);
+-}
+-var ffi = {check};
+-assertEqX4(asmLink(asmCompile('glob', 'ffi', code), this, ffi)(v4), [1,2,3,4]);
+-
+-// Passing arguments from asm.js to asm.js
+-var code = USE_ASM + I32 + CI32 + I32A + EXTI4 + `
+-    var assertEq = ffi.assertEq;
+-
+-    function internal([args]) {
+-        [coerc]
+-        assertEq(e([last],0) | 0, [i] | 0);
+-        assertEq(e([last],1) | 0, [i] + 1 |0);
+-        assertEq(e([last],2) | 0, [i] + 2 |0);
+-        assertEq(e([last],3) | 0, [i] + 3 |0);
+-    }
+-
+-    function external() {
+-        [decls]
+-        internal([args]);
+-    }
+-    return external;
+-`;
+-
+-var ffi = {assertEq};
+-var args = '';
+-var decls = '';
+-var coerc = '';
+-for (var i = 1; i < 10; ++i) {
+-    var j = i;
+-    args += ((i > 1) ? ', ':'') + 'x' + i;
+-    decls += 'var x' + i + ' = i4(' + j++ + ', ' + j++ + ', ' + j++ + ', ' + j++ + ');\n';
+-    coerc += 'x' + i + ' = ci4(x' + i + ');\n';
+-    last = 'x' + i;
+-    var c = code.replace(/\[args\]/g, args)
+-                .replace(/\[last\]/g, last)
+-                .replace(/\[decls\]/i, decls)
+-                .replace(/\[coerc\]/i, coerc)
+-                .replace(/\[i\]/g, i);
+-    asmLink(asmCompile('glob', 'ffi', c), this, ffi)();
+-}
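+-
+-// For i == 1 the template above expands to, roughly:
+-//     function internal(x1) {
+-//         x1 = ci4(x1);
+-//         assertEq(e(x1,0) | 0, 1 | 0);
+-//         assertEq(e(x1,1) | 0, 1 + 1 |0);
+-//         ...
+-//     }
+-//     function external() {
+-//         var x1 = i4(1, 2, 3, 4);
+-//         internal(x1);
+-//     }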
+-
+-// Bug 1240524
+-assertAsmTypeFail(USE_ASM + B32 + 'var x = b4(0, 0, 0, 0); frd(x);');
+-
+-// Passing boolean results to extern functions.
+-// Verify that these functions are typed correctly.
+-function isone(x) { return (x===1)|0 }
+-var f = asmLink(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + ANYB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(anyt(i)|0)|0; } return f'), this, {isone:isone});
+-assertEq(f(SIMD.Bool32x4(0,0,1,0)), 1)
+-assertEq(f(SIMD.Bool32x4(0,0,0,0)), 0)
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + B32 + CB32 + ANYB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(anyt(i))|0; } return f');
+-
+-var f = asmLink(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + ALLB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(allt(i)|0)|0; } return f'), this, {isone:isone});
+-assertEq(f(SIMD.Bool32x4(1,1,1,1)), 1)
+-assertEq(f(SIMD.Bool32x4(0,1,0,0)), 0)
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + B32 + CB32 + ALLB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(allt(i))|0; } return f');
+-
+-var f = asmLink(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + EXTB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(e(i,2)|0)|0; } return f'), this, {isone:isone});
+-assertEq(f(SIMD.Bool32x4(1,1,1,1)), 1)
+-assertEq(f(SIMD.Bool32x4(0,1,0,0)), 0)
+-assertAsmTypeFail('glob', 'ffi', USE_ASM + B32 + CB32 + EXTB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(e(i,2))|0; } return f');
+-
+-// Stress-test for register spilling code and stack depth checks
+-var code = `
+-    "use asm";
+-    var i4 = glob.SIMD.Int32x4;
+-    var i4a = i4.add;
+-    var e = i4.extractLane;
+-    var assertEq = ffi.assertEq;
+-    function g() {
+-        var x = i4(1,2,3,4);
+-        var y = i4(2,3,4,5);
+-        var z = i4(0,0,0,0);
+-        z = i4a(x, y);
+-        assertEq(e(z,0) | 0, 3);
+-        assertEq(e(z,1) | 0, 5);
+-        assertEq(e(z,2) | 0, 7);
+-        assertEq(e(z,3) | 0, 9);
+-    }
+-    return g
+-`
+-asmLink(asmCompile('glob', 'ffi', code), this, assertEqFFI)();
+-
+-(function() {
+-    var code = `
+-        "use asm";
+-        var i4 = glob.SIMD.Int32x4;
+-        var i4a = i4.add;
+-        var e = i4.extractLane;
+-        var assertEq = ffi.assertEq;
+-        var one = ffi.one;
+-
+-        // Function call with arguments on the stack (1 on x64, 3 on x86)
+-        function h(x1, x2, x3, x4, x5, x6, x7) {
+-            x1=x1|0
+-            x2=x2|0
+-            x3=x3|0
+-            x4=x4|0
+-            x5=x5|0
+-            x6=x6|0
+-            x7=x7|0
+-            return x1 + x2 |0
+-        }
+-
+-        function g() {
+-            var x = i4(1,2,3,4);
+-            var y = i4(2,3,4,5);
+-            var z = i4(0,0,0,0);
+-            var w = 1;
+-            z = i4a(x, y);
+-            w = w + (one() | 0) | 0;
+-            assertEq(e(z,0) | 0, 3);
+-            assertEq(e(z,1) | 0, 5);
+-            assertEq(e(z,2) | 0, 7);
+-            assertEq(e(z,3) | 0, 9);
+-            h(1, 2, 3, 4, 42, 42, 42)|0
+-            return w | 0;
+-        }
+-        return g
+-    `;
+-
+-    asmLink(asmCompile('glob', 'ffi', code), this, {assertEq: assertEq, one: () => 1})();
+-})();
+-
+-// Function calls with mixed arguments on the stack (SIMD and scalar). In the
+-// worst case (x64), we have 6 int arg registers and 8 float registers.
+-(function() {
+-    var code = `
+-        "use asm";
+-        var i4 = glob.SIMD.Int32x4;
+-        var e = i4.extractLane;
+-        var ci4 = i4.check;
+-        function h(
+-            // In registers:
+-            gpr1, gpr2, gpr3, gpr4, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8,
+-            // On the stack:
+-            sint1, ssimd1, sdouble1, ssimd2, sint2, sint3, sint4, ssimd3, sdouble2
+-            )
+-        {
+-            gpr1=gpr1|0;
+-            gpr2=gpr2|0;
+-            gpr3=gpr3|0;
+-            gpr4=gpr4|0;
+-
+-            xmm1=+xmm1;
+-            xmm2=+xmm2;
+-            xmm3=+xmm3;
+-            xmm4=+xmm4;
+-            xmm5=+xmm5;
+-            xmm6=+xmm6;
+-            xmm7=+xmm7;
+-            xmm8=+xmm8;
+-
+-            sint1=sint1|0;
+-            ssimd1=ci4(ssimd1);
+-            sdouble1=+sdouble1;
+-            ssimd2=ci4(ssimd2);
+-            sint2=sint2|0;
+-            sint3=sint3|0;
+-            sint4=sint4|0;
+-            ssimd3=ci4(ssimd3);
+-            sdouble2=+sdouble2;
+-
+-            return (e(ssimd1,0)|0) + (e(ssimd2,1)|0) + (e(ssimd3,2)|0) + sint2 + gpr3 | 0;
+-        }
+-
+-        function g() {
+-            var simd1 = i4(1,2,3,4);
+-            var simd2 = i4(5,6,7,8);
+-            var simd3 = i4(9,10,11,12);
+-            return h(1, 2, 3, 4,
+-                     1., 2., 3., 4., 5., 6., 7., 8.,
+-                     5, simd1, 9., simd2, 6, 7, 8, simd3, 10.) | 0;
+-        }
+-        return g
+-    `;
+-
+-    assertEq(asmLink(asmCompile('glob', 'ffi', code), this)(), 1 + 6 + 11 + 6 + 3);
+-})();
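+-
+-// The expected value above checks out lane by lane: e(simd1,0) == 1,
+-// e(simd2,1) == 6, e(simd3,2) == 11, sint2 == 6 and gpr3 == 3, so the
+-// call returns 1 + 6 + 11 + 6 + 3 == 27.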
+-
+-// Check that the interrupt callback doesn't erase high components of simd
+-// registers:
+-
+-// WARNING: must be the last test in this file
+-(function() {
+-    var iters = 2000000;
+-    var code = `
+-    "use asm";
+-    var i4 = glob.SIMD.Int32x4;
+-    var i4a = i4.add;
+-    var ci4 = i4.check;
+-    function _() {
+-        var i = 0;
+-        var n = i4(0,0,0,0);
+-        var one = i4(1,1,1,1);
+-        for (; (i>>>0) < ` + iters + `; i=(i+1)>>>0) {
+-            n = i4a(n, one);
+-        }
+-        return ci4(n);
+-    }
+-    return _;`;
+-    // This test relies on the fact that setting the timeout will call the
+-    // interrupt callback at fixed intervals, even before the timeout.
+-    timeout(1000);
+-    var x4 = asmLink(asmCompile('glob', code), this)();
+-    assertEq(SIMD.Int32x4.extractLane(x4,0), iters);
+-    assertEq(SIMD.Int32x4.extractLane(x4,1), iters);
+-    assertEq(SIMD.Int32x4.extractLane(x4,2), iters);
+-    assertEq(SIMD.Int32x4.extractLane(x4,3), iters);
+-})();
+-
+-} catch(e) {
+-    print('Stack:', e.stack)
+-    print('Error:', e)
+-    throw e;
+-}
+diff --git a/js/src/jit-test/tests/asm.js/testZOOB.js b/js/src/jit-test/tests/asm.js/testZOOB.js
+--- a/js/src/jit-test/tests/asm.js/testZOOB.js
++++ b/js/src/jit-test/tests/asm.js/testZOOB.js
+@@ -95,116 +95,16 @@ function testFloat64(ctor, shift, scale,
+ 
+ function assertEqX4(observed, expected) {
+     assertEq(observed.x, expected.x);
+     assertEq(observed.y, expected.y);
+     assertEq(observed.z, expected.z);
+     assertEq(observed.w, expected.w);
+ }
+ 
+-function testSimdX4(ctor, shift, scale, disp, simdName, simdCtor) {
+-    var arr = new ctor(ab);
+-
+-    var c = asmCompile('glob', 'imp', 'b',
+-                       USE_ASM +
+-                       'var arr=new glob.' + ctor.name + '(b); ' +
+-                       'var SIMD_' + simdName + ' = glob.SIMD.' + simdName + '; ' +
+-                       'var SIMD_' + simdName + '_check = SIMD_' + simdName + '.check; ' +
+-                       'var SIMD_' + simdName + '_load = SIMD_' + simdName + '.load; ' +
+-                       'var SIMD_' + simdName + '_load2 = SIMD_' + simdName + '.load2; ' +
+-                       'var SIMD_' + simdName + '_load1 = SIMD_' + simdName + '.load1; ' +
+-                       'var SIMD_' + simdName + '_store = SIMD_' + simdName + '.store; ' +
+-                       'var SIMD_' + simdName + '_store2 = SIMD_' + simdName + '.store2; ' +
+-                       'var SIMD_' + simdName + '_store1 = SIMD_' + simdName + '.store1; ' +
+-                       'function load(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
+-                       'function load2(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
+-                       'function load1(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
+-                       'function store(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
+-                       'function store2(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
+-                       'function store1(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
+-                       'return { load: load, load2: load2, load1: load1, store: store, store2 : store2, store1 : store1 }');
+-    var f = asmLink(c, this, null, ab);
+-
+-    const RuntimeError = WebAssembly.RuntimeError;
+-
+-    for (var i of indices) {
+-        var index = ((i<<scale)+disp)>>shift;
+-
+-        var v, v2, v1;
+-        var t = false, t2 = false, t1 = false;
+-        try { v = simdCtor.load(arr, index); }
+-        catch (e) {
+-            assertEq(e instanceof RangeError, true);
+-            t = true;
+-        }
+-        try { v2 = simdCtor.load2(arr, index); }
+-        catch (e) {
+-            assertEq(e instanceof RangeError, true);
+-            t2 = true;
+-        }
+-        try { v1 = simdCtor.load1(arr, index); }
+-        catch (e) {
+-            assertEq(e instanceof RangeError, true);
+-            t1 = true;
+-        }
+-
+-        // Loads
+-        var l, l2, l1;
+-        var r = false, r2 = false, r1 = false;
+-        try { l = f.load(i); }
+-        catch (e) {
+-            assertEq(e instanceof RuntimeError, true);
+-            r = true;
+-        }
+-        try { l2 = f.load2(i); }
+-        catch (e) {
+-            assertEq(e instanceof RuntimeError, true);
+-            r2 = true;
+-        }
+-        try { l1 = f.load1(i); }
+-        catch (e) {
+-            assertEq(e instanceof RuntimeError, true);
+-            r1 = true;
+-        }
+-        assertEq(t, r);
+-        assertEq(t2, r2);
+-        assertEq(t1, r1);
+-        if (!t) assertEqX4(v, l);
+-        if (!t2) assertEqX4(v2, l2);
+-        if (!t1) assertEqX4(v1, l1);
+-
+-        // Stores
+-        if (!t) {
+-            simdCtor.store(arr, index, simdCtor.neg(v));
+-            f.store(i, v);
+-            assertEqX4(simdCtor.load(arr, index), v);
+-        } else
+-            assertThrowsInstanceOf(() => f.store(i, simdCtor()), RuntimeError);
+-        if (!t2) {
+-            simdCtor.store2(arr, index, simdCtor.neg(v2));
+-            f.store2(i, v2);
+-            assertEqX4(simdCtor.load2(arr, index), v2);
+-        } else
+-            assertThrowsInstanceOf(() => f.store2(i, simdCtor()), RuntimeError);
+-        if (!t1) {
+-            simdCtor.store1(arr, index, simdCtor.neg(v1));
+-            f.store1(i, v1);
+-            assertEqX4(simdCtor.load1(arr, index), v1);
+-        } else
+-            assertThrowsInstanceOf(() => f.store1(i, simdCtor()), RuntimeError);
+-    }
+-}
+-
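+-// Note: out of bounds, the SIMD.js API throws RangeError while the asm.js
+-// entry points throw WebAssembly.RuntimeError; the loop above only checks
+-// that both agree on which indices trap, then compares the loaded values.
+-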
+-function testFloat32x4(ctor, shift, scale, disp) {
+-    testSimdX4(ctor, shift, scale, disp, 'Float32x4', SIMD.Float32x4);
+-}
+-function testInt32x4(ctor, shift, scale, disp) {
+-    testSimdX4(ctor, shift, scale, disp, 'Int32x4', SIMD.Int32x4);
+-}
+-
+ function test(tester, ctor, shift) {
+     var arr = new ctor(ab);
+     for (var i = 0; i < arr.length; i++)
+         arr[i] = Math.imul(i, Math.imul((i & 1), 2) - 1);
+     for (scale of [0,1,2,3]) {
+         for (disp of [0,1,2,8,Math.pow(2,30),Math.pow(2,31)-1,Math.pow(2,31),Math.pow(2,32)-1])
+             tester(ctor, shift, scale, disp);
+     }
+@@ -218,20 +118,8 @@ function test(tester, ctor, shift) {
+ test(testInt, Int8Array, 0);
+ test(testInt, Uint8Array, 0);
+ test(testInt, Int16Array, 1);
+ test(testInt, Uint16Array, 1);
+ test(testInt, Int32Array, 2);
+ test(testInt, Uint32Array, 2);
+ test(testFloat32, Float32Array, 2);
+ test(testFloat64, Float64Array, 3);
+-if (typeof SIMD !== 'undefined' && isSimdAvailable()) {
+-    // Avoid pathological --ion-eager compile times due to bails in loops
+-    setJitCompilerOption('ion.warmup.trigger', 1000000);
+-
+-    // Use a fresh ArrayBuffer so prepareForAsmJS can allocate a guard page
+-    // which SIMD.js needs. Since the original ArrayBuffer was prepared for
+-    // asm.js that didn't use SIMD.js, it has no guard page (on 32-bit).
+-    ab = new ArrayBuffer(BUF_MIN);
+-
+-    test(testInt32x4, Uint8Array, 0);
+-    test(testFloat32x4, Uint8Array, 0);
+-}
+diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp
+--- a/js/src/jit/BaselineBailouts.cpp
++++ b/js/src/jit/BaselineBailouts.cpp
+@@ -2004,17 +2004,16 @@ jit::FinishBailoutToBaseline(BaselineBai
+       case Bailout_Hole:
+       case Bailout_NegativeIndex:
+       case Bailout_NonInt32Input:
+       case Bailout_NonNumericInput:
+       case Bailout_NonBooleanInput:
+       case Bailout_NonObjectInput:
+       case Bailout_NonStringInput:
+       case Bailout_NonSymbolInput:
+-      case Bailout_UnexpectedSimdInput:
+       case Bailout_NonSharedTypedArrayInput:
+       case Bailout_Debugger:
+       case Bailout_UninitializedThis:
+       case Bailout_BadDerivedConstructorReturn:
+         // Do nothing.
+         break;
+ 
+       case Bailout_FirstExecution:
+diff --git a/js/src/jit/BaselineIC.cpp b/js/src/jit/BaselineIC.cpp
+--- a/js/src/jit/BaselineIC.cpp
++++ b/js/src/jit/BaselineIC.cpp
+@@ -9,17 +9,16 @@
+ #include "mozilla/DebugOnly.h"
+ #include "mozilla/TemplateLib.h"
+ 
+ #include "jsfriendapi.h"
+ #include "jslibmath.h"
+ #include "jstypes.h"
+ 
+ #include "builtin/Eval.h"
+-#include "builtin/SIMDConstants.h"
+ #include "gc/Policy.h"
+ #include "jit/BaselineCacheIRCompiler.h"
+ #include "jit/BaselineDebugModeOSR.h"
+ #include "jit/BaselineJIT.h"
+ #include "jit/InlinableNatives.h"
+ #include "jit/JitSpewer.h"
+ #include "jit/Linker.h"
+ #include "jit/Lowering.h"
+@@ -1732,81 +1731,16 @@ TryAttachFunCallStub(JSContext* cx, ICCa
+         *attached = true;
+         stub->addNewStub(newStub);
+         return true;
+     }
+ 
+     return true;
+ }
+ 
+-// Check if target is a native SIMD operation which returns a SIMD type.
+-// If so, set res to a template object matching the SIMD type produced and return true.
+-static bool
+-GetTemplateObjectForSimd(JSContext* cx, JSFunction* target, MutableHandleObject res)
+-{
+-    if (!target->hasJitInfo())
+-        return false;
+-
+-    const JSJitInfo* jitInfo = target->jitInfo();
+-    if (jitInfo->type() != JSJitInfo::InlinableNative)
+-        return false;
+-
+-    // Check if this is a native inlinable SIMD operation.
+-    SimdType ctrlType;
+-    switch (jitInfo->inlinableNative) {
+-      case InlinableNative::SimdInt8x16:   ctrlType = SimdType::Int8x16;   break;
+-      case InlinableNative::SimdUint8x16:  ctrlType = SimdType::Uint8x16;  break;
+-      case InlinableNative::SimdInt16x8:   ctrlType = SimdType::Int16x8;   break;
+-      case InlinableNative::SimdUint16x8:  ctrlType = SimdType::Uint16x8;  break;
+-      case InlinableNative::SimdInt32x4:   ctrlType = SimdType::Int32x4;   break;
+-      case InlinableNative::SimdUint32x4:  ctrlType = SimdType::Uint32x4;  break;
+-      case InlinableNative::SimdFloat32x4: ctrlType = SimdType::Float32x4; break;
+-      case InlinableNative::SimdBool8x16:  ctrlType = SimdType::Bool8x16;  break;
+-      case InlinableNative::SimdBool16x8:  ctrlType = SimdType::Bool16x8;  break;
+-      case InlinableNative::SimdBool32x4:  ctrlType = SimdType::Bool32x4;  break;
+-      // This is not an inlinable SIMD operation.
+-      default: return false;
+-    }
+-
+-    // The controlling type is not necessarily the return type.
+-    // Check the actual operation.
+-    SimdOperation simdOp = SimdOperation(jitInfo->nativeOp);
+-    SimdType retType;
+-
+-    switch(simdOp) {
+-      case SimdOperation::Fn_allTrue:
+-      case SimdOperation::Fn_anyTrue:
+-      case SimdOperation::Fn_extractLane:
+-        // These operations return a scalar. No template object needed.
+-        return false;
+-
+-      case SimdOperation::Fn_lessThan:
+-      case SimdOperation::Fn_lessThanOrEqual:
+-      case SimdOperation::Fn_equal:
+-      case SimdOperation::Fn_notEqual:
+-      case SimdOperation::Fn_greaterThan:
+-      case SimdOperation::Fn_greaterThanOrEqual:
+-        // These operations return a boolean vector with the same shape as the
+-        // controlling type.
+-        retType = GetBooleanSimdType(ctrlType);
+-        break;
+-
+-      default:
+-        // All other operations return the controlling type.
+-        retType = ctrlType;
+-        break;
+-    }
+-
+-    // Create a template object based on retType.
+-    RootedGlobalObject global(cx, cx->global());
+-    Rooted<SimdTypeDescr*> descr(cx, GlobalObject::getOrCreateSimdTypeDescr(cx, global, retType));
+-    res.set(cx->realm()->jitRealm()->getSimdTemplateObjectFor(cx, descr));
+-    return true;
+-}
+-
+ static bool
+ GetTemplateObjectForNative(JSContext* cx, HandleFunction target, const CallArgs& args,
+                            MutableHandleObject res, bool* skipAttach)
+ {
+     Native native = target->native();
+ 
+     // Check for natives to which template objects can be attached. This is
+     // done to provide templates to Ion for inlining these natives later on.
+@@ -1880,38 +1814,29 @@ GetTemplateObjectForNative(JSContext* cx
+         return !!res;
+     }
+ 
+     if (native == js::intrinsic_NewStringIterator) {
+         res.set(NewStringIteratorObject(cx, TenuredObject));
+         return !!res;
+     }
+ 
+-    if (JitSupportsSimd() && GetTemplateObjectForSimd(cx, target, res))
+-       return !!res;
+-
+     return true;
+ }
+ 
+ static bool
+ GetTemplateObjectForClassHook(JSContext* cx, JSNative hook, CallArgs& args,
+                               MutableHandleObject templateObject)
+ {
+     if (hook == TypedObject::construct) {
+         Rooted<TypeDescr*> descr(cx, &args.callee().as<TypeDescr>());
+         templateObject.set(TypedObject::createZeroed(cx, descr, gc::TenuredHeap));
+         return !!templateObject;
+     }
+ 
+-    if (hook == SimdTypeDescr::call && JitSupportsSimd()) {
+-        Rooted<SimdTypeDescr*> descr(cx, &args.callee().as<SimdTypeDescr>());
+-        templateObject.set(cx->realm()->jitRealm()->getSimdTemplateObjectFor(cx, descr));
+-        return !!templateObject;
+-    }
+-
+     return true;
+ }
+ 
+ static bool
+ IsOptimizableConstStringSplit(const Value& callee, int argc, Value* args)
+ {
+     if (argc != 2 || !args[0].isString() || !args[1].isString())
+         return false;
+diff --git a/js/src/jit/BaselineInspector.cpp b/js/src/jit/BaselineInspector.cpp
+--- a/js/src/jit/BaselineInspector.cpp
++++ b/js/src/jit/BaselineInspector.cpp
+@@ -658,35 +658,16 @@ BaselineInspector::getTemplateObjectForC
+     for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+         if (stub->isCall_ClassHook() && stub->toCall_ClassHook()->clasp() == clasp)
+             return stub->toCall_ClassHook()->templateObject();
+     }
+ 
+     return nullptr;
+ }
+ 
+-JSObject*
+-BaselineInspector::getTemplateObjectForSimdCtor(jsbytecode* pc, SimdType simdType)
+-{
+-    if (!hasBaselineScript())
+-        return nullptr;
+-
+-    const ICEntry& entry = icEntryFromPC(pc);
+-    for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+-        if (stub->isCall_ClassHook() && stub->toCall_ClassHook()->clasp() == &SimdTypeDescr::class_) {
+-            JSObject* templateObj = stub->toCall_ClassHook()->templateObject();
+-            InlineTypedObject& typedObj = templateObj->as<InlineTypedObject>();
+-            if (typedObj.typeDescr().as<SimdTypeDescr>().type() == simdType)
+-                return templateObj;
+-        }
+-    }
+-
+-    return nullptr;
+-}
+-
+ LexicalEnvironmentObject*
+ BaselineInspector::templateNamedLambdaObject()
+ {
+     if (!hasBaselineScript())
+         return nullptr;
+ 
+     JSObject* res = baselineScript()->templateEnvironment();
+     if (script->bodyScope()->hasEnvironment())
+diff --git a/js/src/jit/BaselineInspector.h b/js/src/jit/BaselineInspector.h
+--- a/js/src/jit/BaselineInspector.h
++++ b/js/src/jit/BaselineInspector.h
+@@ -126,17 +126,16 @@ class BaselineInspector
+     bool hasSeenDoubleResult(jsbytecode* pc);
+     bool hasSeenNonStringIterMore(jsbytecode* pc);
+ 
+     MOZ_MUST_USE bool isOptimizableConstStringSplit(jsbytecode* pc, JSString** strOut,
+                                                     JSString** sepOut, ArrayObject** objOut);
+     JSObject* getTemplateObject(jsbytecode* pc);
+     JSObject* getTemplateObjectForNative(jsbytecode* pc, Native native);
+     JSObject* getTemplateObjectForClassHook(jsbytecode* pc, const Class* clasp);
+-    JSObject* getTemplateObjectForSimdCtor(jsbytecode* pc, SimdType simdType);
+ 
+     // Sometimes the group a template object will have is known, even if the
+     // object itself isn't.
+     ObjectGroup* getTemplateObjectGroup(jsbytecode* pc);
+ 
+     JSFunction* getSingleCallee(jsbytecode* pc);
+ 
+     LexicalEnvironmentObject* templateNamedLambdaObject();
+diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
+--- a/js/src/jit/CodeGenerator.cpp
++++ b/js/src/jit/CodeGenerator.cpp
+@@ -394,17 +394,16 @@ MNewStringObject::templateObj() const
+ {
+     return &templateObj_->as<StringObject>();
+ }
+ 
+ CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+   : CodeGeneratorSpecific(gen, graph, masm)
+   , ionScriptLabels_(gen->alloc())
+   , scriptCounts_(nullptr)
+-  , simdTemplatesToReadBarrier_(0)
+   , realmStubsToReadBarrier_(0)
+ {
+ }
+ 
+ CodeGenerator::~CodeGenerator()
+ {
+     js_delete(scriptCounts_);
+ }
+@@ -6336,92 +6335,16 @@ CodeGenerator::visitNewTypedObject(LNewT
+                                    StoreRegisterTo(object));
+ 
+     TemplateObject templateObj(templateObject);
+     masm.createGCObject(object, temp, templateObj, initialHeap, ool->entry());
+ 
+     masm.bind(ool->rejoin());
+ }
+ 
+-void
+-CodeGenerator::visitSimdBox(LSimdBox* lir)
+-{
+-    FloatRegister in = ToFloatRegister(lir->input());
+-    Register object = ToRegister(lir->output());
+-    Register temp = ToRegister(lir->temp());
+-    InlineTypedObject* templateObject = lir->mir()->templateObject();
+-    gc::InitialHeap initialHeap = lir->mir()->initialHeap();
+-    MIRType type = lir->mir()->input()->type();
+-
+-    addSimdTemplateToReadBarrier(lir->mir()->simdType());
+-
+-    MOZ_ASSERT(lir->safepoint()->liveRegs().has(in), "Save the input register across oolCallVM");
+-    OutOfLineCode* ool = oolCallVM(NewTypedObjectInfo, lir,
+-                                   ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)),
+-                                   StoreRegisterTo(object));
+-
+-    TemplateObject templateObj(templateObject);
+-    masm.createGCObject(object, temp, templateObj, initialHeap, ool->entry());
+-    masm.bind(ool->rejoin());
+-
+-    Address objectData(object, InlineTypedObject::offsetOfDataStart());
+-    switch (type) {
+-      case MIRType::Int8x16:
+-      case MIRType::Int16x8:
+-      case MIRType::Int32x4:
+-      case MIRType::Bool8x16:
+-      case MIRType::Bool16x8:
+-      case MIRType::Bool32x4:
+-        masm.storeUnalignedSimd128Int(in, objectData);
+-        break;
+-      case MIRType::Float32x4:
+-        masm.storeUnalignedSimd128Float(in, objectData);
+-        break;
+-      default:
+-        MOZ_CRASH("Unknown SIMD kind when generating code for SimdBox.");
+-    }
+-}
+-
+-void
+-CodeGenerator::addSimdTemplateToReadBarrier(SimdType simdType)
+-{
+-    simdTemplatesToReadBarrier_ |= 1 << uint32_t(simdType);
+-}
+-
+-void
+-CodeGenerator::visitSimdUnbox(LSimdUnbox* lir)
+-{
+-    Register object = ToRegister(lir->input());
+-    FloatRegister simd = ToFloatRegister(lir->output());
+-    Register temp = ToRegister(lir->temp());
+-    Label bail;
+-
+-    masm.branchIfNotSimdObject(object, temp, lir->mir()->simdType(), &bail);
+-
+-    // Load the value from the data of the InlineTypedObject.
+-    Address objectData(object, InlineTypedObject::offsetOfDataStart());
+-    switch (lir->mir()->type()) {
+-      case MIRType::Int8x16:
+-      case MIRType::Int16x8:
+-      case MIRType::Int32x4:
+-      case MIRType::Bool8x16:
+-      case MIRType::Bool16x8:
+-      case MIRType::Bool32x4:
+-        masm.loadUnalignedSimd128Int(objectData, simd);
+-        break;
+-      case MIRType::Float32x4:
+-        masm.loadUnalignedSimd128Float(objectData, simd);
+-        break;
+-      default:
+-        MOZ_CRASH("The impossible happened!");
+-    }
+-
+-    bailoutFrom(&bail, lir->snapshot());
+-}
+-
+ typedef js::NamedLambdaObject* (*NewNamedLambdaObjectFn)(JSContext*, HandleFunction, gc::InitialHeap);
+ static const VMFunction NewNamedLambdaObjectInfo =
+     FunctionInfo<NewNamedLambdaObjectFn>(NamedLambdaObject::createTemplateObject,
+                                          "NamedLambdaObject::createTemplateObject");
+ 
+ void
+ CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir)
+ {
+@@ -7132,17 +7055,17 @@ CodeGenerator::visitWasmCallI64(LWasmCal
+ }
+ 
+ void
+ CodeGenerator::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
+ {
+     MWasmLoadGlobalVar* mir = ins->mir();
+ 
+     MIRType type = mir->type();
+-    MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
++    MOZ_ASSERT(IsNumberType(type));
+ 
+     Register tls = ToRegister(ins->tlsPtr());
+     Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
+     if (mir->isIndirect()) {
+         Register tmp = ToRegister(ins->addrTemp());
+         masm.loadPtr(addr, tmp);
+         addr = Address(tmp, 0);
+     }
+@@ -7159,33 +7082,29 @@ CodeGenerator::visitWasmLoadGlobalVar(LW
+       // Aligned access: code is aligned on PageSize + there is padding
+       // before the global data section.
+       case MIRType::Int8x16:
+       case MIRType::Int16x8:
+       case MIRType::Int32x4:
+       case MIRType::Bool8x16:
+       case MIRType::Bool16x8:
+       case MIRType::Bool32x4:
+-        masm.loadInt32x4(addr, ToFloatRegister(ins->output()));
+-        break;
+       case MIRType::Float32x4:
+-        masm.loadFloat32x4(addr, ToFloatRegister(ins->output()));
+-        break;
+       default:
+         MOZ_CRASH("unexpected type in visitWasmLoadGlobalVar");
+     }
+ }
+ 
+ void
+ CodeGenerator::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
+ {
+     MWasmStoreGlobalVar* mir = ins->mir();
+ 
+     MIRType type = mir->value()->type();
+-    MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
++    MOZ_ASSERT(IsNumberType(type));
+ 
+     Register tls = ToRegister(ins->tlsPtr());
+     Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
+     if (mir->isIndirect()) {
+         Register tmp = ToRegister(ins->addrTemp());
+         masm.loadPtr(addr, tmp);
+         addr = Address(tmp, 0);
+     }
+@@ -7202,21 +7121,17 @@ CodeGenerator::visitWasmStoreGlobalVar(L
+       // Aligned access: code is aligned on PageSize + there is padding
+       // before the global data section.
+       case MIRType::Int8x16:
+       case MIRType::Int16x8:
+       case MIRType::Int32x4:
+       case MIRType::Bool8x16:
+       case MIRType::Bool16x8:
+       case MIRType::Bool32x4:
+-        masm.storeInt32x4(ToFloatRegister(ins->value()), addr);
+-        break;
+       case MIRType::Float32x4:
+-        masm.storeFloat32x4(ToFloatRegister(ins->value()), addr);
+-        break;
+       default:
+         MOZ_CRASH("unexpected type in visitWasmStoreGlobalVar");
+     }
+ }
+ 
+ void
+ CodeGenerator::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
+ {
+@@ -10278,17 +10193,16 @@ CodeGenerator::link(JSContext* cx, Compi
+ 
+     RootedScript script(cx, gen->info().script());
+     OptimizationLevel optimizationLevel = gen->optimizationInfo().level();
+ 
+     // Perform any read barriers which were skipped while compiling the
+     // script, which may have happened off-thread.
+     const JitRealm* jr = gen->realm->jitRealm();
+     jr->performStubReadBarriers(realmStubsToReadBarrier_);
+-    jr->performSIMDTemplateReadBarriers(simdTemplatesToReadBarrier_);
+ 
+     // We finished the new IonScript. Invalidate the current active IonScript,
+     // so we can replace it with this new (probably higher optimized) version.
+     if (script->hasIonScript()) {
+         MOZ_ASSERT(script->ionScript()->isRecompiling());
+         // Do a normal invalidate, except don't cancel offThread compilations,
+         // since that will cancel this compilation too.
+         Invalidate(cx, script, /* resetUses */ false, /* cancelOffThread*/ false);
+@@ -11502,29 +11416,27 @@ CodeGenerator::visitLoadUnboxedScalar(LL
+ {
+     Register elements = ToRegister(lir->elements());
+     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+     AnyRegister out = ToAnyRegister(lir->output());
+ 
+     const MLoadUnboxedScalar* mir = lir->mir();
+ 
+     Scalar::Type readType = mir->readType();
+-    unsigned numElems = mir->numElems();
+-
+     int width = Scalar::byteSize(mir->storageType());
+     bool canonicalizeDouble = mir->canonicalizeDoubles();
+ 
+     Label fail;
+     if (lir->index()->isConstant()) {
+         Address source(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
+-        masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble, numElems);
++        masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble);
+     } else {
+         BaseIndex source(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
+                          mir->offsetAdjustment());
+-        masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble, numElems);
++        masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble);
+     }
+ 
+     if (fail.used())
+         bailoutFrom(&fail, lir->snapshot());
+ }
+ 
+ void
+ CodeGenerator::visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole* lir)
+@@ -11793,23 +11705,20 @@ CodeGenerator::visitLoadElementFromState
+ 
+     addOutOfLineCode(jumpTable, lir->mir());
+     masm.bind(&join);
+ }
+ 
+ template <typename T>
+ static inline void
+ StoreToTypedArray(MacroAssembler& masm, Scalar::Type writeType, const LAllocation* value,
+-                  const T& dest, unsigned numElems = 0)
+-{
+-    if (Scalar::isSimdType(writeType) ||
+-        writeType == Scalar::Float32 ||
+-        writeType == Scalar::Float64)
+-    {
+-        masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest, numElems);
++                  const T& dest)
++{
++    if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
++        masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
+     } else {
+         if (value->isConstant())
+             masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
+         else
+             masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
+     }
+ }
+ 
+@@ -11817,27 +11726,26 @@ void
+ CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir)
+ {
+     Register elements = ToRegister(lir->elements());
+     const LAllocation* value = lir->value();
+ 
+     const MStoreUnboxedScalar* mir = lir->mir();
+ 
+     Scalar::Type writeType = mir->writeType();
+-    unsigned numElems = mir->numElems();
+ 
+     int width = Scalar::byteSize(mir->storageType());
+ 
+     if (lir->index()->isConstant()) {
+         Address dest(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
+-        StoreToTypedArray(masm, writeType, value, dest, numElems);
++        StoreToTypedArray(masm, writeType, value, dest);
+     } else {
+         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
+                        mir->offsetAdjustment());
+-        StoreToTypedArray(masm, writeType, value, dest, numElems);
++        StoreToTypedArray(masm, writeType, value, dest);
+     }
+ }
+ 
+ void
+ CodeGenerator::visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole* lir)
+ {
+     Register elements = ToRegister(lir->elements());
+     const LAllocation* value = lir->value();
+diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h
+--- a/js/src/jit/CodeGenerator.h
++++ b/js/src/jit/CodeGenerator.h
+@@ -312,34 +312,19 @@ class CodeGenerator final : public CodeG
+ 
+     // Script counts created during code generation.
+     IonScriptCounts* scriptCounts_;
+ 
+ #if defined(JS_ION_PERF)
+     PerfSpewer perfSpewer_;
+ #endif
+ 
+-    // This integer is a bit mask of all SimdTypeDescr::Type indexes. When an
+-    // MSimdBox instruction is encoded, it might have been created either by
+-    // IonBuilder or by the Eager Simd Unbox phase.
+-    //
+-    // As the template objects are weak references, the JitRealm uses read
+-    // barriers, but such a barrier cannot be used during compilation. To
+-    // work around this issue, the barriers are captured during
+-    // CodeGenerator::link.
+-    //
+-    // Instead of saving the pointers, we just save the index of the Read
+-    // Barriered objects in a bit mask.
+-    uint32_t simdTemplatesToReadBarrier_;
+-
+     // Bit mask of JitRealm stubs that are to be read-barriered.
+     uint32_t realmStubsToReadBarrier_;
+ 
+-    void addSimdTemplateToReadBarrier(SimdType simdType);
+-
+ #define LIR_OP(op) void visit##op(L##op* ins);
+     LIR_OPCODE_LIST(LIR_OP)
+ #undef LIR_OP
+ };
+ 
+ } // namespace jit
+ } // namespace js
+ 
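The bit-mask bookkeeping described in the removed CodeGenerator.h comment can be
sketched in plain JS, purely as an illustration of the scheme (none of these names
exist in the tree): record an index by setting its bit, then pop indexes
lowest-bit-first, much as Ion.cpp's PopNextBitmaskValue does on the C++ side.

    let mask = 0;
    const record = (idx) => { mask |= 1 << idx; };  // defer the read barrier
    function popNext() {
        const bit = mask & -mask;                   // isolate the lowest set bit
        mask &= mask - 1;                           // clear it
        return 31 - Math.clz32(bit);                // recover the index
    }
    record(3); record(0);
    popNext();  // => 0
    popNext();  // => 3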
+diff --git a/js/src/jit/EagerSimdUnbox.cpp b/js/src/jit/EagerSimdUnbox.cpp
+deleted file mode 100644
+--- a/js/src/jit/EagerSimdUnbox.cpp
++++ /dev/null
+@@ -1,128 +0,0 @@
+-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+- * vim: set ts=8 sts=4 et sw=4 tw=99:
+- * This Source Code Form is subject to the terms of the Mozilla Public
+- * License, v. 2.0. If a copy of the MPL was not distributed with this
+- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+-
+-#include "jit/EagerSimdUnbox.h"
+-
+-#include "jit/MIR.h"
+-#include "jit/MIRGenerator.h"
+-#include "jit/MIRGraph.h"
+-
+-namespace js {
+-namespace jit {
+-
+-// Do not optimize any Phi instruction which has conflicting Unbox operations,
+-// as this might imply some intended polymorphism.
+-static bool
+-CanUnboxSimdPhi(const JitRealm* jitRealm, MPhi* phi, SimdType unboxType)
+-{
+-    MOZ_ASSERT(phi->type() == MIRType::Object);
+-
+-    // If we are unboxing, we have more than likely boxed this SIMD type at
+-    // least once in baseline; otherwise, we cannot create an MSimdBox, as we
+-    // have no template object to use.
+-    if (!jitRealm->maybeGetSimdTemplateObjectFor(unboxType))
+-        return false;
+-
+-    MResumePoint* entry = phi->block()->entryResumePoint();
+-    MIRType mirType = SimdTypeToMIRType(unboxType);
+-    for (MUseIterator i(phi->usesBegin()), e(phi->usesEnd()); i != e; i++) {
+-        // If we cannot recover the Simd object at the entry of the basic block,
+-        // then we would have to box the content anyways.
+-        if ((*i)->consumer() == entry && !entry->isRecoverableOperand(*i))
+-            return false;
+-
+-        if (!(*i)->consumer()->isDefinition())
+-            continue;
+-
+-        MDefinition* def = (*i)->consumer()->toDefinition();
+-        if (def->isSimdUnbox() && def->toSimdUnbox()->type() != mirType)
+-            return false;
+-    }
+-
+-    return true;
+-}
+-
+-static void
+-UnboxSimdPhi(const JitRealm* jitRealm, MIRGraph& graph, MPhi* phi, SimdType unboxType)
+-{
+-    TempAllocator& alloc = graph.alloc();
+-
+-    // Unbox and replace all operands.
+-    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
+-        MDefinition* op = phi->getOperand(i);
+-        MSimdUnbox* unbox = MSimdUnbox::New(alloc, op, unboxType);
+-        op->block()->insertAtEnd(unbox);
+-        phi->replaceOperand(i, unbox);
+-    }
+-
+-    // Change the MIRType of the Phi.
+-    MIRType mirType = SimdTypeToMIRType(unboxType);
+-    phi->setResultType(mirType);
+-
+-    MBasicBlock* phiBlock = phi->block();
+-    MInstruction* atRecover = phiBlock->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
+-    MInstruction* at = phiBlock->safeInsertTop(atRecover);
+-
+-    // Note, we capture the uses-list now, as new instructions are not visited.
+-    MUseIterator i(phi->usesBegin()), e(phi->usesEnd());
+-
+-    // Add a MSimdBox, and replace all the Phi uses with it.
+-    JSObject* templateObject = jitRealm->maybeGetSimdTemplateObjectFor(unboxType);
+-    InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
+-    MSimdBox* recoverBox = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
+-    recoverBox->setRecoveredOnBailout();
+-    phiBlock->insertBefore(atRecover, recoverBox);
+-
+-    MSimdBox* box = nullptr;
+-    while (i != e) {
+-        MUse* use = *i++;
+-        MNode* ins = use->consumer();
+-
+-        if ((ins->isDefinition() && ins->toDefinition()->isRecoveredOnBailout()) ||
+-            (ins->isResumePoint() && ins->toResumePoint()->isRecoverableOperand(use)))
+-        {
+-            use->replaceProducer(recoverBox);
+-            continue;
+-        }
+-
+-        if (!box) {
+-            box = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
+-            phiBlock->insertBefore(at, box);
+-        }
+-
+-        use->replaceProducer(box);
+-    }
+-}
+-
+-bool
+-EagerSimdUnbox(MIRGenerator* mir, MIRGraph& graph)
+-{
+-    const JitRealm* jitRealm = mir->realm->jitRealm();
+-    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
+-        if (mir->shouldCancel("Eager Simd Unbox"))
+-            return false;
+-
+-        for (MInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
+-            if (!ins->isSimdUnbox())
+-                continue;
+-
+-            MSimdUnbox* unbox = ins->toSimdUnbox();
+-            if (!unbox->input()->isPhi())
+-                continue;
+-
+-            MPhi* phi = unbox->input()->toPhi();
+-            if (!CanUnboxSimdPhi(jitRealm, phi, unbox->simdType()))
+-                continue;
+-
+-            UnboxSimdPhi(jitRealm, graph, phi, unbox->simdType());
+-        }
+-    }
+-
+-    return true;
+-}
+-
+-} /* namespace jit */
+-} /* namespace js */
+diff --git a/js/src/jit/EagerSimdUnbox.h b/js/src/jit/EagerSimdUnbox.h
+deleted file mode 100644
+--- a/js/src/jit/EagerSimdUnbox.h
++++ /dev/null
+@@ -1,25 +0,0 @@
+-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+- * vim: set ts=8 sts=4 et sw=4 tw=99:
+- * This Source Code Form is subject to the terms of the Mozilla Public
+- * License, v. 2.0. If a copy of the MPL was not distributed with this
+- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+-
+-// This file declares eager SIMD unboxing.
+-#ifndef jit_EagerSimdUnbox_h
+-#define jit_EagerSimdUnbox_h
+-
+-#include "mozilla/Attributes.h"
+-
+-namespace js {
+-namespace jit {
+-
+-class MIRGenerator;
+-class MIRGraph;
+-
+-MOZ_MUST_USE bool
+-EagerSimdUnbox(MIRGenerator* mir, MIRGraph& graph);
+-
+-} // namespace jit
+-} // namespace js
+-
+-#endif /* jit_EagerSimdUnbox_h */
+diff --git a/js/src/jit/InlinableNatives.h b/js/src/jit/InlinableNatives.h
+--- a/js/src/jit/InlinableNatives.h
++++ b/js/src/jit/InlinableNatives.h
+@@ -93,27 +93,16 @@
+     _(IntrinsicStringReplaceString) \
+     _(IntrinsicStringSplitString)   \
+                                     \
+     _(Object)                       \
+     _(ObjectCreate)                 \
+     _(ObjectIs)                     \
+     _(ObjectToString)               \
+                                     \
+-    _(SimdInt32x4)                  \
+-    _(SimdUint32x4)                 \
+-    _(SimdInt16x8)                  \
+-    _(SimdUint16x8)                 \
+-    _(SimdInt8x16)                  \
+-    _(SimdUint8x16)                 \
+-    _(SimdFloat32x4)                \
+-    _(SimdBool32x4)                 \
+-    _(SimdBool16x8)                 \
+-    _(SimdBool8x16)                 \
+-                                    \
+     _(TestBailout)                  \
+     _(TestAssertFloat32)            \
+     _(TestAssertRecoveredOnBailout) \
+                                     \
+     _(IntrinsicUnsafeSetReservedSlot) \
+     _(IntrinsicUnsafeGetReservedSlot) \
+     _(IntrinsicUnsafeGetObjectFromReservedSlot) \
+     _(IntrinsicUnsafeGetInt32FromReservedSlot) \
+diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp
+--- a/js/src/jit/Ion.cpp
++++ b/js/src/jit/Ion.cpp
+@@ -16,17 +16,16 @@
+ #include "jit/AliasAnalysis.h"
+ #include "jit/AlignmentMaskAnalysis.h"
+ #include "jit/BacktrackingAllocator.h"
+ #include "jit/BaselineFrame.h"
+ #include "jit/BaselineInspector.h"
+ #include "jit/BaselineJIT.h"
+ #include "jit/CacheIRSpewer.h"
+ #include "jit/CodeGenerator.h"
+-#include "jit/EagerSimdUnbox.h"
+ #include "jit/EdgeCaseAnalysis.h"
+ #include "jit/EffectiveAddressAnalysis.h"
+ #include "jit/FoldLinearArithConstants.h"
+ #include "jit/InstructionReordering.h"
+ #include "jit/IonAnalysis.h"
+ #include "jit/IonBuilder.h"
+ #include "jit/IonIC.h"
+ #include "jit/IonOptimizationLevels.h"
+@@ -438,27 +437,16 @@ JitRealm::performStubReadBarriers(uint32
+     while (stubsToBarrier) {
+         auto stub = PopNextBitmaskValue<StubIndex>(&stubsToBarrier);
+         const ReadBarrieredJitCode& jitCode = stubs_[stub];
+         MOZ_ASSERT(jitCode);
+         jitCode.get();
+     }
+ }
+ 
+-void
+-JitRealm::performSIMDTemplateReadBarriers(uint32_t simdTemplatesToBarrier) const
+-{
+-    while (simdTemplatesToBarrier) {
+-        auto type = PopNextBitmaskValue<SimdType>(&simdTemplatesToBarrier);
+-        const ReadBarrieredObject& tpl = simdTemplateObjects_[type];
+-        MOZ_ASSERT(tpl);
+-        tpl.get();
+-    }
+-}
+-
+ bool
+ JitZone::init(JSContext* cx)
+ {
+     if (!baselineCacheIRStubCodes_.init()) {
+         ReportOutOfMemory(cx);
+         return false;
+     }
+ 
+@@ -643,21 +631,16 @@ JitRealm::sweep(JS::Realm* realm)
+         if (!stubCodes_->lookup(it.key))
+            it = BailoutReturnStubInfo();
+     }
+ 
+     for (ReadBarrieredJitCode& stub : stubs_) {
+         if (stub && IsAboutToBeFinalized(&stub))
+             stub.set(nullptr);
+     }
+-
+-    for (ReadBarrieredObject& obj : simdTemplateObjects_) {
+-        if (obj && IsAboutToBeFinalized(&obj))
+-            obj.set(nullptr);
+-    }
+ }
+ 
+ void
+ JitZone::sweep()
+ {
+     baselineCacheIRStubCodes_.sweep();
+ }
+ 
+@@ -1479,27 +1462,16 @@ OptimizeMIR(MIRGenerator* mir)
+             return false;
+         gs.spewPass("Apply types");
+         AssertExtendedGraphCoherency(graph);
+ 
+         if (mir->shouldCancel("Apply types"))
+             return false;
+     }
+ 
+-    if (!JitOptions.disableRecoverIns && mir->optimizationInfo().eagerSimdUnboxEnabled()) {
+-        AutoTraceLog log(logger, TraceLogger_EagerSimdUnbox);
+-        if (!EagerSimdUnbox(mir, graph))
+-            return false;
+-        gs.spewPass("Eager Simd Unbox");
+-        AssertGraphCoherency(graph);
+-
+-        if (mir->shouldCancel("Eager Simd Unbox"))
+-            return false;
+-    }
+-
+     if (mir->optimizationInfo().amaEnabled()) {
+         AutoTraceLog log(logger, TraceLogger_AlignmentMaskAnalysis);
+         AlignmentMaskAnalysis ama(graph);
+         if (!ama.analyze())
+             return false;
+         gs.spewPass("Alignment Mask Analysis");
+         AssertExtendedGraphCoherency(graph);
+ 
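Note on the OptimizeMIR hunk above: every stage follows the same guard pattern, i.e. run the pass, spew the graph, assert graph invariants, then poll for cooperative cancellation, which is why dropping the eager-SIMD-unbox stage is a clean block deletion. A compilable sketch of that control flow (Pass and runPipeline are generic stand-ins, not SpiderMonkey API):

    #include <cstdio>
    #include <functional>
    #include <vector>

    struct Pass {
        const char* name;
        std::function<bool()> run;  // false signals a hard failure (e.g. OOM)
    };

    bool runPipeline(const std::vector<Pass>& passes,
                     const std::function<bool(const char*)>& shouldCancel) {
        for (const Pass& p : passes) {
            if (!p.run())
                return false;                       // abort the compilation
            std::printf("spewPass: %s\n", p.name);  // stands in for gs.spewPass()
            if (shouldCancel(p.name))
                return false;                       // off-thread compile cancelled
        }
        return true;
    }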
+diff --git a/js/src/jit/IonBuilder.cpp b/js/src/jit/IonBuilder.cpp
+--- a/js/src/jit/IonBuilder.cpp
++++ b/js/src/jit/IonBuilder.cpp
+@@ -7859,21 +7859,16 @@ IonBuilder::getElemTryTypedObject(bool* 
+     if (elemPrediction.isUseless())
+         return Ok();
+ 
+     uint32_t elemSize;
+     if (!elemPrediction.hasKnownSize(&elemSize))
+         return Ok();
+ 
+     switch (elemPrediction.kind()) {
+-      case type::Simd:
+-        // FIXME (bug 894105): load into a MIRType::float32x4 etc
+-        trackOptimizationOutcome(TrackedOutcome::GenericFailure);
+-        return Ok();
+-
+       case type::Struct:
+       case type::Array:
+         return getElemTryComplexElemOfTypedObject(emitted,
+                                                   obj,
+                                                   index,
+                                                   objPrediction,
+                                                   elemPrediction,
+                                                   elemSize);
+@@ -8921,21 +8916,16 @@ IonBuilder::setElemTryTypedObject(bool* 
+     if (elemPrediction.isUseless())
+         return Ok();
+ 
+     uint32_t elemSize;
+     if (!elemPrediction.hasKnownSize(&elemSize))
+         return Ok();
+ 
+     switch (elemPrediction.kind()) {
+-      case type::Simd:
+-        // FIXME (bug 894105): store a MIRType::float32x4 etc
+-        trackOptimizationOutcome(TrackedOutcome::GenericFailure);
+-        return Ok();
+-
+       case type::Reference:
+         return setElemTryReferenceElemOfTypedObject(emitted, obj, index,
+                                                     objPrediction, value, elemPrediction);
+ 
+       case type::Scalar:
+         return setElemTryScalarElemOfTypedObject(emitted,
+                                                  obj,
+                                                  index,
+@@ -10553,20 +10543,16 @@ IonBuilder::getPropTryTypedObject(bool* 
+ {
+     TypedObjectPrediction fieldPrediction;
+     size_t fieldOffset;
+     size_t fieldIndex;
+     if (!typedObjectHasField(obj, name, &fieldOffset, &fieldPrediction, &fieldIndex))
+         return Ok();
+ 
+     switch (fieldPrediction.kind()) {
+-      case type::Simd:
+-        // FIXME (bug 894104): load into a MIRType::float32x4 etc
+-        return Ok();
+-
+       case type::Struct:
+       case type::Array:
+         return getPropTryComplexPropOfTypedObject(emitted,
+                                                   obj,
+                                                   fieldOffset,
+                                                   fieldPrediction,
+                                                   fieldIndex);
+ 
+@@ -11698,20 +11684,16 @@ IonBuilder::setPropTryTypedObject(bool* 
+ {
+     TypedObjectPrediction fieldPrediction;
+     size_t fieldOffset;
+     size_t fieldIndex;
+     if (!typedObjectHasField(obj, name, &fieldOffset, &fieldPrediction, &fieldIndex))
+         return Ok();
+ 
+     switch (fieldPrediction.kind()) {
+-      case type::Simd:
+-        // FIXME (bug 894104): store into a MIRType::float32x4 etc
+-        return Ok();
+-
+       case type::Reference:
+         return setPropTryReferencePropOfTypedObject(emitted, obj, fieldOffset,
+                                                     value, fieldPrediction, name);
+ 
+       case type::Scalar:
+         return setPropTryScalarPropOfTypedObject(emitted, obj, fieldOffset,
+                                                  value, fieldPrediction);
+ 
+diff --git a/js/src/jit/IonBuilder.h b/js/src/jit/IonBuilder.h
+--- a/js/src/jit/IonBuilder.h
++++ b/js/src/jit/IonBuilder.h
+@@ -726,61 +726,16 @@ class IonBuilder
+     InliningResult inlinePossiblyWrappedTypedArrayLength(CallInfo& callInfo);
+     InliningResult inlineSetDisjointTypedElements(CallInfo& callInfo);
+ 
+     // TypedObject intrinsics and natives.
+     InliningResult inlineObjectIsTypeDescr(CallInfo& callInfo);
+     InliningResult inlineSetTypedObjectOffset(CallInfo& callInfo);
+     InliningResult inlineConstructTypedObject(CallInfo& callInfo, TypeDescr* target);
+ 
+-    // SIMD intrinsics and natives.
+-    InliningResult inlineConstructSimdObject(CallInfo& callInfo, SimdTypeDescr* target);
+-
+-    // SIMD helpers.
+-    bool canInlineSimd(CallInfo& callInfo, JSNative native, unsigned numArgs,
+-                       InlineTypedObject** templateObj);
+-    MDefinition* unboxSimd(MDefinition* ins, SimdType type);
+-    InliningResult boxSimd(CallInfo& callInfo, MDefinition* ins, InlineTypedObject* templateObj);
+-    MDefinition* convertToBooleanSimdLane(MDefinition* scalar);
+-
+-    InliningResult inlineSimd(CallInfo& callInfo, JSFunction* target, SimdType type);
+-
+-    InliningResult inlineSimdBinaryArith(CallInfo& callInfo, JSNative native,
+-                                         MSimdBinaryArith::Operation op, SimdType type);
+-    InliningResult inlineSimdBinaryBitwise(CallInfo& callInfo, JSNative native,
+-                                           MSimdBinaryBitwise::Operation op, SimdType type);
+-    InliningResult inlineSimdBinarySaturating(CallInfo& callInfo, JSNative native,
+-                                              MSimdBinarySaturating::Operation op, SimdType type);
+-    InliningResult inlineSimdShift(CallInfo& callInfo, JSNative native, MSimdShift::Operation op,
+-                                   SimdType type);
+-    InliningResult inlineSimdComp(CallInfo& callInfo, JSNative native,
+-                                  MSimdBinaryComp::Operation op, SimdType type);
+-    InliningResult inlineSimdUnary(CallInfo& callInfo, JSNative native,
+-                                   MSimdUnaryArith::Operation op, SimdType type);
+-    InliningResult inlineSimdExtractLane(CallInfo& callInfo, JSNative native, SimdType type);
+-    InliningResult inlineSimdReplaceLane(CallInfo& callInfo, JSNative native, SimdType type);
+-    InliningResult inlineSimdSplat(CallInfo& callInfo, JSNative native, SimdType type);
+-    InliningResult inlineSimdShuffle(CallInfo& callInfo, JSNative native, SimdType type,
+-                                     unsigned numVectors);
+-    InliningResult inlineSimdCheck(CallInfo& callInfo, JSNative native, SimdType type);
+-    InliningResult inlineSimdConvert(CallInfo& callInfo, JSNative native, bool isCast,
+-                                     SimdType from, SimdType to);
+-    InliningResult inlineSimdSelect(CallInfo& callInfo, JSNative native, SimdType type);
+-
+-    bool prepareForSimdLoadStore(CallInfo& callInfo, Scalar::Type simdType,
+-                                 MInstruction** elements, MDefinition** index,
+-                                 Scalar::Type* arrayType);
+-    InliningResult inlineSimdLoad(CallInfo& callInfo, JSNative native, SimdType type,
+-                                  unsigned numElems);
+-    InliningResult inlineSimdStore(CallInfo& callInfo, JSNative native, SimdType type,
+-                                   unsigned numElems);
+-
+-    InliningResult inlineSimdAnyAllTrue(CallInfo& callInfo, bool IsAllTrue, JSNative native,
+-                                        SimdType type);
+-
+     // Utility intrinsics.
+     InliningResult inlineIsCallable(CallInfo& callInfo);
+     InliningResult inlineIsConstructor(CallInfo& callInfo);
+     InliningResult inlineIsObject(CallInfo& callInfo);
+     InliningResult inlineToObject(CallInfo& callInfo);
+     InliningResult inlineIsWrappedArrayConstructor(CallInfo& callInfo);
+     InliningResult inlineToInteger(CallInfo& callInfo);
+     InliningResult inlineToString(CallInfo& callInfo);
+diff --git a/js/src/jit/IonOptimizationLevels.cpp b/js/src/jit/IonOptimizationLevels.cpp
+--- a/js/src/jit/IonOptimizationLevels.cpp
++++ b/js/src/jit/IonOptimizationLevels.cpp
+@@ -22,17 +22,16 @@ const uint32_t OptimizationInfo::Compile
+ 
+ void
+ OptimizationInfo::initNormalOptimizationInfo()
+ {
+     level_ = OptimizationLevel::Normal;
+ 
+     autoTruncate_ = true;
+     eaa_ = true;
+-    eagerSimdUnbox_ = true;
+     edgeCaseAnalysis_ = true;
+     eliminateRedundantChecks_ = true;
+     inlineInterpreted_ = true;
+     inlineNative_ = true;
+     licm_ = true;
+     loopUnrolling_ = true;
+     gvn_ = true;
+     rangeAnalysis_ = true;
+@@ -64,17 +63,16 @@ OptimizationInfo::initWasmOptimizationIn
+ 
+     // Take normal option values for not specified values.
+     initNormalOptimizationInfo();
+ 
+     level_ = OptimizationLevel::Wasm;
+ 
+     ama_ = true;
+     autoTruncate_ = false;
+-    eagerSimdUnbox_ = false;           // wasm has no boxing / unboxing.
+     edgeCaseAnalysis_ = false;
+     eliminateRedundantChecks_ = false;
+     scalarReplacement_ = false;        // wasm has no objects.
+     sincos_ = false;
+     sink_ = false;
+ }
+ 
+ uint32_t
+diff --git a/js/src/jit/IonOptimizationLevels.h b/js/src/jit/IonOptimizationLevels.h
+--- a/js/src/jit/IonOptimizationLevels.h
++++ b/js/src/jit/IonOptimizationLevels.h
+@@ -60,19 +60,16 @@ class OptimizationInfo
+     bool eliminateRedundantChecks_;
+ 
+     // Toggles whether interpreted scripts get inlined.
+     bool inlineInterpreted_;
+ 
+     // Toggles whether native scripts get inlined.
+     bool inlineNative_;
+ 
+-    // Toggles whether eager unboxing of SIMD is used.
+-    bool eagerSimdUnbox_;
+-
+     // Toggles whether global value numbering is used.
+     bool gvn_;
+ 
+     // Toggles whether loop invariant code motion is performed.
+     bool licm_;
+ 
+     // Toggles whether Range Analysis is used.
+     bool rangeAnalysis_;
+@@ -152,17 +149,16 @@ class OptimizationInfo
+     constexpr OptimizationInfo()
+       : level_(OptimizationLevel::Normal),
+         eaa_(false),
+         ama_(false),
+         edgeCaseAnalysis_(false),
+         eliminateRedundantChecks_(false),
+         inlineInterpreted_(false),
+         inlineNative_(false),
+-        eagerSimdUnbox_(false),
+         gvn_(false),
+         licm_(false),
+         rangeAnalysis_(false),
+         loopUnrolling_(false),
+         reordering_(false),
+         autoTruncate_(false),
+         sincos_(false),
+         sink_(false),
+@@ -193,20 +189,16 @@ class OptimizationInfo
+     }
+ 
+     bool inlineNative() const {
+         return inlineNative_ && !JitOptions.disableInlining;
+     }
+ 
+     uint32_t compilerWarmUpThreshold(JSScript* script, jsbytecode* pc = nullptr) const;
+ 
+-    bool eagerSimdUnboxEnabled() const {
+-        return eagerSimdUnbox_ && !JitOptions.disableEagerSimdUnbox;
+-    }
+-
+     bool gvnEnabled() const {
+         return gvn_ && !JitOptions.disableGvn;
+     }
+ 
+     bool licmEnabled() const {
+         return licm_ && !JitOptions.disableLicm;
+     }
+ 
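Note on the accessors above: each optimization is gated twice, by a per-level flag chosen in initNormalOptimizationInfo() and by a global JitOptions kill switch, and a pass runs only when both agree; eagerSimdUnboxEnabled() was one such gate. A condensed sketch of the pattern (Options and OptInfo are stand-ins, not the real classes; requires C++17 for the inline variable):

    struct Options { bool disableGvn = false; };  // set from flags/env at startup
    inline Options gJitOptions;

    class OptInfo {
        bool gvn_ = true;  // chosen by the optimization level
      public:
        // Enabled only if the level turns it on AND it is not globally disabled.
        bool gvnEnabled() const { return gvn_ && !gJitOptions.disableGvn; }
    };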
+diff --git a/js/src/jit/IonTypes.h b/js/src/jit/IonTypes.h
+--- a/js/src/jit/IonTypes.h
++++ b/js/src/jit/IonTypes.h
+@@ -99,19 +99,16 @@ enum BailoutKind
+     // Unbox expects a given type, bails out if it doesn't get it.
+     Bailout_NonInt32Input,
+     Bailout_NonNumericInput, // unboxing a double works with int32 too
+     Bailout_NonBooleanInput,
+     Bailout_NonObjectInput,
+     Bailout_NonStringInput,
+     Bailout_NonSymbolInput,
+ 
+-    // SIMD Unbox expects a given type, bails out if it doesn't match.
+-    Bailout_UnexpectedSimdInput,
+-
+     // Atomic operations require shared memory, bail out if the typed array
+     // maps unshared memory.
+     Bailout_NonSharedTypedArrayInput,
+ 
+     // We hit a |debugger;| statement.
+     Bailout_Debugger,
+ 
+     // |this| used uninitialized in a derived constructor
+@@ -203,18 +200,16 @@ BailoutKindString(BailoutKind kind)
+       case Bailout_NonBooleanInput:
+         return "Bailout_NonBooleanInput";
+       case Bailout_NonObjectInput:
+         return "Bailout_NonObjectInput";
+       case Bailout_NonStringInput:
+         return "Bailout_NonStringInput";
+       case Bailout_NonSymbolInput:
+         return "Bailout_NonSymbolInput";
+-      case Bailout_UnexpectedSimdInput:
+-        return "Bailout_UnexpectedSimdInput";
+       case Bailout_NonSharedTypedArrayInput:
+         return "Bailout_NonSharedTypedArrayInput";
+       case Bailout_Debugger:
+         return "Bailout_Debugger";
+       case Bailout_UninitializedThis:
+         return "Bailout_UninitializedThis";
+       case Bailout_BadDerivedConstructorReturn:
+         return "Bailout_BadDerivedConstructorReturn";
+@@ -247,16 +242,29 @@ BailoutKindString(BailoutKind kind)
+ 
+ static const uint32_t ELEMENT_TYPE_BITS = 5;
+ static const uint32_t ELEMENT_TYPE_SHIFT = 0;
+ static const uint32_t ELEMENT_TYPE_MASK = (1 << ELEMENT_TYPE_BITS) - 1;
+ static const uint32_t VECTOR_SCALE_BITS = 3;
+ static const uint32_t VECTOR_SCALE_SHIFT = ELEMENT_TYPE_BITS + ELEMENT_TYPE_SHIFT;
+ static const uint32_t VECTOR_SCALE_MASK = (1 << VECTOR_SCALE_BITS) - 1;
+ 
++// The integer SIMD types have a lot of operations that do the exact same thing
++// for signed and unsigned integer types. Sometimes it is simpler to treat
++// signed and unsigned integer SIMD types as the same type, using a SimdSign to
++// distinguish the few cases where there is a difference.
++enum class SimdSign {
++    // Signedness is not applicable to this type. (i.e., Float or Bool).
++    NotApplicable,
++    // Treat as an unsigned integer with a range 0 .. 2^N-1.
++    Unsigned,
++    // Treat as a signed integer in two's complement encoding.
++    Signed,
++};
++
+ class SimdConstant {
+   public:
+     enum Type {
+         Int8x16,
+         Int16x8,
+         Int32x4,
+         Float32x4,
+         Undefined = -1
+@@ -452,49 +460,16 @@ enum class MIRType
+ };
+ 
+ static inline bool
+ IsSimdType(MIRType type)
+ {
+     return ((unsigned(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK) != 0;
+ }
+ 
+-// Returns the number of vector elements (herein called "length") for a given
+-// SIMD kind. It is the Y part of the name "Foo x Y".
+-static inline unsigned
+-SimdTypeToLength(MIRType type)
+-{
+-    MOZ_ASSERT(IsSimdType(type));
+-    return 1 << ((unsigned(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK);
+-}
+-
+-// Get the type of the individual lanes in a SIMD type.
+-// For example, Int32x4 -> Int32, Float32x4 -> Float32 etc.
+-static inline MIRType
+-SimdTypeToLaneType(MIRType type)
+-{
+-    MOZ_ASSERT(IsSimdType(type));
+-    static_assert(unsigned(MIRType::Last) <= ELEMENT_TYPE_MASK,
+-                  "ELEMENT_TYPE_MASK should be larger than the last MIRType");
+-    return MIRType((unsigned(type) >> ELEMENT_TYPE_SHIFT) & ELEMENT_TYPE_MASK);
+-}
+-
+-// Get the type expected when inserting a lane into a SIMD type.
+-// This is the argument type expected by the MSimdValue constructors as well as
+-// MSimdSplat and MSimdInsertElement.
+-static inline MIRType
+-SimdTypeToLaneArgumentType(MIRType type)
+-{
+-    MIRType laneType = SimdTypeToLaneType(type);
+-
+-    // Boolean lanes should be pre-converted to an Int32 with the values 0 or -1.
+-    // All other lane types are inserted directly.
+-    return laneType == MIRType::Boolean ? MIRType::Int32 : laneType;
+-}
+-
+ static inline MIRType
+ MIRTypeFromValueType(JSValueType type)
+ {
+     // This function does not deal with magic types. Magic constants should be
+     // filtered out in MIRTypeFromValue.
+     switch (type) {
+       case JSVAL_TYPE_DOUBLE:
+         return MIRType::Double;
+@@ -685,34 +660,16 @@ IsFloatingPointType(MIRType type)
+ 
+ static inline bool
+ IsNullOrUndefined(MIRType type)
+ {
+     return type == MIRType::Null || type == MIRType::Undefined;
+ }
+ 
+ static inline bool
+-IsFloatingPointSimdType(MIRType type)
+-{
+-    return type == MIRType::Float32x4;
+-}
+-
+-static inline bool
+-IsIntegerSimdType(MIRType type)
+-{
+-    return IsSimdType(type) && SimdTypeToLaneType(type) == MIRType::Int32;
+-}
+-
+-static inline bool
+-IsBooleanSimdType(MIRType type)
+-{
+-    return IsSimdType(type) && SimdTypeToLaneType(type) == MIRType::Boolean;
+-}
+-
+-static inline bool
+ IsMagicType(MIRType type)
+ {
+     return type == MIRType::MagicHole ||
+            type == MIRType::MagicOptimizedOut ||
+            type == MIRType::MagicIsConstructing ||
+            type == MIRType::MagicOptimizedArguments ||
+            type == MIRType::MagicUninitializedLexical;
+ }
+@@ -730,28 +687,20 @@ ScalarTypeToMIRType(Scalar::Type type)
+       case Scalar::Uint8Clamped:
+         return MIRType::Int32;
+       case Scalar::Int64:
+         return MIRType::Int64;
+       case Scalar::Float32:
+         return MIRType::Float32;
+       case Scalar::Float64:
+         return MIRType::Double;
+-      case Scalar::Float32x4:
+-        return MIRType::Float32x4;
+-      case Scalar::Int8x16:
+-        return MIRType::Int8x16;
+-      case Scalar::Int16x8:
+-        return MIRType::Int16x8;
+-      case Scalar::Int32x4:
+-        return MIRType::Int32x4;
+       case Scalar::MaxTypedArrayViewType:
+         break;
+     }
+-    MOZ_CRASH("unexpected SIMD kind");
++    MOZ_CRASH("unexpected kind");
+ }
+ 
+ static inline unsigned
+ ScalarTypeToLength(Scalar::Type type)
+ {
+     switch (type) {
+       case Scalar::Int8:
+       case Scalar::Uint8:
+@@ -759,27 +708,20 @@ ScalarTypeToLength(Scalar::Type type)
+       case Scalar::Uint16:
+       case Scalar::Int32:
+       case Scalar::Uint32:
+       case Scalar::Int64:
+       case Scalar::Float32:
+       case Scalar::Float64:
+       case Scalar::Uint8Clamped:
+         return 1;
+-      case Scalar::Float32x4:
+-      case Scalar::Int32x4:
+-        return 4;
+-      case Scalar::Int16x8:
+-        return 8;
+-      case Scalar::Int8x16:
+-        return 16;
+       case Scalar::MaxTypedArrayViewType:
+         break;
+     }
+-    MOZ_CRASH("unexpected SIMD kind");
++    MOZ_CRASH("unexpected kind");
+ }
+ 
+ static inline const char*
+ PropertyNameToExtraName(PropertyName* name)
+ {
+     JS::AutoCheckCannotGC nogc;
+     if (!name->hasLatin1Chars())
+         return nullptr;
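Note on the IonTypes.h hunks above: IsSimdType() and the removed SimdTypeToLength() depend on how MIRType packs a lane type into the low ELEMENT_TYPE_BITS bits and log2 of the lane count into the VECTOR_SCALE bits above them, so a zero scale field marks a scalar type. A standalone sketch of the encoding (the numeric lane-type codes below are invented for illustration):

    #include <cstdint>

    constexpr uint32_t ELEMENT_TYPE_BITS = 5;
    constexpr uint32_t ELEMENT_TYPE_MASK = (1u << ELEMENT_TYPE_BITS) - 1;
    constexpr uint32_t VECTOR_SCALE_SHIFT = ELEMENT_TYPE_BITS;
    constexpr uint32_t VECTOR_SCALE_MASK = (1u << 3) - 1;

    constexpr uint32_t pack(uint32_t laneType, uint32_t log2Lanes) {
        return (laneType & ELEMENT_TYPE_MASK) | (log2Lanes << VECTOR_SCALE_SHIFT);
    }
    constexpr bool isSimd(uint32_t t) {
        return ((t >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK) != 0;
    }
    constexpr unsigned lanes(uint32_t t) {
        return 1u << ((t >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK);
    }

    // Hypothetical lane-type codes: Int32 = 7, Int8 = 5.
    static_assert(lanes(pack(7, 2)) == 4,  "an Int32x4-style type has 4 lanes");
    static_assert(lanes(pack(5, 4)) == 16, "an Int8x16-style type has 16 lanes");
    static_assert(!isSimd(pack(7, 0)),     "scale 0 means a scalar type");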
+diff --git a/js/src/jit/JSJitFrameIter.h b/js/src/jit/JSJitFrameIter.h
+--- a/js/src/jit/JSJitFrameIter.h
++++ b/js/src/jit/JSJitFrameIter.h
+@@ -403,19 +403,17 @@ struct MaybeReadFallback
+ 
+     NoGCValue noGCPlaceholder(const Value& v) const {
+         if (v.isMagic(JS_OPTIMIZED_OUT))
+             return NoGC_MagicOptimizedOut;
+         return NoGC_UndefinedValue;
+     }
+ };
+ 
+-
+ class RResumePoint;
+-class RSimdBox;
+ 
+ // Reads frame information in snapshot-encoding order (that is, outermost frame
+ // to innermost frame).
+ class SnapshotIterator
+ {
+   protected:
+     SnapshotReader snapshot_;
+     RecoverReader recover_;
+@@ -467,17 +465,16 @@ class SnapshotIterator
+     Value fromInstructionResult(uint32_t index) const;
+ 
+     Value allocationValue(const RValueAllocation& a, ReadMethod rm = RM_Normal);
+     MOZ_MUST_USE bool allocationReadable(const RValueAllocation& a, ReadMethod rm = RM_Normal);
+     void writeAllocationValuePayload(const RValueAllocation& a, const Value& v);
+     void warnUnreadableAllocation();
+ 
+   private:
+-    friend class RSimdBox;
+     const FloatRegisters::RegisterContent* floatAllocationPointer(const RValueAllocation& a) const;
+ 
+   public:
+     // Handle iterating over RValueAllocations of the snapshots.
+     inline RValueAllocation readAllocation() {
+         MOZ_ASSERT(moreAllocations());
+         return snapshot_.readAllocation();
+     }
+diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
+--- a/js/src/jit/JitOptions.cpp
++++ b/js/src/jit/JitOptions.cpp
+@@ -81,19 +81,16 @@ DefaultJitOptions::DefaultJitOptions()
+     SET_DEFAULT(disableInlineBacktracking, false);
+ 
+     // Toggles whether Alignment Mask Analysis is globally disabled.
+     SET_DEFAULT(disableAma, false);
+ 
+     // Toggles whether Effective Address Analysis is globally disabled.
+     SET_DEFAULT(disableEaa, false);
+ 
+-    // Toggles whether eager simd unboxing is globally disabled.
+-    SET_DEFAULT(disableEagerSimdUnbox, false);
+-
+     // Toggles whether Edge Case Analysis is globally disabled.
+     SET_DEFAULT(disableEdgeCaseAnalysis, false);
+ 
+     // Toggles whether global value numbering is globally disabled.
+     SET_DEFAULT(disableGvn, false);
+ 
+     // Toggles whether inlining is globally disabled.
+     SET_DEFAULT(disableInlining, false);
+diff --git a/js/src/jit/JitOptions.h b/js/src/jit/JitOptions.h
+--- a/js/src/jit/JitOptions.h
++++ b/js/src/jit/JitOptions.h
+@@ -45,17 +45,16 @@ struct DefaultJitOptions
+ #ifdef CHECK_OSIPOINT_REGISTERS
+     bool checkOsiPointRegisters;
+ #endif
+     bool checkRangeAnalysis;
+     bool runExtraChecks;
+     bool disableInlineBacktracking;
+     bool disableAma;
+     bool disableEaa;
+-    bool disableEagerSimdUnbox;
+     bool disableEdgeCaseAnalysis;
+     bool disableGvn;
+     bool disableInlining;
+     bool disableLicm;
+     bool disableLoopUnrolling;
+     bool disableOptimizationTracking;
+     bool disablePgo;
+     bool disableInstructionReordering;
+diff --git a/js/src/jit/JitRealm.h b/js/src/jit/JitRealm.h
+--- a/js/src/jit/JitRealm.h
++++ b/js/src/jit/JitRealm.h
+@@ -500,49 +500,28 @@ class JitRealm
+         RegExpMatcher,
+         RegExpSearcher,
+         RegExpTester,
+         Count
+     };
+ 
+     mozilla::EnumeratedArray<StubIndex, StubIndex::Count, ReadBarrieredJitCode> stubs_;
+ 
+-    // The same approach is taken for SIMD template objects.
+-
+-    mozilla::EnumeratedArray<SimdType, SimdType::Count, ReadBarrieredObject> simdTemplateObjects_;
+-
+     JitCode* generateStringConcatStub(JSContext* cx);
+     JitCode* generateRegExpMatcherStub(JSContext* cx);
+     JitCode* generateRegExpSearcherStub(JSContext* cx);
+     JitCode* generateRegExpTesterStub(JSContext* cx);
+ 
+     JitCode* getStubNoBarrier(StubIndex stub, uint32_t* requiredBarriersOut) const {
+         MOZ_ASSERT(CurrentThreadIsIonCompiling());
+         *requiredBarriersOut |= 1 << uint32_t(stub);
+         return stubs_[stub].unbarrieredGet();
+     }
+ 
+   public:
+-    JSObject* getSimdTemplateObjectFor(JSContext* cx, Handle<SimdTypeDescr*> descr) {
+-        ReadBarrieredObject& tpl = simdTemplateObjects_[descr->type()];
+-        if (!tpl)
+-            tpl.set(TypedObject::createZeroed(cx, descr, gc::TenuredHeap));
+-        return tpl.get();
+-    }
+-
+-    JSObject* maybeGetSimdTemplateObjectFor(SimdType type) const {
+-        // This function is used by Eager Simd Unbox phase which can run
+-        // off-thread, so we cannot use the usual read barrier. For more
+-        // information, see the comment above
+-        // CodeGenerator::simdRefreshTemplatesDuringLink_.
+-
+-        MOZ_ASSERT(CurrentThreadIsIonCompiling());
+-        return simdTemplateObjects_[type].unbarrieredGet();
+-    }
+-
+     JitCode* getStubCode(uint32_t key) {
+         ICStubCodeMap::Ptr p = stubCodes_->lookup(key);
+         if (p)
+             return p->value();
+         return nullptr;
+     }
+     MOZ_MUST_USE bool putStubCode(JSContext* cx, uint32_t key, Handle<JitCode*> stubCode) {
+         MOZ_ASSERT(stubCode);
+@@ -613,25 +592,23 @@ class JitRealm
+ 
+     MOZ_MUST_USE bool ensureRegExpTesterStubExists(JSContext* cx) {
+         if (stubs_[RegExpTester])
+             return true;
+         stubs_[RegExpTester] = generateRegExpTesterStub(cx);
+         return stubs_[RegExpTester];
+     }
+ 
+-    // Perform the necessary read barriers on stubs and SIMD template object
+-    // described by the bitmasks passed in. This function can only be called
+-    // from the main thread.
++    // Perform the necessary read barriers on stubs described by the bitmasks
++    // passed in. This function can only be called from the main thread.
+     //
+-    // The stub and template object pointers must still be valid by the time
+-    // these methods are called. This is arranged by cancelling off-thread Ion
+-    // compilation at the start of GC and at the start of sweeping.
++    // The stub pointers must still be valid by the time these methods are
++    // called. This is arranged by cancelling off-thread Ion compilation at the
++    // start of GC and at the start of sweeping.
+     void performStubReadBarriers(uint32_t stubsToBarrier) const;
+-    void performSIMDTemplateReadBarriers(uint32_t simdTemplatesToBarrier) const;
+ 
+     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ 
+     bool stringsCanBeInNursery;
+ };
+ 
+ // Called from Zone::discardJitCode().
+ void InvalidateAll(FreeOp* fop, JS::Zone* zone);
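Note on the JitRealm hunks above: performStubReadBarriers(), like the performSIMDTemplateReadBarriers() this patch deletes, walks a bitmask of pending barriers by repeatedly popping the lowest set bit. A self-contained sketch of that loop (popNextBitmaskValue below is a stand-in built on a GCC/Clang builtin, not the SpiderMonkey helper):

    #include <cstdint>

    // Pop the index of the lowest set bit and clear it from the mask.
    inline uint32_t popNextBitmaskValue(uint32_t* bitmask) {
        uint32_t index = __builtin_ctz(*bitmask);  // count trailing zeros
        *bitmask &= *bitmask - 1;                  // clear the lowest set bit
        return index;
    }

    void performBarriers(uint32_t stubsToBarrier) {
        while (stubsToBarrier) {
            uint32_t stub = popNextBitmaskValue(&stubsToBarrier);
            // ... the barriered get() on entry 'stub' would happen here ...
            (void)stub;
        }
    }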
+diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
+--- a/js/src/jit/Lowering.cpp
++++ b/js/src/jit/Lowering.cpp
+@@ -3666,18 +3666,17 @@ void
+ LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins)
+ {
+     MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
+     MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ 
+     const LUse elements = useRegister(ins->elements());
+     const LAllocation index = useRegisterOrConstant(ins->index());
+ 
+-    MOZ_ASSERT(IsNumberType(ins->type()) || IsSimdType(ins->type()) ||
+-               ins->type() == MIRType::Boolean);
++    MOZ_ASSERT(IsNumberType(ins->type()) || ins->type() == MIRType::Boolean);
+ 
+     // We need a temp register for Uint32Array with known double result.
+     LDefinition tempDef = LDefinition::BogusTemp();
+     if (ins->readType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+         tempDef = temp();
+ 
+     Synchronization sync = Synchronization::Load();
+     if (ins->requiresMemoryBarrier()) {
+@@ -3748,22 +3747,17 @@ LIRGenerator::visitLoadTypedArrayElement
+ }
+ 
+ void
+ LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins)
+ {
+     MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
+     MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+ 
+-    if (ins->isSimdWrite()) {
+-        MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32x4, ins->value()->type() == MIRType::Float32x4);
+-        MOZ_ASSERT_IF(ins->writeType() == Scalar::Int8x16, ins->value()->type() == MIRType::Int8x16);
+-        MOZ_ASSERT_IF(ins->writeType() == Scalar::Int16x8, ins->value()->type() == MIRType::Int16x8);
+-        MOZ_ASSERT_IF(ins->writeType() == Scalar::Int32x4, ins->value()->type() == MIRType::Int32x4);
+-    } else if (ins->isFloatWrite()) {
++    if (ins->isFloatWrite()) {
+         MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32, ins->value()->type() == MIRType::Float32);
+         MOZ_ASSERT_IF(ins->writeType() == Scalar::Float64, ins->value()->type() == MIRType::Double);
+     } else {
+         MOZ_ASSERT(ins->value()->type() == MIRType::Int32);
+     }
+ 
+     LUse elements = useRegister(ins->elements());
+     LAllocation index = useRegisterOrConstant(ins->index());
+@@ -4687,17 +4681,17 @@ LIRGenerator::visitWasmParameter(MWasmPa
+ #if defined(JS_NUNBOX32)
+             LInt64Allocation(LArgument(abi.offsetFromArgBase() + INT64HIGH_OFFSET),
+                              LArgument(abi.offsetFromArgBase() + INT64LOW_OFFSET))
+ #else
+             LInt64Allocation(LArgument(abi.offsetFromArgBase()))
+ #endif
+         );
+     } else {
+-        MOZ_ASSERT(IsNumberType(ins->type()) || IsSimdType(ins->type()));
++        MOZ_ASSERT(IsNumberType(ins->type()));
+         defineFixed(new(alloc()) LWasmParameter, ins, LArgument(abi.offsetFromArgBase()));
+     }
+ }
+ 
+ void
+ LIRGenerator::visitWasmReturn(MWasmReturn* ins)
+ {
+     MDefinition* rval = ins->getOperand(0);
+@@ -4707,18 +4701,16 @@ LIRGenerator::visitWasmReturn(MWasmRetur
+         return;
+     }
+ 
+     LWasmReturn* lir = new(alloc()) LWasmReturn;
+     if (rval->type() == MIRType::Float32)
+         lir->setOperand(0, useFixed(rval, ReturnFloat32Reg));
+     else if (rval->type() == MIRType::Double)
+         lir->setOperand(0, useFixed(rval, ReturnDoubleReg));
+-    else if (IsSimdType(rval->type()))
+-        lir->setOperand(0, useFixed(rval, ReturnSimd128Reg));
+     else if (rval->type() == MIRType::Int32)
+         lir->setOperand(0, useFixed(rval, ReturnReg));
+     else
+         MOZ_CRASH("Unexpected wasm return type");
+ 
+     add(lir);
+ }
+ 
+@@ -4728,17 +4720,17 @@ LIRGenerator::visitWasmReturnVoid(MWasmR
+     add(new(alloc()) LWasmReturnVoid);
+ }
+ 
+ void
+ LIRGenerator::visitWasmStackArg(MWasmStackArg* ins)
+ {
+     if (ins->arg()->type() == MIRType::Int64) {
+         add(new(alloc()) LWasmStackArgI64(useInt64RegisterOrConstantAtStart(ins->arg())), ins);
+-    } else if (IsFloatingPointType(ins->arg()->type()) || IsSimdType(ins->arg()->type())) {
++    } else if (IsFloatingPointType(ins->arg()->type())) {
+         MOZ_ASSERT(!ins->arg()->isEmittedAtUses());
+         add(new(alloc()) LWasmStackArg(useRegisterAtStart(ins->arg())), ins);
+     } else {
+         add(new(alloc()) LWasmStackArg(useRegisterOrConstantAtStart(ins->arg())), ins);
+     }
+ }
+ 
+ template <typename LClass>
+@@ -4864,227 +4856,16 @@ void
+ LIRGenerator::visitRecompileCheck(MRecompileCheck* ins)
+ {
+     LRecompileCheck* lir = new(alloc()) LRecompileCheck(temp());
+     add(lir, ins);
+     assignSafepoint(lir, ins);
+ }
+ 
+ void
+-LIRGenerator::visitSimdBox(MSimdBox* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->input()->type()));
+-    LUse in = useRegister(ins->input());
+-    LSimdBox* lir = new(alloc()) LSimdBox(in, temp());
+-    define(lir, ins);
+-    assignSafepoint(lir, ins);
+-}
+-
+-void
+-LIRGenerator::visitSimdUnbox(MSimdUnbox* ins)
+-{
+-    MOZ_ASSERT(ins->input()->type() == MIRType::Object);
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-    LUse in = useRegister(ins->input());
+-    LSimdUnbox* lir = new(alloc()) LSimdUnbox(in, temp());
+-    assignSnapshot(lir, Bailout_UnexpectedSimdInput);
+-    define(lir, ins);
+-}
+-
+-void
+-LIRGenerator::visitSimdConstant(MSimdConstant* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-
+-    switch (ins->type()) {
+-      case MIRType::Int8x16:
+-      case MIRType::Int16x8:
+-      case MIRType::Int32x4:
+-      case MIRType::Bool8x16:
+-      case MIRType::Bool16x8:
+-      case MIRType::Bool32x4:
+-        define(new(alloc()) LSimd128Int(), ins);
+-        break;
+-      case MIRType::Float32x4:
+-        define(new(alloc()) LSimd128Float(), ins);
+-        break;
+-      default:
+-        MOZ_CRASH("Unknown SIMD kind when generating constant");
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdConvert(MSimdConvert* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-    MDefinition* input = ins->input();
+-    LUse use = useRegister(input);
+-    if (ins->type() == MIRType::Int32x4) {
+-        MOZ_ASSERT(input->type() == MIRType::Float32x4);
+-        switch (ins->signedness()) {
+-          case SimdSign::Signed: {
+-              LFloat32x4ToInt32x4* lir = new(alloc()) LFloat32x4ToInt32x4(use, temp());
+-              if (!gen->compilingWasm())
+-                  assignSnapshot(lir, Bailout_BoundsCheck);
+-              define(lir, ins);
+-              break;
+-          }
+-          case SimdSign::Unsigned: {
+-              LFloat32x4ToUint32x4* lir =
+-                new (alloc()) LFloat32x4ToUint32x4(use, temp(), temp(LDefinition::SIMD128INT));
+-              if (!gen->compilingWasm())
+-                  assignSnapshot(lir, Bailout_BoundsCheck);
+-              define(lir, ins);
+-              break;
+-          }
+-          default:
+-            MOZ_CRASH("Unexpected SimdConvert sign");
+-        }
+-    } else if (ins->type() == MIRType::Float32x4) {
+-        MOZ_ASSERT(input->type() == MIRType::Int32x4);
+-        MOZ_ASSERT(ins->signedness() == SimdSign::Signed, "Unexpected SimdConvert sign");
+-        define(new(alloc()) LInt32x4ToFloat32x4(use), ins);
+-    } else {
+-        MOZ_CRASH("Unknown SIMD kind when generating constant");
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdReinterpretCast(MSimdReinterpretCast* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->type()) && IsSimdType(ins->input()->type()));
+-    MDefinition* input = ins->input();
+-    LUse use = useRegisterAtStart(input);
+-    // :TODO: (Bug 1132894) We have to allocate a different register as redefine
+-    // and/or defineReuseInput are not yet capable of reusing the same register
+-    // with a different register type.
+-    define(new(alloc()) LSimdReinterpretCast(use), ins);
+-}
+-
+-void
+-LIRGenerator::visitSimdAllTrue(MSimdAllTrue* ins)
+-{
+-    MDefinition* input = ins->input();
+-    MOZ_ASSERT(IsBooleanSimdType(input->type()));
+-
+-    LUse use = useRegisterAtStart(input);
+-    define(new(alloc()) LSimdAllTrue(use), ins);
+-}
+-
+-void
+-LIRGenerator::visitSimdAnyTrue(MSimdAnyTrue* ins)
+-{
+-    MDefinition* input = ins->input();
+-    MOZ_ASSERT(IsBooleanSimdType(input->type()));
+-
+-    LUse use = useRegisterAtStart(input);
+-    define(new(alloc()) LSimdAnyTrue(use), ins);
+-}
+-
+-void
+-LIRGenerator::visitSimdUnaryArith(MSimdUnaryArith* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->input()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-
+-    // Cannot be at start, as the output is used as a temporary to store values.
+-    LUse in = use(ins->input());
+-
+-    switch (ins->type()) {
+-      case MIRType::Int8x16:
+-      case MIRType::Bool8x16:
+-        define(new (alloc()) LSimdUnaryArithIx16(in), ins);
+-        break;
+-      case MIRType::Int16x8:
+-      case MIRType::Bool16x8:
+-        define(new (alloc()) LSimdUnaryArithIx8(in), ins);
+-        break;
+-      case MIRType::Int32x4:
+-      case MIRType::Bool32x4:
+-        define(new (alloc()) LSimdUnaryArithIx4(in), ins);
+-        break;
+-      case MIRType::Float32x4:
+-        define(new (alloc()) LSimdUnaryArithFx4(in), ins);
+-        break;
+-      default:
+-        MOZ_CRASH("Unknown SIMD kind for unary operation");
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdBinaryComp(MSimdBinaryComp* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
+-    MOZ_ASSERT(IsBooleanSimdType(ins->type()));
+-
+-    if (ShouldReorderCommutative(ins->lhs(), ins->rhs(), ins))
+-        ins->reverse();
+-
+-    switch (ins->specialization()) {
+-      case MIRType::Int8x16: {
+-          MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
+-          LSimdBinaryCompIx16* add = new (alloc()) LSimdBinaryCompIx16();
+-          lowerForFPU(add, ins, ins->lhs(), ins->rhs());
+-          return;
+-      }
+-      case MIRType::Int16x8: {
+-          MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
+-          LSimdBinaryCompIx8* add = new (alloc()) LSimdBinaryCompIx8();
+-          lowerForFPU(add, ins, ins->lhs(), ins->rhs());
+-          return;
+-      }
+-      case MIRType::Int32x4: {
+-          MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
+-          LSimdBinaryCompIx4* add = new (alloc()) LSimdBinaryCompIx4();
+-          lowerForCompIx4(add, ins, ins->lhs(), ins->rhs());
+-          return;
+-      }
+-      case MIRType::Float32x4: {
+-          MOZ_ASSERT(ins->signedness() == SimdSign::NotApplicable);
+-          LSimdBinaryCompFx4* add = new (alloc()) LSimdBinaryCompFx4();
+-          lowerForCompFx4(add, ins, ins->lhs(), ins->rhs());
+-          return;
+-      }
+-      default:
+-        MOZ_CRASH("Unknown compare type when comparing values");
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdBinaryBitwise(MSimdBinaryBitwise* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-
+-    MDefinition* lhs = ins->lhs();
+-    MDefinition* rhs = ins->rhs();
+-    ReorderCommutative(&lhs, &rhs, ins);
+-    LSimdBinaryBitwise* lir = new(alloc()) LSimdBinaryBitwise;
+-    lowerForFPU(lir, ins, lhs, rhs);
+-}
+-
+-void
+-LIRGenerator::visitSimdShift(MSimdShift* ins)
+-{
+-    MOZ_ASSERT(IsIntegerSimdType(ins->type()));
+-    MOZ_ASSERT(ins->lhs()->type() == ins->type());
+-    MOZ_ASSERT(ins->rhs()->type() == MIRType::Int32);
+-
+-    LUse vector = useRegisterAtStart(ins->lhs());
+-    LAllocation value = useRegisterOrConstant(ins->rhs());
+-    // We need a temp register to mask the shift amount, but not if the shift
+-    // amount is a constant.
+-    LDefinition tempReg = value.isConstant() ? LDefinition::BogusTemp() : temp();
+-    LSimdShift* lir = new(alloc()) LSimdShift(vector, value, tempReg);
+-    defineReuseInput(lir, ins, 0);
+-}
+-
+-void
+ LIRGenerator::visitLexicalCheck(MLexicalCheck* ins)
+ {
+     MDefinition* input = ins->input();
+     MOZ_ASSERT(input->type() == MIRType::Value);
+     LLexicalCheck* lir = new(alloc()) LLexicalCheck(useBox(input));
+     assignSnapshot(lir, ins->bailoutKind());
+     add(lir, ins);
+     redefine(ins, input);
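Note on the Lowering.cpp hunks above: the removed visitSimdShift() needed a temp register only for variable shift amounts, because the count must be masked to the lane width before the hardware shift, while a constant count can be masked at compile time. A sketch of the wrapping semantics that masking implements, for 32-bit lanes (maskedShiftLeft32 is illustrative, not jit code):

    #include <cstdint>

    // The shift count is reduced modulo the lane width, so shifting a
    // 32-bit lane by 33 behaves like shifting by 1.
    inline uint32_t maskedShiftLeft32(uint32_t lane, uint32_t count) {
        return lane << (count & 31);  // for constants, this mask folds away
    }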
+diff --git a/js/src/jit/MCallOptimize.cpp b/js/src/jit/MCallOptimize.cpp
+--- a/js/src/jit/MCallOptimize.cpp
++++ b/js/src/jit/MCallOptimize.cpp
+@@ -10,17 +10,16 @@
+ 
+ #include "builtin/AtomicsObject.h"
+ #include "builtin/intl/Collator.h"
+ #include "builtin/intl/DateTimeFormat.h"
+ #include "builtin/intl/NumberFormat.h"
+ #include "builtin/intl/PluralRules.h"
+ #include "builtin/intl/RelativeTimeFormat.h"
+ #include "builtin/MapObject.h"
+-#include "builtin/SIMDConstants.h"
+ #include "builtin/String.h"
+ #include "builtin/TestingFunctions.h"
+ #include "builtin/TypedObject.h"
+ #include "jit/BaselineInspector.h"
+ #include "jit/InlinableNatives.h"
+ #include "jit/IonBuilder.h"
+ #include "jit/Lowering.h"
+ #include "jit/MIR.h"
+@@ -253,38 +252,16 @@ IonBuilder::inlineNativeCall(CallInfo& c
+         return inlineObject(callInfo);
+       case InlinableNative::ObjectCreate:
+         return inlineObjectCreate(callInfo);
+       case InlinableNative::ObjectIs:
+         return inlineObjectIs(callInfo);
+       case InlinableNative::ObjectToString:
+         return inlineObjectToString(callInfo);
+ 
+-      // SIMD natives.
+-      case InlinableNative::SimdInt32x4:
+-        return inlineSimd(callInfo, target, SimdType::Int32x4);
+-      case InlinableNative::SimdUint32x4:
+-        return inlineSimd(callInfo, target, SimdType::Uint32x4);
+-      case InlinableNative::SimdInt16x8:
+-        return inlineSimd(callInfo, target, SimdType::Int16x8);
+-      case InlinableNative::SimdUint16x8:
+-        return inlineSimd(callInfo, target, SimdType::Uint16x8);
+-      case InlinableNative::SimdInt8x16:
+-        return inlineSimd(callInfo, target, SimdType::Int8x16);
+-      case InlinableNative::SimdUint8x16:
+-        return inlineSimd(callInfo, target, SimdType::Uint8x16);
+-      case InlinableNative::SimdFloat32x4:
+-        return inlineSimd(callInfo, target, SimdType::Float32x4);
+-      case InlinableNative::SimdBool32x4:
+-        return inlineSimd(callInfo, target, SimdType::Bool32x4);
+-      case InlinableNative::SimdBool16x8:
+-        return inlineSimd(callInfo, target, SimdType::Bool16x8);
+-      case InlinableNative::SimdBool8x16:
+-        return inlineSimd(callInfo, target, SimdType::Bool8x16);
+-
+       // Testing functions.
+       case InlinableNative::TestBailout:
+         return inlineBailout(callInfo);
+       case InlinableNative::TestAssertFloat32:
+         return inlineAssertFloat32(callInfo);
+       case InlinableNative::TestAssertRecoveredOnBailout:
+         return inlineAssertRecoveredOnBailout(callInfo);
+ 
+@@ -460,19 +437,16 @@ IonBuilder::InliningResult
+ IonBuilder::inlineNonFunctionCall(CallInfo& callInfo, JSObject* target)
+ {
+     // Inline a call to a non-function object, invoking the object's call or
+     // construct hook.
+ 
+     if (callInfo.constructing() && target->constructHook() == TypedObject::construct)
+         return inlineConstructTypedObject(callInfo, &target->as<TypeDescr>());
+ 
+-    if (!callInfo.constructing() && target->callHook() == SimdTypeDescr::call)
+-        return inlineConstructSimdObject(callInfo, &target->as<SimdTypeDescr>());
+-
+     return InliningStatus_NotInlined;
+ }
+ 
+ TemporaryTypeSet*
+ IonBuilder::getInlineReturnTypeSet()
+ {
+     return bytecodeTypes(pc);
+ }
+@@ -3826,775 +3800,15 @@ IonBuilder::inlineConstructTypedObject(C
+     MNewTypedObject* ins = MNewTypedObject::New(alloc(), constraints(), templateObject,
+                                                 templateObject->group()->initialHeap(constraints()));
+     current->add(ins);
+     current->push(ins);
+ 
+     return InliningStatus_Inlined;
+ }
+ 
+-// Main entry point for SIMD inlining.
+-// When the controlling simdType is an integer type, sign indicates whether the lanes should
+-// be treated as signed or unsigned integers.
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimd(CallInfo& callInfo, JSFunction* target, SimdType type)
+-{
+-    if (!JitSupportsSimd()) {
+-        trackOptimizationOutcome(TrackedOutcome::NoSimdJitSupport);
+-        return InliningStatus_NotInlined;
+-    }
+-
+-    JSNative native = target->native();
+-    SimdOperation simdOp = SimdOperation(target->jitInfo()->nativeOp);
+-
+-    switch(simdOp) {
+-      case SimdOperation::Constructor:
+-        // SIMD constructor calls are handled via inlineNonFunctionCall(), so
+-        // they won't show up here where target is required to be a JSFunction.
+-        // See also inlineConstructSimdObject().
+-        MOZ_CRASH("SIMD constructor call not expected.");
+-      case SimdOperation::Fn_check:
+-        return inlineSimdCheck(callInfo, native, type);
+-      case SimdOperation::Fn_splat:
+-        return inlineSimdSplat(callInfo, native, type);
+-      case SimdOperation::Fn_extractLane:
+-        return inlineSimdExtractLane(callInfo, native, type);
+-      case SimdOperation::Fn_replaceLane:
+-        return inlineSimdReplaceLane(callInfo, native, type);
+-      case SimdOperation::Fn_select:
+-        return inlineSimdSelect(callInfo, native, type);
+-      case SimdOperation::Fn_swizzle:
+-        return inlineSimdShuffle(callInfo, native, type, 1);
+-      case SimdOperation::Fn_shuffle:
+-        return inlineSimdShuffle(callInfo, native, type, 2);
+-
+-        // Unary arithmetic.
+-      case SimdOperation::Fn_abs:
+-        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::abs, type);
+-      case SimdOperation::Fn_neg:
+-        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::neg, type);
+-      case SimdOperation::Fn_not:
+-        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::not_, type);
+-      case SimdOperation::Fn_reciprocalApproximation:
+-        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::reciprocalApproximation,
+-                               type);
+-      case SimdOperation::Fn_reciprocalSqrtApproximation:
+-        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::reciprocalSqrtApproximation,
+-                               type);
+-      case SimdOperation::Fn_sqrt:
+-        return inlineSimdUnary(callInfo, native, MSimdUnaryArith::sqrt, type);
+-
+-        // Binary arithmetic.
+-      case SimdOperation::Fn_add:
+-        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_add, type);
+-      case SimdOperation::Fn_sub:
+-        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_sub, type);
+-      case SimdOperation::Fn_mul:
+-        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_mul, type);
+-      case SimdOperation::Fn_div:
+-        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_div, type);
+-      case SimdOperation::Fn_max:
+-        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_max, type);
+-      case SimdOperation::Fn_min:
+-        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_min, type);
+-      case SimdOperation::Fn_maxNum:
+-        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_maxNum, type);
+-      case SimdOperation::Fn_minNum:
+-        return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_minNum, type);
+-
+-        // Binary saturating.
+-      case SimdOperation::Fn_addSaturate:
+-        return inlineSimdBinarySaturating(callInfo, native, MSimdBinarySaturating::add, type);
+-      case SimdOperation::Fn_subSaturate:
+-        return inlineSimdBinarySaturating(callInfo, native, MSimdBinarySaturating::sub, type);
+-
+-        // Binary bitwise.
+-      case SimdOperation::Fn_and:
+-        return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::and_, type);
+-      case SimdOperation::Fn_or:
+-        return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::or_, type);
+-      case SimdOperation::Fn_xor:
+-        return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::xor_, type);
+-
+-      // Shifts.
+-      case SimdOperation::Fn_shiftLeftByScalar:
+-        return inlineSimdShift(callInfo, native, MSimdShift::lsh, type);
+-      case SimdOperation::Fn_shiftRightByScalar:
+-        return inlineSimdShift(callInfo, native, MSimdShift::rshForSign(GetSimdSign(type)), type);
+-
+-        // Boolean unary.
+-      case SimdOperation::Fn_allTrue:
+-        return inlineSimdAnyAllTrue(callInfo, /* IsAllTrue= */true, native, type);
+-      case SimdOperation::Fn_anyTrue:
+-        return inlineSimdAnyAllTrue(callInfo, /* IsAllTrue= */false, native, type);
+-
+-        // Comparisons.
+-      case SimdOperation::Fn_lessThan:
+-        return inlineSimdComp(callInfo, native, MSimdBinaryComp::lessThan, type);
+-      case SimdOperation::Fn_lessThanOrEqual:
+-        return inlineSimdComp(callInfo, native, MSimdBinaryComp::lessThanOrEqual, type);
+-      case SimdOperation::Fn_equal:
+-        return inlineSimdComp(callInfo, native, MSimdBinaryComp::equal, type);
+-      case SimdOperation::Fn_notEqual:
+-        return inlineSimdComp(callInfo, native, MSimdBinaryComp::notEqual, type);
+-      case SimdOperation::Fn_greaterThan:
+-        return inlineSimdComp(callInfo, native, MSimdBinaryComp::greaterThan, type);
+-      case SimdOperation::Fn_greaterThanOrEqual:
+-        return inlineSimdComp(callInfo, native, MSimdBinaryComp::greaterThanOrEqual, type);
+-
+-        // Int <-> Float conversions.
+-      case SimdOperation::Fn_fromInt32x4:
+-        return inlineSimdConvert(callInfo, native, false, SimdType::Int32x4, type);
+-      case SimdOperation::Fn_fromUint32x4:
+-        return inlineSimdConvert(callInfo, native, false, SimdType::Uint32x4, type);
+-      case SimdOperation::Fn_fromFloat32x4:
+-        return inlineSimdConvert(callInfo, native, false, SimdType::Float32x4, type);
+-
+-        // Load/store.
+-      case SimdOperation::Fn_load:
+-        return inlineSimdLoad(callInfo, native, type, GetSimdLanes(type));
+-      case SimdOperation::Fn_load1:
+-        return inlineSimdLoad(callInfo, native, type, 1);
+-      case SimdOperation::Fn_load2:
+-        return inlineSimdLoad(callInfo, native, type, 2);
+-      case SimdOperation::Fn_load3:
+-        return inlineSimdLoad(callInfo, native, type, 3);
+-      case SimdOperation::Fn_store:
+-        return inlineSimdStore(callInfo, native, type, GetSimdLanes(type));
+-      case SimdOperation::Fn_store1:
+-        return inlineSimdStore(callInfo, native, type, 1);
+-      case SimdOperation::Fn_store2:
+-        return inlineSimdStore(callInfo, native, type, 2);
+-      case SimdOperation::Fn_store3:
+-        return inlineSimdStore(callInfo, native, type, 3);
+-
+-        // Bitcasts. One for each type with a memory representation.
+-      case SimdOperation::Fn_fromInt32x4Bits:
+-        return inlineSimdConvert(callInfo, native, true, SimdType::Int32x4, type);
+-      case SimdOperation::Fn_fromUint32x4Bits:
+-        return inlineSimdConvert(callInfo, native, true, SimdType::Uint32x4, type);
+-      case SimdOperation::Fn_fromInt16x8Bits:
+-        return inlineSimdConvert(callInfo, native, true, SimdType::Int16x8, type);
+-      case SimdOperation::Fn_fromUint16x8Bits:
+-        return inlineSimdConvert(callInfo, native, true, SimdType::Uint16x8, type);
+-      case SimdOperation::Fn_fromInt8x16Bits:
+-        return inlineSimdConvert(callInfo, native, true, SimdType::Int8x16, type);
+-      case SimdOperation::Fn_fromUint8x16Bits:
+-        return inlineSimdConvert(callInfo, native, true, SimdType::Uint8x16, type);
+-      case SimdOperation::Fn_fromFloat32x4Bits:
+-        return inlineSimdConvert(callInfo, native, true, SimdType::Float32x4, type);
+-      case SimdOperation::Fn_fromFloat64x2Bits:
+-        return InliningStatus_NotInlined;
+-    }
+-
+-    MOZ_CRASH("Unexpected SIMD opcode");
+-}
+-
+-// The representation of boolean SIMD vectors is the same as the corresponding
+-// integer SIMD vectors with -1 lanes meaning true and 0 lanes meaning false.
+-//
+-// Functions that set the value of a boolean vector lane work by applying
+-// ToBoolean on the input argument, so they accept any argument type, just like
+-// the MNot and MTest instructions.
+-//
+-// Convert any scalar value into an appropriate SIMD lane value: An Int32 value
+-// that is either 0 for false or -1 for true.
+-MDefinition*
+-IonBuilder::convertToBooleanSimdLane(MDefinition* scalar)
+-{
+-    MSub* result;
+-
+-    if (scalar->type() == MIRType::Boolean) {
+-        // The input scalar is already a boolean with the int32 values 0 / 1.
+-        // Compute result = 0 - scalar.
+-        result = MSub::New(alloc(), constant(Int32Value(0)), scalar);
+-    } else {
+-        // For any other type, let MNot handle the conversion to boolean.
+-        // Compute result = !scalar - 1.
+-        MNot* inv = MNot::New(alloc(), scalar);
+-        current->add(inv);
+-        result = MSub::New(alloc(), inv, constant(Int32Value(1)));
+-    }
+-
+-    result->setInt32Specialization();
+-    current->add(result);
+-    return result;
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineConstructSimdObject(CallInfo& callInfo, SimdTypeDescr* descr)
+-{
+-    if (!JitSupportsSimd()) {
+-        trackOptimizationOutcome(TrackedOutcome::NoSimdJitSupport);
+-        return InliningStatus_NotInlined;
+-    }
+-
+-    // Generic constructor of SIMD valuesX4.
+-    MIRType simdType;
+-    if (!MaybeSimdTypeToMIRType(descr->type(), &simdType)) {
+-        trackOptimizationOutcome(TrackedOutcome::SimdTypeNotOptimized);
+-        return InliningStatus_NotInlined;
+-    }
+-
+-    // Take the templateObject out of Baseline ICs, such that we can box
+-    // SIMD value types in the same kind of objects.
+-    MOZ_ASSERT(InlineTypedObject::canAccommodateType(descr));
+-    MOZ_ASSERT(descr->getClass() == &SimdTypeDescr::class_,
+-               "getTemplateObjectForSimdCtor needs an update");
+-
+-    JSObject* templateObject = inspector->getTemplateObjectForSimdCtor(pc, descr->type());
+-    if (!templateObject)
+-        return InliningStatus_NotInlined;
+-
+-    // The previous assertion ensures this will never fail if we were able to
+-    // allocate a templateObject in Baseline.
+-    InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
+-    MOZ_ASSERT(&inlineTypedObject->typeDescr() == descr);
+-
+-    // When there are missing arguments, provide a default value
+-    // containing the coercion of 'undefined' to the right type.
+-    MConstant* defVal = nullptr;
+-    MIRType laneType = SimdTypeToLaneType(simdType);
+-    unsigned lanes = SimdTypeToLength(simdType);
+-    if (lanes != 4 || callInfo.argc() < lanes) {
+-        if (laneType == MIRType::Int32 || laneType == MIRType::Boolean) {
+-            // The default lane for a boolean vector is |false|, but
+-            // |MSimdSplat|, |MSimdValueX4|, and |MSimdInsertElement| all
+-            // require an Int32 argument with the value 0 or -1 to initialize a
+-            // boolean lane. See also convertToBooleanSimdLane() which is
+-            // idempotent with a 0 argument after constant folding.
+-            defVal = constant(Int32Value(0));
+-        } else if (laneType == MIRType::Double) {
+-            defVal = constant(DoubleNaNValue());
+-        } else {
+-            MOZ_ASSERT(laneType == MIRType::Float32);
+-            defVal = MConstant::NewFloat32(alloc(), JS::GenericNaN());
+-            current->add(defVal);
+-        }
+-    }
+-
+-    MInstruction *values = nullptr;
+-
+-    // Use the MSimdValueX4 constructor for X4 vectors.
+-    if (lanes == 4) {
+-        MDefinition* lane[4];
+-        for (unsigned i = 0; i < 4; i++)
+-            lane[i] = callInfo.getArgWithDefault(i, defVal);
+-
+-        // Convert boolean lanes into Int32 0 / -1.
+-        if (laneType == MIRType::Boolean) {
+-            for (unsigned i = 0; i < 4; i++)
+-                lane[i] = convertToBooleanSimdLane(lane[i]);
+-        }
+-
+-        values = MSimdValueX4::New(alloc(), simdType, lane[0], lane[1], lane[2], lane[3]);
+-        current->add(values);
+-    } else {
+-        // For general constructor calls, start from splat(defVal), insert one
+-        // lane at a time.
+-        values = MSimdSplat::New(alloc(), defVal, simdType);
+-        current->add(values);
+-
+-        // Stop early if the constructor doesn't have enough arguments. These lanes
+-        // then get the default value.
+-        if (callInfo.argc() < lanes)
+-            lanes = callInfo.argc();
+-
+-        for (unsigned i = 0; i < lanes; i++) {
+-            MDefinition* lane = callInfo.getArg(i);
+-            if (laneType == MIRType::Boolean)
+-                lane = convertToBooleanSimdLane(lane);
+-            values = MSimdInsertElement::New(alloc(), values, lane, i);
+-            current->add(values);
+-        }
+-    }
+-
+-    MSimdBox* obj = MSimdBox::New(alloc(), constraints(), values, inlineTypedObject, descr->type(),
+-                                  inlineTypedObject->group()->initialHeap(constraints()));
+-    current->add(obj);
+-    current->push(obj);
+-
+-    callInfo.setImplicitlyUsedUnchecked();
+-    return InliningStatus_Inlined;
+-}
+-
+-bool
+-IonBuilder::canInlineSimd(CallInfo& callInfo, JSNative native, unsigned numArgs,
+-                          InlineTypedObject** templateObj)
+-{
+-    if (callInfo.argc() != numArgs)
+-        return false;
+-
+-    JSObject* templateObject = inspector->getTemplateObjectForNative(pc, native);
+-    if (!templateObject)
+-        return false;
+-
+-    *templateObj = &templateObject->as<InlineTypedObject>();
+-    return true;
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdCheck(CallInfo& callInfo, JSNative native, SimdType type)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 1, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    // Unboxing checks the SIMD object type and throws a TypeError if it doesn't
+-    // match type.
+-    MDefinition *arg = unboxSimd(callInfo.getArg(0), type);
+-
+-    // Create an unbox/box pair, expecting the box to be optimized away if
+-    // anyone uses the return value from this check() call. This is what you want
+-    // for code like this:
+-    //
+-    // function f(x) {
+-    //   x = Int32x4.check(x)
+-    //   for(...) {
+-    //     y = Int32x4.add(x, ...)
+-    //   }
+-    //
+-    // The unboxing of x happens as early as possible, and only once.
+-    return boxSimd(callInfo, arg, templateObj);
+-}
+-
+-// Given a value or object, insert a dynamic check that this is a SIMD object of
+-// the required SimdType, and unbox it into the corresponding SIMD MIRType.
+-//
+-// This represents the standard type checking that all the SIMD operations
+-// perform on their arguments.
+-MDefinition*
+-IonBuilder::unboxSimd(MDefinition* ins, SimdType type)
+-{
+-    // Trivial optimization: If ins is a MSimdBox of the same SIMD type, there
+-    // is no way the unboxing could fail, and we can skip it altogether.
+-    // This is the same thing MSimdUnbox::foldsTo() does, but we can save the
+-    // memory allocation here.
+-    if (ins->isSimdBox()) {
+-        MSimdBox* box = ins->toSimdBox();
+-        if (box->simdType() == type) {
+-            MDefinition* value = box->input();
+-            MOZ_ASSERT(value->type() == SimdTypeToMIRType(type));
+-            return value;
+-        }
+-    }
+-
+-    MSimdUnbox* unbox = MSimdUnbox::New(alloc(), ins, type);
+-    current->add(unbox);
+-    return unbox;
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::boxSimd(CallInfo& callInfo, MDefinition* ins, InlineTypedObject* templateObj)
+-{
+-    SimdType simdType = templateObj->typeDescr().as<SimdTypeDescr>().type();
+-    MSimdBox* obj = MSimdBox::New(alloc(), constraints(), ins, templateObj, simdType,
+-                                  templateObj->group()->initialHeap(constraints()));
+-
+-    // In some cases, ins has already been added to current.
+-    if (!ins->block() && ins->isInstruction())
+-        current->add(ins->toInstruction());
+-    current->add(obj);
+-    current->push(obj);
+-
+-    callInfo.setImplicitlyUsedUnchecked();
+-    return InliningStatus_Inlined;
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdBinaryArith(CallInfo& callInfo, JSNative native,
+-                                  MSimdBinaryArith::Operation op, SimdType type)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 2, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
+-    MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
+-
+-    auto* ins = MSimdBinaryArith::AddLegalized(alloc(), current, lhs, rhs, op);
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdBinaryBitwise(CallInfo& callInfo, JSNative native,
+-                                    MSimdBinaryBitwise::Operation op, SimdType type)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 2, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
+-    MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
+-
+-    auto* ins = MSimdBinaryBitwise::New(alloc(), lhs, rhs, op);
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-// Inline a binary SIMD operation where both arguments are SIMD types.
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdBinarySaturating(CallInfo& callInfo, JSNative native,
+-                                       MSimdBinarySaturating::Operation op, SimdType type)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 2, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
+-    MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
+-
+-    MSimdBinarySaturating* ins =
+-      MSimdBinarySaturating::New(alloc(), lhs, rhs, op, GetSimdSign(type));
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-// Inline a SIMD shiftByScalar operation.
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdShift(CallInfo& callInfo, JSNative native, MSimdShift::Operation op,
+-                            SimdType type)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 2, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    MDefinition* vec = unboxSimd(callInfo.getArg(0), type);
+-
+-    MInstruction* ins = MSimdShift::AddLegalized(alloc(), current, vec, callInfo.getArg(1), op);
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdComp(CallInfo& callInfo, JSNative native, MSimdBinaryComp::Operation op,
+-                           SimdType type)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 2, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
+-    MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
+-    MInstruction* ins =
+-      MSimdBinaryComp::AddLegalized(alloc(), current, lhs, rhs, op, GetSimdSign(type));
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdUnary(CallInfo& callInfo, JSNative native, MSimdUnaryArith::Operation op,
+-                            SimdType type)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 1, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    MDefinition* arg = unboxSimd(callInfo.getArg(0), type);
+-
+-    MSimdUnaryArith* ins = MSimdUnaryArith::New(alloc(), arg, op);
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdSplat(CallInfo& callInfo, JSNative native, SimdType type)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 1, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    MIRType mirType = SimdTypeToMIRType(type);
+-    MDefinition* arg = callInfo.getArg(0);
+-
+-    // Convert to 0 / -1 before splatting a boolean lane.
+-    if (SimdTypeToLaneType(mirType) == MIRType::Boolean)
+-        arg = convertToBooleanSimdLane(arg);
+-
+-    MSimdSplat* ins = MSimdSplat::New(alloc(), arg, mirType);
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdExtractLane(CallInfo& callInfo, JSNative native, SimdType type)
+-{
+-    // extractLane() returns a scalar, so don't use canInlineSimd() which looks
+-    // for a template object.
+-    if (callInfo.argc() != 2 || callInfo.constructing()) {
+-        trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+-        return InliningStatus_NotInlined;
+-    }
+-
+-    // Lane index.
+-    MDefinition* arg = callInfo.getArg(1);
+-    if (!arg->isConstant() || arg->type() != MIRType::Int32)
+-        return InliningStatus_NotInlined;
+-    unsigned lane = arg->toConstant()->toInt32();
+-    if (lane >= GetSimdLanes(type))
+-        return InliningStatus_NotInlined;
+-
+-    // Original vector.
+-    MDefinition* orig = unboxSimd(callInfo.getArg(0), type);
+-    MIRType vecType = orig->type();
+-    MIRType laneType = SimdTypeToLaneType(vecType);
+-    SimdSign sign = GetSimdSign(type);
+-
+-    // A Uint32 lane can't be represented in MIRType::Int32. Get it as a double.
+-    if (type == SimdType::Uint32x4)
+-        laneType = MIRType::Double;
+-
+-    MSimdExtractElement* ins =
+-      MSimdExtractElement::New(alloc(), orig, laneType, lane, sign);
+-    current->add(ins);
+-    current->push(ins);
+-    callInfo.setImplicitlyUsedUnchecked();
+-    return InliningStatus_Inlined;
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdReplaceLane(CallInfo& callInfo, JSNative native, SimdType type)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 3, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    // Lane index.
+-    MDefinition* arg = callInfo.getArg(1);
+-    if (!arg->isConstant() || arg->type() != MIRType::Int32)
+-        return InliningStatus_NotInlined;
+-
+-    unsigned lane = arg->toConstant()->toInt32();
+-    if (lane >= GetSimdLanes(type))
+-        return InliningStatus_NotInlined;
+-
+-    // Original vector.
+-    MDefinition* orig = unboxSimd(callInfo.getArg(0), type);
+-    MIRType vecType = orig->type();
+-
+-    // Convert to 0 / -1 before inserting a boolean lane.
+-    MDefinition* value = callInfo.getArg(2);
+-    if (SimdTypeToLaneType(vecType) == MIRType::Boolean)
+-        value = convertToBooleanSimdLane(value);
+-
+-    MSimdInsertElement* ins = MSimdInsertElement::New(alloc(), orig, value, lane);
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-// Inline a SIMD conversion or bitcast. When isCast==false, one of the types
+-// must be floating point and the other integer. In this case, sign indicates if
+-// the integer lanes should be treated as signed or unsigned integers.
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdConvert(CallInfo& callInfo, JSNative native, bool isCast, SimdType fromType,
+-                              SimdType toType)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 1, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    MDefinition* arg = unboxSimd(callInfo.getArg(0), fromType);
+-    MIRType mirType = SimdTypeToMIRType(toType);
+-
+-    MInstruction* ins;
+-    if (isCast) {
+-        // Signed/Unsigned doesn't matter for bitcasts.
+-        ins = MSimdReinterpretCast::New(alloc(), arg, mirType);
+-    } else {
+-        // Exactly one of fromType, toType must be an integer type.
+-        SimdSign sign = GetSimdSign(fromType);
+-        if (sign == SimdSign::NotApplicable)
+-            sign = GetSimdSign(toType);
+-
+-        // Possibly expand into multiple instructions.
+-        ins = MSimdConvert::AddLegalized(alloc(), current, arg, mirType, sign);
+-    }
+-
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdSelect(CallInfo& callInfo, JSNative native, SimdType type)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 3, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    MDefinition* mask = unboxSimd(callInfo.getArg(0), GetBooleanSimdType(type));
+-    MDefinition* tval = unboxSimd(callInfo.getArg(1), type);
+-    MDefinition* fval = unboxSimd(callInfo.getArg(2), type);
+-
+-    MSimdSelect* ins = MSimdSelect::New(alloc(), mask, tval, fval);
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdShuffle(CallInfo& callInfo, JSNative native, SimdType type,
+-                              unsigned numVectors)
+-{
+-    unsigned numLanes = GetSimdLanes(type);
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, numVectors + numLanes, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    MIRType mirType = SimdTypeToMIRType(type);
+-
+-    MSimdGeneralShuffle* ins = MSimdGeneralShuffle::New(alloc(), numVectors, numLanes, mirType);
+-
+-    if (!ins->init(alloc()))
+-        return abort(AbortReason::Alloc);
+-
+-    for (unsigned i = 0; i < numVectors; i++)
+-        ins->setVector(i, unboxSimd(callInfo.getArg(i), type));
+-    for (size_t i = 0; i < numLanes; i++)
+-        ins->setLane(i, callInfo.getArg(numVectors + i));
+-
+-    return boxSimd(callInfo, ins, templateObj);
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdAnyAllTrue(CallInfo& callInfo, bool IsAllTrue, JSNative native,
+-                                 SimdType type)
+-{
+-    // anyTrue() / allTrue() return a scalar, so don't use canInlineSimd() which looks
+-    // for a template object.
+-    if (callInfo.argc() != 1 || callInfo.constructing()) {
+-        trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+-        return InliningStatus_NotInlined;
+-    }
+-
+-    MDefinition* arg = unboxSimd(callInfo.getArg(0), type);
+-
+-    MUnaryInstruction* ins;
+-    if (IsAllTrue)
+-        ins = MSimdAllTrue::New(alloc(), arg, MIRType::Boolean);
+-    else
+-        ins = MSimdAnyTrue::New(alloc(), arg, MIRType::Boolean);
+-
+-    current->add(ins);
+-    current->push(ins);
+-    callInfo.setImplicitlyUsedUnchecked();
+-    return InliningStatus_Inlined;
+-}
+-
+-// Get the typed array element type corresponding to the lanes in a SIMD vector type.
+-// This only applies to SIMD types that can be loaded and stored to a typed array.
+-static Scalar::Type
+-SimdTypeToArrayElementType(SimdType type)
+-{
+-    switch (type) {
+-      case SimdType::Float32x4: return Scalar::Float32x4;
+-      case SimdType::Int8x16:
+-      case SimdType::Uint8x16:  return Scalar::Int8x16;
+-      case SimdType::Int16x8:
+-      case SimdType::Uint16x8:  return Scalar::Int16x8;
+-      case SimdType::Int32x4:
+-      case SimdType::Uint32x4:  return Scalar::Int32x4;
+-      default:                MOZ_CRASH("unexpected simd type");
+-    }
+-}
+-
+-bool
+-IonBuilder::prepareForSimdLoadStore(CallInfo& callInfo, Scalar::Type simdType,
+-                                    MInstruction** elements, MDefinition** index,
+-                                    Scalar::Type* arrayType)
+-{
+-    MDefinition* array = callInfo.getArg(0);
+-    *index = callInfo.getArg(1);
+-
+-    if (!ElementAccessIsTypedArray(constraints(), array, *index, arrayType))
+-        return false;
+-
+-    MInstruction* indexAsInt32 = MToNumberInt32::New(alloc(), *index);
+-    current->add(indexAsInt32);
+-    *index = indexAsInt32;
+-
+-    MDefinition* indexLoadEnd = *index;
+-
+-    MOZ_ASSERT(Scalar::byteSize(simdType) % Scalar::byteSize(*arrayType) == 0);
+-    int32_t byteLoadSize = Scalar::byteSize(simdType) / Scalar::byteSize(*arrayType);
+-    if (byteLoadSize > 1) {
+-        // Add the number of supplementary slots needed. Overflows are fine
+-        // because the bounds check code uses an unsigned comparison.
+-        MAdd* addedIndex = MAdd::New(alloc(), *index, constant(Int32Value(byteLoadSize - 1)));
+-        addedIndex->setInt32Specialization();
+-        current->add(addedIndex);
+-        indexLoadEnd = addedIndex;
+-    }
+-
+-    MInstruction* length;
+-    addTypedArrayLengthAndData(array, SkipBoundsCheck, index, &length, elements);
+-
+-    // If the index+size addition overflows, then indexLoadEnd might be
+-    // in bounds while the actual index isn't, so we need two bounds checks
+-    // here.
+-    if (byteLoadSize > 1) {
+-        indexLoadEnd = addBoundsCheck(indexLoadEnd, length);
+-        auto* sub = MSub::New(alloc(), indexLoadEnd, constant(Int32Value(byteLoadSize - 1)));
+-        sub->setInt32Specialization();
+-        current->add(sub);
+-        *index = sub;
+-    }
+-
+-    *index = addBoundsCheck(*index, length);
+-
+-    return true;
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdLoad(CallInfo& callInfo, JSNative native, SimdType type, unsigned numElems)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 2, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    Scalar::Type elemType = SimdTypeToArrayElementType(type);
+-
+-    MDefinition* index = nullptr;
+-    MInstruction* elements = nullptr;
+-    Scalar::Type arrayType;
+-    if (!prepareForSimdLoadStore(callInfo, elemType, &elements, &index, &arrayType))
+-        return InliningStatus_NotInlined;
+-
+-    MLoadUnboxedScalar* load = MLoadUnboxedScalar::New(alloc(), elements, index, arrayType);
+-    load->setResultType(SimdTypeToMIRType(type));
+-    load->setSimdRead(elemType, numElems);
+-
+-    return boxSimd(callInfo, load, templateObj);
+-}
+-
+-IonBuilder::InliningResult
+-IonBuilder::inlineSimdStore(CallInfo& callInfo, JSNative native, SimdType type, unsigned numElems)
+-{
+-    InlineTypedObject* templateObj = nullptr;
+-    if (!canInlineSimd(callInfo, native, 3, &templateObj))
+-        return InliningStatus_NotInlined;
+-
+-    Scalar::Type elemType = SimdTypeToArrayElementType(type);
+-
+-    MDefinition* index = nullptr;
+-    MInstruction* elements = nullptr;
+-    Scalar::Type arrayType;
+-    if (!prepareForSimdLoadStore(callInfo, elemType, &elements, &index, &arrayType))
+-        return InliningStatus_NotInlined;
+-
+-    MDefinition* valueToWrite = unboxSimd(callInfo.getArg(2), type);
+-    MStoreUnboxedScalar* store = MStoreUnboxedScalar::New(alloc(), elements, index,
+-                                                          valueToWrite, arrayType,
+-                                                          MStoreUnboxedScalar::TruncateInput);
+-    store->setSimdWrite(elemType, numElems);
+-
+-    current->add(store);
+-    // Produce the original boxed value as our return value.
+-    // This is unlikely to be used, so don't bother reboxing valueToWrite.
+-    current->push(callInfo.getArg(2));
+-
+-    callInfo.setImplicitlyUsedUnchecked();
+-
+-    MOZ_TRY(resumeAfter(store));
+-    return InliningStatus_Inlined;
+-}
+-
+-// Note that SIMD.cpp provides its own JSJitInfo objects for SIMD.foo.* functions.
+-// The Simd* objects defined here represent SIMD.foo() constructor calls.
+-// They are encoded with .nativeOp = 0. That is the sub-opcode within the SIMD type.
+-static_assert(uint16_t(SimdOperation::Constructor) == 0, "Constructor opcode must be 0");
+-
+ #define ADD_NATIVE(native) const JSJitInfo JitInfo_##native { \
+     { nullptr }, { uint16_t(InlinableNative::native) }, { 0 }, JSJitInfo::InlinableNative };
+     INLINABLE_NATIVE_LIST(ADD_NATIVE)
+ #undef ADD_NATIVE
+ 
+ } // namespace jit
+ } // namespace js
+diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
+--- a/js/src/jit/MIR.cpp
++++ b/js/src/jit/MIR.cpp
+@@ -12,17 +12,16 @@
+ #include "mozilla/IntegerPrintfMacros.h"
+ #include "mozilla/MathAlgorithms.h"
+ 
+ #include <ctype.h>
+ 
+ #include "jslibmath.h"
+ 
+ #include "builtin/RegExp.h"
+-#include "builtin/SIMD.h"
+ #include "builtin/String.h"
+ #include "jit/AtomicOperations.h"
+ #include "jit/BaselineInspector.h"
+ #include "jit/IonBuilder.h"
+ #include "jit/JitSpewer.h"
+ #include "jit/MIRGraph.h"
+ #include "jit/RangeAnalysis.h"
+ #include "js/Conversions.h"
+@@ -1307,542 +1306,18 @@ MWasmFloatConstant::valueHash() const
+ bool
+ MWasmFloatConstant::congruentTo(const MDefinition* ins) const
+ {
+     return ins->isWasmFloatConstant() &&
+            type() == ins->type() &&
+            u.bits_ == ins->toWasmFloatConstant()->u.bits_;
+ }
+ 
+-MDefinition*
+-MSimdValueX4::foldsTo(TempAllocator& alloc)
+-{
+-#ifdef DEBUG
+-    MIRType laneType = SimdTypeToLaneArgumentType(type());
+-#endif
+-    bool allConstants = true;
+-    bool allSame = true;
+-
+-    for (size_t i = 0; i < 4; ++i) {
+-        MDefinition* op = getOperand(i);
+-        MOZ_ASSERT(op->type() == laneType);
+-        if (!op->isConstant())
+-            allConstants = false;
+-        if (i > 0 && op != getOperand(i - 1))
+-            allSame = false;
+-    }
+-
+-    if (!allConstants && !allSame)
+-        return this;
+-
+-    if (allConstants) {
+-        SimdConstant cst;
+-        switch (type()) {
+-          case MIRType::Bool32x4: {
+-            int32_t a[4];
+-            for (size_t i = 0; i < 4; ++i)
+-                a[i] = getOperand(i)->toConstant()->valueToBooleanInfallible() ? -1 : 0;
+-            cst = SimdConstant::CreateX4(a);
+-            break;
+-          }
+-          case MIRType::Int32x4: {
+-            int32_t a[4];
+-            for (size_t i = 0; i < 4; ++i)
+-                a[i] = getOperand(i)->toConstant()->toInt32();
+-            cst = SimdConstant::CreateX4(a);
+-            break;
+-          }
+-          case MIRType::Float32x4: {
+-            float a[4];
+-            for (size_t i = 0; i < 4; ++i)
+-                a[i] = getOperand(i)->toConstant()->numberToDouble();
+-            cst = SimdConstant::CreateX4(a);
+-            break;
+-          }
+-          default: MOZ_CRASH("unexpected type in MSimdValueX4::foldsTo");
+-        }
+-
+-        return MSimdConstant::New(alloc, cst, type());
+-    }
+-
+-    MOZ_ASSERT(allSame);
+-    return MSimdSplat::New(alloc, getOperand(0), type());
+-}
+-
+-MDefinition*
+-MSimdSplat::foldsTo(TempAllocator& alloc)
+-{
+-#ifdef DEBUG
+-    MIRType laneType = SimdTypeToLaneArgumentType(type());
+-#endif
+-    MDefinition* op = getOperand(0);
+-    if (!op->isConstant())
+-        return this;
+-    MOZ_ASSERT(op->type() == laneType);
+-
+-    SimdConstant cst;
+-    switch (type()) {
+-      case MIRType::Bool8x16: {
+-        int8_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
+-        cst = SimdConstant::SplatX16(v);
+-        break;
+-      }
+-      case MIRType::Bool16x8: {
+-        int16_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
+-        cst = SimdConstant::SplatX8(v);
+-        break;
+-      }
+-      case MIRType::Bool32x4: {
+-        int32_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
+-        cst = SimdConstant::SplatX4(v);
+-        break;
+-      }
+-      case MIRType::Int8x16: {
+-        int32_t v = op->toConstant()->toInt32();
+-        cst = SimdConstant::SplatX16(v);
+-        break;
+-      }
+-      case MIRType::Int16x8: {
+-        int32_t v = op->toConstant()->toInt32();
+-        cst = SimdConstant::SplatX8(v);
+-        break;
+-      }
+-      case MIRType::Int32x4: {
+-        int32_t v = op->toConstant()->toInt32();
+-        cst = SimdConstant::SplatX4(v);
+-        break;
+-      }
+-      case MIRType::Float32x4: {
+-        float v = op->toConstant()->numberToDouble();
+-        cst = SimdConstant::SplatX4(v);
+-        break;
+-      }
+-      default: MOZ_CRASH("unexpected type in MSimdSplat::foldsTo");
+-    }
+-
+-    return MSimdConstant::New(alloc, cst, type());
+-}
+-
+-MDefinition*
+-MSimdUnbox::foldsTo(TempAllocator& alloc)
+-{
+-    MDefinition* in = input();
+-
+-    if (in->isSimdBox()) {
+-        MSimdBox* box = in->toSimdBox();
+-        // If the operand is a MSimdBox, then we just reuse the operand of the
+-        // MSimdBox as long as the type corresponds to what we are supposed to
+-        // unbox.
+-        in = box->input();
+-        if (box->simdType() != simdType())
+-            return this;
+-        MOZ_ASSERT(in->type() == type());
+-        return in;
+-    }
+-
+-    return this;
+-}
+-
+-MDefinition*
+-MSimdSwizzle::foldsTo(TempAllocator& alloc)
+-{
+-    if (lanesMatch(0, 1, 2, 3))
+-        return input();
+-    return this;
+-}
+-
+-MDefinition*
+-MSimdGeneralShuffle::foldsTo(TempAllocator& alloc)
+-{
+-    FixedList<uint8_t> lanes;
+-    if (!lanes.init(alloc, numLanes()))
+-        return this;
+-
+-    for (size_t i = 0; i < numLanes(); i++) {
+-        if (!lane(i)->isConstant() || lane(i)->type() != MIRType::Int32)
+-            return this;
+-        int32_t temp = lane(i)->toConstant()->toInt32();
+-        if (temp < 0 || unsigned(temp) >= numLanes() * numVectors())
+-            return this;
+-        lanes[i] = uint8_t(temp);
+-    }
+-
+-    if (numVectors() == 1)
+-        return MSimdSwizzle::New(alloc, vector(0), lanes.data());
+-
+-    MOZ_ASSERT(numVectors() == 2);
+-    return MSimdShuffle::New(alloc, vector(0), vector(1), lanes.data());
+-}
+-
+-MInstruction*
+-MSimdConvert::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* obj,
+-                           MIRType toType, SimdSign sign, wasm::BytecodeOffset bytecodeOffset)
+-{
+-    MIRType fromType = obj->type();
+-
+-    if (SupportsUint32x4FloatConversions || sign != SimdSign::Unsigned) {
+-        MInstruction* ins = New(alloc, obj, toType, sign, bytecodeOffset);
+-        addTo->add(ins);
+-        return ins;
+-    }
+-
+-    // This architecture can't do Uint32x4 <-> Float32x4 conversions (Hi SSE!)
+-    MOZ_ASSERT(sign == SimdSign::Unsigned);
+-    if (fromType == MIRType::Int32x4 && toType == MIRType::Float32x4) {
+-        // Converting Uint32x4 -> Float32x4. This algorithm is from LLVM.
+-        //
+-        // Split the input number into high and low parts:
+-        //
+-        // uint32_t hi = x >> 16;
+-        // uint32_t lo = x & 0xffff;
+-        //
+-        // Insert these parts as the low mantissa bits in a float32 number with
+-        // the corresponding exponent:
+-        //
+-        // float fhi = (bits-as-float)(hi | 0x53000000); // 0x1.0p39f + hi*2^16
+-        // float flo = (bits-as-float)(lo | 0x4b000000); // 0x1.0p23f + lo
+-        //
+-        // Subtract the bias from the hi part:
+-        //
+-        // fhi -= (0x1.0p39 + 0x1.0p23) // hi*2^16 - 0x1.0p23
+-        //
+-        // And finally combine:
+-        //
+-        // result = flo + fhi // lo + hi*2^16.
+-
+-        // Compute hi = obj >> 16 (lane-wise unsigned shift).
+-        MInstruction* c16 = MConstant::New(alloc, Int32Value(16));
+-        addTo->add(c16);
+-        MInstruction* hi = MSimdShift::AddLegalized(alloc, addTo, obj, c16, MSimdShift::ursh);
+-
+-        // Compute lo = obj & 0xffff (lane-wise).
+-        MInstruction* m16 =
+-          MSimdConstant::New(alloc, SimdConstant::SplatX4(0xffff), MIRType::Int32x4);
+-        addTo->add(m16);
+-        MInstruction* lo = MSimdBinaryBitwise::New(alloc, obj, m16, MSimdBinaryBitwise::and_);
+-        addTo->add(lo);
+-
+-        // Mix in the exponents.
+-        MInstruction* exphi =
+-          MSimdConstant::New(alloc, SimdConstant::SplatX4(0x53000000), MIRType::Int32x4);
+-        addTo->add(exphi);
+-        MInstruction* mhi = MSimdBinaryBitwise::New(alloc, hi, exphi, MSimdBinaryBitwise::or_);
+-        addTo->add(mhi);
+-        MInstruction* explo =
+-          MSimdConstant::New(alloc, SimdConstant::SplatX4(0x4b000000), MIRType::Int32x4);
+-        addTo->add(explo);
+-        MInstruction* mlo = MSimdBinaryBitwise::New(alloc, lo, explo, MSimdBinaryBitwise::or_);
+-        addTo->add(mlo);
+-
+-        // Bit-cast both to Float32x4.
+-        MInstruction* fhi = MSimdReinterpretCast::New(alloc, mhi, MIRType::Float32x4);
+-        addTo->add(fhi);
+-        MInstruction* flo = MSimdReinterpretCast::New(alloc, mlo, MIRType::Float32x4);
+-        addTo->add(flo);
+-
+-        // Subtract out the bias: 0x1.0p39f + 0x1.0p23f.
+-        // MSVC doesn't support the hexadecimal float syntax.
+-        const float BiasValue = 549755813888.f + 8388608.f;
+-        MInstruction* bias =
+-          MSimdConstant::New(alloc, SimdConstant::SplatX4(BiasValue), MIRType::Float32x4);
+-        addTo->add(bias);
+-        MInstruction* fhi_debiased =
+-          MSimdBinaryArith::AddLegalized(alloc, addTo, fhi, bias, MSimdBinaryArith::Op_sub);
+-
+-        // Compute the final result.
+-        return MSimdBinaryArith::AddLegalized(alloc, addTo, fhi_debiased, flo,
+-                                              MSimdBinaryArith::Op_add);
+-    }
+-
+-    if (fromType == MIRType::Float32x4 && toType == MIRType::Int32x4) {
+-        // The Float32x4 -> Uint32x4 conversion can throw if the input is out of
+-        // range. This is handled by the LFloat32x4ToUint32x4 expansion.
+-        MInstruction* ins = New(alloc, obj, toType, sign, bytecodeOffset);
+-        addTo->add(ins);
+-        return ins;
+-    }
+-
+-    MOZ_CRASH("Unhandled SIMD type conversion");
+-}
+-
+-MInstruction*
+-MSimdBinaryComp::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+-                              MDefinition* right, Operation op, SimdSign sign)
+-{
+-    MOZ_ASSERT(left->type() == right->type());
+-    MIRType opType = left->type();
+-    MOZ_ASSERT(IsSimdType(opType));
+-    bool IsEquality = op == equal || op == notEqual;
+-
+-    // Check if this is an unsupported unsigned compare that needs to be biased.
+-    // If so, put the bias vector in `bias`.
+-    if (sign == SimdSign::Unsigned && !IsEquality) {
+-        MInstruction* bias = nullptr;
+-
+-        // This is an order comparison of Uint32x4 vectors which are not supported on this target.
+-        // Simply offset |left| and |right| by INT_MIN, then do a signed comparison.
+-        if (!SupportsUint32x4Compares && opType == MIRType::Int32x4)
+-            bias = MSimdConstant::New(alloc, SimdConstant::SplatX4(int32_t(0x80000000)), opType);
+-        else if (!SupportsUint16x8Compares && opType == MIRType::Int16x8)
+-            bias = MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0x8000)), opType);
+-        else if (!SupportsUint8x16Compares && opType == MIRType::Int8x16)
+-            bias = MSimdConstant::New(alloc, SimdConstant::SplatX16(int8_t(0x80)), opType);
+-
+-        if (bias) {
+-            addTo->add(bias);
+-
+-            // Add the bias.
+-            MInstruction* bleft =
+-              MSimdBinaryArith::AddLegalized(alloc, addTo, left, bias, MSimdBinaryArith::Op_add);
+-            MInstruction* bright =
+-              MSimdBinaryArith::AddLegalized(alloc, addTo, right, bias, MSimdBinaryArith::Op_add);
+-
+-            // Do the equivalent signed comparison.
+-            MInstruction* result =
+-              MSimdBinaryComp::New(alloc, bleft, bright, op, SimdSign::Signed);
+-            addTo->add(result);
+-
+-            return result;
+-        }
+-    }
+-
+-    if (sign == SimdSign::Unsigned &&
+-        ((!SupportsUint32x4Compares && opType == MIRType::Int32x4) ||
+-         (!SupportsUint16x8Compares && opType == MIRType::Int16x8) ||
+-         (!SupportsUint8x16Compares && opType == MIRType::Int8x16))) {
+-        // The sign doesn't matter for equality tests. Flip it to make the
+-        // backend assertions happy.
+-        MOZ_ASSERT(IsEquality);
+-        sign = SimdSign::Signed;
+-    }
+-
+-    // This is a legal operation already. Just create the instruction requested.
+-    MInstruction* result = MSimdBinaryComp::New(alloc, left, right, op, sign);
+-    addTo->add(result);
+-    return result;
+-}
+-
+-MInstruction*
+-MSimdBinaryArith::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+-                               MDefinition* right, Operation op)
+-{
+-    MOZ_ASSERT(left->type() == right->type());
+-    MIRType opType = left->type();
+-    MOZ_ASSERT(IsSimdType(opType));
+-
+-    // SSE does not have 8x16 multiply instructions.
+-    if (opType == MIRType::Int8x16 && op == Op_mul) {
+-        // Express the multiply in terms of Int16x8 multiplies by handling the
+-        // even and odd lanes separately.
+-
+-        MInstruction* wideL = MSimdReinterpretCast::New(alloc, left, MIRType::Int16x8);
+-        addTo->add(wideL);
+-        MInstruction* wideR = MSimdReinterpretCast::New(alloc, right, MIRType::Int16x8);
+-        addTo->add(wideR);
+-
+-        // wideL = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
+-        // wideR = bbaa bbaa bbaa bbaa bbaa bbaa bbaa bbaa
+-
+-        // Shift the odd lanes down to the low bits of the 16x8 vectors.
+-        MInstruction* eight = MConstant::New(alloc, Int32Value(8));
+-        addTo->add(eight);
+-        MInstruction* evenL = wideL;
+-        MInstruction* evenR = wideR;
+-        MInstruction* oddL =
+-          MSimdShift::AddLegalized(alloc, addTo, wideL, eight, MSimdShift::ursh);
+-        MInstruction* oddR =
+-          MSimdShift::AddLegalized(alloc, addTo, wideR, eight, MSimdShift::ursh);
+-
+-        // evenL = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
+-        // evenR = bbaa bbaa bbaa bbaa bbaa bbaa bbaa bbaa
+-        // oddL  = 00yy 00yy 00yy 00yy 00yy 00yy 00yy 00yy
+-        // oddR  = 00bb 00bb 00bb 00bb 00bb 00bb 00bb 00bb
+-
+-        // Now do two 16x8 multiplications. We can use the low bits of each.
+-        MInstruction* even = MSimdBinaryArith::AddLegalized(alloc, addTo, evenL, evenR, Op_mul);
+-        MInstruction* odd = MSimdBinaryArith::AddLegalized(alloc, addTo, oddL, oddR, Op_mul);
+-
+-        // even = ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP
+-        // odd  = ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ
+-
+-        MInstruction* mask =
+-          MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0x00ff)), MIRType::Int16x8);
+-        addTo->add(mask);
+-        even = MSimdBinaryBitwise::New(alloc, even, mask, MSimdBinaryBitwise::and_);
+-        addTo->add(even);
+-        odd = MSimdShift::AddLegalized(alloc, addTo, odd, eight, MSimdShift::lsh);
+-
+-        // even = 00PP 00PP 00PP 00PP 00PP 00PP 00PP 00PP
+-        // odd  = QQ00 QQ00 QQ00 QQ00 QQ00 QQ00 QQ00 QQ00
+-
+-        // Combine:
+-        MInstruction* result = MSimdBinaryBitwise::New(alloc, even, odd, MSimdBinaryBitwise::or_);
+-        addTo->add(result);
+-        result = MSimdReinterpretCast::New(alloc, result, opType);
+-        addTo->add(result);
+-        return result;
+-    }
+-
+-    // This is a legal operation already. Just create the instruction requested.
+-    MInstruction* result = MSimdBinaryArith::New(alloc, left, right, op);
+-    addTo->add(result);
+-    return result;
+-}
+-
+-MInstruction*
+-MSimdShift::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+-                         MDefinition* right, Operation op)
+-{
+-    MIRType opType = left->type();
+-    MOZ_ASSERT(IsIntegerSimdType(opType));
+-
+-    // SSE does not provide 8x16 shift instructions.
+-    if (opType == MIRType::Int8x16) {
+-        // Express the shift in terms of Int16x8 shifts by splitting into even
+-        // and odd lanes, place 8-bit lanes into the high bits of Int16x8
+-        // vectors `even` and `odd`. Shift, mask, combine.
+-        //
+-        //   wide = Int16x8.fromInt8x16Bits(left);
+-        //   shiftBy = right & 7
+-        //   mask = Int16x8.splat(0xff00);
+-        //
+-        MInstruction* wide = MSimdReinterpretCast::New(alloc, left, MIRType::Int16x8);
+-        addTo->add(wide);
+-
+-        // wide = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
+-
+-        MInstruction* shiftMask = MConstant::New(alloc, Int32Value(7));
+-        addTo->add(shiftMask);
+-        MBinaryBitwiseInstruction* shiftBy = MBitAnd::New(alloc, right, shiftMask);
+-        shiftBy->setInt32Specialization();
+-        addTo->add(shiftBy);
+-
+-        // Move the even 8x16 lanes into the high bits of the 16x8 lanes.
+-        MInstruction* eight = MConstant::New(alloc, Int32Value(8));
+-        addTo->add(eight);
+-        MInstruction* even = MSimdShift::AddLegalized(alloc, addTo, wide, eight, lsh);
+-
+-        // Leave the odd lanes in place.
+-        MInstruction* odd = wide;
+-
+-        // even = xx00 xx00 xx00 xx00 xx00 xx00 xx00 xx00
+-        // odd  = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
+-
+-        MInstruction* mask =
+-          MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0xff00)), MIRType::Int16x8);
+-        addTo->add(mask);
+-
+-        // Left-shift: Clear the low bits in `odd` before shifting.
+-        if (op == lsh) {
+-            odd = MSimdBinaryBitwise::New(alloc, odd, mask, MSimdBinaryBitwise::and_);
+-            addTo->add(odd);
+-            // odd  = yy00 yy00 yy00 yy00 yy00 yy00 yy00 yy00
+-        }
+-
+-        // Do the real shift twice: once for the even lanes, once for the odd
+-        // lanes. This is a recursive call, but with a different type.
+-        even = MSimdShift::AddLegalized(alloc, addTo, even, shiftBy, op);
+-        odd = MSimdShift::AddLegalized(alloc, addTo, odd, shiftBy, op);
+-
+-        // even = XX~~ XX~~ XX~~ XX~~ XX~~ XX~~ XX~~ XX~~
+-        // odd  = YY~~ YY~~ YY~~ YY~~ YY~~ YY~~ YY~~ YY~~
+-
+-        // Right-shift: Clear the low bits in `odd` after shifting.
+-        if (op != lsh) {
+-            odd = MSimdBinaryBitwise::New(alloc, odd, mask, MSimdBinaryBitwise::and_);
+-            addTo->add(odd);
+-            // odd  = YY00 YY00 YY00 YY00 YY00 YY00 YY00 YY00
+-        }
+-
+-        // Move the even lanes back to their original place.
+-        even = MSimdShift::AddLegalized(alloc, addTo, even, eight, ursh);
+-
+-        // Now, `odd` contains the odd lanes properly shifted, and `even`
+-        // contains the even lanes properly shifted:
+-        //
+-        // even = 00XX 00XX 00XX 00XX 00XX 00XX 00XX 00XX
+-        // odd  = YY00 YY00 YY00 YY00 YY00 YY00 YY00 YY00
+-        //
+-        // Combine:
+-        MInstruction* result = MSimdBinaryBitwise::New(alloc, even, odd, MSimdBinaryBitwise::or_);
+-        addTo->add(result);
+-        result = MSimdReinterpretCast::New(alloc, result, opType);
+-        addTo->add(result);
+-        return result;
+-    }
+-
+-    // This is a legal operation already. Just create the instruction requested.
+-    MInstruction* result = MSimdShift::New(alloc, left, right, op);
+-    addTo->add(result);
+-    return result;
+-}
+-
+-template <typename T>
+-static void
+-PrintOpcodeOperation(T* mir, GenericPrinter& out)
+-{
+-    mir->MDefinition::printOpcode(out);
+-    out.printf(" (%s)", T::OperationName(mir->operation()));
+-}
+-
+ #ifdef JS_JITSPEW
+ void
+-MSimdBinaryArith::printOpcode(GenericPrinter& out) const
+-{
+-    PrintOpcodeOperation(this, out);
+-}
+-void
+-MSimdBinarySaturating::printOpcode(GenericPrinter& out) const
+-{
+-    PrintOpcodeOperation(this, out);
+-}
+-void
+-MSimdBinaryBitwise::printOpcode(GenericPrinter& out) const
+-{
+-    PrintOpcodeOperation(this, out);
+-}
+-void
+-MSimdUnaryArith::printOpcode(GenericPrinter& out) const
+-{
+-    PrintOpcodeOperation(this, out);
+-}
+-void
+-MSimdBinaryComp::printOpcode(GenericPrinter& out) const
+-{
+-    PrintOpcodeOperation(this, out);
+-}
+-void
+-MSimdShift::printOpcode(GenericPrinter& out) const
+-{
+-    PrintOpcodeOperation(this, out);
+-}
+-
+-void
+-MSimdInsertElement::printOpcode(GenericPrinter& out) const
+-{
+-    MDefinition::printOpcode(out);
+-    out.printf(" (lane %u)", lane());
+-}
+-
+-void
+-MSimdBox::printOpcode(GenericPrinter& out) const
+-{
+-    MDefinition::printOpcode(out);
+-    out.printf(" (%s%s)", SimdTypeToString(simdType()),
+-               initialHeap() == gc::TenuredHeap ? ", tenured" : "");
+-}
+-
+-void
+-MSimdUnbox::printOpcode(GenericPrinter& out) const
+-{
+-    MDefinition::printOpcode(out);
+-    out.printf(" (%s)", SimdTypeToString(simdType()));
+-}
+-
+-void
+ MControlInstruction::printOpcode(GenericPrinter& out) const
+ {
+     MDefinition::printOpcode(out);
+     for (size_t j = 0; j < numSuccessors(); j++) {
+         if (getSuccessor(j))
+             out.printf(" block%u", getSuccessor(j)->id());
+         else
+             out.printf(" (null-to-be-patched)");
+diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
+--- a/js/src/jit/MIR.h
++++ b/js/src/jit/MIR.h
+@@ -12,17 +12,16 @@
+ #ifndef jit_MIR_h
+ #define jit_MIR_h
+ 
+ #include "mozilla/Alignment.h"
+ #include "mozilla/Array.h"
+ #include "mozilla/Attributes.h"
+ #include "mozilla/MacroForEach.h"
+ 
+-#include "builtin/SIMDConstants.h"
+ #include "jit/AtomicOp.h"
+ #include "jit/BaselineIC.h"
+ #include "jit/FixedList.h"
+ #include "jit/InlineList.h"
+ #include "jit/JitAllocPolicy.h"
+ #include "jit/MacroAssembler.h"
+ #include "jit/MOpcodes.h"
+ #include "jit/TypedObjectPrediction.h"
+@@ -90,73 +89,16 @@ MIRType MIRTypeFromValue(const js::Value
+             return MIRType::MagicUninitializedLexical;
+           default:
+             MOZ_ASSERT_UNREACHABLE("Unexpected magic constant");
+         }
+     }
+     return MIRTypeFromValueType(vp.extractNonDoubleType());
+ }
+ 
+-// If simdType is one of the SIMD types supported by Ion, set mirType to the
+-// corresponding MIRType, and return true.
+-//
+-// If simdType is not supported by Ion, return false.
+-static inline MOZ_MUST_USE
+-bool MaybeSimdTypeToMIRType(SimdType type, MIRType* mirType)
+-{
+-    switch (type) {
+-      case SimdType::Uint32x4:
+-      case SimdType::Int32x4:     *mirType = MIRType::Int32x4;   return true;
+-      case SimdType::Uint16x8:
+-      case SimdType::Int16x8:     *mirType = MIRType::Int16x8;   return true;
+-      case SimdType::Uint8x16:
+-      case SimdType::Int8x16:     *mirType = MIRType::Int8x16;   return true;
+-      case SimdType::Float32x4:   *mirType = MIRType::Float32x4; return true;
+-      case SimdType::Bool32x4:    *mirType = MIRType::Bool32x4;  return true;
+-      case SimdType::Bool16x8:    *mirType = MIRType::Bool16x8;  return true;
+-      case SimdType::Bool8x16:    *mirType = MIRType::Bool8x16;  return true;
+-      default:                    return false;
+-    }
+-}
+-
+-// Convert a SimdType to the corresponding MIRType, or crash.
+-//
+-// Note that this is not an injective mapping: SimdType has signed and unsigned
+-// integer types that map to the same MIRType.
+-static inline
+-MIRType SimdTypeToMIRType(SimdType type)
+-{
+-    MIRType ret = MIRType::None;
+-    MOZ_ALWAYS_TRUE(MaybeSimdTypeToMIRType(type, &ret));
+-    return ret;
+-}
+-
+-static inline
+-SimdType MIRTypeToSimdType(MIRType type)
+-{
+-    switch (type) {
+-      case MIRType::Int32x4:   return SimdType::Int32x4;
+-      case MIRType::Int16x8:   return SimdType::Int16x8;
+-      case MIRType::Int8x16:   return SimdType::Int8x16;
+-      case MIRType::Float32x4: return SimdType::Float32x4;
+-      case MIRType::Bool32x4:  return SimdType::Bool32x4;
+-      case MIRType::Bool16x8:  return SimdType::Bool16x8;
+-      case MIRType::Bool8x16:  return SimdType::Bool8x16;
+-      default:                break;
+-    }
+-    MOZ_CRASH("unhandled MIRType");
+-}
+-
+-// Get the boolean MIRType with the same shape as type.
+-static inline
+-MIRType MIRTypeToBooleanSimdType(MIRType type)
+-{
+-    return SimdTypeToMIRType(GetBooleanSimdType(MIRTypeToSimdType(type)));
+-}
+-
+ #define MIR_FLAG_LIST(_)                                                        \
+     _(InWorklist)                                                               \
+     _(EmittedAtUses)                                                            \
+     _(Commutative)                                                              \
+     _(Movable)       /* Allow passes like LICM to move this instruction */      \
+     _(Lowered)       /* (Debug only) has a virtual register */                  \
+     _(Guard)         /* Not removable if uses == 0 */                           \
+                                                                                 \
+@@ -1792,1064 +1734,16 @@ class MWasmFloatConstant : public MNulla
+         return u.f64_;
+     }
+     const float& toFloat32() const {
+         MOZ_ASSERT(type() == MIRType::Float32);
+         return u.f32_;
+     }
+ };
+ 
+-// Generic constructor of SIMD valuesX4.
+-class MSimdValueX4
+-  : public MQuaternaryInstruction,
+-    public MixPolicy<SimdScalarPolicy<0>, SimdScalarPolicy<1>,
+-                     SimdScalarPolicy<2>, SimdScalarPolicy<3> >::Data
+-{
+-  protected:
+-    MSimdValueX4(MIRType type, MDefinition* x, MDefinition* y, MDefinition* z, MDefinition* w)
+-      : MQuaternaryInstruction(classOpcode, x, y, z, w)
+-    {
+-        MOZ_ASSERT(IsSimdType(type));
+-        MOZ_ASSERT(SimdTypeToLength(type) == 4);
+-
+-        setMovable();
+-        setResultType(type);
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdValueX4)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    bool canConsumeFloat32(MUse* use) const override {
+-        return SimdTypeToLaneType(type()) == MIRType::Float32;
+-    }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        return congruentIfOperandsEqual(ins);
+-    }
+-
+-    MDefinition* foldsTo(TempAllocator& alloc) override;
+-
+-    ALLOW_CLONE(MSimdValueX4)
+-};
+-
+-// Generic constructor of SIMD values with identical lanes.
+-class MSimdSplat
+-  : public MUnaryInstruction,
+-    public SimdScalarPolicy<0>::Data
+-{
+-  protected:
+-    MSimdSplat(MDefinition* v, MIRType type)
+-      : MUnaryInstruction(classOpcode, v)
+-    {
+-        MOZ_ASSERT(IsSimdType(type));
+-        setMovable();
+-        setResultType(type);
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdSplat)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    bool canConsumeFloat32(MUse* use) const override {
+-        return SimdTypeToLaneType(type()) == MIRType::Float32;
+-    }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        return congruentIfOperandsEqual(ins);
+-    }
+-
+-    MDefinition* foldsTo(TempAllocator& alloc) override;
+-
+-    ALLOW_CLONE(MSimdSplat)
+-};
+-
+-// A constant SIMD value.
+-class MSimdConstant
+-  : public MNullaryInstruction
+-{
+-    SimdConstant value_;
+-
+-  protected:
+-    MSimdConstant(const SimdConstant& v, MIRType type)
+-      : MNullaryInstruction(classOpcode),
+-        value_(v)
+-    {
+-        MOZ_ASSERT(IsSimdType(type));
+-        setMovable();
+-        setResultType(type);
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdConstant)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!ins->isSimdConstant())
+-            return false;
+-        // Bool32x4 and Int32x4 share the same underlying SimdConstant representation.
+-        if (type() != ins->type())
+-            return false;
+-        return value() == ins->toSimdConstant()->value();
+-    }
+-
+-    const SimdConstant& value() const {
+-        return value_;
+-    }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    ALLOW_CLONE(MSimdConstant)
+-};
+-
+-// Converts all lanes of a given vector into the type of another vector
+-class MSimdConvert
+-  : public MUnaryInstruction,
+-    public SimdPolicy<0>::Data
+-{
+-    // When either fromType or toType is an integer vector, should it be treated
+-    // as signed or unsigned. Note that we don't support int-int conversions -
+-    // use MSimdReinterpretCast for that.
+-    SimdSign sign_;
+-    wasm::BytecodeOffset bytecodeOffset_;
+-
+-    MSimdConvert(MDefinition* obj, MIRType toType, SimdSign sign,
+-                 wasm::BytecodeOffset bytecodeOffset)
+-      : MUnaryInstruction(classOpcode, obj), sign_(sign), bytecodeOffset_(bytecodeOffset)
+-    {
+-        MIRType fromType = obj->type();
+-        MOZ_ASSERT(IsSimdType(fromType));
+-        MOZ_ASSERT(IsSimdType(toType));
+-        // All conversions are int <-> float, so signedness is required.
+-        MOZ_ASSERT(sign != SimdSign::NotApplicable);
+-
+-        setResultType(toType);
+-        specialization_ = fromType; // expects fromType as input
+-
+-        setMovable();
+-        if (IsFloatingPointSimdType(fromType) && IsIntegerSimdType(toType)) {
+-            // Does the extra range check => do not remove
+-            setGuard();
+-        }
+-    }
+-
+-    static MSimdConvert* New(TempAllocator& alloc, MDefinition* obj, MIRType toType, SimdSign sign,
+-                             wasm::BytecodeOffset bytecodeOffset)
+-    {
+-        return new (alloc) MSimdConvert(obj, toType, sign, bytecodeOffset);
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdConvert)
+-
+-    // Create a MSimdConvert instruction and add it to the basic block.
+-    // Possibly create and add an equivalent sequence of instructions instead if
+-    // the current target doesn't support the requested conversion directly.
+-    // Return the inserted MInstruction that computes the converted value.
+-    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* obj,
+-                                      MIRType toType, SimdSign sign,
+-                                      wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset());
+-
+-    SimdSign signedness() const {
+-        return sign_;
+-    }
+-    wasm::BytecodeOffset bytecodeOffset() const {
+-        return bytecodeOffset_;
+-    }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!congruentIfOperandsEqual(ins))
+-            return false;
+-        const MSimdConvert* other = ins->toSimdConvert();
+-        return sign_ == other->sign_;
+-    }
+-    ALLOW_CLONE(MSimdConvert)
+-};
+-
+-// Casts bits of a vector input to another SIMD type (doesn't generate code).
+-class MSimdReinterpretCast
+-  : public MUnaryInstruction,
+-    public SimdPolicy<0>::Data
+-{
+-    MSimdReinterpretCast(MDefinition* obj, MIRType toType)
+-      : MUnaryInstruction(classOpcode, obj)
+-    {
+-        MIRType fromType = obj->type();
+-        MOZ_ASSERT(IsSimdType(fromType));
+-        MOZ_ASSERT(IsSimdType(toType));
+-        setMovable();
+-        setResultType(toType);
+-        specialization_ = fromType; // expects fromType as input
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdReinterpretCast)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-    bool congruentTo(const MDefinition* ins) const override {
+-        return congruentIfOperandsEqual(ins);
+-    }
+-    ALLOW_CLONE(MSimdReinterpretCast)
+-};
+-
+-// Extracts a lane element from a given vector type, given by its lane symbol.
+-//
+-// For integer SIMD types, a SimdSign must be provided so the lane value can be
+-// converted to a scalar correctly.
+-class MSimdExtractElement
+-  : public MUnaryInstruction,
+-    public SimdPolicy<0>::Data
+-{
+-  protected:
+-    unsigned lane_;
+-    SimdSign sign_;
+-
+-    MSimdExtractElement(MDefinition* obj, MIRType laneType, unsigned lane, SimdSign sign)
+-      : MUnaryInstruction(classOpcode, obj), lane_(lane), sign_(sign)
+-    {
+-        MIRType vecType = obj->type();
+-        MOZ_ASSERT(IsSimdType(vecType));
+-        MOZ_ASSERT(lane < SimdTypeToLength(vecType));
+-        MOZ_ASSERT(!IsSimdType(laneType));
+-        MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(vecType),
+-                   "Signedness must be specified for integer SIMD extractLanes");
+-        // The resulting type should match the lane type.
+-        // Allow extracting boolean lanes directly into an Int32 (for wasm).
+-        // Allow extracting Uint32 lanes into a double.
+-        //
+-        // We also allow extracting Uint32 lanes into a MIRType::Int32. This is
+-        // equivalent to extracting the Uint32 lane to a double and then
+-        // applying MTruncateToInt32, but it bypasses the conversion to/from
+-        // double.
+-        MOZ_ASSERT(SimdTypeToLaneType(vecType) == laneType ||
+-                   (IsBooleanSimdType(vecType) && laneType == MIRType::Int32) ||
+-                   (vecType == MIRType::Int32x4 && laneType == MIRType::Double &&
+-                    sign == SimdSign::Unsigned));
+-
+-        setMovable();
+-        specialization_ = vecType;
+-        setResultType(laneType);
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdExtractElement)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    unsigned lane() const {
+-        return lane_;
+-    }
+-
+-    SimdSign signedness() const {
+-        return sign_;
+-    }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!ins->isSimdExtractElement())
+-            return false;
+-        const MSimdExtractElement* other = ins->toSimdExtractElement();
+-        if (other->lane_ != lane_ || other->sign_ != sign_)
+-            return false;
+-        return congruentIfOperandsEqual(other);
+-    }
+-    ALLOW_CLONE(MSimdExtractElement)
+-};
+-
+-// Replaces the datum in the given lane by a scalar value of the same type.
+-class MSimdInsertElement
+-  : public MBinaryInstruction,
+-    public MixPolicy< SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >::Data
+-{
+-  private:
+-    unsigned lane_;
+-
+-    MSimdInsertElement(MDefinition* vec, MDefinition* val, unsigned lane)
+-      : MBinaryInstruction(classOpcode, vec, val), lane_(lane)
+-    {
+-        MIRType type = vec->type();
+-        MOZ_ASSERT(IsSimdType(type));
+-        MOZ_ASSERT(lane < SimdTypeToLength(type));
+-        setMovable();
+-        setResultType(type);
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdInsertElement)
+-    TRIVIAL_NEW_WRAPPERS
+-    NAMED_OPERANDS((0, vector), (1, value))
+-
+-    unsigned lane() const {
+-        return lane_;
+-    }
+-
+-    bool canConsumeFloat32(MUse* use) const override {
+-        return use == getUseFor(1) && SimdTypeToLaneType(type()) == MIRType::Float32;
+-    }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        return binaryCongruentTo(ins) && lane_ == ins->toSimdInsertElement()->lane();
+-    }
+-
+-#ifdef JS_JITSPEW
+-    void printOpcode(GenericPrinter& out) const override;
+-#endif
+-
+-    ALLOW_CLONE(MSimdInsertElement)
+-};
+-
+-// Returns true if all lanes are true.
+-class MSimdAllTrue
+-  : public MUnaryInstruction,
+-    public SimdPolicy<0>::Data
+-{
+-  protected:
+-    explicit MSimdAllTrue(MDefinition* obj, MIRType result)
+-      : MUnaryInstruction(classOpcode, obj)
+-    {
+-        MIRType simdType = obj->type();
+-        MOZ_ASSERT(IsBooleanSimdType(simdType));
+-        MOZ_ASSERT(result == MIRType::Boolean || result == MIRType::Int32);
+-        setResultType(result);
+-        specialization_ = simdType;
+-        setMovable();
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdAllTrue)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-    bool congruentTo(const MDefinition* ins) const override {
+-        return congruentIfOperandsEqual(ins);
+-    }
+-    ALLOW_CLONE(MSimdAllTrue)
+-};
+-
+-// Returns true if any lane is true.
+-class MSimdAnyTrue
+-  : public MUnaryInstruction,
+-    public SimdPolicy<0>::Data
+-{
+-  protected:
+-    explicit MSimdAnyTrue(MDefinition* obj, MIRType result)
+-      : MUnaryInstruction(classOpcode, obj)
+-    {
+-        MIRType simdType = obj->type();
+-        MOZ_ASSERT(IsBooleanSimdType(simdType));
+-        MOZ_ASSERT(result == MIRType::Boolean || result == MIRType::Int32);
+-        setResultType(result);
+-        specialization_ = simdType;
+-        setMovable();
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdAnyTrue)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-    bool congruentTo(const MDefinition* ins) const override {
+-        return congruentIfOperandsEqual(ins);
+-    }
+-
+-    ALLOW_CLONE(MSimdAnyTrue)
+-};
+-
+-// Base for the MSimdSwizzle and MSimdShuffle classes.
+-class MSimdShuffleBase
+-{
+-  protected:
+-    // As of now, there are at most 16 lanes. For each lane, we need to know
+-    // which input we choose and which of the lanes we choose.
+-    mozilla::Array<uint8_t, 16> lane_;
+-    uint32_t arity_;
+-
+-    MSimdShuffleBase(const uint8_t lanes[], MIRType type)
+-    {
+-        arity_ = SimdTypeToLength(type);
+-        for (unsigned i = 0; i < arity_; i++)
+-            lane_[i] = lanes[i];
+-    }
+-
+-    bool sameLanes(const MSimdShuffleBase* other) const {
+-        return arity_ == other->arity_ &&
+-               memcmp(&lane_[0], &other->lane_[0], arity_) == 0;
+-    }
+-
+-  public:
+-    unsigned numLanes() const {
+-        return arity_;
+-    }
+-
+-    unsigned lane(unsigned i) const {
+-        MOZ_ASSERT(i < arity_);
+-        return lane_[i];
+-    }
+-
+-    bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
+-        return arity_ == 4 && lane(0) == x && lane(1) == y && lane(2) == z &&
+-               lane(3) == w;
+-    }
+-};
+-
+-// Applies a swizzle operation to the input, putting the input lanes as
+-// indicated in the output register's lanes. This implements the SIMD.js
+-// "swizzle" function, that takes one vector and an array of lane indexes.
+-class MSimdSwizzle
+-  : public MUnaryInstruction,
+-    public MSimdShuffleBase,
+-    public NoTypePolicy::Data
+-{
+-  protected:
+-    MSimdSwizzle(MDefinition* obj, const uint8_t lanes[])
+-      : MUnaryInstruction(classOpcode, obj), MSimdShuffleBase(lanes, obj->type())
+-    {
+-        for (unsigned i = 0; i < arity_; i++)
+-            MOZ_ASSERT(lane(i) < arity_);
+-        setResultType(obj->type());
+-        setMovable();
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdSwizzle)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!ins->isSimdSwizzle())
+-            return false;
+-        const MSimdSwizzle* other = ins->toSimdSwizzle();
+-        return sameLanes(other) && congruentIfOperandsEqual(other);
+-    }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    MDefinition* foldsTo(TempAllocator& alloc) override;
+-
+-    ALLOW_CLONE(MSimdSwizzle)
+-};
+-
+-// A "general shuffle" is a swizzle or a shuffle with non-constant lane
+-// indices.  This is the one that Ion inlines and it can be folded into a
+-// MSimdSwizzle/MSimdShuffle if lane indices are constant.  Performance of
+-// general swizzle/shuffle does not really matter, as we expect to get
+-// constant indices most of the time.
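+-// Illustrative note (added for clarity; not from the original tree): once all
+-// lane operands become MConstant int32 values, foldsTo() below can replace
+-// this instruction with an equivalent MSimdSwizzle or MSimdShuffle.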
+-class MSimdGeneralShuffle :
+-    public MVariadicInstruction,
+-    public SimdShufflePolicy::Data
+-{
+-    unsigned numVectors_;
+-    unsigned numLanes_;
+-
+-  protected:
+-    MSimdGeneralShuffle(unsigned numVectors, unsigned numLanes, MIRType type)
+-      : MVariadicInstruction(classOpcode), numVectors_(numVectors), numLanes_(numLanes)
+-    {
+-        MOZ_ASSERT(IsSimdType(type));
+-        MOZ_ASSERT(SimdTypeToLength(type) == numLanes_);
+-
+-        setResultType(type);
+-        specialization_ = type;
+-        setGuard(); // throws if lane index is out of bounds
+-        setMovable();
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdGeneralShuffle)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    MOZ_MUST_USE bool init(TempAllocator& alloc) {
+-        return MVariadicInstruction::init(alloc, numVectors_ + numLanes_);
+-    }
+-    void setVector(unsigned i, MDefinition* vec) {
+-        MOZ_ASSERT(i < numVectors_);
+-        initOperand(i, vec);
+-    }
+-    void setLane(unsigned i, MDefinition* laneIndex) {
+-        MOZ_ASSERT(i < numLanes_);
+-        initOperand(numVectors_ + i, laneIndex);
+-    }
+-
+-    unsigned numVectors() const {
+-        return numVectors_;
+-    }
+-    unsigned numLanes() const {
+-        return numLanes_;
+-    }
+-    MDefinition* vector(unsigned i) const {
+-        MOZ_ASSERT(i < numVectors_);
+-        return getOperand(i);
+-    }
+-    MDefinition* lane(unsigned i) const {
+-        MOZ_ASSERT(i < numLanes_);
+-        return getOperand(numVectors_ + i);
+-    }
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!ins->isSimdGeneralShuffle())
+-            return false;
+-        const MSimdGeneralShuffle* other = ins->toSimdGeneralShuffle();
+-        return numVectors_ == other->numVectors() &&
+-               numLanes_ == other->numLanes() &&
+-               congruentIfOperandsEqual(other);
+-    }
+-
+-    MDefinition* foldsTo(TempAllocator& alloc) override;
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-};
+-
+-// Applies a shuffle operation to the inputs. The lane indexes select a source
+-// lane from the concatenation of the two input vectors.
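+-// Illustrative example (added for clarity; not from the original tree): for
+-// four-lane inputs, index 0 selects lhs lane 0 and index 4 selects rhs lane 0,
+-// so lane indexes [0, 4, 1, 5] interleave the low halves of lhs and rhs.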
+-class MSimdShuffle
+-  : public MBinaryInstruction,
+-    public MSimdShuffleBase,
+-    public NoTypePolicy::Data
+-{
+-    MSimdShuffle(MDefinition* lhs, MDefinition* rhs, const uint8_t lanes[])
+-      : MBinaryInstruction(classOpcode, lhs, rhs), MSimdShuffleBase(lanes, lhs->type())
+-    {
+-        MOZ_ASSERT(IsSimdType(lhs->type()));
+-        MOZ_ASSERT(IsSimdType(rhs->type()));
+-        MOZ_ASSERT(lhs->type() == rhs->type());
+-        for (unsigned i = 0; i < arity_; i++)
+-            MOZ_ASSERT(lane(i) < 2 * arity_);
+-        setResultType(lhs->type());
+-        setMovable();
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdShuffle)
+-
+-    static MInstruction* New(TempAllocator& alloc, MDefinition* lhs, MDefinition* rhs,
+-                             const uint8_t lanes[])
+-    {
+-        unsigned arity = SimdTypeToLength(lhs->type());
+-
+-        // Swap operands so that the majority of the new lanes come from LHS.
+-        // In the balanced case, swap operands if need be, so that a single
+-        // vshufps suffices on x86.
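+-        // Illustrative example (added for clarity; not from the original
+-        // tree): for arity 4 and lanes [4, 5, 0, 1], lanesFromLHS == 2 and
+-        // lanes[0] and lanes[1] both select RHS, so the operands are swapped
+-        // and the lanes are remapped to [0, 1, 4, 5].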
+-        unsigned lanesFromLHS = 0;
+-        for (unsigned i = 0; i < arity; i++) {
+-            if (lanes[i] < arity)
+-                lanesFromLHS++;
+-        }
+-
+-        if (lanesFromLHS < arity / 2 ||
+-            (arity == 4 && lanesFromLHS == 2 && lanes[0] >= 4 && lanes[1] >= 4)) {
+-            mozilla::Array<uint8_t, 16> newLanes;
+-            for (unsigned i = 0; i < arity; i++)
+-                newLanes[i] = (lanes[i] + arity) % (2 * arity);
+-            return New(alloc, rhs, lhs, &newLanes[0]);
+-        }
+-
+-        // If all lanes come from the same vector, just use swizzle instead.
+-        if (lanesFromLHS == arity)
+-            return MSimdSwizzle::New(alloc, lhs, lanes);
+-
+-        return new(alloc) MSimdShuffle(lhs, rhs, lanes);
+-    }
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!ins->isSimdShuffle())
+-            return false;
+-        const MSimdShuffle* other = ins->toSimdShuffle();
+-        return sameLanes(other) && binaryCongruentTo(other);
+-    }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    ALLOW_CLONE(MSimdShuffle)
+-};
+-
+-class MSimdUnaryArith
+-  : public MUnaryInstruction,
+-    public SimdSameAsReturnedTypePolicy<0>::Data
+-{
+-  public:
+-    enum Operation {
+-#define OP_LIST_(OP) OP,
+-        FOREACH_FLOAT_SIMD_UNOP(OP_LIST_)
+-        neg,
+-        not_
+-#undef OP_LIST_
+-    };
+-
+-    static const char* OperationName(Operation op) {
+-        switch (op) {
+-          case abs:                         return "abs";
+-          case neg:                         return "neg";
+-          case not_:                        return "not";
+-          case reciprocalApproximation:     return "reciprocalApproximation";
+-          case reciprocalSqrtApproximation: return "reciprocalSqrtApproximation";
+-          case sqrt:                        return "sqrt";
+-        }
+-        MOZ_CRASH("unexpected operation");
+-    }
+-
+-  private:
+-    Operation operation_;
+-
+-    MSimdUnaryArith(MDefinition* def, Operation op)
+-      : MUnaryInstruction(classOpcode, def), operation_(op)
+-    {
+-        MIRType type = def->type();
+-        MOZ_ASSERT(IsSimdType(type));
+-        MOZ_ASSERT_IF(IsIntegerSimdType(type), op == neg || op == not_);
+-        setResultType(type);
+-        setMovable();
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdUnaryArith)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    Operation operation() const { return operation_; }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        return congruentIfOperandsEqual(ins) && ins->toSimdUnaryArith()->operation() == operation();
+-    }
+-
+-#ifdef JS_JITSPEW
+-    void printOpcode(GenericPrinter& out) const override;
+-#endif
+-
+-    ALLOW_CLONE(MSimdUnaryArith);
+-};
+-
+-// Compares each lane of a SIMD vector with the corresponding lane of another
+-// SIMD vector, and returns a boolean vector containing the results of the
+-// comparison: a lane's bits are all set to 1 if the comparison is true, and
+-// to 0 otherwise.  When comparing integer vectors, a SimdSign must be
+-// provided to request a signed or unsigned comparison.
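+-// Illustrative example (added for clarity; not from the original tree): a
+-// signed lessThan on Int32x4 inputs [1, 2, 3, 4] and [4, 3, 2, 1] yields the
+-// Bool32x4 result [true, true, false, false].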
+-class MSimdBinaryComp
+-  : public MBinaryInstruction,
+-    public SimdAllPolicy::Data
+-{
+-  public:
+-    enum Operation {
+-#define NAME_(x) x,
+-        FOREACH_COMP_SIMD_OP(NAME_)
+-#undef NAME_
+-    };
+-
+-    static const char* OperationName(Operation op) {
+-        switch (op) {
+-#define NAME_(x) case x: return #x;
+-        FOREACH_COMP_SIMD_OP(NAME_)
+-#undef NAME_
+-        }
+-        MOZ_CRASH("unexpected operation");
+-    }
+-
+-  private:
+-    Operation operation_;
+-    SimdSign sign_;
+-
+-    MSimdBinaryComp(MDefinition* left, MDefinition* right, Operation op, SimdSign sign)
+-      : MBinaryInstruction(classOpcode, left, right), operation_(op), sign_(sign)
+-    {
+-        MOZ_ASSERT(left->type() == right->type());
+-        MIRType opType = left->type();
+-        MOZ_ASSERT(IsSimdType(opType));
+-        MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(opType),
+-                   "Signedness must be specified for integer SIMD compares");
+-        setResultType(MIRTypeToBooleanSimdType(opType));
+-        specialization_ = opType;
+-        setMovable();
+-        if (op == equal || op == notEqual)
+-            setCommutative();
+-    }
+-
+-    static MSimdBinaryComp* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+-                                Operation op, SimdSign sign)
+-    {
+-        return new (alloc) MSimdBinaryComp(left, right, op, sign);
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdBinaryComp)
+-
+-    // Create an MSimdBinaryComp or an equivalent sequence of instructions
+-    // supported by the current target.
+-    // Add all instructions to the basic block |addTo|.
+-    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+-                                      MDefinition* right, Operation op, SimdSign sign);
+-
+-    AliasSet getAliasSet() const override
+-    {
+-        return AliasSet::None();
+-    }
+-
+-    Operation operation() const { return operation_; }
+-    SimdSign signedness() const { return sign_; }
+-    MIRType specialization() const { return specialization_; }
+-
+-    // Swap the operands and reverse the comparison predicate.
+-    void reverse() {
+-        switch (operation()) {
+-          case greaterThan:        operation_ = lessThan; break;
+-          case greaterThanOrEqual: operation_ = lessThanOrEqual; break;
+-          case lessThan:           operation_ = greaterThan; break;
+-          case lessThanOrEqual:    operation_ = greaterThanOrEqual; break;
+-          case equal:
+-          case notEqual:
+-            break;
+-          default: MOZ_CRASH("Unexpected compare operation");
+-        }
+-        swapOperands();
+-    }
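+-    // Illustrative note (added for clarity; not from the original tree):
+-    // reversing is semantics-preserving because x < y iff y > x, and equal
+-    // and notEqual are symmetric, so for those only the operands are swapped.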
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!binaryCongruentTo(ins))
+-            return false;
+-        const MSimdBinaryComp* other = ins->toSimdBinaryComp();
+-        return specialization_ == other->specialization() &&
+-               operation_ == other->operation() &&
+-               sign_ == other->signedness();
+-    }
+-
+-#ifdef JS_JITSPEW
+-    void printOpcode(GenericPrinter& out) const override;
+-#endif
+-
+-    ALLOW_CLONE(MSimdBinaryComp)
+-};
+-
+-class MSimdBinaryArith
+-  : public MBinaryInstruction,
+-    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >::Data
+-{
+-  public:
+-    enum Operation {
+-#define OP_LIST_(OP) Op_##OP,
+-        FOREACH_NUMERIC_SIMD_BINOP(OP_LIST_)
+-        FOREACH_FLOAT_SIMD_BINOP(OP_LIST_)
+-#undef OP_LIST_
+-    };
+-
+-    static const char* OperationName(Operation op) {
+-        switch (op) {
+-#define OP_CASE_LIST_(OP) case Op_##OP: return #OP;
+-          FOREACH_NUMERIC_SIMD_BINOP(OP_CASE_LIST_)
+-          FOREACH_FLOAT_SIMD_BINOP(OP_CASE_LIST_)
+-#undef OP_CASE_LIST_
+-        }
+-        MOZ_CRASH("unexpected operation");
+-    }
+-
+-  private:
+-    Operation operation_;
+-
+-    MSimdBinaryArith(MDefinition* left, MDefinition* right, Operation op)
+-      : MBinaryInstruction(classOpcode, left, right), operation_(op)
+-    {
+-        MOZ_ASSERT(left->type() == right->type());
+-        MIRType type = left->type();
+-        MOZ_ASSERT(IsSimdType(type));
+-        MOZ_ASSERT_IF(IsIntegerSimdType(type), op == Op_add || op == Op_sub || op == Op_mul);
+-        setResultType(type);
+-        setMovable();
+-        if (op == Op_add || op == Op_mul || op == Op_min || op == Op_max)
+-            setCommutative();
+-    }
+-
+-    static MSimdBinaryArith* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+-                                 Operation op)
+-    {
+-        return new (alloc) MSimdBinaryArith(left, right, op);
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdBinaryArith)
+-
+-    // Create an MSimdBinaryArith instruction and add it to the basic block.
+-    // Possibly create and add an equivalent sequence of instructions instead
+-    // if the current target doesn't support the requested arithmetic
+-    // operation directly.
+-    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+-                                      MDefinition* right, Operation op);
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    Operation operation() const { return operation_; }
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!binaryCongruentTo(ins))
+-            return false;
+-        return operation_ == ins->toSimdBinaryArith()->operation();
+-    }
+-
+-#ifdef JS_JITSPEW
+-    void printOpcode(GenericPrinter& out) const override;
+-#endif
+-
+-    ALLOW_CLONE(MSimdBinaryArith)
+-};
+-
+-class MSimdBinarySaturating
+-  : public MBinaryInstruction,
+-    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1>>::Data
+-{
+-  public:
+-    enum Operation
+-    {
+-        add,
+-        sub,
+-    };
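+-    // Illustrative note (added for clarity; not from the original tree):
+-    // saturating arithmetic clamps to the lane's range instead of wrapping,
+-    // e.g. for Int8x16, a saturating add(120, 20) yields 127, not -116.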
+-
+-    static const char* OperationName(Operation op)
+-    {
+-        switch (op) {
+-          case add:
+-            return "add";
+-          case sub:
+-            return "sub";
+-        }
+-        MOZ_CRASH("unexpected operation");
+-    }
+-
+-  private:
+-    Operation operation_;
+-    SimdSign sign_;
+-
+-    MSimdBinarySaturating(MDefinition* left, MDefinition* right, Operation op, SimdSign sign)
+-      : MBinaryInstruction(classOpcode, left, right)
+-      , operation_(op)
+-      , sign_(sign)
+-    {
+-        MOZ_ASSERT(left->type() == right->type());
+-        MIRType type = left->type();
+-        MOZ_ASSERT(type == MIRType::Int8x16 || type == MIRType::Int16x8);
+-        setResultType(type);
+-        setMovable();
+-        if (op == add)
+-            setCommutative();
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdBinarySaturating)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    AliasSet getAliasSet() const override { return AliasSet::None(); }
+-
+-    Operation operation() const { return operation_; }
+-    SimdSign signedness() const { return sign_; }
+-
+-    bool congruentTo(const MDefinition* ins) const override
+-    {
+-        if (!binaryCongruentTo(ins))
+-            return false;
+-        return operation_ == ins->toSimdBinarySaturating()->operation() &&
+-               sign_ == ins->toSimdBinarySaturating()->signedness();
+-    }
+-
+-#ifdef JS_JITSPEW
+-    void printOpcode(GenericPrinter& out) const override;
+-#endif
+-
+-    ALLOW_CLONE(MSimdBinarySaturating)
+-};
+-
+-class MSimdBinaryBitwise
+-  : public MBinaryInstruction,
+-    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >::Data
+-{
+-  public:
+-    enum Operation {
+-        and_,
+-        or_,
+-        xor_
+-    };
+-
+-    static const char* OperationName(Operation op) {
+-        switch (op) {
+-          case and_: return "and";
+-          case or_:  return "or";
+-          case xor_: return "xor";
+-        }
+-        MOZ_CRASH("unexpected operation");
+-    }
+-
+-  private:
+-    Operation operation_;
+-
+-    MSimdBinaryBitwise(MDefinition* left, MDefinition* right, Operation op)
+-      : MBinaryInstruction(classOpcode, left, right), operation_(op)
+-    {
+-        MOZ_ASSERT(left->type() == right->type());
+-        MIRType type = left->type();
+-        MOZ_ASSERT(IsSimdType(type));
+-        setResultType(type);
+-        setMovable();
+-        setCommutative();
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdBinaryBitwise)
+-    TRIVIAL_NEW_WRAPPERS
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    Operation operation() const { return operation_; }
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!binaryCongruentTo(ins))
+-            return false;
+-        return operation_ == ins->toSimdBinaryBitwise()->operation();
+-    }
+-
+-#ifdef JS_JITSPEW
+-    void printOpcode(GenericPrinter& out) const override;
+-#endif
+-
+-    ALLOW_CLONE(MSimdBinaryBitwise)
+-};
+-
+-class MSimdShift
+-  : public MBinaryInstruction,
+-    public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >::Data
+-{
+-  public:
+-    enum Operation {
+-        lsh,
+-        rsh,
+-        ursh
+-    };
+-
+-  private:
+-    Operation operation_;
+-
+-    MSimdShift(MDefinition* left, MDefinition* right, Operation op)
+-      : MBinaryInstruction(classOpcode, left, right), operation_(op)
+-    {
+-        MIRType type = left->type();
+-        MOZ_ASSERT(IsIntegerSimdType(type));
+-        setResultType(type);
+-        setMovable();
+-    }
+-
+-    static MSimdShift* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
+-                           Operation op)
+-    {
+-        return new (alloc) MSimdShift(left, right, op);
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdShift)
+-
+-    // Create an MSimdShift instruction and add it to the basic block. Possibly
+-    // create and add an equivalent sequence of instructions instead if the
+-    // current target doesn't support the requested shift operation directly.
+-    // Return the inserted MInstruction that computes the shifted value.
+-    static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
+-                                      MDefinition* right, Operation op);
+-
+-    // Get the relevant right shift operation given the signedness of a type.
+-    static Operation rshForSign(SimdSign sign) {
+-        return sign == SimdSign::Unsigned ? ursh : rsh;
+-    }
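+-    // Illustrative note (added for clarity; not from the original tree):
+-    // unsigned lanes use a logical shift (zero-fill) and signed lanes an
+-    // arithmetic shift (sign-fill), matching >>> vs >> in JS.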
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    Operation operation() const { return operation_; }
+-
+-    static const char* OperationName(Operation op) {
+-        switch (op) {
+-          case lsh:  return "lsh";
+-          case rsh:  return "rsh-arithmetic";
+-          case ursh: return "rsh-logical";
+-        }
+-        MOZ_CRASH("unexpected operation");
+-    }
+-
+-#ifdef JS_JITSPEW
+-    void printOpcode(GenericPrinter& out) const override;
+-#endif
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!binaryCongruentTo(ins))
+-            return false;
+-        return operation_ == ins->toSimdShift()->operation();
+-    }
+-
+-    ALLOW_CLONE(MSimdShift)
+-};
+-
+-class MSimdSelect
+-  : public MTernaryInstruction,
+-    public SimdSelectPolicy::Data
+-{
+-    MSimdSelect(MDefinition* mask, MDefinition* lhs, MDefinition* rhs)
+-      : MTernaryInstruction(classOpcode, mask, lhs, rhs)
+-    {
+-        MOZ_ASSERT(IsBooleanSimdType(mask->type()));
+-        MOZ_ASSERT(lhs->type() == rhs->type());
+-        MIRType type = lhs->type();
+-        MOZ_ASSERT(IsSimdType(type));
+-        setResultType(type);
+-        specialization_ = type;
+-        setMovable();
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdSelect)
+-    TRIVIAL_NEW_WRAPPERS
+-    NAMED_OPERANDS((0, mask))
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        return congruentIfOperandsEqual(ins);
+-    }
+-
+-    ALLOW_CLONE(MSimdSelect)
+-};
+-
+ // Deep clone a constant JSObject.
+ class MCloneLiteral
+   : public MUnaryInstruction,
+     public ObjectPolicy<0>::Data
+ {
+   protected:
+     explicit MCloneLiteral(MDefinition* obj)
+       : MUnaryInstruction(classOpcode, obj)
+@@ -3735,133 +2629,16 @@ class MTypedObjectDescr
+     bool congruentTo(const MDefinition* ins) const override {
+         return congruentIfOperandsEqual(ins);
+     }
+     AliasSet getAliasSet() const override {
+         return AliasSet::Load(AliasSet::ObjectFields);
+     }
+ };
+ 
+-// Generic way of constructing a SIMD object in IonMonkey: this instruction
+-// takes a SIMD value as its argument and returns a new SIMD object that
+-// corresponds to the MIRType of its operand.
+-class MSimdBox
+-  : public MUnaryInstruction,
+-    public NoTypePolicy::Data
+-{
+-  protected:
+-    CompilerGCPointer<InlineTypedObject*> templateObject_;
+-    SimdType simdType_;
+-    gc::InitialHeap initialHeap_;
+-
+-    MSimdBox(TempAllocator& alloc,
+-             CompilerConstraintList* constraints,
+-             MDefinition* op,
+-             InlineTypedObject* templateObject,
+-             SimdType simdType,
+-             gc::InitialHeap initialHeap)
+-      : MUnaryInstruction(classOpcode, op),
+-        templateObject_(templateObject),
+-        simdType_(simdType),
+-        initialHeap_(initialHeap)
+-    {
+-        MOZ_ASSERT(IsSimdType(op->type()));
+-        setMovable();
+-        setResultType(MIRType::Object);
+-        if (constraints)
+-            setResultTypeSet(MakeSingletonTypeSet(alloc, constraints, templateObject));
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdBox)
+-    TRIVIAL_NEW_WRAPPERS_WITH_ALLOC
+-
+-    InlineTypedObject* templateObject() const {
+-        return templateObject_;
+-    }
+-
+-    SimdType simdType() const {
+-        return simdType_;
+-    }
+-
+-    gc::InitialHeap initialHeap() const {
+-        return initialHeap_;
+-    }
+-
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!congruentIfOperandsEqual(ins))
+-            return false;
+-        const MSimdBox* box = ins->toSimdBox();
+-        if (box->simdType() != simdType())
+-            return false;
+-        MOZ_ASSERT(box->templateObject() == templateObject());
+-        if (box->initialHeap() != initialHeap())
+-            return false;
+-        return true;
+-    }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-#ifdef JS_JITSPEW
+-    void printOpcode(GenericPrinter& out) const override;
+-#endif
+-
+-    MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
+-    bool canRecoverOnBailout() const override {
+-        return true;
+-    }
+-
+-    bool appendRoots(MRootList& roots) const override {
+-        return roots.append(templateObject_);
+-    }
+-};
+-
+-class MSimdUnbox
+-  : public MUnaryInstruction,
+-    public SingleObjectPolicy::Data
+-{
+-  protected:
+-    SimdType simdType_;
+-
+-    MSimdUnbox(MDefinition* op, SimdType simdType)
+-      : MUnaryInstruction(classOpcode, op),
+-        simdType_(simdType)
+-    {
+-        MIRType type = SimdTypeToMIRType(simdType);
+-        MOZ_ASSERT(IsSimdType(type));
+-        setGuard();
+-        setMovable();
+-        setResultType(type);
+-    }
+-
+-  public:
+-    INSTRUCTION_HEADER(SimdUnbox)
+-    TRIVIAL_NEW_WRAPPERS
+-    ALLOW_CLONE(MSimdUnbox)
+-
+-    SimdType simdType() const { return simdType_; }
+-
+-    MDefinition* foldsTo(TempAllocator& alloc) override;
+-    bool congruentTo(const MDefinition* ins) const override {
+-        if (!congruentIfOperandsEqual(ins))
+-            return false;
+-        return ins->toSimdUnbox()->simdType() == simdType();
+-    }
+-
+-    AliasSet getAliasSet() const override {
+-        return AliasSet::None();
+-    }
+-
+-#ifdef JS_JITSPEW
+-    void printOpcode(GenericPrinter& out) const override;
+-#endif
+-};
+-
+ // Creates a new derived type object. At runtime, this is just a call
+ // to `BinaryBlock::createDerived()`. That is, the MIR itself does not
+ // compile to particularly optimized code. However, using a distinct
+ // MIR for creating derived type objects allows the compiler to
+ // optimize ephemeral typed objects as would be created for a
+ // reference like `a.b.c` -- here, the `a.b` will create an ephemeral
+ // derived type object that aliases the memory of `a` itself. The
+ // specific nature of `a.b` is revealed by using
+@@ -10459,28 +9236,26 @@ enum MemoryBarrierRequirement
+ 
+ // Load an unboxed scalar value from a typed array or other object.
+ class MLoadUnboxedScalar
+   : public MBinaryInstruction,
+     public SingleObjectPolicy::Data
+ {
+     Scalar::Type storageType_;
+     Scalar::Type readType_;
+-    unsigned numElems_; // used only for SIMD
+     bool requiresBarrier_;
+     int32_t offsetAdjustment_;
+     bool canonicalizeDoubles_;
+ 
+     MLoadUnboxedScalar(MDefinition* elements, MDefinition* index, Scalar::Type storageType,
+                        MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier,
+                        int32_t offsetAdjustment = 0, bool canonicalizeDoubles = true)
+       : MBinaryInstruction(classOpcode, elements, index),
+         storageType_(storageType),
+         readType_(storageType),
+-        numElems_(1),
+         requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
+         offsetAdjustment_(offsetAdjustment),
+         canonicalizeDoubles_(canonicalizeDoubles)
+     {
+         setResultType(MIRType::Value);
+         if (requiresBarrier_)
+             setGuard();         // Not removable or movable
+         else
+@@ -10490,23 +9265,16 @@ class MLoadUnboxedScalar
+         MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
+     }
+ 
+   public:
+     INSTRUCTION_HEADER(LoadUnboxedScalar)
+     TRIVIAL_NEW_WRAPPERS
+     NAMED_OPERANDS((0, elements), (1, index))
+ 
+-    void setSimdRead(Scalar::Type type, unsigned numElems) {
+-        readType_ = type;
+-        numElems_ = numElems;
+-    }
+-    unsigned numElems() const {
+-        return numElems_;
+-    }
+     Scalar::Type readType() const {
+         return readType_;
+     }
+ 
+     Scalar::Type storageType() const {
+         return storageType_;
+     }
+     bool fallible() const {
+@@ -10538,18 +9306,16 @@ class MLoadUnboxedScalar
+             return false;
+         if (!ins->isLoadUnboxedScalar())
+             return false;
+         const MLoadUnboxedScalar* other = ins->toLoadUnboxedScalar();
+         if (storageType_ != other->storageType_)
+             return false;
+         if (readType_ != other->readType_)
+             return false;
+-        if (numElems_ != other->numElems_)
+-            return false;
+         if (offsetAdjustment() != other->offsetAdjustment())
+             return false;
+         if (canonicalizeDoubles() != other->canonicalizeDoubles())
+             return false;
+         return congruentIfOperandsEqual(other);
+     }
+ 
+ #ifdef JS_JITSPEW
+@@ -10617,17 +9383,17 @@ class MLoadTypedArrayElementHole
+ class StoreUnboxedScalarBase
+ {
+     Scalar::Type writeType_;
+ 
+   protected:
+     explicit StoreUnboxedScalarBase(Scalar::Type writeType)
+       : writeType_(writeType)
+     {
+-        MOZ_ASSERT(isIntegerWrite() || isFloatWrite() || isSimdWrite());
++        MOZ_ASSERT(isIntegerWrite() || isFloatWrite());
+     }
+ 
+   public:
+     void setWriteType(Scalar::Type type) {
+         writeType_ = type;
+     }
+     Scalar::Type writeType() const {
+         return writeType_;
+@@ -10643,19 +9409,16 @@ class StoreUnboxedScalarBase
+                writeType_ == Scalar::Uint16 ||
+                writeType_ == Scalar::Int32 ||
+                writeType_ == Scalar::Uint32;
+     }
+     bool isFloatWrite() const {
+         return writeType_ == Scalar::Float32 ||
+                writeType_ == Scalar::Float64;
+     }
+-    bool isSimdWrite() const {
+-        return Scalar::isSimdType(writeType());
+-    }
+ };
+ 
+ // Store an unboxed scalar value to a typed array or other object.
+ class MStoreUnboxedScalar
+   : public MTernaryInstruction,
+     public StoreUnboxedScalarBase,
+     public StoreUnboxedScalarPolicy::Data
+ {
+@@ -10668,52 +9431,42 @@ class MStoreUnboxedScalar
+   private:
+     Scalar::Type storageType_;
+ 
+     // Whether this store truncates out of range inputs, for use by range analysis.
+     TruncateInputKind truncateInput_;
+ 
+     bool requiresBarrier_;
+     int32_t offsetAdjustment_;
+-    unsigned numElems_; // used only for SIMD
+ 
+     MStoreUnboxedScalar(MDefinition* elements, MDefinition* index, MDefinition* value,
+                         Scalar::Type storageType, TruncateInputKind truncateInput,
+                         MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier,
+                         int32_t offsetAdjustment = 0)
+       : MTernaryInstruction(classOpcode, elements, index, value),
+         StoreUnboxedScalarBase(storageType),
+         storageType_(storageType),
+         truncateInput_(truncateInput),
+         requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
+-        offsetAdjustment_(offsetAdjustment),
+-        numElems_(1)
++        offsetAdjustment_(offsetAdjustment)
+     {
+         if (requiresBarrier_)
+             setGuard();         // Not removable or movable
+         else
+             setMovable();
+         MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment));
+         MOZ_ASSERT(index->type() == MIRType::Int32);
+         MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType);
+     }
+ 
+   public:
+     INSTRUCTION_HEADER(StoreUnboxedScalar)
+     TRIVIAL_NEW_WRAPPERS
+     NAMED_OPERANDS((0, elements), (1, index), (2, value))
+ 
+-    void setSimdWrite(Scalar::Type writeType, unsigned numElems) {
+-        MOZ_ASSERT(Scalar::isSimdType(writeType));
+-        setWriteType(writeType);
+-        numElems_ = numElems;
+-    }
+-    unsigned numElems() const {
+-        return numElems_;
+-    }
+     Scalar::Type storageType() const {
+         return storageType_;
+     }
+     AliasSet getAliasSet() const override {
+         return AliasSet::Store(AliasSet::UnboxedElement);
+     }
+     TruncateInputKind truncateInput() const {
+         return truncateInput_;
+@@ -10751,18 +9504,16 @@ class MStoreTypedArrayElementHole
+     }
+ 
+   public:
+     INSTRUCTION_HEADER(StoreTypedArrayElementHole)
+     TRIVIAL_NEW_WRAPPERS
+     NAMED_OPERANDS((0, elements), (1, length), (2, index), (3, value))
+ 
+     Scalar::Type arrayType() const {
+-        MOZ_ASSERT(!Scalar::isSimdType(writeType()),
+-                   "arrayType == writeType iff the write type isn't SIMD");
+         return writeType();
+     }
+     AliasSet getAliasSet() const override {
+         return AliasSet::Store(AliasSet::UnboxedElement);
+     }
+     TruncateKind operandTruncateKind(size_t index) const override;
+ 
+     bool canConsumeFloat32(MUse* use) const override {
+@@ -14514,17 +13265,16 @@ class MAsmJSMemoryAccess
+ 
+   public:
+     explicit MAsmJSMemoryAccess(Scalar::Type accessType)
+       : offset_(0),
+         accessType_(accessType),
+         needsBoundsCheck_(true)
+     {
+         MOZ_ASSERT(accessType != Scalar::Uint8Clamped);
+-        MOZ_ASSERT(!Scalar::isSimdType(accessType));
+     }
+ 
+     uint32_t offset() const { return offset_; }
+     uint32_t endOffset() const { return offset() + byteSize(); }
+     Scalar::Type accessType() const { return accessType_; }
+     unsigned byteSize() const { return TypedArrayElemSize(accessType()); }
+     bool needsBoundsCheck() const { return needsBoundsCheck_; }
+ 
+@@ -14824,17 +13574,17 @@ class MWasmLoadGlobalVar
+ {
+     MWasmLoadGlobalVar(MIRType type, unsigned globalDataOffset, bool isConstant, bool isIndirect,
+                        MDefinition* tlsPtr)
+       : MUnaryInstruction(classOpcode, tlsPtr),
+         globalDataOffset_(globalDataOffset),
+         isConstant_(isConstant),
+         isIndirect_(isIndirect)
+     {
+-        MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
++        MOZ_ASSERT(IsNumberType(type));
+         setResultType(type);
+         setMovable();
+     }
+ 
+     unsigned globalDataOffset_;
+     bool isConstant_;
+     bool isIndirect_;
+ 
+diff --git a/js/src/jit/MIRGenerator.h b/js/src/jit/MIRGenerator.h
+--- a/js/src/jit/MIRGenerator.h
++++ b/js/src/jit/MIRGenerator.h
+@@ -153,20 +153,16 @@ class MIRGenerator
+ 
+     void setNeedsStaticStackAlignment() {
+         needsStaticStackAlignment_ = true;
+     }
+     bool needsStaticStackAlignment() const {
+         return needsStaticStackAlignment_;
+     }
+ 
+-    // Traverses the graph to find out whether there is any SIMD instruction.
+-    // Costly, but the value is cached, so don't worry about calling it
+-    // several times.
+-    bool usesSimd();
+-
+     bool modifiesFrameArguments() const {
+         return modifiesFrameArguments_;
+     }
+ 
+     typedef Vector<ObjectGroup*, 0, JitAllocPolicy> ObjectGroupVector;
+ 
+     // When aborting with AbortReason::PreliminaryObjects, all groups with
+     // preliminary objects which haven't been analyzed yet.
+@@ -185,18 +181,16 @@ class MIRGenerator
+     MIRGraph* graph_;
+     AbortReasonOr<Ok> offThreadStatus_;
+     ObjectGroupVector abortedPreliminaryGroups_;
+     mozilla::Atomic<bool, mozilla::Relaxed> cancelBuild_;
+ 
+     uint32_t wasmMaxStackArgBytes_;
+     bool needsOverrecursedCheck_;
+     bool needsStaticStackAlignment_;
+-    bool usesSimd_;
+-    bool cachedUsesSimd_;
+ 
+     // Keep track of whether frame arguments are modified during execution.
+     // RegAlloc needs to know this as spilling values back to their register
+     // slots is not compatible with that.
+     bool modifiesFrameArguments_;
+ 
+     bool instrumentedProfiling_;
+     bool instrumentedProfilingIsCached_;
+diff --git a/js/src/jit/MIRGraph.cpp b/js/src/jit/MIRGraph.cpp
+--- a/js/src/jit/MIRGraph.cpp
++++ b/js/src/jit/MIRGraph.cpp
+@@ -27,59 +27,26 @@ MIRGenerator::MIRGenerator(CompileRealm*
+     alloc_(alloc),
+     graph_(graph),
+     offThreadStatus_(Ok()),
+     abortedPreliminaryGroups_(*alloc_),
+     cancelBuild_(false),
+     wasmMaxStackArgBytes_(0),
+     needsOverrecursedCheck_(false),
+     needsStaticStackAlignment_(false),
+-    usesSimd_(false),
+-    cachedUsesSimd_(false),
+     modifiesFrameArguments_(false),
+     instrumentedProfiling_(false),
+     instrumentedProfilingIsCached_(false),
+     safeForMinorGC_(true),
+     stringsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateStrings() : false),
+     minWasmHeapLength_(0),
+     options(options),
+     gs_(alloc)
+ { }
+ 
+-bool
+-MIRGenerator::usesSimd()
+-{
+-    if (cachedUsesSimd_)
+-        return usesSimd_;
+-
+-    cachedUsesSimd_ = true;
+-    for (ReversePostorderIterator block = graph_->rpoBegin(),
+-                                  end   = graph_->rpoEnd();
+-         block != end;
+-         block++)
+-    {
+-        // It's fine to use MInstructionIterator here because we don't have to
+-        // worry about Phis, since any reachable phi (or phi cycle) will have at
+-        // least one instruction as an input.
+-        for (MInstructionIterator inst = block->begin(); inst != block->end(); inst++) {
+-            // Instructions that have SIMD inputs but not a SIMD type are fine
+-            // to ignore, as their inputs are also visited by this walk. By
+-            // induction, at least one instruction with a SIMD type is then
+-            // reached.
+-            if (IsSimdType(inst->type())) {
+-                MOZ_ASSERT(SupportsSimd);
+-                usesSimd_ = true;
+-                return true;
+-            }
+-        }
+-    }
+-    usesSimd_ = false;
+-    return false;
+-}
+-
+ mozilla::GenericErrorResult<AbortReason>
+ MIRGenerator::abort(AbortReason r)
+ {
+     if (JitSpewEnabled(JitSpew_IonAbort)) {
+         switch (r) {
+           case AbortReason::Alloc:
+             JitSpew(JitSpew_IonAbort, "AbortReason::Alloc");
+             break;
+diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
+--- a/js/src/jit/MacroAssembler.cpp
++++ b/js/src/jit/MacroAssembler.cpp
+@@ -401,90 +401,47 @@ template void MacroAssembler::guardTypeS
+                                            Label* miss);
+ template void MacroAssembler::guardTypeSet(const ValueOperand& value, const TypeSet* types,
+                                            BarrierKind kind, Register unboxScratch,
+                                            Register objScratch, Register spectreRegToZero,
+                                            Label* miss);
+ 
+ template<typename S, typename T>
+ static void
+-StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, const T& dest,
+-                       unsigned numElems)
++StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, const T& dest)
+ {
+     switch (arrayType) {
+       case Scalar::Float32:
+         masm.storeFloat32(value, dest);
+         break;
+       case Scalar::Float64:
+         masm.storeDouble(value, dest);
+         break;
+-      case Scalar::Float32x4:
+-        switch (numElems) {
+-          case 1:
+-            masm.storeFloat32(value, dest);
+-            break;
+-          case 2:
+-            masm.storeDouble(value, dest);
+-            break;
+-          case 3:
+-            masm.storeFloat32x3(value, dest);
+-            break;
+-          case 4:
+-            masm.storeUnalignedSimd128Float(value, dest);
+-            break;
+-          default: MOZ_CRASH("unexpected number of elements in simd write");
+-        }
+-        break;
+-      case Scalar::Int32x4:
+-        switch (numElems) {
+-          case 1:
+-            masm.storeInt32x1(value, dest);
+-            break;
+-          case 2:
+-            masm.storeInt32x2(value, dest);
+-            break;
+-          case 3:
+-            masm.storeInt32x3(value, dest);
+-            break;
+-          case 4:
+-            masm.storeUnalignedSimd128Int(value, dest);
+-            break;
+-          default: MOZ_CRASH("unexpected number of elements in simd write");
+-        }
+-        break;
+-      case Scalar::Int8x16:
+-        MOZ_ASSERT(numElems == 16, "unexpected partial store");
+-        masm.storeUnalignedSimd128Int(value, dest);
+-        break;
+-      case Scalar::Int16x8:
+-        MOZ_ASSERT(numElems == 8, "unexpected partial store");
+-        masm.storeUnalignedSimd128Int(value, dest);
+-        break;
+       default:
+         MOZ_CRASH("Invalid typed array type");
+     }
+ }
+ 
+ void
+ MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
+-                                       const BaseIndex& dest, unsigned numElems)
++                                       const BaseIndex& dest)
+ {
+-    StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
++    StoreToTypedFloatArray(*this, arrayType, value, dest);
+ }
+ void
+ MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
+-                                       const Address& dest, unsigned numElems)
++                                       const Address& dest)
+ {
+-    StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
++    StoreToTypedFloatArray(*this, arrayType, value, dest);
+ }
+ 
+ template<typename T>
+ void
+ MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp,
+-                                   Label* fail, bool canonicalizeDoubles, unsigned numElems)
++                                   Label* fail, bool canonicalizeDoubles)
+ {
+     switch (arrayType) {
+       case Scalar::Int8:
+         load8SignExtend(src, dest.gpr());
+         break;
+       case Scalar::Uint8:
+       case Scalar::Uint8Clamped:
+         load8ZeroExtend(src, dest.gpr());
+@@ -515,69 +472,27 @@ MacroAssembler::loadFromTypedArray(Scala
+         loadFloat32(src, dest.fpu());
+         canonicalizeFloat(dest.fpu());
+         break;
+       case Scalar::Float64:
+         loadDouble(src, dest.fpu());
+         if (canonicalizeDoubles)
+             canonicalizeDouble(dest.fpu());
+         break;
+-      case Scalar::Int32x4:
+-        switch (numElems) {
+-          case 1:
+-            loadInt32x1(src, dest.fpu());
+-            break;
+-          case 2:
+-            loadInt32x2(src, dest.fpu());
+-            break;
+-          case 3:
+-            loadInt32x3(src, dest.fpu());
+-            break;
+-          case 4:
+-            loadUnalignedSimd128Int(src, dest.fpu());
+-            break;
+-          default: MOZ_CRASH("unexpected number of elements in SIMD load");
+-        }
+-        break;
+-      case Scalar::Float32x4:
+-        switch (numElems) {
+-          case 1:
+-            loadFloat32(src, dest.fpu());
+-            break;
+-          case 2:
+-            loadDouble(src, dest.fpu());
+-            break;
+-          case 3:
+-            loadFloat32x3(src, dest.fpu());
+-            break;
+-          case 4:
+-            loadUnalignedSimd128Float(src, dest.fpu());
+-            break;
+-          default: MOZ_CRASH("unexpected number of elements in SIMD load");
+-        }
+-        break;
+-      case Scalar::Int8x16:
+-        MOZ_ASSERT(numElems == 16, "unexpected partial load");
+-        loadUnalignedSimd128Int(src, dest.fpu());
+-        break;
+-      case Scalar::Int16x8:
+-        MOZ_ASSERT(numElems == 8, "unexpected partial load");
+-        loadUnalignedSimd128Int(src, dest.fpu());
+-        break;
+       default:
+         MOZ_CRASH("Invalid typed array type");
+     }
+ }
+ 
+-template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, AnyRegister dest,
+-                                                 Register temp, Label* fail, bool canonicalizeDoubles,
+-                                                 unsigned numElems);
+-template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, AnyRegister dest,
+-                                                 Register temp, Label* fail, bool canonicalizeDoubles,
+-                                                 unsigned numElems);
++template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src,
++                                                 AnyRegister dest, Register temp, Label* fail,
++                                                 bool canonicalizeDoubles);
++template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src,
++                                                 AnyRegister dest, Register temp, Label* fail,
++                                                 bool canonicalizeDoubles);
+ 
+ template<typename T>
+ void
+ MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest,
+                                    bool allowDouble, Register temp, Label* fail)
+ {
+     switch (arrayType) {
+       case Scalar::Int8:
+@@ -3393,51 +3308,16 @@ void
+ MacroAssembler::branchIfInlineTypedObject(Register obj, Register scratch, Label* label)
+ {
+     loadObjClassUnsafe(obj, scratch);
+     branchPtr(Assembler::Equal, scratch, ImmPtr(&InlineOpaqueTypedObject::class_), label);
+     branchPtr(Assembler::Equal, scratch, ImmPtr(&InlineTransparentTypedObject::class_), label);
+ }
+ 
+ void
+-MacroAssembler::branchIfNotSimdObject(Register obj, Register scratch, SimdType simdType,
+-                                      Label* label)
+-{
+-    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
+-
+-    // Guard that the object has the same representation as the one produced
+-    // for SIMD value types.
+-    Address clasp(scratch, ObjectGroup::offsetOfClasp());
+-    static_assert(!SimdTypeDescr::Opaque, "SIMD objects are transparent");
+-    branchPtr(Assembler::NotEqual, clasp, ImmPtr(&InlineTransparentTypedObject::class_),
+-              label);
+-
+-    // obj->type()->typeDescr()
+-    // The previous class pointer comparison implies that the addendumKind is
+-    // Addendum_TypeDescr.
+-    loadPtr(Address(scratch, ObjectGroup::offsetOfAddendum()), scratch);
+-
+-    // Check for the /Kind/ reserved slot of the TypeDescr.  This is an Int32
+-    // Value which is equivalent to the object class check.
+-    static_assert(JS_DESCR_SLOT_KIND < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
+-    Address typeDescrKind(scratch, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_KIND));
+-    assertTestInt32(Assembler::Equal, typeDescrKind,
+-      "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_KIND).isInt32())");
+-    branch32(Assembler::NotEqual, ToPayload(typeDescrKind), Imm32(js::type::Simd), label);
+-
+-    // Check if the SimdTypeDescr /Type/ matches the specialization of this
+-    // MSimdUnbox instruction.
+-    static_assert(JS_DESCR_SLOT_TYPE < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
+-    Address typeDescrType(scratch, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_TYPE));
+-    assertTestInt32(Assembler::Equal, typeDescrType,
+-      "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_TYPE).isInt32())");
+-    branch32(Assembler::NotEqual, ToPayload(typeDescrType), Imm32(int32_t(simdType)), label);
+-}
+-
+-void
+ MacroAssembler::copyObjGroupNoPreBarrier(Register sourceObj, Register destObj, Register scratch)
+ {
+     loadPtr(Address(sourceObj, JSObject::offsetOfGroup()), scratch);
+     storePtr(scratch, Address(destObj, JSObject::offsetOfGroup()));
+ }
+ 
+ void
+ MacroAssembler::loadTypedObjectDescr(Register obj, Register dest)
+diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
+--- a/js/src/jit/MacroAssembler.h
++++ b/js/src/jit/MacroAssembler.h
+@@ -1151,18 +1151,16 @@ class MacroAssembler : public MacroAssem
+                                   Register scratch, Label* label);
+     void branchIfObjGroupHasNoAddendum(Register obj, Register scratch, Label* label);
+     void branchIfPretenuredGroup(const ObjectGroup* group, Register scratch, Label* label);
+ 
+     void branchIfNonNativeObj(Register obj, Register scratch, Label* label);
+ 
+     void branchIfInlineTypedObject(Register obj, Register scratch, Label* label);
+ 
+-    void branchIfNotSimdObject(Register obj, Register scratch, SimdType simdType, Label* label);
+-
+     inline void branchTestClassIsProxy(bool proxy, Register clasp, Label* label);
+ 
+     inline void branchTestObjectIsProxy(bool proxy, Register object, Register scratch, Label* label);
+ 
+     inline void branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
+                                              const void* handlerp, Label* label);
+ 
+     void copyObjGroupNoPreBarrier(Register sourceObj, Register destObj, Register scratch);
+@@ -1369,19 +1367,16 @@ class MacroAssembler : public MacroAssem
+     // ========================================================================
+     // Canonicalization primitives.
+     inline void canonicalizeDouble(FloatRegister reg);
+     inline void canonicalizeDoubleIfDeterministic(FloatRegister reg);
+ 
+     inline void canonicalizeFloat(FloatRegister reg);
+     inline void canonicalizeFloatIfDeterministic(FloatRegister reg);
+ 
+-    inline void canonicalizeFloat32x4(FloatRegister reg, FloatRegister scratch)
+-        DEFINED_ON(x86_shared);
+-
+   public:
+     // ========================================================================
+     // Memory access primitives.
+     inline void storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
+         DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+     inline void storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& dest)
+         DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+     inline void storeUncanonicalizedDouble(FloatRegister src, const Operand& dest)
+@@ -2138,17 +2133,17 @@ class MacroAssembler : public MacroAssem
+         call(preBarrier);
+         Pop(PreBarrierReg);
+ 
+         bind(&done);
+     }
+ 
+     template<typename T>
+     void loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp, Label* fail,
+-                            bool canonicalizeDoubles = true, unsigned numElems = 0);
++                            bool canonicalizeDoubles = true);
+ 
+     template<typename T>
+     void loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest, bool allowDouble,
+                             Register temp, Label* fail);
+ 
+     template<typename S, typename T>
+     void storeToTypedIntArray(Scalar::Type arrayType, const S& value, const T& dest) {
+         switch (arrayType) {
+@@ -2165,20 +2160,18 @@ class MacroAssembler : public MacroAssem
+           case Scalar::Uint32:
+             store32(value, dest);
+             break;
+           default:
+             MOZ_CRASH("Invalid typed array type");
+         }
+     }
+ 
+-    void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest,
+-                                unsigned numElems = 0);
+-    void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest,
+-                                unsigned numElems = 0);
++    void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest);
++    void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest);
+ 
+     void memoryBarrierBefore(const Synchronization& sync);
+     void memoryBarrierAfter(const Synchronization& sync);
+ 
+     // Load a property from an UnboxedPlainObject or UnboxedArrayObject.
+     template <typename T>
+     void loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output);
+ 
+diff --git a/js/src/jit/RangeAnalysis.cpp b/js/src/jit/RangeAnalysis.cpp
+--- a/js/src/jit/RangeAnalysis.cpp
++++ b/js/src/jit/RangeAnalysis.cpp
+@@ -1780,20 +1780,16 @@ GetTypedArrayRange(TempAllocator& alloc,
+       case Scalar::Int16:
+         return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
+       case Scalar::Int32:
+         return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
+ 
+       case Scalar::Int64:
+       case Scalar::Float32:
+       case Scalar::Float64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+       case Scalar::MaxTypedArrayViewType:
+         break;
+     }
+     return nullptr;
+ }
+ 
+ void
+ MLoadUnboxedScalar::computeRange(TempAllocator& alloc)
+diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
+--- a/js/src/jit/Recover.cpp
++++ b/js/src/jit/Recover.cpp
+@@ -5,17 +5,16 @@
+  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ 
+ #include "jit/Recover.h"
+ 
+ #include "jsapi.h"
+ #include "jsmath.h"
+ 
+ #include "builtin/RegExp.h"
+-#include "builtin/SIMD.h"
+ #include "builtin/String.h"
+ #include "builtin/TypedObject.h"
+ #include "gc/Heap.h"
+ #include "jit/JitSpewer.h"
+ #include "jit/JSJitFrameIter.h"
+ #include "jit/MIR.h"
+ #include "jit/MIRGraph.h"
+ #include "jit/VMFunctions.h"
+@@ -1658,89 +1657,16 @@ RNewCallObject::recover(JSContext* cx, S
+ 
+     RootedValue result(cx);
+     result.setObject(*resultObject);
+     iter.storeInstructionResult(result);
+     return true;
+ }
+ 
+ bool
+-MSimdBox::writeRecoverData(CompactBufferWriter& writer) const
+-{
+-    MOZ_ASSERT(canRecoverOnBailout());
+-    writer.writeUnsigned(uint32_t(RInstruction::Recover_SimdBox));
+-    static_assert(unsigned(SimdType::Count) < 0x100, "assuming SimdType fits in 8 bits");
+-    writer.writeByte(uint8_t(simdType()));
+-    return true;
+-}
+-
+-RSimdBox::RSimdBox(CompactBufferReader& reader)
+-{
+-    type_ = reader.readByte();
+-}
+-
+-bool
+-RSimdBox::recover(JSContext* cx, SnapshotIterator& iter) const
+-{
+-    JSObject* resultObject = nullptr;
+-    RValueAllocation a = iter.readAllocation();
+-    MOZ_ASSERT(iter.allocationReadable(a));
+-    MOZ_ASSERT_IF(a.mode() == RValueAllocation::ANY_FLOAT_REG, a.fpuReg().isSimd128());
+-    const FloatRegisters::RegisterContent* raw = iter.floatAllocationPointer(a);
+-    switch (SimdType(type_)) {
+-      case SimdType::Bool8x16:
+-        resultObject = js::CreateSimd<Bool8x16>(cx, (const Bool8x16::Elem*) raw);
+-        break;
+-      case SimdType::Int8x16:
+-        resultObject = js::CreateSimd<Int8x16>(cx, (const Int8x16::Elem*) raw);
+-        break;
+-      case SimdType::Uint8x16:
+-        resultObject = js::CreateSimd<Uint8x16>(cx, (const Uint8x16::Elem*) raw);
+-        break;
+-      case SimdType::Bool16x8:
+-        resultObject = js::CreateSimd<Bool16x8>(cx, (const Bool16x8::Elem*) raw);
+-        break;
+-      case SimdType::Int16x8:
+-        resultObject = js::CreateSimd<Int16x8>(cx, (const Int16x8::Elem*) raw);
+-        break;
+-      case SimdType::Uint16x8:
+-        resultObject = js::CreateSimd<Uint16x8>(cx, (const Uint16x8::Elem*) raw);
+-        break;
+-      case SimdType::Bool32x4:
+-        resultObject = js::CreateSimd<Bool32x4>(cx, (const Bool32x4::Elem*) raw);
+-        break;
+-      case SimdType::Int32x4:
+-        resultObject = js::CreateSimd<Int32x4>(cx, (const Int32x4::Elem*) raw);
+-        break;
+-      case SimdType::Uint32x4:
+-        resultObject = js::CreateSimd<Uint32x4>(cx, (const Uint32x4::Elem*) raw);
+-        break;
+-      case SimdType::Float32x4:
+-        resultObject = js::CreateSimd<Float32x4>(cx, (const Float32x4::Elem*) raw);
+-        break;
+-      case SimdType::Float64x2:
+-        MOZ_CRASH("NYI, RSimdBox of Float64x2");
+-        break;
+-      case SimdType::Bool64x2:
+-        MOZ_CRASH("NYI, RSimdBox of Bool64x2");
+-        break;
+-      case SimdType::Count:
+-        MOZ_CRASH("RSimdBox of Count is unreachable");
+-    }
+-
+-    if (!resultObject)
+-        return false;
+-
+-    RootedValue result(cx);
+-    result.setObject(*resultObject);
+-    iter.storeInstructionResult(result);
+-    return true;
+-}
+-
+-bool
+ MObjectState::writeRecoverData(CompactBufferWriter& writer) const
+ {
+     MOZ_ASSERT(canRecoverOnBailout());
+     writer.writeUnsigned(uint32_t(RInstruction::Recover_ObjectState));
+     writer.writeUnsigned(numSlots());
+     return true;
+ }
+ 
+diff --git a/js/src/jit/Recover.h b/js/src/jit/Recover.h
+--- a/js/src/jit/Recover.h
++++ b/js/src/jit/Recover.h
+@@ -107,17 +107,16 @@ namespace jit {
+     _(NewArray)                                 \
+     _(NewArrayCopyOnWrite)                      \
+     _(NewIterator)                              \
+     _(NewDerivedTypedObject)                    \
+     _(NewCallObject)                            \
+     _(CreateThisWithTemplate)                   \
+     _(Lambda)                                   \
+     _(LambdaArrow)                              \
+-    _(SimdBox)                                  \
+     _(ObjectState)                              \
+     _(ArrayState)                               \
+     _(SetArrayLength)                           \
+     _(AtomicIsLockFree)                         \
+     _(AssertRecoveredOnBailout)
+ 
+ class RResumePoint;
+ class SnapshotIterator;
+@@ -685,27 +684,16 @@ class RLambdaArrow final : public RInstr
+ class RNewCallObject final : public RInstruction
+ {
+   public:
+     RINSTRUCTION_HEADER_NUM_OP_(NewCallObject, 1)
+ 
+     MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const override;
+ };
+ 
+-class RSimdBox final : public RInstruction
+-{
+-  private:
+-    uint8_t type_;
+-
+-  public:
+-    RINSTRUCTION_HEADER_NUM_OP_(SimdBox, 1)
+-
+-    MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const override;
+-};
+-
+ class RObjectState final : public RInstruction
+ {
+   private:
+     uint32_t numSlots_;        // Number of slots.
+ 
+   public:
+     RINSTRUCTION_HEADER_(ObjectState)
+ 
+diff --git a/js/src/jit/TypePolicy.cpp b/js/src/jit/TypePolicy.cpp
+--- a/js/src/jit/TypePolicy.cpp
++++ b/js/src/jit/TypePolicy.cpp
+@@ -634,55 +634,16 @@ NoFloatPolicyAfter<FirstOp>::adjustInput
+ }
+ 
+ template bool NoFloatPolicyAfter<0>::adjustInputs(TempAllocator& alloc, MInstruction* def) const;
+ template bool NoFloatPolicyAfter<1>::adjustInputs(TempAllocator& alloc, MInstruction* def) const;
+ template bool NoFloatPolicyAfter<2>::adjustInputs(TempAllocator& alloc, MInstruction* def) const;
+ 
+ template <unsigned Op>
+ bool
+-SimdScalarPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-    MIRType laneType = SimdTypeToLaneType(ins->type());
+-
+-    MDefinition* in = ins->getOperand(Op);
+-
+-    // A vector with boolean lanes requires Int32 inputs that have already been
+-    // converted to 0/-1.
+-    // We can't insert a MIRType::Boolean lane directly - it requires conversion.
+-    if (laneType == MIRType::Boolean) {
+-        MOZ_ASSERT(in->type() == MIRType::Int32, "Boolean SIMD vector requires Int32 lanes.");
+-        return true;
+-    }
+-
+-    if (in->type() == laneType)
+-        return true;
+-
+-    MInstruction* replace;
+-    if (laneType == MIRType::Int32) {
+-        replace = MTruncateToInt32::New(alloc, in);
+-    } else {
+-        MOZ_ASSERT(laneType == MIRType::Float32);
+-        replace = MToFloat32::New(alloc, in);
+-    }
+-
+-    ins->block()->insertBefore(ins, replace);
+-    ins->replaceOperand(Op, replace);
+-
+-    return replace->typePolicy()->adjustInputs(alloc, replace);
+-}
+-
+-template bool SimdScalarPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+-template bool SimdScalarPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+-template bool SimdScalarPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+-template bool SimdScalarPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+-
+-template <unsigned Op>
+-bool
+ BoxPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+ {
+     MDefinition* in = ins->getOperand(Op);
+     if (in->type() == MIRType::Value)
+         return true;
+ 
+     ins->replaceOperand(Op, BoxAt(alloc, ins, in));
+     return true;
+@@ -852,85 +813,16 @@ ObjectPolicy<Op>::staticAdjustInputs(Tem
+     return replace->typePolicy()->adjustInputs(alloc, replace);
+ }
+ 
+ template bool ObjectPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+ template bool ObjectPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+ template bool ObjectPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+ template bool ObjectPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+ 
+-template <unsigned Op>
+-bool
+-SimdSameAsReturnedTypePolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
+-{
+-    MOZ_ASSERT(ins->type() == ins->getOperand(Op)->type());
+-    return true;
+-}
+-
+-template bool
+-SimdSameAsReturnedTypePolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+-template bool
+-SimdSameAsReturnedTypePolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+-
+-bool
+-SimdAllPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
+-{
+-    for (unsigned i = 0, e = ins->numOperands(); i < e; i++)
+-        MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
+-    return true;
+-}
+-
+-template <unsigned Op>
+-bool
+-SimdPolicy<Op>::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
+-{
+-    MOZ_ASSERT(ins->typePolicySpecialization() == ins->getOperand(Op)->type());
+-    return true;
+-}
+-
+-template bool
+-SimdPolicy<0>::adjustInputs(TempAllocator& alloc, MInstruction* ins) const;
+-
+-bool
+-SimdShufflePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
+-{
+-    MSimdGeneralShuffle* s = ins->toSimdGeneralShuffle();
+-
+-    for (unsigned i = 0; i < s->numVectors(); i++)
+-        MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
+-
+-    // Next inputs are the lanes, which need to be int32
+-    for (unsigned i = 0; i < s->numLanes(); i++) {
+-        MDefinition* in = ins->getOperand(s->numVectors() + i);
+-        if (in->type() == MIRType::Int32)
+-            continue;
+-
+-        auto* replace = MToNumberInt32::New(alloc, in, IntConversionInputKind::NumbersOnly);
+-        ins->block()->insertBefore(ins, replace);
+-        ins->replaceOperand(s->numVectors() + i, replace);
+-        if (!replace->typePolicy()->adjustInputs(alloc, replace))
+-            return false;
+-    }
+-
+-    return true;
+-}
+-
+-bool
+-SimdSelectPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
+-{
+-    // First input is the mask, which has to be a boolean.
+-    MOZ_ASSERT(IsBooleanSimdType(ins->getOperand(0)->type()));
+-
+-    // Next inputs are the two vectors of a particular type.
+-    for (unsigned i = 1; i < 3; i++)
+-        MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
+-
+-    return true;
+-}
+-
+ bool
+ CallPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins) const
+ {
+     MCall* call = ins->toCall();
+ 
+     MDefinition* func = call->getFunction();
+     if (func->type() != MIRType::Object) {
+         MInstruction* unbox = MUnbox::New(alloc, func, MIRType::Object, MUnbox::Fallible);
+@@ -978,23 +870,16 @@ InstanceOfPolicy::adjustInputs(TempAlloc
+     return true;
+ }
+ 
+ bool
+ StoreUnboxedScalarPolicy::adjustValueInput(TempAllocator& alloc, MInstruction* ins,
+                                            Scalar::Type writeType, MDefinition* value,
+                                            int valueOperand)
+ {
+-    // Storing a SIMD value requires a valueOperand that has already been
+-    // SimdUnboxed. See IonBuilder::inlineSimdStore(()
+-    if (Scalar::isSimdType(writeType)) {
+-        MOZ_ASSERT(IsSimdType(value->type()));
+-        return true;
+-    }
+-
+     MDefinition* curValue = value;
+     // First, ensure the value is int32, boolean, double or Value.
+     // The conversion is based on TypedArrayObjectTemplate::setElementTail.
+     switch (value->type()) {
+       case MIRType::Int32:
+       case MIRType::Double:
+       case MIRType::Float32:
+       case MIRType::Boolean:
+@@ -1265,19 +1150,16 @@ FilterTypeSetPolicy::adjustInputs(TempAl
+     _(CallSetElementPolicy)                     \
+     _(ClampPolicy)                              \
+     _(ComparePolicy)                            \
+     _(FilterTypeSetPolicy)                      \
+     _(InstanceOfPolicy)                         \
+     _(PowPolicy)                                \
+     _(SameValuePolicy)                          \
+     _(SignPolicy)                               \
+-    _(SimdAllPolicy)                            \
+-    _(SimdSelectPolicy)                         \
+-    _(SimdShufflePolicy)                        \
+     _(StoreTypedArrayHolePolicy)                \
+     _(StoreUnboxedScalarPolicy)                 \
+     _(StoreUnboxedObjectOrNullPolicy)           \
+     _(StoreUnboxedStringPolicy)                 \
+     _(TestPolicy)                               \
+     _(AllDoublePolicy)                          \
+     _(ToDoublePolicy)                           \
+     _(ToInt32Policy)                            \
+@@ -1305,17 +1187,16 @@ FilterTypeSetPolicy::adjustInputs(TempAl
+     _(MixPolicy<ObjectPolicy<0>, ObjectPolicy<1>, ObjectPolicy<2> >)                                          \
+     _(MixPolicy<StringPolicy<0>, UnboxedInt32Policy<1>, UnboxedInt32Policy<2>>)                               \
+     _(MixPolicy<StringPolicy<0>, ObjectPolicy<1>, StringPolicy<2> >)                                          \
+     _(MixPolicy<StringPolicy<0>, StringPolicy<1>, StringPolicy<2> >)                                          \
+     _(MixPolicy<ObjectPolicy<0>, StringPolicy<1>, UnboxedInt32Policy<2>>)                                     \
+     _(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>, UnboxedInt32Policy<2>, UnboxedInt32Policy<3>>)        \
+     _(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<1>, TruncateToInt32Policy<2>, TruncateToInt32Policy<3> >) \
+     _(MixPolicy<ObjectPolicy<0>, CacheIdPolicy<1>, NoFloatPolicy<2>>)                                         \
+-    _(MixPolicy<SimdScalarPolicy<0>, SimdScalarPolicy<1>, SimdScalarPolicy<2>, SimdScalarPolicy<3> >)         \
+     _(MixPolicy<ObjectPolicy<0>, BoxExceptPolicy<1, MIRType::Object>, CacheIdPolicy<2>>)                      \
+     _(MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >)                                                              \
+     _(MixPolicy<ConvertToStringPolicy<0>, ConvertToStringPolicy<1> >)                                         \
+     _(MixPolicy<ConvertToStringPolicy<0>, ObjectPolicy<1> >)                                                  \
+     _(MixPolicy<DoublePolicy<0>, DoublePolicy<1> >)                                                           \
+     _(MixPolicy<UnboxedInt32Policy<0>, UnboxedInt32Policy<1> >)                                               \
+     _(MixPolicy<ObjectPolicy<0>, BoxPolicy<1> >)                                                              \
+     _(MixPolicy<BoxExceptPolicy<0, MIRType::Object>, CacheIdPolicy<1>>)                                       \
+@@ -1325,31 +1206,26 @@ FilterTypeSetPolicy::adjustInputs(TempAl
+     _(MixPolicy<ObjectPolicy<0>, UnboxedInt32Policy<2> >)                                                     \
+     _(MixPolicy<ObjectPolicy<0>, NoFloatPolicy<1> >)                                                          \
+     _(MixPolicy<ObjectPolicy<0>, NoFloatPolicy<2> >)                                                          \
+     _(MixPolicy<ObjectPolicy<0>, NoFloatPolicy<3> >)                                                          \
+     _(MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >)                                                           \
+     _(MixPolicy<ObjectPolicy<0>, StringPolicy<1> >)                                                           \
+     _(MixPolicy<ObjectPolicy<0>, ConvertToStringPolicy<2> >)                                                  \
+     _(MixPolicy<ObjectPolicy<1>, ConvertToStringPolicy<0> >)                                                  \
+-    _(MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >)                           \
+-    _(MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >)                                       \
+     _(MixPolicy<StringPolicy<0>, UnboxedInt32Policy<1> >)                                                     \
+     _(MixPolicy<StringPolicy<0>, StringPolicy<1> >)                                                           \
+     _(MixPolicy<BoxPolicy<0>, BoxPolicy<1> >)                                                                 \
+     _(NoFloatPolicy<0>)                                                                                       \
+     _(NoFloatPolicyAfter<0>)                                                                                  \
+     _(NoFloatPolicyAfter<1>)                                                                                  \
+     _(NoFloatPolicyAfter<2>)                                                                                  \
+     _(ObjectPolicy<0>)                                                                                        \
+     _(ObjectPolicy<1>)                                                                                        \
+     _(ObjectPolicy<3>)                                                                                        \
+-    _(SimdPolicy<0>)                                                                                          \
+-    _(SimdSameAsReturnedTypePolicy<0>)                                                                        \
+-    _(SimdScalarPolicy<0>)                                                                                    \
+     _(StringPolicy<0>)
+ 
+ 
+ namespace js {
+ namespace jit {
+ 
+ // Define for all used TypePolicy specialization, the definition for
+ // |TypePolicy::Data::thisTypePolicy|.  This function returns one constant
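The deleted SimdScalarPolicy above illustrates the adjustInputs contract that the surviving policies still follow: when an operand's MIRType does not match what the instruction requires, allocate a conversion instruction, insert it into the block before the instruction, and re-point the operand at it. A self-contained sketch of that splice over a toy IR (toy Node/Type standing in for MDefinition/MIRType):

    // Sketch of the TypePolicy "adjust inputs" splice (toy IR, not real MIR).
    #include <cassert>
    #include <memory>
    #include <vector>

    enum class Type { Int32, Float32 };

    struct Node {
        Type type;
        std::vector<Node*> operands;
    };

    // Convert operand `op` of `ins` to `want`, splicing in a conversion node,
    // as SimdScalarPolicy::staticAdjustInputs did with MTruncateToInt32 etc.
    Node* adjustInput(std::vector<std::unique_ptr<Node>>& block, Node* ins,
                      size_t op, Type want) {
        Node* in = ins->operands[op];
        if (in->type == want)
            return in;                       // already the required type
        auto conv = std::make_unique<Node>(Node{want, {in}});
        Node* raw = conv.get();
        block.push_back(std::move(conv));    // "insertBefore" in spirit
        ins->operands[op] = raw;             // replaceOperand
        return raw;
    }

    int main() {
        std::vector<std::unique_ptr<Node>> block;
        block.push_back(std::make_unique<Node>(Node{Type::Float32, {}}));
        block.push_back(std::make_unique<Node>(Node{Type::Int32, {block[0].get()}}));
        Node* ins = block[1].get();
        adjustInput(block, ins, 0, Type::Int32);
        assert(ins->operands[0]->type == Type::Int32);  // operand re-pointed
        return 0;
    }

The real code additionally recurses via replace->typePolicy()->adjustInputs so the inserted conversion gets its own inputs fixed up.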
+diff --git a/js/src/jit/TypePolicy.h b/js/src/jit/TypePolicy.h
+--- a/js/src/jit/TypePolicy.h
++++ b/js/src/jit/TypePolicy.h
+@@ -360,77 +360,16 @@ class ObjectPolicy final : public TypePo
+         return staticAdjustInputs(alloc, ins);
+     }
+ };
+ 
+ // Single-object input. If the input is a Value, it is unboxed. If it is
+ // a primitive, we use ValueToNonNullObject.
+ typedef ObjectPolicy<0> SingleObjectPolicy;
+ 
+-// Convert an operand to have a type identical to the scalar type of the
+-// returned type of the instruction.
+-template <unsigned Op>
+-class SimdScalarPolicy final : public TypePolicy
+-{
+-  public:
+-    constexpr SimdScalarPolicy() { }
+-    EMPTY_DATA_;
+-    static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
+-    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) const override {
+-        return staticAdjustInputs(alloc, def);
+-    }
+-};
+-
+-class SimdAllPolicy final : public TypePolicy
+-{
+-  public:
+-    constexpr SimdAllPolicy () { }
+-    SPECIALIZATION_DATA_;
+-    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override;
+-};
+-
+-template <unsigned Op>
+-class SimdPolicy final : public TypePolicy
+-{
+-  public:
+-    constexpr SimdPolicy() { }
+-    SPECIALIZATION_DATA_;
+-    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override;
+-};
+-
+-class SimdSelectPolicy final : public TypePolicy
+-{
+-  public:
+-    constexpr SimdSelectPolicy() { }
+-    SPECIALIZATION_DATA_;
+-    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override;
+-};
+-
+-class SimdShufflePolicy final : public TypePolicy
+-{
+-  public:
+-    constexpr SimdShufflePolicy() { }
+-    SPECIALIZATION_DATA_;
+-    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override;
+-};
+-
+-// SIMD value-type policy, use the returned type of the instruction to determine
+-// how to unbox its operand.
+-template <unsigned Op>
+-class SimdSameAsReturnedTypePolicy final : public TypePolicy
+-{
+-  public:
+-    constexpr SimdSameAsReturnedTypePolicy() { }
+-    EMPTY_DATA_;
+-    static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+-    MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override {
+-        return staticAdjustInputs(alloc, ins);
+-    }
+-};
+-
+ template <unsigned Op>
+ class BoxPolicy final : public TypePolicy
+ {
+   public:
+     constexpr BoxPolicy() { }
+     EMPTY_DATA_;
+     static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
+     MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override {
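BoxPolicy, visible at the end of this hunk, shows the shape shared by every policy class in the header, including the SIMD ones being deleted: a static staticAdjustInputs for direct calls plus a virtual adjustInputs override that simply forwards to it. A standalone sketch of that dual-dispatch pattern with toy types:

    // Sketch of the policy-class shape: static entry point + virtual forwarder.
    #include <cassert>

    struct TempAllocator {};
    struct MInstruction {};

    struct TypePolicy {
        virtual bool adjustInputs(TempAllocator&, MInstruction*) const = 0;
        virtual ~TypePolicy() = default;
    };

    template <unsigned Op>
    struct BoxPolicySketch final : TypePolicy {
        static bool staticAdjustInputs(TempAllocator&, MInstruction*) {
            return true;  // a real policy would box operand Op here
        }
        bool adjustInputs(TempAllocator& alloc, MInstruction* ins) const override {
            return staticAdjustInputs(alloc, ins);  // forward to the static path
        }
    };

    int main() {
        TempAllocator alloc;
        MInstruction ins;
        BoxPolicySketch<0> p;
        const TypePolicy& tp = p;
        assert(tp.adjustInputs(alloc, &ins));                         // virtual
        assert(BoxPolicySketch<0>::staticAdjustInputs(alloc, &ins));  // static
        return 0;
    }

Keeping both entry points lets compiled-in callers skip the virtual dispatch while generic code still works through the TypePolicy interface.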
+diff --git a/js/src/jit/TypedObjectPrediction.cpp b/js/src/jit/TypedObjectPrediction.cpp
+--- a/js/src/jit/TypedObjectPrediction.cpp
++++ b/js/src/jit/TypedObjectPrediction.cpp
+@@ -114,17 +114,16 @@ TypedObjectPrediction::kind() const
+ }
+ 
+ bool
+ TypedObjectPrediction::ofArrayKind() const
+ {
+     switch (kind()) {
+       case type::Scalar:
+       case type::Reference:
+-      case type::Simd:
+       case type::Struct:
+         return false;
+ 
+       case type::Array:
+         return true;
+     }
+ 
+     MOZ_CRASH("Bad kind");
+@@ -202,22 +201,16 @@ TypedObjectPrediction::scalarType() cons
+ }
+ 
+ ReferenceType
+ TypedObjectPrediction::referenceType() const
+ {
+     return extractType<ReferenceTypeDescr>();
+ }
+ 
+-SimdType
+-TypedObjectPrediction::simdType() const
+-{
+-    return descr().as<SimdTypeDescr>().type();
+-}
+-
+ bool
+ TypedObjectPrediction::hasKnownArrayLength(int32_t* length) const
+ {
+     switch (predictionKind()) {
+       case TypedObjectPrediction::Empty:
+       case TypedObjectPrediction::Inconsistent:
+         return false;
+ 
+diff --git a/js/src/jit/TypedObjectPrediction.h b/js/src/jit/TypedObjectPrediction.h
+--- a/js/src/jit/TypedObjectPrediction.h
++++ b/js/src/jit/TypedObjectPrediction.h
+@@ -160,21 +160,20 @@ class TypedObjectPrediction {
+     // The size may not be statically known if (1) the object is
+     // an array whose dimensions are unknown or (2) only a prefix
+     // of its type is known.
+     bool hasKnownSize(uint32_t* out) const;
+ 
+     //////////////////////////////////////////////////////////////////////
+     // Simple operations
+     //
+-    // Only valid when |kind()| is Scalar, Reference, or Simd (as appropriate).
++    // Only valid when |kind()| is Scalar or Reference.
+ 
+     Scalar::Type scalarType() const;
+     ReferenceType referenceType() const;
+-    SimdType simdType() const;
+ 
+     ///////////////////////////////////////////////////////////////////////////
+     // Queries valid only for arrays.
+ 
+     // Returns true if the length of the array is statically known,
+     // and sets |*length| appropriately. Otherwise returns false.
+     bool hasKnownArrayLength(int32_t* length) const;
+ 
+diff --git a/js/src/jit/arm/CodeGenerator-arm.cpp b/js/src/jit/arm/CodeGenerator-arm.cpp
+--- a/js/src/jit/arm/CodeGenerator-arm.cpp
++++ b/js/src/jit/arm/CodeGenerator-arm.cpp
+@@ -3152,252 +3152,18 @@ CodeGenerator::visitWasmAtomicExchangeI6
+     Register64 value = ToRegister64(lir->value());
+     Register64 out = ToOutRegister64(lir);
+ 
+     BaseIndex addr(HeapReg, ptr, TimesOne, lir->access().offset());
+     masm.atomicExchange64(Synchronization::Full(), addr, value, out);
+ }
+ 
+ void
+-CodeGenerator::visitSimdSplatX4(LSimdSplatX4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimd128Int(LSimd128Int* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimd128Float(LSimd128Float* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementI(LSimdExtractElementI* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementF(LSimdExtractElementF* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryBitwise(LSimdBinaryBitwise* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitNearbyInt(LNearbyInt*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitSimdShift(LSimdShift*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitNearbyIntF(LNearbyIntF*)
+ {
+     MOZ_CRASH("NYI");
+ }
+-
+-void
+-CodeGenerator::visitSimdSelect(LSimdSelect*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdAllTrue(LSimdAllTrue*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdAnyTrue(LSimdAnyTrue*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdShuffle(LSimdShuffle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSplatX8(LSimdSplatX8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSplatX16(LSimdSplatX16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSwizzleF(LSimdSwizzleF*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSwizzleI(LSimdSwizzleI*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdShuffleX4(LSimdShuffleX4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx8(LSimdBinaryCompIx8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithFx4(LSimdUnaryArithFx4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx4(LSimdUnaryArithIx4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx8(LSimdUnaryArithIx8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitFloat32x4ToInt32x4(LFloat32x4ToInt32x4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitInt32x4ToFloat32x4(LInt32x4ToFloat32x4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx8(LSimdBinaryArithIx8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx16(LSimdBinaryCompIx16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdInsertElementF(LSimdInsertElementF*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdInsertElementI(LSimdInsertElementI*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx16(LSimdUnaryArithIx16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx16(LSimdBinaryArithIx16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementB(LSimdExtractElementB*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdGeneralShuffleF(LSimdGeneralShuffleF*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdGeneralShuffleI(LSimdGeneralShuffleI*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdReinterpretCast(LSimdReinterpretCast*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinarySaturating(LSimdBinarySaturating*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementU2D(LSimdExtractElementU2D*)
+-{
+-    MOZ_CRASH("NYI");
+-}
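Every function deleted from the ARM code generator above is the same one-line MOZ_CRASH("NYI") stub: the ARM backend never lowered SIMD MIR, so these visitors existed only to satisfy the CodeGenerator interface and were unreachable by construction. A standalone sketch of that crash-stub pattern (illustrative macro, not the real mozilla/Assertions.h implementation):

    // Sketch of a "not yet implemented" crash stub.
    #include <cstdio>
    #include <cstdlib>

    #define SKETCH_CRASH(msg)                                  \
        do {                                                   \
            std::fprintf(stderr, "Crash at %s:%d: %s\n",       \
                         __FILE__, __LINE__, msg);             \
            std::abort();                                      \
        } while (0)

    struct CodeGeneratorSketch {
        // Safe only because lowering never emits the corresponding op.
        void visitUnsupportedOp() { SKETCH_CRASH("NYI"); }
    };

    int main() { return 0; }  // visitUnsupportedOp() is intentionally never called

Once the SIMD LIR opcodes are gone from the shared code, the stubs no longer override anything and can be deleted wholesale, which is what this hunk does.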
+diff --git a/js/src/jit/arm/Lowering-arm.cpp b/js/src/jit/arm/Lowering-arm.cpp
+--- a/js/src/jit/arm/Lowering-arm.cpp
++++ b/js/src/jit/arm/Lowering-arm.cpp
+@@ -1053,68 +1053,8 @@ LIRGenerator::visitExtendInt32ToInt64(ME
+     lir->setDef(0, def);
+ }
+ 
+ void
+ LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins)
+ {
+     defineInt64(new(alloc()) LSignExtendInt64(useInt64RegisterAtStart(ins->input())), ins);
+ }
+-
+-void
+-LIRGenerator::visitSimdInsertElement(MSimdInsertElement*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdExtractElement(MSimdExtractElement*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdBinaryArith(MSimdBinaryArith*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdSelect(MSimdSelect*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdSplat(MSimdSplat*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdValueX4(MSimdValueX4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdBinarySaturating(MSimdBinarySaturating*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdSwizzle(MSimdSwizzle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdShuffle(MSimdShuffle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdGeneralShuffle(MSimdGeneralShuffle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+diff --git a/js/src/jit/arm/Lowering-arm.h b/js/src/jit/arm/Lowering-arm.h
+--- a/js/src/jit/arm/Lowering-arm.h
++++ b/js/src/jit/arm/Lowering-arm.h
+@@ -58,27 +58,16 @@ class LIRGeneratorARM : public LIRGenera
+                             MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ 
+     void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+                      MDefinition* src);
+     template<size_t Temps>
+     void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+                      MDefinition* lhs, MDefinition* rhs);
+ 
+-    void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+-                         MDefinition* lhs, MDefinition* rhs)
+-    {
+-        return lowerForFPU(ins, mir, lhs, rhs);
+-    }
+-    void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+-                         MDefinition* lhs, MDefinition* rhs)
+-    {
+-        return lowerForFPU(ins, mir, lhs, rhs);
+-    }
+-
+     void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+                                  MDefinition* lhs, MDefinition* rhs);
+     void lowerTruncateDToInt32(MTruncateToInt32* ins);
+     void lowerTruncateFToInt32(MTruncateToInt32* ins);
+     void lowerDivI(MDiv* div);
+     void lowerModI(MMod* mod);
+     void lowerDivI64(MDiv* div);
+     void lowerModI64(MMod* mod);
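The lowerForCompIx4/lowerForCompFx4 pair removed from Lowering-arm.h above did nothing ARM-specific: both simply forwarded to the generic lowerForFPU path, so SIMD comparisons reused ordinary FPU lowering and register allocation. A standalone sketch of that thin-forwarder pattern with toy types:

    // Sketch of the deleted forwarders: SIMD compares delegated to FPU lowering.
    #include <cassert>

    struct Ins {};
    struct Mir {};
    struct Def {};

    int lowered = 0;
    void lowerForFPU(Ins*, Mir*, Def*, Def*) { ++lowered; }

    // The forwarder adds no logic of its own.
    void lowerForCompSketch(Ins* ins, Mir* mir, Def* lhs, Def* rhs) {
        return lowerForFPU(ins, mir, lhs, rhs);
    }

    int main() {
        Ins i; Mir m; Def a, b;
        lowerForCompSketch(&i, &m, &a, &b);
        assert(lowered == 1);  // exactly one delegated lowering
        return 0;
    }

The identical forwarders disappear from the ARM64 and MIPS lowering headers later in this patch, for the same reason.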
+diff --git a/js/src/jit/arm/MacroAssembler-arm.h b/js/src/jit/arm/MacroAssembler-arm.h
+--- a/js/src/jit/arm/MacroAssembler-arm.h
++++ b/js/src/jit/arm/MacroAssembler-arm.h
+@@ -1057,49 +1057,16 @@ class MacroAssemblerARMCompat : public M
+ 
+     void loadPtr(const Address& address, Register dest);
+     void loadPtr(const BaseIndex& src, Register dest);
+     void loadPtr(AbsoluteAddress address, Register dest);
+     void loadPtr(wasm::SymbolicAddress address, Register dest);
+ 
+     void loadPrivate(const Address& address, Register dest);
+ 
+-    void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
+-    void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+-
+-    void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadFloat32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeFloat32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
+-
+-    void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+-
+     void loadDouble(const Address& addr, FloatRegister dest);
+     void loadDouble(const BaseIndex& src, FloatRegister dest);
+ 
+     // Load a float value into a register, then expand it to a double.
+     void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+     void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+ 
+     void loadFloat32(const Address& addr, FloatRegister dest);
+diff --git a/js/src/jit/arm64/CodeGenerator-arm64.cpp b/js/src/jit/arm64/CodeGenerator-arm64.cpp
+--- a/js/src/jit/arm64/CodeGenerator-arm64.cpp
++++ b/js/src/jit/arm64/CodeGenerator-arm64.cpp
+@@ -685,76 +685,16 @@ CodeGenerator::visitAtomicExchangeTypedA
+         masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
+     } else {
+         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+         masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
+     }
+ }
+ 
+ void
+-CodeGenerator::visitSimdSplatX4(LSimdSplatX4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimd128Int(LSimd128Int* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimd128Float(LSimd128Float* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementI(LSimdExtractElementI* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementF(LSimdExtractElementF* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryBitwise(LSimdBinaryBitwise* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitAddI64(LAddI64*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+ CodeGenerator::visitClzI64(LClzI64*)
+ {
+@@ -847,22 +787,16 @@ CodeGenerator::visitPopcntI64(LPopcntI64
+ 
+ void
+ CodeGenerator::visitRotateI64(LRotateI64*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitSimdShift(LSimdShift*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitWasmStore(LWasmStore*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+ CodeGenerator::visitCompareI64(LCompareI64*)
+ {
+@@ -871,94 +805,40 @@ CodeGenerator::visitCompareI64(LCompareI
+ 
+ void
+ CodeGenerator::visitNearbyIntF(LNearbyIntF*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitSimdSelect(LSimdSelect*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitWasmSelect(LWasmSelect*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitSimdAllTrue(LSimdAllTrue*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdAnyTrue(LSimdAnyTrue*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdShuffle(LSimdShuffle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSplatX8(LSimdSplatX8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitWasmLoadI64(LWasmLoadI64*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitSimdSplatX16(LSimdSplatX16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSwizzleF(LSimdSwizzleF*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSwizzleI(LSimdSwizzleI*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitWasmStoreI64(LWasmStoreI64*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+ CodeGenerator::visitMemoryBarrier(LMemoryBarrier*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitSimdShuffleX4(LSimdShuffleX4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitSoftUDivOrMod(LSoftUDivOrMod*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+ CodeGenerator::visitWasmAddOffset(LWasmAddOffset*)
+ {
+@@ -997,154 +877,40 @@ CodeGenerator::visitTestI64AndBranch(LTe
+ 
+ void
+ CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitSimdBinaryCompIx8(LSimdBinaryCompIx8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithFx4(LSimdUnaryArithFx4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx4(LSimdUnaryArithIx4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx8(LSimdUnaryArithIx8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitFloat32x4ToInt32x4(LFloat32x4ToInt32x4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitInt32x4ToFloat32x4(LInt32x4ToFloat32x4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx8(LSimdBinaryArithIx8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx16(LSimdBinaryCompIx16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdInsertElementF(LSimdInsertElementF*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdInsertElementI(LSimdInsertElementI*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx16(LSimdUnaryArithIx16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx16(LSimdBinaryArithIx16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementB(LSimdExtractElementB*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdGeneralShuffleF(LSimdGeneralShuffleF*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdGeneralShuffleI(LSimdGeneralShuffleI*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdReinterpretCast(LSimdReinterpretCast*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitSimdBinarySaturating(LSimdBinarySaturating*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitSimdExtractElementU2D(LSimdExtractElementU2D*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+ CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64*)
+ {
+diff --git a/js/src/jit/arm64/Lowering-arm64.cpp b/js/src/jit/arm64/Lowering-arm64.cpp
+--- a/js/src/jit/arm64/Lowering-arm64.cpp
++++ b/js/src/jit/arm64/Lowering-arm64.cpp
+@@ -349,68 +349,8 @@ LIRGenerator::visitExtendInt32ToInt64(ME
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+ LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins)
+ {
+     MOZ_CRASH("NYI");
+ }
+-
+-void
+-LIRGenerator::visitSimdInsertElement(MSimdInsertElement*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdExtractElement(MSimdExtractElement*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdBinaryArith(MSimdBinaryArith*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdSelect(MSimdSelect*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdSplat(MSimdSplat*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdValueX4(MSimdValueX4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdBinarySaturating(MSimdBinarySaturating*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdSwizzle(MSimdSwizzle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdShuffle(MSimdShuffle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdGeneralShuffle(MSimdGeneralShuffle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+diff --git a/js/src/jit/arm64/Lowering-arm64.h b/js/src/jit/arm64/Lowering-arm64.h
+--- a/js/src/jit/arm64/Lowering-arm64.h
++++ b/js/src/jit/arm64/Lowering-arm64.h
+@@ -58,28 +58,16 @@ class LIRGeneratorARM64 : public LIRGene
+                             MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ 
+     void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input);
+ 
+     template <size_t Temps>
+     void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+                      MDefinition* lhs, MDefinition* rhs);
+ 
+-    void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+-                         MDefinition* lhs, MDefinition* rhs)
+-    {
+-        return lowerForFPU(ins, mir, lhs, rhs);
+-    }
+-
+-    void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+-                         MDefinition* lhs, MDefinition* rhs)
+-    {
+-        return lowerForFPU(ins, mir, lhs, rhs);
+-    }
+-
+     void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+                                  MDefinition* lhs, MDefinition* rhs);
+     void lowerTruncateDToInt32(MTruncateToInt32* ins);
+     void lowerTruncateFToInt32(MTruncateToInt32* ins);
+     void lowerDivI(MDiv* div);
+     void lowerModI(MMod* mod);
+     void lowerDivI64(MDiv* div);
+     void lowerModI64(MMod* mod);
+diff --git a/js/src/jit/arm64/MacroAssembler-arm64.cpp b/js/src/jit/arm64/MacroAssembler-arm64.cpp
+--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
++++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
+@@ -314,20 +314,16 @@ MacroAssemblerCompat::wasmLoadImpl(const
+       case Scalar::Float32:
+         Ldr(SelectFPReg(outany, out64, 32), srcAddr);
+         break;
+       case Scalar::Float64:
+         Ldr(SelectFPReg(outany, out64, 64), srcAddr);
+         break;
+       case Scalar::Uint8Clamped:
+       case Scalar::MaxTypedArrayViewType:
+-      case Scalar::Float32x4:
+-      case Scalar::Int32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+         MOZ_CRASH("unexpected array type");
+     }
+ 
+     asMasm().memoryBarrierAfter(access.sync());
+ }
+ 
+ void
+ MacroAssemblerCompat::wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister valany,
+@@ -366,20 +362,16 @@ MacroAssemblerCompat::wasmStoreImpl(cons
+         Str(SelectGPReg(valany, val64), dstAddr);
+         break;
+       case Scalar::Float32:
+         Str(SelectFPReg(valany, val64, 32), dstAddr);
+         break;
+       case Scalar::Float64:
+         Str(SelectFPReg(valany, val64, 64), dstAddr);
+         break;
+-      case Scalar::Float32x4:
+-      case Scalar::Int32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+       case Scalar::Uint8Clamped:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("unexpected array type");
+     }
+ 
+     asMasm().memoryBarrierAfter(access.sync());
+ }
+ 
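The wasmLoadImpl/wasmStoreImpl hunks above drop the four Scalar::*x4 SIMD cases from otherwise exhaustive type switches; the remaining unexpected kinds still land in MOZ_CRASH("unexpected array type"). A standalone sketch of that switch shape with an illustrative enum (not the real js::Scalar::Type):

    // Sketch of an exhaustive scalar-type switch with a crash for bad kinds.
    #include <cstdint>
    #include <cstdlib>

    enum class ScalarType { Int8, Int32, Float32, Float64, MaxViewType };

    uint32_t accessSize(ScalarType t) {
        switch (t) {
          case ScalarType::Int8:    return 1;
          case ScalarType::Int32:   return 4;
          case ScalarType::Float32: return 4;
          case ScalarType::Float64: return 8;
          case ScalarType::MaxViewType:
            std::abort();  // "unexpected array type", as in the patch
        }
        std::abort();  // unreachable while the enum and switch stay in sync
    }

    int main() { return accessSize(ScalarType::Float64) == 8 ? 0 : 1; }

Because the switches enumerate every case, removing the SIMD enumerators from Scalar::Type forces exactly these mechanical case deletions, and the compiler flags any switch the patch misses.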
+diff --git a/js/src/jit/arm64/MacroAssembler-arm64.h b/js/src/jit/arm64/MacroAssembler-arm64.h
+--- a/js/src/jit/arm64/MacroAssembler-arm64.h
++++ b/js/src/jit/arm64/MacroAssembler-arm64.h
+@@ -916,54 +916,16 @@ class MacroAssemblerCompat : public vixl
+         Mov(scratch32, uint64_t(imm.value));
+         Str(scratch32, toMemOperand(address));
+     }
+ 
+     void store64(Register64 src, Address address) {
+         storePtr(src.reg, address);
+     }
+ 
+-    // SIMD.
+-    void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x4(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x4(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadAlignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeAlignedSimd128Int(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
+-    void storeAlignedSimd128Int(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Int(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Int(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
+-
+-    void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadFloat32x4(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeFloat32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
+-
+-    void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadAlignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeAlignedSimd128Float(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
+-    void storeAlignedSimd128Float(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Float(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Float(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
+-
+     // StackPointer manipulation.
+     inline void addToStackPtr(Register src);
+     inline void addToStackPtr(Imm32 imm);
+     inline void addToStackPtr(const Address& src);
+     inline void addStackPtrTo(Register dest);
+ 
+     inline void subFromStackPtr(Register src);
+     inline void subFromStackPtr(Imm32 imm);
+diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
++++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+@@ -2548,252 +2548,18 @@ CodeGenerator::visitWasmAtomicBinopI64(L
+ 
+     BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+ 
+     masm.atomicFetchOp64(Synchronization::Full(), lir->mir()->operation(), value, addr, temp,
+                          output);
+ }
+ 
+ void
+-CodeGenerator::visitSimdSplatX4(LSimdSplatX4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimd128Int(LSimd128Int* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimd128Float(LSimd128Float* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementI(LSimdExtractElementI* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementF(LSimdExtractElementF* ins)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryBitwise(LSimdBinaryBitwise* lir)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitNearbyInt(LNearbyInt*)
+ {
+     MOZ_CRASH("NYI");
+ }
+ 
+ void
+-CodeGenerator::visitSimdShift(LSimdShift*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+ CodeGenerator::visitNearbyIntF(LNearbyIntF*)
+ {
+     MOZ_CRASH("NYI");
+ }
+-
+-void
+-CodeGenerator::visitSimdSelect(LSimdSelect*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdAllTrue(LSimdAllTrue*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdAnyTrue(LSimdAnyTrue*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdShuffle(LSimdShuffle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSplatX8(LSimdSplatX8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSplatX16(LSimdSplatX16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSwizzleF(LSimdSwizzleF*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdSwizzleI(LSimdSwizzleI*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdShuffleX4(LSimdShuffleX4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx8(LSimdBinaryCompIx8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithFx4(LSimdUnaryArithFx4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx4(LSimdUnaryArithIx4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx8(LSimdUnaryArithIx8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitFloat32x4ToInt32x4(LFloat32x4ToInt32x4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitInt32x4ToFloat32x4(LInt32x4ToFloat32x4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx8(LSimdBinaryArithIx8*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx16(LSimdBinaryCompIx16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdInsertElementF(LSimdInsertElementF*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdInsertElementI(LSimdInsertElementI*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx16(LSimdUnaryArithIx16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx16(LSimdBinaryArithIx16*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementB(LSimdExtractElementB*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdGeneralShuffleF(LSimdGeneralShuffleF*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdGeneralShuffleI(LSimdGeneralShuffleI*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdReinterpretCast(LSimdReinterpretCast*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinarySaturating(LSimdBinarySaturating*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementU2D(LSimdExtractElementU2D*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.cpp b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
++++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+@@ -814,68 +814,8 @@ LIRGenerator::visitExtendInt32ToInt64(ME
+     defineInt64(new(alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+ }
+ 
+ void
+ LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins)
+ {
+     defineInt64(new(alloc()) LSignExtendInt64(useInt64RegisterAtStart(ins->input())), ins);
+ }
+-
+-void
+-LIRGenerator::visitSimdInsertElement(MSimdInsertElement*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdExtractElement(MSimdExtractElement*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdBinaryArith(MSimdBinaryArith*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdSelect(MSimdSelect*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdSplat(MSimdSplat*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdValueX4(MSimdValueX4*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdBinarySaturating(MSimdBinarySaturating*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdSwizzle(MSimdSwizzle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdShuffle(MSimdShuffle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+-
+-void
+-LIRGenerator::visitSimdGeneralShuffle(MSimdGeneralShuffle*)
+-{
+-    MOZ_CRASH("NYI");
+-}
+diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.h b/js/src/jit/mips-shared/Lowering-mips-shared.h
+--- a/js/src/jit/mips-shared/Lowering-mips-shared.h
++++ b/js/src/jit/mips-shared/Lowering-mips-shared.h
+@@ -45,27 +45,16 @@ class LIRGeneratorMIPSShared : public LI
+                             MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ 
+     void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+                      MDefinition* src);
+     template<size_t Temps>
+     void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+                      MDefinition* lhs, MDefinition* rhs);
+ 
+-    void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+-                         MDefinition* lhs, MDefinition* rhs)
+-    {
+-        return lowerForFPU(ins, mir, lhs, rhs);
+-    }
+-    void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+-                         MDefinition* lhs, MDefinition* rhs)
+-    {
+-        return lowerForFPU(ins, mir, lhs, rhs);
+-    }
+-
+     void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+                                  MDefinition* lhs, MDefinition* rhs);
+     void lowerDivI(MDiv* div);
+     void lowerModI(MMod* mod);
+     void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+     void lowerUDiv(MDiv* div);
+     void lowerUMod(MMod* mod);
+ 
+diff --git a/js/src/jit/mips32/MacroAssembler-mips32.h b/js/src/jit/mips32/MacroAssembler-mips32.h
+--- a/js/src/jit/mips32/MacroAssembler-mips32.h
++++ b/js/src/jit/mips32/MacroAssembler-mips32.h
+@@ -623,49 +623,16 @@ class MacroAssemblerMIPSCompat : public 
+ 
+     void loadPtr(const Address& address, Register dest);
+     void loadPtr(const BaseIndex& src, Register dest);
+     void loadPtr(AbsoluteAddress address, Register dest);
+     void loadPtr(wasm::SymbolicAddress address, Register dest);
+ 
+     void loadPrivate(const Address& address, Register dest);
+ 
+-    void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x4(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x4(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+-
+-    void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadFloat32x4(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeFloat32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
+-
+-    void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+-
+     void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+                              Register temp, FloatRegister dest);
+ 
+     void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+                               Register temp, FloatRegister dest);
+ 
+     void store8(Register src, const Address& address);
+     void store8(Imm32 imm, const Address& address);
+diff --git a/js/src/jit/mips64/MacroAssembler-mips64.h b/js/src/jit/mips64/MacroAssembler-mips64.h
+--- a/js/src/jit/mips64/MacroAssembler-mips64.h
++++ b/js/src/jit/mips64/MacroAssembler-mips64.h
+@@ -639,49 +639,16 @@ class MacroAssemblerMIPS64Compat : publi
+ 
+     void loadPtr(const Address& address, Register dest);
+     void loadPtr(const BaseIndex& src, Register dest);
+     void loadPtr(AbsoluteAddress address, Register dest);
+     void loadPtr(wasm::SymbolicAddress address, Register dest);
+ 
+     void loadPrivate(const Address& address, Register dest);
+ 
+-    void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadInt32x4(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+-    void storeInt32x4(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+-    void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+-
+-    void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadFloat32x4(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeFloat32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
+-
+-    void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+-    void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+-
+     void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+                              Register temp, FloatRegister dest);
+     void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+                               Register temp, FloatRegister dest);
+ 
+     void store8(Register src, const Address& address);
+     void store8(Imm32 imm, const Address& address);
+     void store8(Register src, const BaseIndex& address);
+diff --git a/js/src/jit/none/Lowering-none.h b/js/src/jit/none/Lowering-none.h
+--- a/js/src/jit/none/Lowering-none.h
++++ b/js/src/jit/none/Lowering-none.h
+@@ -41,24 +41,16 @@ class LIRGeneratorNone : public LIRGener
+     void lowerForALU(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
+     template <typename T>
+     void lowerForFPU(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
+     template <typename T>
+     void lowerForALUInt64(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
+     void lowerForMulInt64(LMulI64*, MMul*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
+     template <typename T>
+     void lowerForShiftInt64(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
+-    void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+-                         MDefinition* lhs, MDefinition* rhs) {
+-        MOZ_CRASH();
+-    }
+-    void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+-                         MDefinition* lhs, MDefinition* rhs) {
+-        MOZ_CRASH();
+-    }
+     void lowerForBitAndAndBranch(LBitAndAndBranch*, MInstruction*,
+                                  MDefinition*, MDefinition*) {
+         MOZ_CRASH();
+     }
+ 
+     void lowerConstantDouble(double, MInstruction*) { MOZ_CRASH(); }
+     void lowerConstantFloat32(float, MInstruction*) { MOZ_CRASH(); }
+     void lowerTruncateDToInt32(MTruncateToInt32*) { MOZ_CRASH(); }
+diff --git a/js/src/jit/none/MacroAssembler-none.h b/js/src/jit/none/MacroAssembler-none.h
+--- a/js/src/jit/none/MacroAssembler-none.h
++++ b/js/src/jit/none/MacroAssembler-none.h
+@@ -268,55 +268,34 @@ class MacroAssemblerNone : public Assemb
+     template <typename T> void movePtr(T, Register) { MOZ_CRASH(); }
+     template <typename T> void move32(T, Register) { MOZ_CRASH(); }
+     template <typename T, typename S> void movq(T, S) { MOZ_CRASH(); }
+     template <typename T, typename S> void moveFloat32(T, S) { MOZ_CRASH(); }
+     template <typename T, typename S> void moveDouble(T, S) { MOZ_CRASH(); }
+     template <typename T, typename S> void move64(T, S) { MOZ_CRASH(); }
+     template <typename T> CodeOffset movWithPatch(T, Register) { MOZ_CRASH(); }
+ 
+-    template <typename T> void loadInt32x1(T, FloatRegister dest) { MOZ_CRASH(); }
+-    template <typename T> void loadInt32x2(T, FloatRegister dest) { MOZ_CRASH(); }
+-    template <typename T> void loadInt32x3(T, FloatRegister dest) { MOZ_CRASH(); }
+-    template <typename T> void loadInt32x4(T, FloatRegister dest) { MOZ_CRASH(); }
+-    template <typename T> void loadFloat32x3(T, FloatRegister dest) { MOZ_CRASH(); }
+-    template <typename T> void loadFloat32x4(T, FloatRegister dest) { MOZ_CRASH(); }
+-
+     template <typename T> void loadPtr(T, Register) { MOZ_CRASH(); }
+     template <typename T> void load32(T, Register) { MOZ_CRASH(); }
+     template <typename T> void loadFloat32(T, FloatRegister) { MOZ_CRASH(); }
+     template <typename T> void loadDouble(T, FloatRegister) { MOZ_CRASH(); }
+-    template <typename T> void loadAlignedSimd128Int(T, FloatRegister) { MOZ_CRASH(); }
+-    template <typename T> void loadUnalignedSimd128Int(T, FloatRegister) { MOZ_CRASH(); }
+-    template <typename T> void loadAlignedSimd128Float(T, FloatRegister) { MOZ_CRASH(); }
+-    template <typename T> void loadUnalignedSimd128Float(T, FloatRegister) { MOZ_CRASH(); }
+     template <typename T> void loadPrivate(T, Register) { MOZ_CRASH(); }
+     template <typename T> void load8SignExtend(T, Register) { MOZ_CRASH(); }
+     template <typename T> void load8ZeroExtend(T, Register) { MOZ_CRASH(); }
+     template <typename T> void load16SignExtend(T, Register) { MOZ_CRASH(); }
+     template <typename T> void load16ZeroExtend(T, Register) { MOZ_CRASH(); }
+     template <typename T> void load64(T, Register64 ) { MOZ_CRASH(); }
+ 
+     template <typename T, typename S> void storePtr(const T&, S) { MOZ_CRASH(); }
+     template <typename T, typename S> void store32(T, S) { MOZ_CRASH(); }
+     template <typename T, typename S> void store32_NoSecondScratch(T, S) { MOZ_CRASH(); }
+     template <typename T, typename S> void storeFloat32(T, S) { MOZ_CRASH(); }
+     template <typename T, typename S> void storeDouble(T, S) { MOZ_CRASH(); }
+-    template <typename T, typename S> void storeAlignedSimd128Int(T, S) { MOZ_CRASH(); }
+-    template <typename T, typename S> void storeUnalignedSimd128Int(T, S) { MOZ_CRASH(); }
+-    template <typename T, typename S> void storeAlignedSimd128Float(T, S) { MOZ_CRASH(); }
+-    template <typename T, typename S> void storeUnalignedSimd128Float(T, S) { MOZ_CRASH(); }
+     template <typename T, typename S> void store8(T, S) { MOZ_CRASH(); }
+     template <typename T, typename S> void store16(T, S) { MOZ_CRASH(); }
+-    template <typename T, typename S> void storeInt32x1(T, S) { MOZ_CRASH(); }
+-    template <typename T, typename S> void storeInt32x2(T, S) { MOZ_CRASH(); }
+-    template <typename T, typename S> void storeInt32x3(T, S) { MOZ_CRASH(); }
+-    template <typename T, typename S> void storeInt32x4(T, S) { MOZ_CRASH(); }
+-    template <typename T, typename S> void storeFloat32x3(T, S) { MOZ_CRASH(); }
+-    template <typename T, typename S> void storeFloat32x4(T, S) { MOZ_CRASH(); }
+     template <typename T, typename S> void store64(T, S) { MOZ_CRASH(); }
+ 
+     template <typename T> void computeEffectiveAddress(T, Register) { MOZ_CRASH(); }
+ 
+     void splitTagForTest(ValueOperand, ScratchTagScope&) { MOZ_CRASH(); }
+ 
+     void boxDouble(FloatRegister, ValueOperand, FloatRegister) { MOZ_CRASH(); }
+     void boxNonDouble(JSValueType, Register, ValueOperand) { MOZ_CRASH(); }
+diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
+--- a/js/src/jit/shared/Assembler-shared.h
++++ b/js/src/jit/shared/Assembler-shared.h
+@@ -786,49 +786,39 @@ typedef Vector<SymbolicAccess, 0, System
+ // Describes a single wasm or asm.js memory access for the purpose of generating
+ // code and metadata.
+ 
+ class MemoryAccessDesc
+ {
+     uint32_t offset_;
+     uint32_t align_;
+     Scalar::Type type_;
+-    unsigned numSimdElems_;
+     jit::Synchronization sync_;
+     wasm::BytecodeOffset trapOffset_;
+ 
+   public:
+     explicit MemoryAccessDesc(Scalar::Type type, uint32_t align, uint32_t offset,
+-                              BytecodeOffset trapOffset, unsigned numSimdElems = 0,
++                              BytecodeOffset trapOffset,
+                               const jit::Synchronization& sync = jit::Synchronization::None())
+       : offset_(offset),
+         align_(align),
+         type_(type),
+-        numSimdElems_(numSimdElems),
+         sync_(sync),
+         trapOffset_(trapOffset)
+     {
+-        MOZ_ASSERT(Scalar::isSimdType(type) == (numSimdElems > 0));
+-        MOZ_ASSERT(numSimdElems <= jit::ScalarTypeToLength(type));
+         MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
+     }
+ 
+     uint32_t offset() const { return offset_; }
+     uint32_t align() const { return align_; }
+     Scalar::Type type() const { return type_; }
+-    unsigned byteSize() const {
+-        return Scalar::isSimdType(type())
+-               ? Scalar::scalarByteSize(type()) * numSimdElems()
+-               : Scalar::byteSize(type());
+-    }
+-    unsigned numSimdElems() const { MOZ_ASSERT(isSimd()); return numSimdElems_; }
++    unsigned byteSize() const { return Scalar::byteSize(type()); }
+     const jit::Synchronization& sync() const { return sync_; }
+     BytecodeOffset trapOffset() const { return trapOffset_; }
+     bool isAtomic() const { return !sync_.isNone(); }
+-    bool isSimd() const { return Scalar::isSimdType(type_); }
+ 
+     void clearOffset() { offset_ = 0; }
+     void setOffset(uint32_t offset) { offset_ = offset; }
+ };
+ 
+ // Summarizes a global access for a mutable (in asm.js) or immutable value (in
+ // asm.js or the wasm MVP) that needs to get patched later.
+ 
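With the SIMD access types gone, MemoryAccessDesc no longer carries a lane count and byteSize() collapses to the scalar size. A minimal sketch of a post-patch call site, assuming only the constructor shown in the hunk above (namespaces elided; the align/offset values are illustrative):

    // Hypothetical 4-byte-aligned i32 access; sync defaults to None.
    MemoryAccessDesc access(Scalar::Int32, /* align */ 4, /* offset */ 0,
                            BytecodeOffset(0));
    MOZ_ASSERT(access.byteSize() == 4);  // now simply Scalar::byteSize(type)
    MOZ_ASSERT(!access.isAtomic());      // no Synchronization was passed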
+diff --git a/js/src/jit/shared/CodeGenerator-shared-inl.h b/js/src/jit/shared/CodeGenerator-shared-inl.h
+--- a/js/src/jit/shared/CodeGenerator-shared-inl.h
++++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
+@@ -373,20 +373,16 @@ CodeGeneratorShared::verifyHeapAccessDis
+         }
+         break;
+       case Scalar::Int64:
+         // Can't encode an imm64-to-memory move.
+         op = OtherOperand(ToRegister(alloc).encoding());
+         break;
+       case Scalar::Float32:
+       case Scalar::Float64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+         op = OtherOperand(ToFloatRegister(alloc).encoding());
+         break;
+       case Scalar::Uint8Clamped:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("Unexpected array type");
+     }
+ 
+     HeapAccess access(kind, TypedArrayElemSize(type), ComplexAddress(mem), op);
+diff --git a/js/src/jit/shared/CodeGenerator-shared.cpp b/js/src/jit/shared/CodeGenerator-shared.cpp
+--- a/js/src/jit/shared/CodeGenerator-shared.cpp
++++ b/js/src/jit/shared/CodeGenerator-shared.cpp
+@@ -83,27 +83,20 @@ CodeGeneratorShared::CodeGeneratorShared
+ 
+     if (gen->compilingWasm()) {
+         // Since wasm uses the system ABI which does not necessarily use a
+         // regular array where all slots are sizeof(Value), it maintains the max
+         // argument stack depth separately.
+         MOZ_ASSERT(graph->argumentSlotCount() == 0);
+         frameDepth_ += gen->wasmMaxStackArgBytes();
+ 
+-        if (gen->usesSimd()) {
+-            // If the function uses any SIMD then we may need to insert padding
+-            // so that local slots are aligned for SIMD.
+-            frameInitialAdjustment_ = ComputeByteAlignment(sizeof(wasm::Frame), WasmStackAlignment);
+-            frameDepth_ += frameInitialAdjustment_;
++        static_assert(!SupportsSimd, "if SIMD were supported, we would need padding so that "
++                                     "local slots and the stack stay SIMD-aligned");
+ 
+-            // Keep the stack aligned. Some SIMD sequences build values on the
+-            // stack and need the stack aligned.
+-            frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
+-                                                WasmStackAlignment);
+-        } else if (gen->needsStaticStackAlignment()) {
++        if (gen->needsStaticStackAlignment()) {
+             // An MWasmCall does not align the stack pointer at calls sites but
+             // instead relies on the a priori stack adjustment. This must be the
+             // last adjustment of frameDepth_.
+             frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
+                                                 WasmStackAlignment);
+         }
+ 
+         // FrameSizeClass is only used for bailing, which cannot happen in
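The surviving branch keeps frameDepth_ statically aligned because MWasmCall relies on an a priori stack adjustment rather than aligning at each call site. A sketch of the padding arithmetic, assuming ComputeByteAlignment has its usual round-up semantics for a power-of-two alignment:

    // Padding that rounds `bytes` up to the next multiple of `alignment`.
    static uint32_t ComputeByteAlignment(uint32_t bytes, uint32_t alignment) {
        return (alignment - (bytes % alignment)) % alignment;
    }
    // e.g. sizeof(wasm::Frame) + frameDepth_ == 40, WasmStackAlignment == 16:
    // ComputeByteAlignment(40, 16) == 8, so 8 bytes of padding are added.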
+diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
+--- a/js/src/jit/shared/LIR-shared.h
++++ b/js/src/jit/shared/LIR-shared.h
+@@ -156,682 +156,16 @@ class LMoveGroup : public LInstructionHe
+             LMove move = getMove(i);
+             if (move.from() == LGeneralReg(reg) || move.to() == LGeneralReg(reg))
+                 return true;
+         }
+         return false;
+     }
+ };
+ 
+-
+-// Constructs a SIMD object (value type) based on the MIRType of its input.
+-class LSimdBox : public LInstructionHelper<1, 1, 1>
+-{
+-  public:
+-    LIR_HEADER(SimdBox)
+-
+-    explicit LSimdBox(const LAllocation& simd, const LDefinition& temp)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, simd);
+-        setTemp(0, temp);
+-    }
+-
+-    const LDefinition* temp() {
+-        return getTemp(0);
+-    }
+-
+-    MSimdBox* mir() const {
+-        return mir_->toSimdBox();
+-    }
+-};
+-
+-class LSimdUnbox : public LInstructionHelper<1, 1, 1>
+-{
+-  public:
+-    LIR_HEADER(SimdUnbox)
+-
+-    LSimdUnbox(const LAllocation& obj, const LDefinition& temp)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, obj);
+-        setTemp(0, temp);
+-    }
+-
+-    const LDefinition* temp() {
+-        return getTemp(0);
+-    }
+-
+-    MSimdUnbox* mir() const {
+-        return mir_->toSimdUnbox();
+-    }
+-};
+-
+-// Constructs a SIMD value with 16 equal components (int8x16).
+-class LSimdSplatX16 : public LInstructionHelper<1, 1, 0>
+-{
+-  public:
+-    LIR_HEADER(SimdSplatX16)
+-    explicit LSimdSplatX16(const LAllocation& v)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, v);
+-    }
+-
+-    MSimdSplat* mir() const {
+-        return mir_->toSimdSplat();
+-    }
+-};
+-
+-// Constructs a SIMD value with 8 equal components (int16x8).
+-class LSimdSplatX8 : public LInstructionHelper<1, 1, 0>
+-{
+-  public:
+-    LIR_HEADER(SimdSplatX8)
+-    explicit LSimdSplatX8(const LAllocation& v)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, v);
+-    }
+-
+-    MSimdSplat* mir() const {
+-        return mir_->toSimdSplat();
+-    }
+-};
+-
+-// Constructs a SIMD value with 4 equal components (e.g. int32x4, float32x4).
+-class LSimdSplatX4 : public LInstructionHelper<1, 1, 0>
+-{
+-  public:
+-    LIR_HEADER(SimdSplatX4)
+-    explicit LSimdSplatX4(const LAllocation& v)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, v);
+-    }
+-
+-    MSimdSplat* mir() const {
+-        return mir_->toSimdSplat();
+-    }
+-};
+-
+-// Reinterpret the bits of a SIMD value with a different type.
+-class LSimdReinterpretCast : public LInstructionHelper<1, 1, 0>
+-{
+-  public:
+-    LIR_HEADER(SimdReinterpretCast)
+-    explicit LSimdReinterpretCast(const LAllocation& v)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, v);
+-    }
+-
+-    MSimdReinterpretCast* mir() const {
+-        return mir_->toSimdReinterpretCast();
+-    }
+-};
+-
+-class LSimdExtractElementBase : public LInstructionHelper<1, 1, 0>
+-{
+-  protected:
+-    LSimdExtractElementBase(Opcode opcode, const LAllocation& base)
+-      : LInstructionHelper(opcode)
+-    {
+-        setOperand(0, base);
+-    }
+-
+-  public:
+-    const LAllocation* getBase() {
+-        return getOperand(0);
+-    }
+-    MSimdExtractElement* mir() const {
+-        return mir_->toSimdExtractElement();
+-    }
+-};
+-
+-// Extracts an element from a given SIMD bool32x4 lane.
+-class LSimdExtractElementB : public LSimdExtractElementBase
+-{
+-  public:
+-    LIR_HEADER(SimdExtractElementB);
+-    explicit LSimdExtractElementB(const LAllocation& base)
+-      : LSimdExtractElementBase(classOpcode, base)
+-    {}
+-};
+-
+-// Extracts an element from a given SIMD int32x4 lane.
+-class LSimdExtractElementI : public LSimdExtractElementBase
+-{
+-  public:
+-    LIR_HEADER(SimdExtractElementI);
+-    explicit LSimdExtractElementI(const LAllocation& base)
+-      : LSimdExtractElementBase(classOpcode, base)
+-    {}
+-};
+-
+-// Extracts an element from a given SIMD float32x4 lane.
+-class LSimdExtractElementF : public LSimdExtractElementBase
+-{
+-  public:
+-    LIR_HEADER(SimdExtractElementF);
+-    explicit LSimdExtractElementF(const LAllocation& base)
+-      : LSimdExtractElementBase(classOpcode, base)
+-    {}
+-};
+-
+-// Extracts an element from an Uint32x4 SIMD vector, converts to double.
+-class LSimdExtractElementU2D : public LInstructionHelper<1, 1, 1>
+-{
+-  public:
+-    LIR_HEADER(SimdExtractElementU2D);
+-    LSimdExtractElementU2D(const LAllocation& base, const LDefinition& temp)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, base);
+-        setTemp(0, temp);
+-    }
+-    MSimdExtractElement* mir() const {
+-        return mir_->toSimdExtractElement();
+-    }
+-    const LDefinition* temp() {
+-        return getTemp(0);
+-    }
+-};
+-
+-
+-class LSimdInsertElementBase : public LInstructionHelper<1, 2, 0>
+-{
+-  protected:
+-    LSimdInsertElementBase(Opcode opcode, const LAllocation& vec, const LAllocation& val)
+-      : LInstructionHelper(opcode)
+-    {
+-        setOperand(0, vec);
+-        setOperand(1, val);
+-    }
+-
+-  public:
+-    const LAllocation* vector() {
+-        return getOperand(0);
+-    }
+-    const LAllocation* value() {
+-        return getOperand(1);
+-    }
+-    unsigned lane() const {
+-        return mir_->toSimdInsertElement()->lane();
+-    }
+-    unsigned length() const {
+-        return SimdTypeToLength(mir_->toSimdInsertElement()->type());
+-    }
+-};
+-
+-// Replace an element from a given SIMD integer or boolean lane with a given value.
+-// The value inserted into a boolean lane should be 0 or -1.
+-class LSimdInsertElementI : public LSimdInsertElementBase
+-{
+-  public:
+-    LIR_HEADER(SimdInsertElementI);
+-    LSimdInsertElementI(const LAllocation& vec, const LAllocation& val)
+-      : LSimdInsertElementBase(classOpcode, vec, val)
+-    {}
+-};
+-
+-// Replace an element from a given SIMD float32x4 lane with a given value.
+-class LSimdInsertElementF : public LSimdInsertElementBase
+-{
+-  public:
+-    LIR_HEADER(SimdInsertElementF);
+-    LSimdInsertElementF(const LAllocation& vec, const LAllocation& val)
+-      : LSimdInsertElementBase(classOpcode, vec, val)
+-    {}
+-};
+-
+-// Base class for both int32x4 and float32x4 shuffle instructions.
+-class LSimdSwizzleBase : public LInstructionHelper<1, 1, 1>
+-{
+-  public:
+-    LSimdSwizzleBase(Opcode opcode, const LAllocation& base)
+-      : LInstructionHelper(opcode)
+-    {
+-        setOperand(0, base);
+-    }
+-
+-    const LAllocation* getBase() {
+-        return getOperand(0);
+-    }
+-
+-    unsigned numLanes() const { return mir_->toSimdSwizzle()->numLanes(); }
+-    uint32_t lane(unsigned i) const { return mir_->toSimdSwizzle()->lane(i); }
+-
+-    bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
+-        return mir_->toSimdSwizzle()->lanesMatch(x, y, z, w);
+-    }
+-};
+-
+-// Shuffles a int32x4 into another int32x4 vector.
+-class LSimdSwizzleI : public LSimdSwizzleBase
+-{
+-  public:
+-    LIR_HEADER(SimdSwizzleI);
+-    explicit LSimdSwizzleI(const LAllocation& base) : LSimdSwizzleBase(classOpcode, base)
+-    {}
+-};
+-// Shuffles a float32x4 into another float32x4 vector.
+-class LSimdSwizzleF : public LSimdSwizzleBase
+-{
+-  public:
+-    LIR_HEADER(SimdSwizzleF);
+-    explicit LSimdSwizzleF(const LAllocation& base) : LSimdSwizzleBase(classOpcode, base)
+-    {}
+-};
+-
+-class LSimdGeneralShuffleBase : public LVariadicInstruction<1, 1>
+-{
+-  public:
+-    LSimdGeneralShuffleBase(LNode::Opcode opcode, uint32_t numOperands, const LDefinition& temp)
+-      : LVariadicInstruction<1, 1>(opcode, numOperands)
+-    {
+-        setTemp(0, temp);
+-    }
+-    const LAllocation* vector(unsigned i) {
+-        MOZ_ASSERT(i < mir()->numVectors());
+-        return getOperand(i);
+-    }
+-    const LAllocation* lane(unsigned i) {
+-        MOZ_ASSERT(i < mir()->numLanes());
+-        return getOperand(mir()->numVectors() + i);
+-    }
+-    const LDefinition* temp() {
+-        return getTemp(0);
+-    }
+-    MSimdGeneralShuffle* mir() const {
+-        return mir_->toSimdGeneralShuffle();
+-    }
+-};
+-
+-class LSimdGeneralShuffleI : public LSimdGeneralShuffleBase
+-{
+-  public:
+-    LIR_HEADER(SimdGeneralShuffleI);
+-
+-    LSimdGeneralShuffleI(uint32_t numOperands, const LDefinition& temp)
+-      : LSimdGeneralShuffleBase(classOpcode, numOperands, temp)
+-    {}
+-};
+-
+-class LSimdGeneralShuffleF : public LSimdGeneralShuffleBase
+-{
+-  public:
+-    LIR_HEADER(SimdGeneralShuffleF);
+-
+-    LSimdGeneralShuffleF(uint32_t numOperands, const LDefinition& temp)
+-      : LSimdGeneralShuffleBase(classOpcode, numOperands, temp)
+-    {}
+-};
+-
+-// Base class for both int32x4 and float32x4 shuffle instructions.
+-class LSimdShuffleX4 : public LInstructionHelper<1, 2, 1>
+-{
+-  public:
+-    LIR_HEADER(SimdShuffleX4);
+-    LSimdShuffleX4()
+-      : LInstructionHelper(classOpcode)
+-    {}
+-
+-    const LAllocation* lhs() {
+-        return getOperand(0);
+-    }
+-    const LAllocation* rhs() {
+-        return getOperand(1);
+-    }
+-    const LDefinition* temp() {
+-        return getTemp(0);
+-    }
+-
+-    uint32_t lane(unsigned i) const { return mir_->toSimdShuffle()->lane(i); }
+-
+-    bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
+-        return mir_->toSimdShuffle()->lanesMatch(x, y, z, w);
+-    }
+-};
+-
+-// Remaining shuffles (8x16, 16x8).
+-class LSimdShuffle : public LInstructionHelper<1, 2, 1>
+-{
+-  public:
+-    LIR_HEADER(SimdShuffle);
+-    LSimdShuffle()
+-      : LInstructionHelper(classOpcode)
+-    {}
+-
+-    const LAllocation* lhs() {
+-        return getOperand(0);
+-    }
+-    const LAllocation* rhs() {
+-        return getOperand(1);
+-    }
+-    const LDefinition* temp() {
+-        return getTemp(0);
+-    }
+-
+-    unsigned numLanes() const { return mir_->toSimdShuffle()->numLanes(); }
+-    unsigned lane(unsigned i) const { return mir_->toSimdShuffle()->lane(i); }
+-};
+-
+-// Binary SIMD comparison operation between two SIMD operands
+-class LSimdBinaryComp: public LInstructionHelper<1, 2, 0>
+-{
+-  protected:
+-    explicit LSimdBinaryComp(LNode::Opcode opcode)
+-      : LInstructionHelper<1, 2, 0>(opcode)
+-    {}
+-
+-  public:
+-    const LAllocation* lhs() {
+-        return getOperand(0);
+-    }
+-    const LAllocation* rhs() {
+-        return getOperand(1);
+-    }
+-    MSimdBinaryComp::Operation operation() const {
+-        return mir_->toSimdBinaryComp()->operation();
+-    }
+-    const char* extraName() const {
+-        return MSimdBinaryComp::OperationName(operation());
+-    }
+-};
+-
+-// Binary SIMD comparison operation between two Int8x16 operands.
+-class LSimdBinaryCompIx16 : public LSimdBinaryComp
+-{
+-  public:
+-    LIR_HEADER(SimdBinaryCompIx16);
+-    LSimdBinaryCompIx16() : LSimdBinaryComp(classOpcode) {}
+-};
+-
+-// Binary SIMD comparison operation between two Int16x8 operands.
+-class LSimdBinaryCompIx8 : public LSimdBinaryComp
+-{
+-  public:
+-    LIR_HEADER(SimdBinaryCompIx8);
+-    LSimdBinaryCompIx8() : LSimdBinaryComp(classOpcode) {}
+-};
+-
+-// Binary SIMD comparison operation between two Int32x4 operands.
+-class LSimdBinaryCompIx4 : public LSimdBinaryComp
+-{
+-  public:
+-    LIR_HEADER(SimdBinaryCompIx4);
+-    LSimdBinaryCompIx4() : LSimdBinaryComp(classOpcode) {}
+-};
+-
+-// Binary SIMD comparison operation between two Float32x4 operands
+-class LSimdBinaryCompFx4 : public LSimdBinaryComp
+-{
+-  public:
+-    LIR_HEADER(SimdBinaryCompFx4);
+-    LSimdBinaryCompFx4() : LSimdBinaryComp(classOpcode) {}
+-};
+-
+-// Binary SIMD arithmetic operation between two SIMD operands
+-class LSimdBinaryArith : public LInstructionHelper<1, 2, 1>
+-{
+-  public:
+-    explicit LSimdBinaryArith(LNode::Opcode opcode)
+-      : LInstructionHelper<1, 2, 1>(opcode)
+-    {}
+-
+-    const LAllocation* lhs() {
+-        return this->getOperand(0);
+-    }
+-    const LAllocation* rhs() {
+-        return this->getOperand(1);
+-    }
+-    const LDefinition* temp() {
+-        return getTemp(0);
+-    }
+-
+-    MSimdBinaryArith::Operation operation() const {
+-        return this->mir_->toSimdBinaryArith()->operation();
+-    }
+-    const char* extraName() const {
+-        return MSimdBinaryArith::OperationName(operation());
+-    }
+-};
+-
+-// Binary SIMD arithmetic operation between two Int8x16 operands
+-class LSimdBinaryArithIx16 : public LSimdBinaryArith
+-{
+-  public:
+-    LIR_HEADER(SimdBinaryArithIx16);
+-    LSimdBinaryArithIx16() : LSimdBinaryArith(classOpcode) {}
+-};
+-
+-// Binary SIMD arithmetic operation between two Int16x8 operands
+-class LSimdBinaryArithIx8 : public LSimdBinaryArith
+-{
+-  public:
+-    LIR_HEADER(SimdBinaryArithIx8);
+-    LSimdBinaryArithIx8() : LSimdBinaryArith(classOpcode) {}
+-};
+-
+-// Binary SIMD arithmetic operation between two Int32x4 operands
+-class LSimdBinaryArithIx4 : public LSimdBinaryArith
+-{
+-  public:
+-    LIR_HEADER(SimdBinaryArithIx4);
+-    LSimdBinaryArithIx4() : LSimdBinaryArith(classOpcode) {}
+-};
+-
+-// Binary SIMD arithmetic operation between two Float32x4 operands
+-class LSimdBinaryArithFx4 : public LSimdBinaryArith
+-{
+-  public:
+-    LIR_HEADER(SimdBinaryArithFx4);
+-    LSimdBinaryArithFx4() : LSimdBinaryArith(classOpcode) {}
+-};
+-
+-// Binary SIMD saturating arithmetic operation between two SIMD operands
+-class LSimdBinarySaturating : public LInstructionHelper<1, 2, 0>
+-{
+-  public:
+-    LIR_HEADER(SimdBinarySaturating);
+-    LSimdBinarySaturating()
+-      : LInstructionHelper(classOpcode)
+-    {}
+-
+-    const LAllocation* lhs() {
+-        return this->getOperand(0);
+-    }
+-    const LAllocation* rhs() {
+-        return this->getOperand(1);
+-    }
+-
+-    MSimdBinarySaturating::Operation operation() const {
+-        return this->mir_->toSimdBinarySaturating()->operation();
+-    }
+-    SimdSign signedness() const {
+-        return this->mir_->toSimdBinarySaturating()->signedness();
+-    }
+-    MIRType type() const {
+-        return mir_->type();
+-    }
+-    const char* extraName() const {
+-        return MSimdBinarySaturating::OperationName(operation());
+-    }
+-};
+-
+-// Unary SIMD arithmetic operation on a SIMD operand
+-class LSimdUnaryArith : public LInstructionHelper<1, 1, 0>
+-{
+-  public:
+-    LSimdUnaryArith(LNode::Opcode opcode, const LAllocation& in)
+-      : LInstructionHelper(opcode)
+-    {
+-        setOperand(0, in);
+-    }
+-    MSimdUnaryArith::Operation operation() const {
+-        return mir_->toSimdUnaryArith()->operation();
+-    }
+-};
+-
+-// Unary SIMD arithmetic operation on a Int8x16 operand
+-class LSimdUnaryArithIx16 : public LSimdUnaryArith
+-{
+-  public:
+-    LIR_HEADER(SimdUnaryArithIx16);
+-    explicit LSimdUnaryArithIx16(const LAllocation& in) : LSimdUnaryArith(classOpcode, in) {}
+-};
+-
+-// Unary SIMD arithmetic operation on a Int16x8 operand
+-class LSimdUnaryArithIx8 : public LSimdUnaryArith
+-{
+-  public:
+-    LIR_HEADER(SimdUnaryArithIx8);
+-    explicit LSimdUnaryArithIx8(const LAllocation& in) : LSimdUnaryArith(classOpcode, in) {}
+-};
+-
+-// Unary SIMD arithmetic operation on a Int32x4 operand
+-class LSimdUnaryArithIx4 : public LSimdUnaryArith
+-{
+-  public:
+-    LIR_HEADER(SimdUnaryArithIx4);
+-    explicit LSimdUnaryArithIx4(const LAllocation& in) : LSimdUnaryArith(classOpcode, in) {}
+-};
+-
+-// Unary SIMD arithmetic operation on a Float32x4 operand
+-class LSimdUnaryArithFx4 : public LSimdUnaryArith
+-{
+-  public:
+-    LIR_HEADER(SimdUnaryArithFx4);
+-    explicit LSimdUnaryArithFx4(const LAllocation& in) : LSimdUnaryArith(classOpcode, in) {}
+-};
+-
+-// Binary SIMD bitwise operation between two 128-bit operands.
+-class LSimdBinaryBitwise : public LInstructionHelper<1, 2, 0>
+-{
+-  public:
+-    LIR_HEADER(SimdBinaryBitwise);
+-    LSimdBinaryBitwise()
+-      : LInstructionHelper(classOpcode)
+-    {}
+-    const LAllocation* lhs() {
+-        return getOperand(0);
+-    }
+-    const LAllocation* rhs() {
+-        return getOperand(1);
+-    }
+-    MSimdBinaryBitwise::Operation operation() const {
+-        return mir_->toSimdBinaryBitwise()->operation();
+-    }
+-    const char* extraName() const {
+-        return MSimdBinaryBitwise::OperationName(operation());
+-    }
+-    MIRType type() const {
+-        return mir_->type();
+-    }
+-};
+-
+-// Shift a SIMD vector by a scalar amount.
+-// The temp register is only required if the shift amount is a dynamic
+-// value. If it is a constant, use a BogusTemp instead.
+-class LSimdShift : public LInstructionHelper<1, 2, 1>
+-{
+-  public:
+-    LIR_HEADER(SimdShift)
+-    LSimdShift(const LAllocation& vec, const LAllocation& val, const LDefinition& temp)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, vec);
+-        setOperand(1, val);
+-        setTemp(0, temp);
+-    }
+-    const LAllocation* vector() {
+-        return getOperand(0);
+-    }
+-    const LAllocation* value() {
+-        return getOperand(1);
+-    }
+-    const LDefinition* temp() {
+-        return getTemp(0);
+-    }
+-    MSimdShift::Operation operation() const {
+-        return mir_->toSimdShift()->operation();
+-    }
+-    const char* extraName() const {
+-        return MSimdShift::OperationName(operation());
+-    }
+-    MSimdShift* mir() const {
+-        return mir_->toSimdShift();
+-    }
+-    MIRType type() const {
+-        return mir_->type();
+-    }
+-};
+-
+-// SIMD selection of lanes from two int32x4 or float32x4 arguments based on a
+-// int32x4 argument.
+-class LSimdSelect : public LInstructionHelper<1, 3, 1>
+-{
+-  public:
+-    LIR_HEADER(SimdSelect);
+-    LSimdSelect()
+-      : LInstructionHelper(classOpcode)
+-    {}
+-    const LAllocation* mask() {
+-        return getOperand(0);
+-    }
+-    const LAllocation* lhs() {
+-        return getOperand(1);
+-    }
+-    const LAllocation* rhs() {
+-        return getOperand(2);
+-    }
+-    const LDefinition* temp() {
+-        return getTemp(0);
+-    }
+-    MSimdSelect* mir() const {
+-        return mir_->toSimdSelect();
+-    }
+-};
+-
+-class LSimdAnyTrue : public LInstructionHelper<1, 1, 0>
+-{
+-  public:
+-    LIR_HEADER(SimdAnyTrue)
+-    explicit LSimdAnyTrue(const LAllocation& input)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, input);
+-    }
+-    const LAllocation* vector() {
+-        return getOperand(0);
+-    }
+-    MSimdAnyTrue* mir() const {
+-        return mir_->toSimdAnyTrue();
+-    }
+-};
+-
+-class LSimdAllTrue : public LInstructionHelper<1, 1, 0>
+-{
+-  public:
+-    LIR_HEADER(SimdAllTrue)
+-    explicit LSimdAllTrue(const LAllocation& input)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, input);
+-    }
+-    const LAllocation* vector() {
+-        return getOperand(0);
+-    }
+-    MSimdAllTrue* mir() const {
+-        return mir_->toSimdAllTrue();
+-    }
+-};
+-
+-
+ // Constant 32-bit integer.
+ class LInteger : public LInstructionHelper<1, 0, 0>
+ {
+     int32_t i32_;
+ 
+   public:
+     LIR_HEADER(Integer)
+ 
+@@ -936,37 +270,16 @@ class LFloat32 : public LInstructionHelp
+         f_(f)
+     { }
+ 
+     const float& getFloat() const {
+         return f_;
+     }
+ };
+ 
+-// Constant 128-bit SIMD integer vector (8x16, 16x8, 32x4).
+-// Also used for Bool32x4, Bool16x8, etc.
+-class LSimd128Int : public LInstructionHelper<1, 0, 0>
+-{
+-  public:
+-    LIR_HEADER(Simd128Int);
+-
+-    explicit LSimd128Int() : LInstructionHelper(classOpcode) {}
+-    const SimdConstant& getValue() const { return mir_->toSimdConstant()->value(); }
+-};
+-
+-// Constant 128-bit SIMD floating point vector (32x4, 64x2).
+-class LSimd128Float : public LInstructionHelper<1, 0, 0>
+-{
+-  public:
+-    LIR_HEADER(Simd128Float);
+-
+-    explicit LSimd128Float() : LInstructionHelper(classOpcode) {}
+-    const SimdConstant& getValue() const { return mir_->toSimdConstant()->value(); }
+-};
+-
+ // A constant Value.
+ class LValue : public LInstructionHelper<BOX_PIECES, 0, 0>
+ {
+     Value v_;
+ 
+   public:
+     LIR_HEADER(Value)
+ 
+@@ -5106,69 +4419,16 @@ class LValueToObjectOrNull : public LIns
+ 
+     static const size_t Input = 0;
+ 
+     const MToObjectOrNull* mir() {
+         return mir_->toToObjectOrNull();
+     }
+ };
+ 
+-class LInt32x4ToFloat32x4 : public LInstructionHelper<1, 1, 0>
+-{
+-  public:
+-    LIR_HEADER(Int32x4ToFloat32x4);
+-    explicit LInt32x4ToFloat32x4(const LAllocation& input)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, input);
+-    }
+-};
+-
+-class LFloat32x4ToInt32x4 : public LInstructionHelper<1, 1, 1>
+-{
+-  public:
+-    LIR_HEADER(Float32x4ToInt32x4);
+-    explicit LFloat32x4ToInt32x4(const LAllocation& input, const LDefinition& temp)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, input);
+-        setTemp(0, temp);
+-    }
+-    const LDefinition* temp() {
+-        return getTemp(0);
+-    }
+-    const MSimdConvert* mir() const {
+-        return mir_->toSimdConvert();
+-    }
+-};
+-
+-// Float32x4 to Uint32x4 needs one GPR temp and one FloatReg temp.
+-class LFloat32x4ToUint32x4 : public LInstructionHelper<1, 1, 2>
+-{
+-  public:
+-    LIR_HEADER(Float32x4ToUint32x4);
+-    explicit LFloat32x4ToUint32x4(const LAllocation& input, const LDefinition& tempR,
+-                                  const LDefinition& tempF)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, input);
+-        setTemp(0, tempR);
+-        setTemp(1, tempF);
+-    }
+-    const LDefinition* tempR() {
+-        return getTemp(0);
+-    }
+-    const LDefinition* tempF() {
+-        return getTemp(1);
+-    }
+-    const MSimdConvert* mir() const {
+-        return mir_->toSimdConvert();
+-    }
+-};
+-
+ // Double raised to a half power.
+ class LPowHalfD : public LInstructionHelper<1, 1, 0>
+ {
+   public:
+     LIR_HEADER(PowHalfD);
+     explicit LPowHalfD(const LAllocation& input)
+       : LInstructionHelper(classOpcode)
+     {
+diff --git a/js/src/jit/shared/Lowering-shared-inl.h b/js/src/jit/shared/Lowering-shared-inl.h
+--- a/js/src/jit/shared/Lowering-shared-inl.h
++++ b/js/src/jit/shared/Lowering-shared-inl.h
+@@ -369,19 +369,16 @@ static inline bool
+ IsCompatibleLIRCoercion(MIRType to, MIRType from)
+ {
+     if (to == from)
+         return true;
+     if ((to == MIRType::Int32 || to == MIRType::Boolean) &&
+         (from == MIRType::Int32 || from == MIRType::Boolean)) {
+         return true;
+     }
+-    // SIMD types can be coerced with from*Bits operators.
+-    if (IsSimdType(to) && IsSimdType(from))
+-        return true;
+     return false;
+ }
+ 
+ 
+ // We can redefine the sin(x) and cos(x) function to return the sincos result.
+ void
+ LIRGeneratorShared::redefine(MDefinition* def, MDefinition* as, MMathFunction::Function func)
+ {
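After this hunk only identity coercions and Int32<->Boolean remain legal between LIR definitions and uses. Restated as a standalone predicate (the same logic as the patched function, shown for quick reference):

    static bool IsCompatibleLIRCoercion(MIRType to, MIRType from) {
        if (to == from)
            return true;
        // Int32 and Boolean share a register representation.
        return (to == MIRType::Int32 || to == MIRType::Boolean) &&
               (from == MIRType::Int32 || from == MIRType::Boolean);
    }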
+diff --git a/js/src/jit/x64/Assembler-x64.h b/js/src/jit/x64/Assembler-x64.h
+--- a/js/src/jit/x64/Assembler-x64.h
++++ b/js/src/jit/x64/Assembler-x64.h
+@@ -219,17 +219,17 @@ static constexpr uint32_t JitStackAlignm
+ static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+ static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+   "Stack alignment should be a non-zero multiple of sizeof(Value)");
+ 
+ // This boolean indicates whether we support SIMD instructions flavoured for
+ // this architecture or not. Rather than a method in the LIRGenerator, it is
+ // here such that it is accessible from the entire codebase. Once full support
+ // for SIMD is reached on all tier-1 platforms, this constant can be deleted.
+-static constexpr bool SupportsSimd = true;
++static constexpr bool SupportsSimd = false;
+ static constexpr uint32_t SimdMemoryAlignment = 16;
+ 
+ static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+   "Code alignment should be larger than any of the alignments which are used for "
+   "the constant sections of the code buffer.  Thus it should be larger than the "
+   "alignment for SIMD constants.");
+ 
+ static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
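Flipping SupportsSimd to false lets any remaining SIMD-only path be compiled out or statically rejected, as the CodeGenerator-shared.cpp hunk above already does. Both idioms in a sketch (illustrative, not from the tree):

    static_assert(!jit::SupportsSimd, "SIMD paths must stay dead on this branch");
    if (jit::SupportsSimd) {
        // Constant-folded away: never emitted while SupportsSimd is false.
    }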
+diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
+--- a/js/src/jit/x64/CodeGenerator-x64.cpp
++++ b/js/src/jit/x64/CodeGenerator-x64.cpp
+@@ -382,18 +382,16 @@ CodeGenerator::visitWasmUint32ToFloat32(
+     masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+ }
+ 
+ void
+ CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access, const LAllocation* value,
+                             Operand dstAddr)
+ {
+     if (value->isConstant()) {
+-        MOZ_ASSERT(!access.isSimd());
+-
+         masm.memoryBarrierBefore(access.sync());
+ 
+         const MConstant* mir = value->toConstant();
+         Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());
+ 
+         masm.append(access, masm.size());
+         switch (access.type()) {
+           case Scalar::Int8:
+@@ -406,20 +404,16 @@ CodeGeneratorX64::wasmStore(const wasm::
+             break;
+           case Scalar::Int32:
+           case Scalar::Uint32:
+             masm.movl(cst, dstAddr);
+             break;
+           case Scalar::Int64:
+           case Scalar::Float32:
+           case Scalar::Float64:
+-          case Scalar::Float32x4:
+-          case Scalar::Int8x16:
+-          case Scalar::Int16x8:
+-          case Scalar::Int32x4:
+           case Scalar::Uint8Clamped:
+           case Scalar::MaxTypedArrayViewType:
+             MOZ_CRASH("unexpected array type");
+         }
+ 
+         masm.memoryBarrierAfter(access.sync());
+     } else {
+         masm.wasmStore(access, ToAnyRegister(value), dstAddr);
+@@ -494,17 +488,16 @@ CodeGenerator::visitAsmJSLoadHeap(LAsmJS
+ {
+     const MAsmJSLoadHeap* mir = ins->mir();
+     MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+ 
+     const LAllocation* ptr = ins->ptr();
+     const LDefinition* out = ins->output();
+ 
+     Scalar::Type accessType = mir->access().type();
+-    MOZ_ASSERT(!Scalar::isSimdType(accessType));
+ 
+     Operand srcAddr = ptr->isBogus()
+                       ? Operand(HeapReg, mir->offset())
+                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
+ 
+     uint32_t before = masm.size();
+     masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(out));
+     uint32_t after = masm.size();
+@@ -516,17 +509,16 @@ CodeGenerator::visitAsmJSStoreHeap(LAsmJ
+ {
+     const MAsmJSStoreHeap* mir = ins->mir();
+     MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+ 
+     const LAllocation* ptr = ins->ptr();
+     const LAllocation* value = ins->value();
+ 
+     Scalar::Type accessType = mir->access().type();
+-    MOZ_ASSERT(!Scalar::isSimdType(accessType));
+ 
+     canonicalizeIfDeterministic(accessType, value);
+ 
+     Operand dstAddr = ptr->isBogus()
+                       ? Operand(HeapReg, mir->offset())
+                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
+ 
+     uint32_t before = masm.size();
+diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
+--- a/js/src/jit/x64/Lowering-x64.cpp
++++ b/js/src/jit/x64/Lowering-x64.cpp
+@@ -231,20 +231,16 @@ LIRGenerator::visitWasmStore(MWasmStore*
+         // No way to encode an int64-to-memory move on x64.
+         if (value->isConstant() && value->type() != MIRType::Int64)
+             valueAlloc = useOrConstantAtStart(value);
+         else
+             valueAlloc = useRegisterAtStart(value);
+         break;
+       case Scalar::Float32:
+       case Scalar::Float64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+         valueAlloc = useRegisterAtStart(value);
+         break;
+       case Scalar::Uint8Clamped:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("unexpected array type");
+     }
+ 
+     LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
+@@ -275,20 +271,16 @@ LIRGenerator::visitAsmJSStoreHeap(MAsmJS
+       case Scalar::Uint16:
+       case Scalar::Int32:
+       case Scalar::Uint32:
+         lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
+                                            useRegisterOrConstantAtStart(ins->value()));
+         break;
+       case Scalar::Float32:
+       case Scalar::Float64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+         lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
+                                            useRegisterAtStart(ins->value()));
+         break;
+       case Scalar::Int64:
+       case Scalar::Uint8Clamped:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("unexpected array type");
+     }
+diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp
+--- a/js/src/jit/x64/MacroAssembler-x64.cpp
++++ b/js/src/jit/x64/MacroAssembler-x64.cpp
+@@ -613,61 +613,31 @@ MacroAssembler::wasmLoad(const wasm::Mem
+         movl(srcAddr, out.gpr());
+         break;
+       case Scalar::Float32:
+         loadFloat32(srcAddr, out.fpu());
+         break;
+       case Scalar::Float64:
+         loadDouble(srcAddr, out.fpu());
+         break;
+-      case Scalar::Float32x4:
+-        switch (access.numSimdElems()) {
+-          // In memory-to-register mode, movss zeroes out the high lanes.
+-          case 1: loadFloat32(srcAddr, out.fpu()); break;
+-          // See comment above, which also applies to movsd.
+-          case 2: loadDouble(srcAddr, out.fpu()); break;
+-          case 4: loadUnalignedSimd128Float(srcAddr, out.fpu()); break;
+-          default: MOZ_CRASH("unexpected size for partial load");
+-        }
+-        break;
+-      case Scalar::Int32x4:
+-        switch (access.numSimdElems()) {
+-          // In memory-to-register mode, movd zeroes out the high lanes.
+-          case 1: vmovd(srcAddr, out.fpu()); break;
+-          // See comment above, which also applies to movq.
+-          case 2: vmovq(srcAddr, out.fpu()); break;
+-          case 4: loadUnalignedSimd128Int(srcAddr, out.fpu()); break;
+-          default: MOZ_CRASH("unexpected size for partial load");
+-        }
+-        break;
+-      case Scalar::Int8x16:
+-        MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
+-        loadUnalignedSimd128Int(srcAddr, out.fpu());
+-        break;
+-      case Scalar::Int16x8:
+-        MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
+-        loadUnalignedSimd128Int(srcAddr, out.fpu());
+-        break;
+       case Scalar::Int64:
+         MOZ_CRASH("int64 loads must use load64");
+       case Scalar::Uint8Clamped:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("unexpected array type");
+     }
+ 
+     memoryBarrierAfter(access.sync());
+ }
+ 
+ void
+ MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
+ {
+     memoryBarrierBefore(access.sync());
+ 
+-    MOZ_ASSERT(!access.isSimd());
+-
+     append(access, size());
+     switch (access.type()) {
+       case Scalar::Int8:
+         movsbq(srcAddr, out.reg);
+         break;
+       case Scalar::Uint8:
+         movzbq(srcAddr, out.reg);
+         break;
+@@ -684,20 +654,16 @@ MacroAssembler::wasmLoadI64(const wasm::
+       case Scalar::Uint32:
+         movl(srcAddr, out.reg);
+         break;
+       case Scalar::Int64:
+         movq(srcAddr, out.reg);
+         break;
+       case Scalar::Float32:
+       case Scalar::Float64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+         MOZ_CRASH("non-int64 loads should use load()");
+       case Scalar::Uint8Clamped:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("unexpected array type");
+     }
+ 
+     memoryBarrierAfter(access.sync());
+ }
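The two load entry points now split cleanly by result type: wasmLoadI64 sign- or zero-extends integer accesses into a full 64-bit register (movsbq/movzbq/.../movq) and crashes on float types, while wasmLoad covers i32 and float results and crashes on Scalar::Int64. A contract sketch with hypothetical caller-side names (resultType, out64, anyOut):

    if (resultType == MIRType::Int64)              // i64.load, i64.loadN_s/u
        masm.wasmLoadI64(access, srcAddr, out64);  // result in a Register64
    else                                           // i32/f32/f64 results
        masm.wasmLoad(access, srcAddr, anyOut);    // result in an AnyRegister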
+@@ -725,44 +691,16 @@ MacroAssembler::wasmStore(const wasm::Me
+         movq(value.gpr(), dstAddr);
+         break;
+       case Scalar::Float32:
+         storeUncanonicalizedFloat32(value.fpu(), dstAddr);
+         break;
+       case Scalar::Float64:
+         storeUncanonicalizedDouble(value.fpu(), dstAddr);
+         break;
+-      case Scalar::Float32x4:
+-        switch (access.numSimdElems()) {
+-          // In register-to-memory mode, movss writes only the low lane.
+-          case 1: storeUncanonicalizedFloat32(value.fpu(), dstAddr); break;
+-          // See comment above, which also applies to movsd.
+-          case 2: storeUncanonicalizedDouble(value.fpu(), dstAddr); break;
+-          case 4: storeUnalignedSimd128Float(value.fpu(), dstAddr); break;
+-          default: MOZ_CRASH("unexpected size for partial store");
+-        }
+-        break;
+-      case Scalar::Int32x4:
+-        switch (access.numSimdElems()) {
+-          // In memory-to-register mode, movd zeroes out the high lanes.
+-          case 1: vmovd(value.fpu(), dstAddr); break;
+-          // See comment above, which also applies to movq.
+-          case 2: vmovq(value.fpu(), dstAddr); break;
+-          case 4: storeUnalignedSimd128Int(value.fpu(), dstAddr); break;
+-          default: MOZ_CRASH("unexpected size for partial load");
+-        }
+-        break;
+-      case Scalar::Int8x16:
+-        MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
+-        storeUnalignedSimd128Int(value.fpu(), dstAddr);
+-        break;
+-      case Scalar::Int16x8:
+-        MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
+-        storeUnalignedSimd128Int(value.fpu(), dstAddr);
+-        break;
+       case Scalar::Uint8Clamped:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("unexpected array type");
+     }
+ 
+     memoryBarrierAfter(access.sync());
+ }
+ 
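For reference, the store dispatch surviving this hunk is purely scalar. A condensed summary (the narrow integer cases sit above the hunk's visible context and are assumed to be the usual movb/movw/movl):

    // Scalar::Int8          -> movb     Scalar::Int16/Uint16 -> movw
    // Scalar::Int32/Uint32  -> movl     Scalar::Int64        -> movq
    // Scalar::Float32       -> storeUncanonicalizedFloat32 (movss)
    // Scalar::Float64       -> storeUncanonicalizedDouble  (movsd)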
+diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
++++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+@@ -296,26 +296,21 @@ CodeGenerator::visitWasmStackArg(LWasmSt
+     } else {
+         switch (mir->input()->type()) {
+           case MIRType::Double:
+             masm.storeDouble(ToFloatRegister(ins->arg()), dst);
+             return;
+           case MIRType::Float32:
+             masm.storeFloat32(ToFloatRegister(ins->arg()), dst);
+             return;
+-          // StackPointer is SIMD-aligned and ABIArgGenerator guarantees
+-          // stack offsets are SIMD-aligned.
+           case MIRType::Int32x4:
+           case MIRType::Bool32x4:
+-            masm.storeAlignedSimd128Int(ToFloatRegister(ins->arg()), dst);
+-            return;
+           case MIRType::Float32x4:
+-            masm.storeAlignedSimd128Float(ToFloatRegister(ins->arg()), dst);
+-            return;
+-          default: break;
++          default:
++            break;
+         }
+         MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected mir type in WasmStackArg");
+     }
+ }
+ 
+ void
+ CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins)
+ {
+@@ -395,20 +390,16 @@ CodeGenerator::visitWasmReinterpret(LWas
+     }
+ }
+ 
+ void
+ CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds* ool)
+ {
+     switch (ool->viewType()) {
+       case Scalar::Int64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("unexpected array type");
+       case Scalar::Float32:
+         masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
+         break;
+       case Scalar::Float64:
+         masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
+         break;
+@@ -2484,863 +2475,16 @@ CodeGenerator::visitNegF(LNegF* ins)
+ {
+     FloatRegister input = ToFloatRegister(ins->input());
+     MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+ 
+     masm.negateFloat(input);
+ }
+ 
+ void
+-CodeGenerator::visitSimd128Int(LSimd128Int* ins)
+-{
+-    const LDefinition* out = ins->getDef(0);
+-    masm.loadConstantSimd128Int(ins->getValue(), ToFloatRegister(out));
+-}
+-
+-void
+-CodeGenerator::visitSimd128Float(LSimd128Float* ins)
+-{
+-    const LDefinition* out = ins->getDef(0);
+-    masm.loadConstantSimd128Float(ins->getValue(), ToFloatRegister(out));
+-}
+-
+-void
+-CodeGenerator::visitInt32x4ToFloat32x4(LInt32x4ToFloat32x4* ins)
+-{
+-    FloatRegister in = ToFloatRegister(ins->input());
+-    FloatRegister out = ToFloatRegister(ins->output());
+-    masm.convertInt32x4ToFloat32x4(in, out);
+-}
+-
+-void
+-CodeGenerator::visitFloat32x4ToInt32x4(LFloat32x4ToInt32x4* ins)
+-{
+-    FloatRegister in = ToFloatRegister(ins->input());
+-    FloatRegister out = ToFloatRegister(ins->output());
+-    Register temp = ToRegister(ins->temp());
+-    auto* ool = new(alloc()) OutOfLineSimdFloatToIntCheck(temp, in, ins,
+-                                                          ins->mir()->bytecodeOffset());
+-    addOutOfLineCode(ool, ins->mir());
+-    masm.checkedConvertFloat32x4ToInt32x4(in, out, temp, ool->entry(), ool->rejoin());
+-}
+-
+-void
+-CodeGeneratorX86Shared::visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck* ool)
+-{
+-    Label onConversionError;
+-    masm.oolConvertFloat32x4ToInt32x4(ool->input(), ool->temp(), ool->rejoin(), &onConversionError);
+-    masm.bind(&onConversionError);
+-    if (gen->compilingWasm())
+-        masm.wasmTrap(wasm::Trap::ImpreciseSimdConversion, ool->bytecodeOffset());
+-    else
+-        bailout(ool->ins()->snapshot());
+-}
+-
+-// Convert Float32x4 to Uint32x4.
+-// If any input lane value is out of range or NaN, bail out.
+-void
+-CodeGenerator::visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4* ins)
+-{
+-    FloatRegister in = ToFloatRegister(ins->input());
+-    FloatRegister out = ToFloatRegister(ins->output());
+-    Register temp = ToRegister(ins->tempR());
+-    FloatRegister tempF = ToFloatRegister(ins->tempF());
+-
+-    Label failed;
+-    masm.checkedConvertFloat32x4ToUint32x4(in, out, temp, tempF, &failed);
+-
+-    Label ok;
+-    masm.jump(&ok);
+-    masm.bind(&failed);
+-    if (gen->compilingWasm())
+-        masm.wasmTrap(wasm::Trap::ImpreciseSimdConversion, ins->mir()->bytecodeOffset());
+-    else
+-        bailout(ins->snapshot());
+-    masm.bind(&ok);
+-}
+-
+-void
+-CodeGenerator::visitSimdValueInt32x4(LSimdValueInt32x4* ins)
+-{
+-    MOZ_ASSERT(ins->mir()->type() == MIRType::Int32x4 || ins->mir()->type() == MIRType::Bool32x4);
+-    masm.createInt32x4(ToRegister(ins->getOperand(0)),
+-                       ToRegister(ins->getOperand(1)),
+-                       ToRegister(ins->getOperand(2)),
+-                       ToRegister(ins->getOperand(3)),
+-                       ToFloatRegister(ins->output())
+-                      );
+-}
+-
+-void
+-CodeGenerator::visitSimdValueFloat32x4(LSimdValueFloat32x4* ins)
+-{
+-    MOZ_ASSERT(ins->mir()->type() == MIRType::Float32x4);
+-
+-    FloatRegister r0 = ToFloatRegister(ins->getOperand(0));
+-    FloatRegister r1 = ToFloatRegister(ins->getOperand(1));
+-    FloatRegister r2 = ToFloatRegister(ins->getOperand(2));
+-    FloatRegister r3 = ToFloatRegister(ins->getOperand(3));
+-    FloatRegister tmp = ToFloatRegister(ins->getTemp(0));
+-    FloatRegister output = ToFloatRegister(ins->output());
+-
+-    masm.createFloat32x4(r0, r1, r2, r3, tmp, output);
+-}
+-
+-void
+-CodeGenerator::visitSimdSplatX16(LSimdSplatX16* ins)
+-{
+-    MOZ_ASSERT(SimdTypeToLength(ins->mir()->type()) == 16);
+-    Register input = ToRegister(ins->getOperand(0));
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    masm.splatX16(input, output);
+-}
+-
+-void
+-CodeGenerator::visitSimdSplatX8(LSimdSplatX8* ins)
+-{
+-    MOZ_ASSERT(SimdTypeToLength(ins->mir()->type()) == 8);
+-    Register input = ToRegister(ins->getOperand(0));
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    masm.splatX8(input, output);
+-}
+-
+-void
+-CodeGenerator::visitSimdSplatX4(LSimdSplatX4* ins)
+-{
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    MSimdSplat* mir = ins->mir();
+-    MOZ_ASSERT(IsSimdType(mir->type()));
+-    JS_STATIC_ASSERT(sizeof(float) == sizeof(int32_t));
+-    if (mir->type() == MIRType::Float32x4)
+-        masm.splatX4(ToFloatRegister(ins->getOperand(0)), output);
+-    else
+-        masm.splatX4(ToRegister(ins->getOperand(0)), output);
+-}
+-
+-void
+-CodeGenerator::visitSimdReinterpretCast(LSimdReinterpretCast* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->input());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    bool isIntLaneType = IsIntegerSimdType(ins->mir()->type());
+-    masm.reinterpretSimd(isIntLaneType, input, output);
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementB(LSimdExtractElementB* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->input());
+-    Register output = ToRegister(ins->output());
+-    MSimdExtractElement* mir = ins->mir();
+-    unsigned numLanes = SimdTypeToLength(mir->specialization());
+-    masm.extractLaneSimdBool(input, output, numLanes, mir->lane());
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementI(LSimdExtractElementI* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->input());
+-    Register output = ToRegister(ins->output());
+-    MSimdExtractElement* mir = ins->mir();
+-    unsigned numLanes = SimdTypeToLength(mir->specialization());
+-    switch (numLanes) {
+-      case 4:
+-        masm.extractLaneInt32x4(input, output, mir->lane());
+-        break;
+-      case 8:
+-        masm.extractLaneInt16x8(input, output, mir->lane(), mir->signedness());
+-        break;
+-      case 16:
+-        masm.extractLaneInt8x16(input, output, mir->lane(), mir->signedness());
+-        break;
+-      default:
+-        MOZ_CRASH("Unhandled SIMD length");
+-    }
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementU2D(LSimdExtractElementU2D* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->input());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    Register temp = ToRegister(ins->temp());
+-    MSimdExtractElement* mir = ins->mir();
+-    MOZ_ASSERT(mir->specialization() == MIRType::Int32x4);
+-    masm.extractLaneInt32x4(input, temp, mir->lane());
+-    masm.convertUInt32ToDouble(temp, output);
+-}
+-
+-void
+-CodeGenerator::visitSimdExtractElementF(LSimdExtractElementF* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->input());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    unsigned lane = ins->mir()->lane();
+-    bool canonicalize = !gen->compilingWasm();
+-    masm.extractLaneFloat32x4(input, output, lane, canonicalize);
+-}
+-
+-void
+-CodeGenerator::visitSimdInsertElementI(LSimdInsertElementI* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->vector());
+-    Register value = ToRegister(ins->value());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    MOZ_ASSERT(input == output); // defineReuseInput(0)
+-    unsigned lane = ins->lane();
+-    unsigned length = ins->length();
+-    masm.insertLaneSimdInt(input, value, output, lane, length);
+-}
+-
+-void
+-CodeGenerator::visitSimdInsertElementF(LSimdInsertElementF* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->vector());
+-    FloatRegister value = ToFloatRegister(ins->value());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    MOZ_ASSERT(input == output); // defineReuseInput(0)
+-    masm.insertLaneFloat32x4(input, value, output, ins->lane());
+-}
+-
+-void
+-CodeGenerator::visitSimdAllTrue(LSimdAllTrue* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->input());
+-    Register output = ToRegister(ins->output());
+-    masm.allTrueSimdBool(input, output);
+-}
+-
+-void
+-CodeGenerator::visitSimdAnyTrue(LSimdAnyTrue* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->input());
+-    Register output = ToRegister(ins->output());
+-    masm.anyTrueSimdBool(input, output);
+-}
+-
+-// XXX note for reviewer: this is SIMD.js only, no need to keep it for wasm.
+-template <class T, class Reg> void
+-CodeGeneratorX86Shared::visitSimdGeneralShuffle(LSimdGeneralShuffleBase* ins, Reg tempRegister)
+-{
+-    MSimdGeneralShuffle* mir = ins->mir();
+-    unsigned numVectors = mir->numVectors();
+-
+-    Register laneTemp = ToRegister(ins->temp());
+-
+-    // This won't generate fast code, but it's fine because we expect users
+-    // to have used constant indices (and thus MSimdGeneralShuffle to be folded
+-    // into MSimdSwizzle/MSimdShuffle, which are fast).
+-
+-    // We need stack space for the numVectors inputs and for the output vector.
+-    unsigned stackSpace = Simd128DataSize * (numVectors + 1);
+-    masm.reserveStack(stackSpace);
+-
+-    for (unsigned i = 0; i < numVectors; i++) {
+-        masm.storeAlignedVector<T>(ToFloatRegister(ins->vector(i)),
+-                                   Address(StackPointer, Simd128DataSize * (1 + i)));
+-    }
+-
+-    Label bail;
+-    const Scale laneScale = ScaleFromElemWidth(sizeof(T));
+-
+-    for (size_t i = 0; i < mir->numLanes(); i++) {
+-        Operand lane = ToOperand(ins->lane(i));
+-
+-        masm.cmp32(lane, Imm32(numVectors * mir->numLanes() - 1));
+-        masm.j(Assembler::Above, &bail);
+-
+-        if (lane.kind() == Operand::REG) {
+-            masm.loadScalar<T>(Operand(StackPointer, ToRegister(ins->lane(i)), laneScale, Simd128DataSize),
+-                               tempRegister);
+-        } else {
+-            masm.load32(lane, laneTemp);
+-            masm.loadScalar<T>(Operand(StackPointer, laneTemp, laneScale, Simd128DataSize), tempRegister);
+-        }
+-
+-        masm.storeScalar<T>(tempRegister, Address(StackPointer, i * sizeof(T)));
+-    }
+-
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    masm.loadAlignedVector<T>(Address(StackPointer, 0), output);
+-
+-    Label join;
+-    masm.jump(&join);
+-
+-    {
+-        masm.bind(&bail);
+-        masm.freeStack(stackSpace);
+-        bailout(ins->snapshot());
+-    }
+-
+-    masm.bind(&join);
+-    masm.setFramePushed(masm.framePushed() + stackSpace);
+-    masm.freeStack(stackSpace);
+-}
+-
+-// XXX SIMD.js only
+-void
+-CodeGenerator::visitSimdGeneralShuffleI(LSimdGeneralShuffleI* ins)
+-{
+-    switch (ins->mir()->type()) {
+-      case MIRType::Int8x16:
+-        return visitSimdGeneralShuffle<int8_t, Register>(ins, ToRegister(ins->temp()));
+-      case MIRType::Int16x8:
+-        return visitSimdGeneralShuffle<int16_t, Register>(ins, ToRegister(ins->temp()));
+-      case MIRType::Int32x4:
+-        return visitSimdGeneralShuffle<int32_t, Register>(ins, ToRegister(ins->temp()));
+-      default:
+-        MOZ_CRASH("unsupported type for general shuffle");
+-    }
+-}
+-void
+-CodeGenerator::visitSimdGeneralShuffleF(LSimdGeneralShuffleF* ins)
+-{
+-    ScratchFloat32Scope scratch(masm);
+-    visitSimdGeneralShuffle<float, FloatRegister>(ins, scratch);
+-}
+-
+-void
+-CodeGenerator::visitSimdSwizzleI(LSimdSwizzleI* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->input());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    const unsigned numLanes = ins->numLanes();
+-
+-    switch (numLanes) {
+-        case 4: {
+-            unsigned lanes[4];
+-            for (unsigned i = 0; i < 4; i++)
+-                lanes[i] = ins->lane(i);
+-            masm.swizzleInt32x4(input, output, lanes);
+-            return;
+-        }
+-    }
+-
+-    // In the general case, use pshufb if it is available. Convert to a
+-    // byte-wise swizzle.
+-    const unsigned bytesPerLane = 16 / numLanes;
+-    int8_t lanes[16];
+-    for (unsigned i = 0; i < numLanes; i++) {
+-        for (unsigned b = 0; b < bytesPerLane; b++)
+-            lanes[i * bytesPerLane + b] = ins->lane(i) * bytesPerLane + b;
+-    }
+-
+-    Maybe<Register> maybeTemp;
+-    if (!ins->getTemp(0)->isBogusTemp())
+-        maybeTemp.emplace(ToRegister(ins->getTemp(0)));
+-
+-    masm.swizzleInt8x16(input, output, maybeTemp, lanes);
+-}
+-
+-void
+-CodeGenerator::visitSimdSwizzleF(LSimdSwizzleF* ins)
+-{
+-    FloatRegister input = ToFloatRegister(ins->input());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    MOZ_ASSERT(ins->numLanes() == 4);
+-    unsigned lanes[4];
+-    for (unsigned i = 0; i < 4; i++)
+-        lanes[i] = ins->lane(i);
+-    masm.swizzleFloat32x4(input, output, lanes);
+-}
+-
+-void
+-CodeGenerator::visitSimdShuffle(LSimdShuffle* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    FloatRegister rhs = ToFloatRegister(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    const unsigned numLanes = ins->numLanes();
+-    const unsigned bytesPerLane = 16 / numLanes;
+-
+-    // Convert the shuffle to a byte-wise shuffle.
+-    uint8_t lanes[16];
+-    for (unsigned i = 0; i < numLanes; i++) {
+-        for (unsigned b = 0; b < bytesPerLane; b++) {
+-            lanes[i * bytesPerLane + b] = ins->lane(i) * bytesPerLane + b;
+-        }
+-    }
+-
+-    Maybe<FloatRegister> maybeFloatTemp;
+-    Maybe<Register> maybeTemp;
+-    if (AssemblerX86Shared::HasSSSE3())
+-        maybeFloatTemp.emplace(ToFloatRegister(ins->temp()));
+-    else
+-        maybeTemp.emplace(ToRegister(ins->temp()));
+-
+-    masm.shuffleInt8x16(lhs, rhs, output, maybeFloatTemp, maybeTemp, lanes);
+-}
+-
+-void
+-CodeGenerator::visitSimdShuffleX4(LSimdShuffleX4* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    FloatRegister out = ToFloatRegister(ins->output());
+-    unsigned lanes[4];
+-    for (unsigned i = 0; i < 4; i++)
+-        lanes[i] = ins->lane(i);
+-    Maybe<FloatRegister> maybeTemp;
+-    if (!ins->temp()->isBogusTemp())
+-        maybeTemp.emplace(ToFloatRegister(ins->temp()));
+-    masm.shuffleX4(lhs, rhs, out, maybeTemp, lanes);
+-}
+-
+-static inline Assembler::Condition
+-ToCondition(MSimdBinaryComp::Operation op)
+-{
+-    switch (op) {
+-      case MSimdBinaryComp::greaterThan: return Assembler::GreaterThan;
+-      case MSimdBinaryComp::equal: return Assembler::Equal;
+-      case MSimdBinaryComp::lessThan: return Assembler::LessThan;
+-      case MSimdBinaryComp::notEqual: return Assembler::NotEqual;
+-      case MSimdBinaryComp::greaterThanOrEqual: return Assembler::GreaterThanOrEqual;
+-      case MSimdBinaryComp::lessThanOrEqual: return Assembler::LessThanOrEqual;
+-    }
+-    MOZ_CRASH("unexpected cond");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx16(LSimdBinaryCompIx16* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    MOZ_ASSERT_IF(!Assembler::HasAVX(), output == lhs);
+-    masm.compareInt8x16(lhs, rhs, ToCondition(ins->operation()), output);
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx8(LSimdBinaryCompIx8* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    MOZ_ASSERT_IF(!Assembler::HasAVX(), output == lhs);
+-    masm.compareInt16x8(lhs, rhs, ToCondition(ins->operation()), output);
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompIx4(LSimdBinaryCompIx4* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    MOZ_ASSERT(ToFloatRegister(ins->output()) == lhs);
+-    masm.compareInt32x4(lhs, rhs, ToCondition(ins->operation()), lhs);
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryCompFx4(LSimdBinaryCompFx4* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    masm.compareFloat32x4(lhs, rhs, ToCondition(ins->operation()), output);
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx16(LSimdBinaryArithIx16* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-
+-    MSimdBinaryArith::Operation op = ins->operation();
+-    switch (op) {
+-      case MSimdBinaryArith::Op_add:
+-        masm.addInt8x16(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_sub:
+-        masm.subInt8x16(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_mul:
+-        // 8x16 mul is a valid operation, but not supported in SSE or AVX.
+-        // The operation is synthesized from 16x8 multiplies by
+-        // MSimdBinaryArith::AddLegalized().
+-        break;
+-      case MSimdBinaryArith::Op_div:
+-      case MSimdBinaryArith::Op_max:
+-      case MSimdBinaryArith::Op_min:
+-      case MSimdBinaryArith::Op_minNum:
+-      case MSimdBinaryArith::Op_maxNum:
+-        break;
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx8(LSimdBinaryArithIx8* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-
+-    MSimdBinaryArith::Operation op = ins->operation();
+-    switch (op) {
+-      case MSimdBinaryArith::Op_add:
+-        masm.addInt16x8(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_sub:
+-        masm.subInt16x8(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_mul:
+-        masm.mulInt16x8(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_div:
+-      case MSimdBinaryArith::Op_max:
+-      case MSimdBinaryArith::Op_min:
+-      case MSimdBinaryArith::Op_minNum:
+-      case MSimdBinaryArith::Op_maxNum:
+-        break;
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithIx4(LSimdBinaryArithIx4* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-
+-    MSimdBinaryArith::Operation op = ins->operation();
+-    switch (op) {
+-      case MSimdBinaryArith::Op_add:
+-        masm.addInt32x4(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_sub:
+-        masm.subInt32x4(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_mul: {
+-        Maybe<FloatRegister> maybeTemp;
+-        if (!AssemblerX86Shared::HasSSE41())
+-            maybeTemp.emplace(ToFloatRegister(ins->getTemp(0)));
+-        masm.mulInt32x4(lhs, rhs, maybeTemp, output);
+-        return;
+-      }
+-      case MSimdBinaryArith::Op_div:
+-        // x86 doesn't have SIMD i32 div.
+-        break;
+-      case MSimdBinaryArith::Op_max:
+-        // we can do max with a single instruction only if we have SSE4.1
+-        // using the PMAXSD instruction.
+-        break;
+-      case MSimdBinaryArith::Op_min:
+-        // we can do min with a single instruction only if we have SSE4.1
+-        // using the PMINSD instruction.
+-        break;
+-      case MSimdBinaryArith::Op_minNum:
+-      case MSimdBinaryArith::Op_maxNum:
+-        break;
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryArithFx4(LSimdBinaryArithFx4* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-
+-    MSimdBinaryArith::Operation op = ins->operation();
+-    switch (op) {
+-      case MSimdBinaryArith::Op_add:
+-        masm.addFloat32x4(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_sub:
+-        masm.subFloat32x4(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_mul:
+-        masm.mulFloat32x4(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_div:
+-        masm.divFloat32x4(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryArith::Op_max: {
+-        masm.maxFloat32x4(lhs, rhs, ToFloatRegister(ins->temp()), output);
+-        return;
+-      }
+-      case MSimdBinaryArith::Op_min: {
+-        masm.minFloat32x4(lhs, rhs, output);
+-        return;
+-      }
+-      case MSimdBinaryArith::Op_minNum: {
+-        masm.minNumFloat32x4(lhs, rhs, ToFloatRegister(ins->temp()), output);
+-        return;
+-      }
+-      case MSimdBinaryArith::Op_maxNum: {
+-        masm.maxNumFloat32x4(lhs, rhs, ToFloatRegister(ins->temp()), output);
+-        return;
+-      }
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinarySaturating(LSimdBinarySaturating* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-
+-    SimdSign sign = ins->signedness();
+-    MOZ_ASSERT(sign != SimdSign::NotApplicable);
+-
+-    switch (ins->type()) {
+-      case MIRType::Int8x16:
+-        switch (ins->operation()) {
+-          case MSimdBinarySaturating::add:
+-            masm.addSatInt8x16(lhs, rhs, sign, output);
+-            return;
+-          case MSimdBinarySaturating::sub:
+-            masm.subSatInt8x16(lhs, rhs, sign, output);
+-            return;
+-        }
+-        break;
+-
+-      case MIRType::Int16x8:
+-        switch (ins->operation()) {
+-          case MSimdBinarySaturating::add:
+-            masm.addSatInt16x8(lhs, rhs, sign, output);
+-            return;
+-          case MSimdBinarySaturating::sub:
+-            masm.subSatInt16x8(lhs, rhs, sign, output);
+-            return;
+-        }
+-        break;
+-
+-      default:
+-        break;
+-    }
+-    MOZ_CRASH("unsupported type for SIMD saturating arithmetic");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx16(LSimdUnaryArithIx16* ins)
+-{
+-    Operand in = ToOperand(ins->input());
+-    FloatRegister out = ToFloatRegister(ins->output());
+-    switch (ins->operation()) {
+-      case MSimdUnaryArith::neg:
+-        masm.negInt8x16(in, out);
+-        return;
+-      case MSimdUnaryArith::not_:
+-        masm.notInt8x16(in, out);
+-        return;
+-      case MSimdUnaryArith::abs:
+-      case MSimdUnaryArith::reciprocalApproximation:
+-      case MSimdUnaryArith::reciprocalSqrtApproximation:
+-      case MSimdUnaryArith::sqrt:
+-        break;
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx8(LSimdUnaryArithIx8* ins)
+-{
+-    Operand in = ToOperand(ins->input());
+-    FloatRegister out = ToFloatRegister(ins->output());
+-    switch (ins->operation()) {
+-      case MSimdUnaryArith::neg:
+-        masm.negInt16x8(in, out);
+-        return;
+-      case MSimdUnaryArith::not_:
+-        masm.notInt16x8(in, out);
+-        return;
+-      case MSimdUnaryArith::abs:
+-      case MSimdUnaryArith::reciprocalApproximation:
+-      case MSimdUnaryArith::reciprocalSqrtApproximation:
+-      case MSimdUnaryArith::sqrt:
+-        break;
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithIx4(LSimdUnaryArithIx4* ins)
+-{
+-    Operand in = ToOperand(ins->input());
+-    FloatRegister out = ToFloatRegister(ins->output());
+-    switch (ins->operation()) {
+-      case MSimdUnaryArith::neg:
+-        masm.negInt32x4(in, out);
+-        return;
+-      case MSimdUnaryArith::not_:
+-        masm.notInt32x4(in, out);
+-        return;
+-      case MSimdUnaryArith::abs:
+-      case MSimdUnaryArith::reciprocalApproximation:
+-      case MSimdUnaryArith::reciprocalSqrtApproximation:
+-      case MSimdUnaryArith::sqrt:
+-        break;
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
+-}
+-
+-void
+-CodeGenerator::visitSimdUnaryArithFx4(LSimdUnaryArithFx4* ins)
+-{
+-    Operand in = ToOperand(ins->input());
+-    FloatRegister out = ToFloatRegister(ins->output());
+-
+-    switch (ins->operation()) {
+-      case MSimdUnaryArith::abs:
+-        masm.absFloat32x4(in, out);
+-        return;
+-      case MSimdUnaryArith::neg:
+-        masm.negFloat32x4(in, out);
+-        return;
+-      case MSimdUnaryArith::not_:
+-        masm.notFloat32x4(in, out);
+-        return;
+-      case MSimdUnaryArith::reciprocalApproximation:
+-        masm.packedRcpApproximationFloat32x4(in, out);
+-        return;
+-      case MSimdUnaryArith::reciprocalSqrtApproximation:
+-        masm.packedRcpSqrtApproximationFloat32x4(in, out);
+-        return;
+-      case MSimdUnaryArith::sqrt:
+-        masm.packedSqrtFloat32x4(in, out);
+-        return;
+-    }
+-    MOZ_CRASH("unexpected SIMD op");
+-}
+-
+-void
+-CodeGenerator::visitSimdBinaryBitwise(LSimdBinaryBitwise* ins)
+-{
+-    FloatRegister lhs = ToFloatRegister(ins->lhs());
+-    Operand rhs = ToOperand(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-
+-    MSimdBinaryBitwise::Operation op = ins->operation();
+-    switch (op) {
+-      case MSimdBinaryBitwise::and_:
+-        if (ins->type() == MIRType::Float32x4)
+-            masm.bitwiseAndFloat32x4(lhs, rhs, output);
+-        else
+-            masm.bitwiseAndSimdInt(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryBitwise::or_:
+-        if (ins->type() == MIRType::Float32x4)
+-            masm.bitwiseOrFloat32x4(lhs, rhs, output);
+-        else
+-            masm.bitwiseOrSimdInt(lhs, rhs, output);
+-        return;
+-      case MSimdBinaryBitwise::xor_:
+-        if (ins->type() == MIRType::Float32x4)
+-            masm.bitwiseXorFloat32x4(lhs, rhs, output);
+-        else
+-            masm.bitwiseXorSimdInt(lhs, rhs, output);
+-        return;
+-    }
+-    MOZ_CRASH("unexpected SIMD bitwise op");
+-}
+-
+-void
+-CodeGenerator::visitSimdShift(LSimdShift* ins)
+-{
+-    FloatRegister out = ToFloatRegister(ins->output());
+-    MOZ_ASSERT(ToFloatRegister(ins->vector()) == out); // defineReuseInput(0);
+-
+-    // Note that SSE doesn't have instructions for shifting 8x16 vectors.
+-    // These shifts are synthesized by the MSimdShift::AddLegalized() function.
+-    const LAllocation* val = ins->value();
+-    if (val->isConstant()) {
+-        MOZ_ASSERT(ins->temp()->isBogusTemp());
+-        Imm32 count(uint32_t(ToInt32(val)));
+-        switch (ins->type()) {
+-          case MIRType::Int16x8:
+-            switch (ins->operation()) {
+-              case MSimdShift::lsh:
+-                masm.packedLeftShiftByScalarInt16x8(count, out);
+-                return;
+-              case MSimdShift::rsh:
+-                masm.packedRightShiftByScalarInt16x8(count, out);
+-                return;
+-              case MSimdShift::ursh:
+-                masm.packedUnsignedRightShiftByScalarInt16x8(count, out);
+-                return;
+-            }
+-            break;
+-          case MIRType::Int32x4:
+-            switch (ins->operation()) {
+-              case MSimdShift::lsh:
+-                masm.packedLeftShiftByScalarInt32x4(count, out);
+-                return;
+-              case MSimdShift::rsh:
+-                masm.packedRightShiftByScalarInt32x4(count, out);
+-                return;
+-              case MSimdShift::ursh:
+-                masm.packedUnsignedRightShiftByScalarInt32x4(count, out);
+-                return;
+-            }
+-            break;
+-          default:
+-            MOZ_CRASH("unsupported type for SIMD shifts");
+-        }
+-        MOZ_CRASH("unexpected SIMD bitwise op");
+-    }
+-
+-    Register temp = ToRegister(ins->temp());
+-    Register count = ToRegister(val);
+-
+-    switch (ins->type()) {
+-      case MIRType::Int16x8:
+-        switch (ins->operation()) {
+-          case MSimdShift::lsh:
+-            masm.packedLeftShiftByScalarInt16x8(out, count, temp, out);
+-            return;
+-          case MSimdShift::rsh:
+-            masm.packedRightShiftByScalarInt16x8(out, count, temp, out);
+-            return;
+-          case MSimdShift::ursh:
+-            masm.packedUnsignedRightShiftByScalarInt16x8(out, count, temp, out);
+-            return;
+-        }
+-        break;
+-      case MIRType::Int32x4:
+-        switch (ins->operation()) {
+-          case MSimdShift::lsh:
+-            masm.packedLeftShiftByScalarInt32x4(out, count, temp, out);
+-            return;
+-          case MSimdShift::rsh:
+-            masm.packedRightShiftByScalarInt32x4(out, count, temp, out);
+-            return;
+-          case MSimdShift::ursh:
+-            masm.packedUnsignedRightShiftByScalarInt32x4(out, count, temp, out);
+-            return;
+-        }
+-        break;
+-      default:
+-        MOZ_CRASH("unsupported type for SIMD shifts");
+-    }
+-    MOZ_CRASH("unexpected SIMD bitwise op");
+-}
+-
+-void
+-CodeGenerator::visitSimdSelect(LSimdSelect* ins)
+-{
+-    FloatRegister mask = ToFloatRegister(ins->mask());
+-    FloatRegister onTrue = ToFloatRegister(ins->lhs());
+-    FloatRegister onFalse = ToFloatRegister(ins->rhs());
+-    FloatRegister output = ToFloatRegister(ins->output());
+-    FloatRegister temp = ToFloatRegister(ins->temp());
+-
+-    MSimdSelect* mir = ins->mir();
+-    unsigned lanes = SimdTypeToLength(mir->type());
+-    if (lanes == 4)
+-        masm.selectX4(mask, onTrue, onFalse, temp, output);
+-    else
+-        masm.selectSimd128(mask, onTrue, onFalse, temp, output);
+-}
+-
+-void
+ CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir)
+ {
+     Register elements = ToRegister(lir->elements());
+     AnyRegister output = ToAnyRegister(lir->output());
+     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+ 
+     Register oldval = ToRegister(lir->oldval());
+     Register newval = ToRegister(lir->newval());
+@@ -3497,25 +2641,16 @@ CodeGeneratorX86Shared::canonicalizeIfDe
+         masm.canonicalizeFloatIfDeterministic(in);
+         break;
+       }
+       case Scalar::Float64: {
+         FloatRegister in = ToFloatRegister(value);
+         masm.canonicalizeDoubleIfDeterministic(in);
+         break;
+       }
+-      case Scalar::Float32x4: {
+-        FloatRegister in = ToFloatRegister(value);
+-        MOZ_ASSERT(in.isSimd128());
+-        FloatRegister scratch = in != xmm0.asSimd128() ? xmm0 : xmm1;
+-        masm.push(scratch);
+-        masm.canonicalizeFloat32x4(in, scratch);
+-        masm.pop(scratch);
+-        break;
+-      }
+       default: {
+         // Other types don't need canonicalization.
+         break;
+       }
+     }
+ #endif // JS_MORE_DETERMINISTIC
+ }
+ 
+diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
++++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+@@ -46,41 +46,16 @@ class CodeGeneratorX86Shared : public Co
+ 
+         AnyRegister dest() const { return dest_; }
+         Scalar::Type viewType() const { return viewType_; }
+         void accept(CodeGeneratorX86Shared* codegen) override {
+             codegen->visitOutOfLineLoadTypedArrayOutOfBounds(this);
+         }
+     };
+ 
+-    // Additional bounds check for vector Float to Int conversion, when the
+-    // undefined pattern is seen. Might imply a bailout.
+-    class OutOfLineSimdFloatToIntCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
+-    {
+-        Register temp_;
+-        FloatRegister input_;
+-        LInstruction* ins_;
+-        wasm::BytecodeOffset bytecodeOffset_;
+-
+-      public:
+-        OutOfLineSimdFloatToIntCheck(Register temp, FloatRegister input, LInstruction *ins,
+-                                     wasm::BytecodeOffset bytecodeOffset)
+-          : temp_(temp), input_(input), ins_(ins), bytecodeOffset_(bytecodeOffset)
+-        {}
+-
+-        Register temp() const { return temp_; }
+-        FloatRegister input() const { return input_; }
+-        LInstruction* ins() const { return ins_; }
+-        wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+-
+-        void accept(CodeGeneratorX86Shared* codegen) override {
+-            codegen->visitOutOfLineSimdFloatToIntCheck(this);
+-        }
+-    };
+-
+     NonAssertingLabel deoptLabel_;
+ 
+     Operand ToOperand(const LAllocation& a);
+     Operand ToOperand(const LAllocation* a);
+     Operand ToOperand(const LDefinition* def);
+ 
+ #ifdef JS_PUNBOX64
+     Operand ToOperandOrRegister64(const LInt64Allocation input);
+@@ -168,31 +143,28 @@ class CodeGeneratorX86Shared : public Co
+     {
+         MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+         masm.cmpPtr(reg, ImmWord(0));
+         emitBranch(cond, ifTrue, ifFalse);
+     }
+ 
+     void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
+ 
+-    template <class T, class Reg> void visitSimdGeneralShuffle(LSimdGeneralShuffleBase* lir, Reg temp);
+-
+     void generateInvalidateEpilogue();
+ 
+     void canonicalizeIfDeterministic(Scalar::Type type, const LAllocation* value);
+ 
+   public:
+     // Out of line visitors.
+     void visitOutOfLineBailout(OutOfLineBailout* ool);
+     void visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation* ool);
+     void visitMulNegativeZeroCheck(MulNegativeZeroCheck* ool);
+     void visitModOverflowCheck(ModOverflowCheck* ool);
+     void visitReturnZero(ReturnZero* ool);
+     void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+-    void visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck* ool);
+     void visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds* ool);
+     void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+ };
+ 
+ // An out-of-line bailout thunk.
+ class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorX86Shared>
+ {
+     LSnapshot* snapshot_;
+diff --git a/js/src/jit/x86-shared/LIR-x86-shared.h b/js/src/jit/x86-shared/LIR-x86-shared.h
+--- a/js/src/jit/x86-shared/LIR-x86-shared.h
++++ b/js/src/jit/x86-shared/LIR-x86-shared.h
+@@ -339,59 +339,16 @@ class LMulI : public LBinaryMath<0, 1>
+     MMul* mir() const {
+         return mir_->toMul();
+     }
+     const LAllocation* lhsCopy() {
+         return this->getOperand(2);
+     }
+ };
+ 
+-// Constructs an int32x4 SIMD value.
+-class LSimdValueInt32x4 : public LInstructionHelper<1, 4, 0>
+-{
+-  public:
+-    LIR_HEADER(SimdValueInt32x4)
+-    LSimdValueInt32x4(const LAllocation& x, const LAllocation& y,
+-                      const LAllocation& z, const LAllocation& w)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, x);
+-        setOperand(1, y);
+-        setOperand(2, z);
+-        setOperand(3, w);
+-    }
+-
+-    MSimdValueX4* mir() const {
+-        return mir_->toSimdValueX4();
+-    }
+-};
+-
+-// Constructs a float32x4 SIMD value, optimized for x86 family
+-class LSimdValueFloat32x4 : public LInstructionHelper<1, 4, 1>
+-{
+-  public:
+-    LIR_HEADER(SimdValueFloat32x4)
+-    LSimdValueFloat32x4(const LAllocation& x, const LAllocation& y,
+-                        const LAllocation& z, const LAllocation& w,
+-                        const LDefinition& copyY)
+-      : LInstructionHelper(classOpcode)
+-    {
+-        setOperand(0, x);
+-        setOperand(1, y);
+-        setOperand(2, z);
+-        setOperand(3, w);
+-
+-        setTemp(0, copyY);
+-    }
+-
+-    MSimdValueX4* mir() const {
+-        return mir_->toSimdValueX4();
+-    }
+-};
+-
+ class LInt64ToFloatingPoint : public LInstructionHelper<1, INT64_PIECES, 1>
+ {
+   public:
+     LIR_HEADER(Int64ToFloatingPoint);
+ 
+     LInt64ToFloatingPoint(const LInt64Allocation& in, const LDefinition& temp)
+       : LInstructionHelper(classOpcode)
+     {
+diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.cpp b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
++++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+@@ -132,41 +132,16 @@ LIRGeneratorX86Shared::lowerForFPU(LInst
+ }
+ 
+ template void LIRGeneratorX86Shared::lowerForFPU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+                                                  MDefinition* lhs, MDefinition* rhs);
+ template void LIRGeneratorX86Shared::lowerForFPU(LInstructionHelper<1, 2, 1>* ins, MDefinition* mir,
+                                                  MDefinition* lhs, MDefinition* rhs);
+ 
+ void
+-LIRGeneratorX86Shared::lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir, MDefinition* lhs, MDefinition* rhs)
+-{
+-    lowerForALU(ins, mir, lhs, rhs);
+-}
+-
+-void
+-LIRGeneratorX86Shared::lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir, MDefinition* lhs, MDefinition* rhs)
+-{
+-    // Swap the operands around to fit the instructions that x86 actually has.
+-    // We do this here, before register allocation, so that we don't need
+-    // temporaries and copying afterwards.
+-    switch (mir->operation()) {
+-      case MSimdBinaryComp::greaterThan:
+-      case MSimdBinaryComp::greaterThanOrEqual:
+-        mir->reverse();
+-        Swap(lhs, rhs);
+-        break;
+-      default:
+-        break;
+-    }
+-
+-    lowerForFPU(ins, mir, lhs, rhs);
+-}
+-
+-void
+ LIRGeneratorX86Shared::lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+                                                MDefinition* lhs, MDefinition* rhs)
+ {
+     baab->setOperand(0, useRegisterAtStart(lhs));
+     baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+     add(baab, mir);
+ }
+ 
+@@ -606,368 +581,16 @@ LIRGeneratorX86Shared::lowerAtomicTypedA
+         defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+     else if (reuseInput)
+         defineReuseInput(lir, ins, LAtomicTypedArrayElementBinop::valueOp);
+     else
+         define(lir, ins);
+ }
+ 
+ void
+-LIRGenerator::visitSimdInsertElement(MSimdInsertElement* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-
+-    LUse vec = useRegisterAtStart(ins->vector());
+-    LUse val = useRegister(ins->value());
+-    switch (ins->type()) {
+-      case MIRType::Int8x16:
+-      case MIRType::Bool8x16:
+-        // When SSE 4.1 is not available, we need to go via the stack.
+-        // This requires the value being inserted to be in %eax-%edx.
+-        // Pick %ebx since other instructions use %eax or %ecx hard-wired.
+-#if defined(JS_CODEGEN_X86)
+-        if (!AssemblerX86Shared::HasSSE41())
+-            val = useFixed(ins->value(), ebx);
+-#endif
+-        defineReuseInput(new(alloc()) LSimdInsertElementI(vec, val), ins, 0);
+-        break;
+-      case MIRType::Int16x8:
+-      case MIRType::Int32x4:
+-      case MIRType::Bool16x8:
+-      case MIRType::Bool32x4:
+-        defineReuseInput(new(alloc()) LSimdInsertElementI(vec, val), ins, 0);
+-        break;
+-      case MIRType::Float32x4:
+-        defineReuseInput(new(alloc()) LSimdInsertElementF(vec, val), ins, 0);
+-        break;
+-      default:
+-        MOZ_CRASH("Unknown SIMD kind when generating constant");
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdExtractElement(MSimdExtractElement* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->input()->type()));
+-    MOZ_ASSERT(!IsSimdType(ins->type()));
+-
+-    switch (ins->input()->type()) {
+-      case MIRType::Int8x16:
+-      case MIRType::Int16x8:
+-      case MIRType::Int32x4: {
+-        MOZ_ASSERT(ins->signedness() != SimdSign::NotApplicable);
+-        LUse use = useRegisterAtStart(ins->input());
+-        if (ins->type() == MIRType::Double) {
+-            // Extract an Uint32 lane into a double.
+-            MOZ_ASSERT(ins->signedness() == SimdSign::Unsigned);
+-            define(new (alloc()) LSimdExtractElementU2D(use, temp()), ins);
+-        } else {
+-            auto* lir = new (alloc()) LSimdExtractElementI(use);
+-#if defined(JS_CODEGEN_X86)
+-            // On x86 (32-bit), we may need to use movsbl or movzbl instructions
+-            // to sign or zero extend the extracted lane to 32 bits. The 8-bit
+-            // versions of these instructions require a source register that is
+-            // %al, %bl, %cl, or %dl.
+-            // Fix it to %ebx since we can't express that constraint better.
+-            if (ins->input()->type() == MIRType::Int8x16) {
+-                defineFixed(lir, ins, LAllocation(AnyRegister(ebx)));
+-                return;
+-            }
+-#endif
+-            define(lir, ins);
+-        }
+-        break;
+-      }
+-      case MIRType::Float32x4: {
+-        MOZ_ASSERT(ins->signedness() == SimdSign::NotApplicable);
+-        LUse use = useRegisterAtStart(ins->input());
+-        define(new(alloc()) LSimdExtractElementF(use), ins);
+-        break;
+-      }
+-      case MIRType::Bool8x16:
+-      case MIRType::Bool16x8:
+-      case MIRType::Bool32x4: {
+-        MOZ_ASSERT(ins->signedness() == SimdSign::NotApplicable);
+-        LUse use = useRegisterAtStart(ins->input());
+-        define(new(alloc()) LSimdExtractElementB(use), ins);
+-        break;
+-      }
+-      default:
+-        MOZ_CRASH("Unknown SIMD kind when extracting element");
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdBinaryArith(MSimdBinaryArith* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-
+-    MDefinition* lhs = ins->lhs();
+-    MDefinition* rhs = ins->rhs();
+-
+-    if (ins->isCommutative())
+-        ReorderCommutative(&lhs, &rhs, ins);
+-
+-    switch (ins->type()) {
+-      case MIRType::Int8x16: {
+-          LSimdBinaryArithIx16* lir = new (alloc()) LSimdBinaryArithIx16();
+-          lir->setTemp(0, LDefinition::BogusTemp());
+-          lowerForFPU(lir, ins, lhs, rhs);
+-          return;
+-      }
+-
+-      case MIRType::Int16x8: {
+-          LSimdBinaryArithIx8* lir = new (alloc()) LSimdBinaryArithIx8();
+-          lir->setTemp(0, LDefinition::BogusTemp());
+-          lowerForFPU(lir, ins, lhs, rhs);
+-          return;
+-      }
+-
+-      case MIRType::Int32x4: {
+-          LSimdBinaryArithIx4* lir = new (alloc()) LSimdBinaryArithIx4();
+-          bool needsTemp =
+-              ins->operation() == MSimdBinaryArith::Op_mul && !MacroAssembler::HasSSE41();
+-          lir->setTemp(0, needsTemp ? temp(LDefinition::SIMD128INT) : LDefinition::BogusTemp());
+-          lowerForFPU(lir, ins, lhs, rhs);
+-          return;
+-      }
+-
+-      case MIRType::Float32x4: {
+-          LSimdBinaryArithFx4* lir = new (alloc()) LSimdBinaryArithFx4();
+-
+-          bool needsTemp = ins->operation() == MSimdBinaryArith::Op_max ||
+-              ins->operation() == MSimdBinaryArith::Op_minNum ||
+-              ins->operation() == MSimdBinaryArith::Op_maxNum;
+-          lir->setTemp(0,
+-                       needsTemp ? temp(LDefinition::SIMD128FLOAT) : LDefinition::BogusTemp());
+-          lowerForFPU(lir, ins, lhs, rhs);
+-          return;
+-      }
+-
+-      default:
+-        MOZ_CRASH("unknown simd type on binary arith operation");
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdBinarySaturating(MSimdBinarySaturating* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-
+-    MDefinition* lhs = ins->lhs();
+-    MDefinition* rhs = ins->rhs();
+-
+-    if (ins->isCommutative())
+-        ReorderCommutative(&lhs, &rhs, ins);
+-
+-    LSimdBinarySaturating* lir = new (alloc()) LSimdBinarySaturating();
+-    lowerForFPU(lir, ins, lhs, rhs);
+-}
+-
+-void
+-LIRGenerator::visitSimdSelect(MSimdSelect* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-
+-    LSimdSelect* lins = new(alloc()) LSimdSelect;
+-    MDefinition* r0 = ins->getOperand(0);
+-    MDefinition* r1 = ins->getOperand(1);
+-    MDefinition* r2 = ins->getOperand(2);
+-
+-    lins->setOperand(0, useRegister(r0));
+-    lins->setOperand(1, useRegister(r1));
+-    lins->setOperand(2, useRegister(r2));
+-    lins->setTemp(0, temp(LDefinition::SIMD128FLOAT));
+-
+-    define(lins, ins);
+-}
+-
+-void
+-LIRGenerator::visitSimdSplat(MSimdSplat* ins)
+-{
+-    LAllocation x = useRegisterAtStart(ins->getOperand(0));
+-
+-    switch (ins->type()) {
+-      case MIRType::Int8x16:
+-        define(new (alloc()) LSimdSplatX16(x), ins);
+-        break;
+-      case MIRType::Int16x8:
+-        define(new (alloc()) LSimdSplatX8(x), ins);
+-        break;
+-      case MIRType::Int32x4:
+-      case MIRType::Float32x4:
+-      case MIRType::Bool8x16:
+-      case MIRType::Bool16x8:
+-      case MIRType::Bool32x4:
+-        // Use the SplatX4 instruction for all boolean splats. Since the input
+-        // value is a 32-bit int that is either 0 or -1, the X4 splat gives
+-        // the right result for all boolean geometries.
+-        // For floats, (Non-AVX) codegen actually wants the input and the output
+-        // to be in the same register, but we can't currently use
+-        // defineReuseInput because they have different types (scalar vs
+-        // vector), so a spill slot for one may not be suitable for the other.
+-        define(new (alloc()) LSimdSplatX4(x), ins);
+-        break;
+-      default:
+-        MOZ_CRASH("Unknown SIMD kind");
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdValueX4(MSimdValueX4* ins)
+-{
+-    switch (ins->type()) {
+-      case MIRType::Float32x4: {
+-        // Ideally, x would be used at start and reused for the output, however
+-        // register allocation currently doesn't permit us to tie together two
+-        // virtual registers with different types.
+-        LAllocation x = useRegister(ins->getOperand(0));
+-        LAllocation y = useRegister(ins->getOperand(1));
+-        LAllocation z = useRegister(ins->getOperand(2));
+-        LAllocation w = useRegister(ins->getOperand(3));
+-        LDefinition t = temp(LDefinition::SIMD128FLOAT);
+-        define(new (alloc()) LSimdValueFloat32x4(x, y, z, w, t), ins);
+-        break;
+-      }
+-      case MIRType::Bool32x4:
+-      case MIRType::Int32x4: {
+-        // No defineReuseInput => useAtStart for everyone.
+-        LAllocation x = useRegisterAtStart(ins->getOperand(0));
+-        LAllocation y = useRegisterAtStart(ins->getOperand(1));
+-        LAllocation z = useRegisterAtStart(ins->getOperand(2));
+-        LAllocation w = useRegisterAtStart(ins->getOperand(3));
+-        define(new(alloc()) LSimdValueInt32x4(x, y, z, w), ins);
+-        break;
+-      }
+-      default:
+-        MOZ_CRASH("Unknown SIMD kind");
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdSwizzle(MSimdSwizzle* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->input()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-
+-    if (IsIntegerSimdType(ins->input()->type())) {
+-        LUse use = useRegisterAtStart(ins->input());
+-        LSimdSwizzleI* lir = new (alloc()) LSimdSwizzleI(use);
+-        define(lir, ins);
+-        // We need a GPR temp register for pre-SSSE3 codegen (no vpshufb).
+-        if (Assembler::HasSSSE3()) {
+-            lir->setTemp(0, LDefinition::BogusTemp());
+-        } else {
+-            // The temp must be a GPR usable with 8-bit loads and stores.
+-#if defined(JS_CODEGEN_X86)
+-            lir->setTemp(0, tempFixed(ebx));
+-#else
+-            lir->setTemp(0, temp());
+-#endif
+-        }
+-    } else if (ins->input()->type() == MIRType::Float32x4) {
+-        LUse use = useRegisterAtStart(ins->input());
+-        LSimdSwizzleF* lir = new (alloc()) LSimdSwizzleF(use);
+-        define(lir, ins);
+-        lir->setTemp(0, LDefinition::BogusTemp());
+-    } else {
+-        MOZ_CRASH("Unknown SIMD kind when getting lane");
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdShuffle(MSimdShuffle* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-    if (ins->type() == MIRType::Int32x4 || ins->type() == MIRType::Float32x4) {
+-        bool zFromLHS = ins->lane(2) < 4;
+-        bool wFromLHS = ins->lane(3) < 4;
+-        uint32_t lanesFromLHS = (ins->lane(0) < 4) + (ins->lane(1) < 4) + zFromLHS + wFromLHS;
+-
+-        LSimdShuffleX4* lir = new (alloc()) LSimdShuffleX4();
+-        lowerForFPU(lir, ins, ins->lhs(), ins->rhs());
+-
+-        // See codegen for requirements details.
+-        LDefinition temp =
+-          (lanesFromLHS == 3) ? tempCopy(ins->rhs(), 1) : LDefinition::BogusTemp();
+-        lir->setTemp(0, temp);
+-    } else {
+-        MOZ_ASSERT(ins->type() == MIRType::Int8x16 || ins->type() == MIRType::Int16x8);
+-        LSimdShuffle* lir = new (alloc()) LSimdShuffle();
+-        lir->setOperand(0, useRegister(ins->lhs()));
+-        lir->setOperand(1, useRegister(ins->rhs()));
+-        define(lir, ins);
+-        // We need a GPR temp register for pre-SSSE3 codegen, and an SSE temp
+-        // when using pshufb.
+-        if (Assembler::HasSSSE3()) {
+-            lir->setTemp(0, temp(LDefinition::SIMD128INT));
+-        } else {
+-            // The temp must be a GPR usable with 8-bit loads and stores.
+-#if defined(JS_CODEGEN_X86)
+-            lir->setTemp(0, tempFixed(ebx));
+-#else
+-            lir->setTemp(0, temp());
+-#endif
+-        }
+-    }
+-}
+-
+-void
+-LIRGenerator::visitSimdGeneralShuffle(MSimdGeneralShuffle* ins)
+-{
+-    MOZ_ASSERT(IsSimdType(ins->type()));
+-
+-    size_t numOperands = ins->numVectors() + ins->numLanes();
+-
+-    LSimdGeneralShuffleBase* lir;
+-    if (IsIntegerSimdType(ins->type())) {
+-#if defined(JS_CODEGEN_X86)
+-        // The temp register must be usable with 8-bit load and store
+-        // instructions, so one of %eax-%edx.
+-        LDefinition t;
+-        if (ins->type() == MIRType::Int8x16)
+-            t = tempFixed(ebx);
+-        else
+-            t = temp();
+-#else
+-        LDefinition t = temp();
+-#endif
+-        lir = allocateVariadic<LSimdGeneralShuffleI>(numOperands, t);
+-    } else if (ins->type() == MIRType::Float32x4) {
+-        lir = allocateVariadic<LSimdGeneralShuffleF>(numOperands, temp());
+-    } else {
+-        MOZ_CRASH("Unknown SIMD kind when doing a shuffle");
+-    }
+-
+-    if (!lir)
+-        return;
+-
+-    for (unsigned i = 0; i < ins->numVectors(); i++) {
+-        MOZ_ASSERT(IsSimdType(ins->vector(i)->type()));
+-        lir->setOperand(i, useRegister(ins->vector(i)));
+-    }
+-
+-    for (unsigned i = 0; i < ins->numLanes(); i++) {
+-        MOZ_ASSERT(ins->lane(i)->type() == MIRType::Int32);
+-        // Note that there can be up to 16 lane arguments, so we can't assume
+-        // that they all get an allocated register.
+-        lir->setOperand(i + ins->numVectors(), use(ins->lane(i)));
+-    }
+-
+-    assignSnapshot(lir, Bailout_BoundsCheck);
+-    define(lir, ins);
+-}
+-
+-void
+ LIRGenerator::visitCopySign(MCopySign* ins)
+ {
+     MDefinition* lhs = ins->lhs();
+     MDefinition* rhs = ins->rhs();
+ 
+     MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+     MOZ_ASSERT(lhs->type() == rhs->type());
+     MOZ_ASSERT(lhs->type() == ins->type());
+diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.h b/js/src/jit/x86-shared/Lowering-x86-shared.h
+--- a/js/src/jit/x86-shared/Lowering-x86-shared.h
++++ b/js/src/jit/x86-shared/Lowering-x86-shared.h
+@@ -31,20 +31,16 @@ class LIRGeneratorX86Shared : public LIR
+ 
+     template<size_t Temps>
+     void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+                             MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ 
+     template<size_t Temps>
+     void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir, MDefinition* lhs,
+                      MDefinition* rhs);
+-    void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+-                         MDefinition* lhs, MDefinition* rhs);
+-    void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+-                         MDefinition* lhs, MDefinition* rhs);
+     void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+                                  MDefinition* lhs, MDefinition* rhs);
+     void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+     void lowerDivI(MDiv* div);
+     void lowerModI(MMod* mod);
+     void lowerUDiv(MDiv* div);
+     void lowerUMod(MMod* mod);
+     void lowerUrshD(MUrsh* mir);
+diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
++++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+@@ -1098,39 +1098,16 @@ void
+ MacroAssembler::spectreZeroRegister(Condition cond, Register scratch, Register dest)
+ {
+     // Note: use movl instead of move32/xorl to ensure flags are not clobbered.
+     movl(Imm32(0), scratch);
+     spectreMovePtr(cond, scratch, dest);
+ }
+ 
+ // ========================================================================
+-// Canonicalization primitives.
+-void
+-MacroAssembler::canonicalizeFloat32x4(FloatRegister reg, FloatRegister scratch)
+-{
+-    ScratchSimd128Scope scratch2(*this);
+-
+-    MOZ_ASSERT(scratch.asSimd128() != scratch2.asSimd128());
+-    MOZ_ASSERT(reg.asSimd128() != scratch2.asSimd128());
+-    MOZ_ASSERT(reg.asSimd128() != scratch.asSimd128());
+-
+-    FloatRegister mask = scratch;
+-    vcmpordps(Operand(reg), reg, mask);
+-
+-    FloatRegister ifFalse = scratch2;
+-    float nanf = float(JS::GenericNaN());
+-    loadConstantSimd128Float(SimdConstant::SplatX4(nanf), ifFalse);
+-
+-    bitwiseAndFloat32x4(reg, Operand(mask), reg);
+-    bitwiseAndNotFloat32x4(mask, Operand(ifFalse), mask);
+-    bitwiseOrFloat32x4(reg, Operand(mask), reg);
+-}
+-
+-// ========================================================================
+ // Memory access primitives.
+ void
+ MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
+ {
+     vmovsd(src, dest);
+ }
+ void
+ MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& dest)
+diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
++++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+@@ -601,47 +601,16 @@ class MacroAssemblerX86Shared : public A
+             selectSimd128(mask, onTrue, onFalse, temp, output);
+     }
+ 
+     template <class T, class Reg> inline void loadScalar(const Operand& src, Reg dest);
+     template <class T, class Reg> inline void storeScalar(Reg src, const Address& dest);
+     template <class T> inline void loadAlignedVector(const Address& src, FloatRegister dest);
+     template <class T> inline void storeAlignedVector(FloatRegister src, const Address& dest);
+ 
+-    void loadInt32x1(const Address& src, FloatRegister dest) {
+-        vmovd(Operand(src), dest);
+-    }
+-    void loadInt32x1(const BaseIndex& src, FloatRegister dest) {
+-        vmovd(Operand(src), dest);
+-    }
+-    void loadInt32x2(const Address& src, FloatRegister dest) {
+-        vmovq(Operand(src), dest);
+-    }
+-    void loadInt32x2(const BaseIndex& src, FloatRegister dest) {
+-        vmovq(Operand(src), dest);
+-    }
+-    void loadInt32x3(const BaseIndex& src, FloatRegister dest) {
+-        BaseIndex srcZ(src);
+-        srcZ.offset += 2 * sizeof(int32_t);
+-
+-        ScratchSimd128Scope scratch(asMasm());
+-        vmovq(Operand(src), dest);
+-        vmovd(Operand(srcZ), scratch);
+-        vmovlhps(scratch, dest, dest);
+-    }
+-    void loadInt32x3(const Address& src, FloatRegister dest) {
+-        Address srcZ(src);
+-        srcZ.offset += 2 * sizeof(int32_t);
+-
+-        ScratchSimd128Scope scratch(asMasm());
+-        vmovq(Operand(src), dest);
+-        vmovd(Operand(srcZ), scratch);
+-        vmovlhps(scratch, dest, dest);
+-    }
+-
+     void loadAlignedSimd128Int(const Address& src, FloatRegister dest) {
+         vmovdqa(Operand(src), dest);
+     }
+     void loadAlignedSimd128Int(const Operand& src, FloatRegister dest) {
+         vmovdqa(src, dest);
+     }
+     void storeAlignedSimd128Int(FloatRegister src, const Address& dest) {
+         vmovdqa(src, Operand(dest));
+@@ -666,45 +635,16 @@ class MacroAssemblerX86Shared : public A
+     }
+     void loadUnalignedSimd128Int(const BaseIndex& src, FloatRegister dest) {
+         vmovdqu(Operand(src), dest);
+     }
+     void loadUnalignedSimd128Int(const Operand& src, FloatRegister dest) {
+         vmovdqu(src, dest);
+     }
+ 
+-    void storeInt32x1(FloatRegister src, const Address& dest) {
+-        vmovd(src, Operand(dest));
+-    }
+-    void storeInt32x1(FloatRegister src, const BaseIndex& dest) {
+-        vmovd(src, Operand(dest));
+-    }
+-    void storeInt32x2(FloatRegister src, const Address& dest) {
+-        vmovq(src, Operand(dest));
+-    }
+-    void storeInt32x2(FloatRegister src, const BaseIndex& dest) {
+-        vmovq(src, Operand(dest));
+-    }
+-    void storeInt32x3(FloatRegister src, const Address& dest) {
+-        Address destZ(dest);
+-        destZ.offset += 2 * sizeof(int32_t);
+-        vmovq(src, Operand(dest));
+-        ScratchSimd128Scope scratch(asMasm());
+-        vmovhlps(src, scratch, scratch);
+-        vmovd(scratch, Operand(destZ));
+-    }
+-    void storeInt32x3(FloatRegister src, const BaseIndex& dest) {
+-        BaseIndex destZ(dest);
+-        destZ.offset += 2 * sizeof(int32_t);
+-        vmovq(src, Operand(dest));
+-        ScratchSimd128Scope scratch(asMasm());
+-        vmovhlps(src, scratch, scratch);
+-        vmovd(scratch, Operand(destZ));
+-    }
+-
+     void storeUnalignedSimd128Int(FloatRegister src, const Address& dest) {
+         vmovdqu(src, Operand(dest));
+     }
+     void storeUnalignedSimd128Int(FloatRegister src, const BaseIndex& dest) {
+         vmovdqu(src, Operand(dest));
+     }
+     void storeUnalignedSimd128Int(FloatRegister src, const Operand& dest) {
+         vmovdqu(src, dest);
+@@ -777,33 +717,16 @@ class MacroAssemblerX86Shared : public A
+         count.value &= 31;
+         vpsrad(count, dest, dest);
+     }
+     void packedUnsignedRightShiftByScalarInt32x4(Imm32 count, FloatRegister dest) {
+         count.value &= 31;
+         vpsrld(count, dest, dest);
+     }
+ 
+-    void loadFloat32x3(const Address& src, FloatRegister dest) {
+-        Address srcZ(src);
+-        srcZ.offset += 2 * sizeof(float);
+-        vmovsd(src, dest);
+-        ScratchSimd128Scope scratch(asMasm());
+-        vmovss(srcZ, scratch);
+-        vmovlhps(scratch, dest, dest);
+-    }
+-    void loadFloat32x3(const BaseIndex& src, FloatRegister dest) {
+-        BaseIndex srcZ(src);
+-        srcZ.offset += 2 * sizeof(float);
+-        vmovsd(src, dest);
+-        ScratchSimd128Scope scratch(asMasm());
+-        vmovss(srcZ, scratch);
+-        vmovlhps(scratch, dest, dest);
+-    }
+-
+     void loadAlignedSimd128Float(const Address& src, FloatRegister dest) {
+         vmovaps(Operand(src), dest);
+     }
+     void loadAlignedSimd128Float(const Operand& src, FloatRegister dest) {
+         vmovaps(src, dest);
+     }
+ 
+     void storeAlignedSimd128Float(FloatRegister src, const Address& dest) {
+diff --git a/js/src/jit/x86/Assembler-x86.h b/js/src/jit/x86/Assembler-x86.h
+--- a/js/src/jit/x86/Assembler-x86.h
++++ b/js/src/jit/x86/Assembler-x86.h
+@@ -136,17 +136,17 @@ static constexpr uint32_t JitStackAlignm
+ static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+ static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+   "Stack alignment should be a non-zero multiple of sizeof(Value)");
+ 
+ // This boolean indicates whether we support SIMD instructions flavoured for
+ // this architecture or not. Rather than a method in the LIRGenerator, it is
+ // here such that it is accessible from the entire codebase. Once full support
+ // for SIMD is reached on all tier-1 platforms, this constant can be deleted.
+-static constexpr bool SupportsSimd = true;
++static constexpr bool SupportsSimd = false;
+ static constexpr uint32_t SimdMemoryAlignment = 16;
+ 
+ static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+   "Code alignment should be larger than any of the alignments which are used for "
+   "the constant sections of the code buffer.  Thus it should be larger than the "
+   "alignment for SIMD constants.");
+ 
+ static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
+diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp
+--- a/js/src/jit/x86/CodeGenerator-x86.cpp
++++ b/js/src/jit/x86/CodeGenerator-x86.cpp
+@@ -345,17 +345,16 @@ CodeGenerator::visitAsmJSLoadHeap(LAsmJS
+     MOZ_ASSERT(mir->access().offset() == 0);
+ 
+     const LAllocation* ptr = ins->ptr();
+     const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+     const LAllocation* memoryBase = ins->memoryBase();
+     AnyRegister out = ToAnyRegister(ins->output());
+ 
+     Scalar::Type accessType = mir->accessType();
+-    MOZ_ASSERT(!Scalar::isSimdType(accessType));
+ 
+     OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
+     if (mir->needsBoundsCheck()) {
+         ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
+         addOutOfLineCode(ool, mir);
+ 
+         masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ToRegister(boundsCheckLimit),
+                              ool->entry());
+@@ -378,17 +377,16 @@ CodeGenerator::visitAsmJSStoreHeap(LAsmJ
+     MOZ_ASSERT(mir->offset() == 0);
+ 
+     const LAllocation* ptr = ins->ptr();
+     const LAllocation* value = ins->value();
+     const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+     const LAllocation* memoryBase = ins->memoryBase();
+ 
+     Scalar::Type accessType = mir->accessType();
+-    MOZ_ASSERT(!Scalar::isSimdType(accessType));
+     canonicalizeIfDeterministic(accessType, value);
+ 
+     Operand dstAddr = ptr->isBogus()
+                       ? Operand(ToRegister(memoryBase), 0)
+                       : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne);
+ 
+     Label rejoin;
+     if (mir->needsBoundsCheck()) {
+diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
+--- a/js/src/jit/x86/Lowering-x86.cpp
++++ b/js/src/jit/x86/Lowering-x86.cpp
+@@ -366,20 +366,16 @@ LIRGenerator::visitWasmStore(MWasmStore*
+     switch (ins->access().type()) {
+       case Scalar::Int8: case Scalar::Uint8:
+         // See comment for LIRGeneratorX86::useByteOpRegister.
+         valueAlloc = useFixed(ins->value(), eax);
+         break;
+       case Scalar::Int16: case Scalar::Uint16:
+       case Scalar::Int32: case Scalar::Uint32:
+       case Scalar::Float32: case Scalar::Float64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+         // For now, don't allow constant values. The immediate operand affects
+         // instruction layout which affects patching.
+         valueAlloc = useRegisterAtStart(ins->value());
+         break;
+       case Scalar::Int64: {
+         LInt64Allocation valueAlloc = useInt64RegisterAtStart(ins->value());
+         auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc,
+                                                useRegisterAtStart(memoryBase));
+@@ -446,20 +442,16 @@ LIRGenerator::visitAsmJSStoreHeap(MAsmJS
+       case Scalar::Int8: case Scalar::Uint8:
+         // See comment for LIRGeneratorX86::useByteOpRegister.
+         lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useFixed(ins->value(), eax),
+                                            limitAlloc, useRegisterAtStart(memoryBase));
+         break;
+       case Scalar::Int16: case Scalar::Uint16:
+       case Scalar::Int32: case Scalar::Uint32:
+       case Scalar::Float32: case Scalar::Float64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+         // For now, don't allow constant values. The immediate operand affects
+         // instruction layout which affects patching.
+         lir = new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+                                             limitAlloc, useRegisterAtStart(memoryBase));
+         break;
+       case Scalar::Int64:
+         MOZ_CRASH("NYI");
+       case Scalar::Uint8Clamped:
+diff --git a/js/src/jit/x86/MacroAssembler-x86.cpp b/js/src/jit/x86/MacroAssembler-x86.cpp
+--- a/js/src/jit/x86/MacroAssembler-x86.cpp
++++ b/js/src/jit/x86/MacroAssembler-x86.cpp
+@@ -640,59 +640,30 @@ MacroAssembler::wasmLoad(const wasm::Mem
+         movl(srcAddr, out.gpr());
+         break;
+       case Scalar::Float32:
+         vmovss(srcAddr, out.fpu());
+         break;
+       case Scalar::Float64:
+         vmovsd(srcAddr, out.fpu());
+         break;
+-      case Scalar::Float32x4:
+-        switch (access.numSimdElems()) {
+-          // In memory-to-register mode, movss zeroes out the high lanes.
+-          case 1: vmovss(srcAddr, out.fpu()); break;
+-          // See comment above, which also applies to movsd.
+-          case 2: vmovsd(srcAddr, out.fpu()); break;
+-          case 4: vmovups(srcAddr, out.fpu()); break;
+-          default: MOZ_CRASH("unexpected size for partial load");
+-        }
+-        break;
+-      case Scalar::Int32x4:
+-        switch (access.numSimdElems()) {
+-          // In memory-to-register mode, movd zeroes out the high lanes.
+-          case 1: vmovd(srcAddr, out.fpu()); break;
+-          // See comment above, which also applies to movq.
+-          case 2: vmovq(srcAddr, out.fpu()); break;
+-          case 4: vmovdqu(srcAddr, out.fpu()); break;
+-          default: MOZ_CRASH("unexpected size for partial load");
+-        }
+-        break;
+-      case Scalar::Int8x16:
+-        MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
+-        vmovdqu(srcAddr, out.fpu());
+-        break;
+-      case Scalar::Int16x8:
+-        MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
+-        vmovdqu(srcAddr, out.fpu());
+-        break;
+       case Scalar::Int64:
+       case Scalar::Uint8Clamped:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("unexpected type");
+     }
+ 
+     memoryBarrierAfter(access.sync());
+ }
+ 
+ void
+ MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
+ {
+     // Atomic i64 load must use lock_cmpxchg8b.
+     MOZ_ASSERT_IF(access.isAtomic(), access.byteSize() <= 4);
+-    MOZ_ASSERT(!access.isSimd());
+     MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP || srcAddr.kind() == Operand::MEM_SCALE);
+ 
+     memoryBarrierBefore(access.sync());
+ 
+     append(access, size());
+     switch (access.type()) {
+       case Scalar::Int8:
+         MOZ_ASSERT(out == Register64(edx, eax));
+@@ -739,20 +710,16 @@ MacroAssembler::wasmLoadI64(const wasm::
+ 
+         append(access, size());
+         movl(HighWord(srcAddr), out.high);
+ 
+         break;
+       }
+       case Scalar::Float32:
+       case Scalar::Float64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+         MOZ_CRASH("non-int64 loads should use load()");
+       case Scalar::Uint8Clamped:
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("unexpected array type");
+     }
+ 
+     memoryBarrierAfter(access.sync());
+ }
+@@ -780,59 +747,30 @@ MacroAssembler::wasmStore(const wasm::Me
+         movl(value.gpr(), dstAddr);
+         break;
+       case Scalar::Float32:
+         vmovss(value.fpu(), dstAddr);
+         break;
+       case Scalar::Float64:
+         vmovsd(value.fpu(), dstAddr);
+         break;
+-      case Scalar::Float32x4:
+-        switch (access.numSimdElems()) {
+-          // In memory-to-register mode, movss zeroes out the high lanes.
+-          case 1: vmovss(value.fpu(), dstAddr); break;
+-          // See comment above, which also applies to movsd.
+-          case 2: vmovsd(value.fpu(), dstAddr); break;
+-          case 4: vmovups(value.fpu(), dstAddr); break;
+-          default: MOZ_CRASH("unexpected size for partial load");
+-        }
+-        break;
+-      case Scalar::Int32x4:
+-        switch (access.numSimdElems()) {
+-          // In memory-to-register mode, movd zeroes out the high lanes.
+-          case 1: vmovd(value.fpu(), dstAddr); break;
+-          // See comment above, which also applies to movsd.
+-          case 2: vmovq(value.fpu(), dstAddr); break;
+-          case 4: vmovdqu(value.fpu(), dstAddr); break;
+-          default: MOZ_CRASH("unexpected size for partial load");
+-        }
+-        break;
+-      case Scalar::Int8x16:
+-        MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
+-        vmovdqu(value.fpu(), dstAddr);
+-        break;
+-      case Scalar::Int16x8:
+-        MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
+-        vmovdqu(value.fpu(), dstAddr);
+-        break;
+       case Scalar::Int64:
+         MOZ_CRASH("Should be handled in storeI64.");
+       case Scalar::MaxTypedArrayViewType:
+         MOZ_CRASH("unexpected type");
+     }
+ 
+     memoryBarrierAfter(access.sync());
+ }
+ 
+ void
+ MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr)
+ {
+     // Atomic i64 store must use lock_cmpxchg8b.
+     MOZ_ASSERT(!access.isAtomic());
+-    MOZ_ASSERT(!access.isSimd());
+     MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP || dstAddr.kind() == Operand::MEM_SCALE);
+ 
+     append(access, size());
+     movl(value.low, LowWord(dstAddr));
+ 
+     append(access, size());
+     movl(value.high, HighWord(dstAddr));
+ }
+diff --git a/js/src/js.msg b/js/src/js.msg
+--- a/js/src/js.msg
++++ b/js/src/js.msg
+@@ -318,17 +318,16 @@ MSG_DEF(JSMSG_SELFHOSTED_METHOD_CALL,  0
+ MSG_DEF(JSMSG_SELFHOSTED_UNBOUND_NAME, 0, JSEXN_TYPEERR, "self-hosted code may not contain unbound name lookups")
+ MSG_DEF(JSMSG_SEMI_AFTER_FOR_COND,     0, JSEXN_SYNTAXERR, "missing ; after for-loop condition")
+ MSG_DEF(JSMSG_SEMI_AFTER_FOR_INIT,     0, JSEXN_SYNTAXERR, "missing ; after for-loop initializer")
+ MSG_DEF(JSMSG_SOURCE_TOO_LONG,         0, JSEXN_RANGEERR, "source is too long")
+ MSG_DEF(JSMSG_STMT_AFTER_RETURN,       0, JSEXN_WARN, "unreachable code after return statement")
+ MSG_DEF(JSMSG_STRICT_CODE_WITH,        0, JSEXN_SYNTAXERR, "strict mode code may not contain 'with' statements")
+ MSG_DEF(JSMSG_STRICT_NON_SIMPLE_PARAMS, 1, JSEXN_SYNTAXERR, "\"use strict\" not allowed in function with {0} parameter")
+ MSG_DEF(JSMSG_TEMPLSTR_UNTERM_EXPR,    0, JSEXN_SYNTAXERR, "missing } in template string")
+-MSG_DEF(JSMSG_SIMD_NOT_A_VECTOR,       2, JSEXN_TYPEERR, "expecting a SIMD {0} object as argument {1}")
+ MSG_DEF(JSMSG_TOO_MANY_CASES,          0, JSEXN_INTERNALERR, "too many switch cases")
+ MSG_DEF(JSMSG_TOO_MANY_CATCH_VARS,     0, JSEXN_SYNTAXERR, "too many catch variables")
+ MSG_DEF(JSMSG_TOO_MANY_CON_ARGS,       0, JSEXN_SYNTAXERR, "too many constructor arguments")
+ MSG_DEF(JSMSG_TOO_MANY_DEFAULTS,       0, JSEXN_SYNTAXERR, "more than one switch default")
+ MSG_DEF(JSMSG_TOO_MANY_FUN_ARGS,       0, JSEXN_SYNTAXERR, "too many function arguments")
+ MSG_DEF(JSMSG_TOO_MANY_LOCALS,         0, JSEXN_SYNTAXERR, "too many local variables")
+ MSG_DEF(JSMSG_TOO_MANY_YIELDS,         0, JSEXN_SYNTAXERR, "too many yield expressions")
+ MSG_DEF(JSMSG_TOUGH_BREAK,             0, JSEXN_SYNTAXERR, "unlabeled break must be inside loop or switch")
+@@ -533,25 +532,23 @@ MSG_DEF(JSMSG_TOO_MANY_PARENS,         0
+ MSG_DEF(JSMSG_UNICODE_OVERFLOW,        1, JSEXN_SYNTAXERR, "Unicode codepoint must not be greater than 0x10FFFF in {0}")
+ MSG_DEF(JSMSG_UNMATCHED_RIGHT_PAREN,   0, JSEXN_SYNTAXERR, "unmatched ) in regular expression")
+ MSG_DEF(JSMSG_UNTERM_CLASS,            0, JSEXN_SYNTAXERR, "unterminated character class")
+ 
+ // Self-hosting
+ MSG_DEF(JSMSG_DEFAULT_LOCALE_ERROR,    0, JSEXN_ERR, "internal error getting the default locale")
+ MSG_DEF(JSMSG_NO_SUCH_SELF_HOSTED_PROP,1, JSEXN_ERR, "No such property on self-hosted object: {0}")
+ 
+-// Typed object / SIMD
++// Typed object
+ MSG_DEF(JSMSG_INVALID_PROTOTYPE,       0, JSEXN_TYPEERR, "prototype field is not an object")
+ MSG_DEF(JSMSG_TYPEDOBJECT_BAD_ARGS,    0, JSEXN_TYPEERR, "invalid arguments")
+ MSG_DEF(JSMSG_TYPEDOBJECT_BINARYARRAY_BAD_INDEX, 0, JSEXN_RANGEERR, "invalid or out-of-range index")
+ MSG_DEF(JSMSG_TYPEDOBJECT_HANDLE_UNATTACHED, 0, JSEXN_TYPEERR, "handle unattached")
+ MSG_DEF(JSMSG_TYPEDOBJECT_STRUCTTYPE_BAD_ARGS, 0, JSEXN_RANGEERR, "invalid field descriptor")
+ MSG_DEF(JSMSG_TYPEDOBJECT_TOO_BIG,     0, JSEXN_ERR, "Type is too large to allocate")
+-MSG_DEF(JSMSG_SIMD_FAILED_CONVERSION,  0, JSEXN_RANGEERR, "SIMD conversion loses precision")
+-MSG_DEF(JSMSG_SIMD_TO_NUMBER,          0, JSEXN_TYPEERR, "can't convert SIMD value to number")
+ 
+ // Array
+ MSG_DEF(JSMSG_TOO_LONG_ARRAY,         0, JSEXN_TYPEERR, "Too long array")
+ 
+ // Typed array
+ MSG_DEF(JSMSG_BAD_INDEX,               0, JSEXN_RANGEERR, "invalid or out-of-range index")
+ MSG_DEF(JSMSG_NON_ARRAY_BUFFER_RETURNED, 0, JSEXN_TYPEERR, "expected ArrayBuffer, but species constructor returned non-ArrayBuffer")
+ MSG_DEF(JSMSG_SAME_ARRAY_BUFFER_RETURNED, 0, JSEXN_TYPEERR, "expected different ArrayBuffer, but species constructor returned same ArrayBuffer")
+diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp
+--- a/js/src/jsapi.cpp
++++ b/js/src/jsapi.cpp
+@@ -37,19 +37,16 @@
+ #include "builtin/Eval.h"
+ #include "builtin/JSON.h"
+ #include "builtin/MapObject.h"
+ #include "builtin/Promise.h"
+ #include "builtin/RegExp.h"
+ #include "builtin/Stream.h"
+ #include "builtin/String.h"
+ #include "builtin/Symbol.h"
+-#ifdef ENABLE_SIMD
+-# include "builtin/SIMDConstants.h"
+-#endif
+ #ifdef ENABLE_BINARYDATA
+ # include "builtin/TypedObject.h"
+ #endif
+ #include "frontend/BytecodeCompiler.h"
+ #include "frontend/FullParseHandler.h"  // for JS_BufferIsCompileableUnit
+ #include "frontend/Parser.h" // for JS_BufferIsCompileableUnit
+ #include "gc/FreeOp.h"
+ #include "gc/Marking.h"
+diff --git a/js/src/jsfriendapi.h b/js/src/jsfriendapi.h
+--- a/js/src/jsfriendapi.h
++++ b/js/src/jsfriendapi.h
+@@ -1615,20 +1615,16 @@ enum Type {
+     Uint8Clamped,
+ 
+     /**
+      * Types that don't have their own TypedArray equivalent, for now.
+      */
+     MaxTypedArrayViewType,
+ 
+     Int64,
+-    Float32x4,
+-    Int8x16,
+-    Int16x8,
+-    Int32x4
+ };
+ 
+ static inline size_t
+ byteSize(Type atype)
+ {
+     switch (atype) {
+       case Int8:
+       case Uint8:
+@@ -1639,101 +1635,41 @@ byteSize(Type atype)
+         return 2;
+       case Int32:
+       case Uint32:
+       case Float32:
+         return 4;
+       case Int64:
+       case Float64:
+         return 8;
+-      case Int8x16:
+-      case Int16x8:
+-      case Int32x4:
+-      case Float32x4:
+-        return 16;
+       default:
+         MOZ_CRASH("invalid scalar type");
+     }
+ }
+ 
+ static inline bool
+ isSignedIntType(Type atype) {
+     switch (atype) {
+       case Int8:
+       case Int16:
+       case Int32:
+       case Int64:
+-      case Int8x16:
+-      case Int16x8:
+-      case Int32x4:
+         return true;
+       case Uint8:
+       case Uint8Clamped:
+       case Uint16:
+       case Uint32:
+       case Float32:
+       case Float64:
+-      case Float32x4:
+         return false;
+       default:
+         MOZ_CRASH("invalid scalar type");
+     }
+ }
+ 
+-static inline bool
+-isSimdType(Type atype) {
+-    switch (atype) {
+-      case Int8:
+-      case Uint8:
+-      case Uint8Clamped:
+-      case Int16:
+-      case Uint16:
+-      case Int32:
+-      case Uint32:
+-      case Int64:
+-      case Float32:
+-      case Float64:
+-        return false;
+-      case Int8x16:
+-      case Int16x8:
+-      case Int32x4:
+-      case Float32x4:
+-        return true;
+-      case MaxTypedArrayViewType:
+-        break;
+-    }
+-    MOZ_CRASH("invalid scalar type");
+-}
+-
+-static inline size_t
+-scalarByteSize(Type atype) {
+-    switch (atype) {
+-      case Int8x16:
+-        return 1;
+-      case Int16x8:
+-        return 2;
+-      case Int32x4:
+-      case Float32x4:
+-        return 4;
+-      case Int8:
+-      case Uint8:
+-      case Uint8Clamped:
+-      case Int16:
+-      case Uint16:
+-      case Int32:
+-      case Uint32:
+-      case Int64:
+-      case Float32:
+-      case Float64:
+-      case MaxTypedArrayViewType:
+-        break;
+-    }
+-    MOZ_CRASH("invalid simd type");
+-}
+-
+ } /* namespace Scalar */
+ } /* namespace js */
+ 
+ /*
+  * Create a new typed array with nelements elements.
+  *
+  * These functions (except the WithBuffer variants) fill in the array with zeros.
+  */
+diff --git a/js/src/moz.build b/js/src/moz.build
+--- a/js/src/moz.build
++++ b/js/src/moz.build
+@@ -190,17 +190,16 @@ UNIFIED_SOURCES += [
+     'builtin/JSON.cpp',
+     'builtin/MapObject.cpp',
+     'builtin/ModuleObject.cpp',
+     'builtin/Object.cpp',
+     'builtin/Profilers.cpp',
+     'builtin/Promise.cpp',
+     'builtin/Reflect.cpp',
+     'builtin/ReflectParse.cpp',
+-    'builtin/SIMD.cpp',
+     'builtin/Stream.cpp',
+     'builtin/String.cpp',
+     'builtin/Symbol.cpp',
+     'builtin/TestingFunctions.cpp',
+     'builtin/TypedObject.cpp',
+     'builtin/WeakMapObject.cpp',
+     'builtin/WeakSetObject.cpp',
+     'devtools/sharkctl.cpp',
+@@ -263,17 +262,16 @@ UNIFIED_SOURCES += [
+     'jit/BytecodeAnalysis.cpp',
+     'jit/C1Spewer.cpp',
+     'jit/CacheIR.cpp',
+     'jit/CacheIRCompiler.cpp',
+     'jit/CacheIRSpewer.cpp',
+     'jit/CodeGenerator.cpp',
+     'jit/CompileWrappers.cpp',
+     'jit/Disassembler.cpp',
+-    'jit/EagerSimdUnbox.cpp',
+     'jit/EdgeCaseAnalysis.cpp',
+     'jit/EffectiveAddressAnalysis.cpp',
+     'jit/ExecutableAllocator.cpp',
+     'jit/FoldLinearArithConstants.cpp',
+     'jit/InstructionReordering.cpp',
+     'jit/Ion.cpp',
+     'jit/IonAnalysis.cpp',
+     'jit/IonBuilder.cpp',
+@@ -698,17 +696,16 @@ ReservedWordsGenerated.inputs += [
+ DIRS += [
+     'build',
+ ]
+ 
+ FINAL_LIBRARY = 'js'
+ 
+ if CONFIG['NIGHTLY_BUILD']:
+     DEFINES['ENABLE_BINARYDATA'] = True
+-    DEFINES['ENABLE_SIMD'] = True
+     DEFINES['ENABLE_WASM_BULKMEM_OPS'] = True
+     DEFINES['ENABLE_WASM_SATURATING_TRUNC_OPS'] = True
+     DEFINES['ENABLE_WASM_THREAD_OPS'] = True
+     DEFINES['ENABLE_WASM_GC'] = True
+ 
+ # Some huge-mapping optimization instead of bounds checks on supported
+ # platforms.
+ if CONFIG['JS_CODEGEN_X64'] or CONFIG['JS_CODEGEN_ARM64']:
+diff --git a/js/src/tests/jstests.list b/js/src/tests/jstests.list
+--- a/js/src/tests/jstests.list
++++ b/js/src/tests/jstests.list
+@@ -33,21 +33,17 @@ skip-if(!String.prototype.normalize) inc
+ skip-if(!this.hasOwnProperty("Intl")) script test262/built-ins/String/prototype/toLowerCase/Final_Sigma_U180E.js
+ skip-if(!this.hasOwnProperty("Intl")) script test262/built-ins/String/prototype/toLowerCase/special_casing_conditional.js
+ skip-if(!this.hasOwnProperty("Intl")) script test262/built-ins/String/prototype/toLocaleLowerCase/Final_Sigma_U180E.js
+ skip-if(!this.hasOwnProperty("Intl")) script test262/built-ins/String/prototype/toLocaleLowerCase/special_casing_conditional.js
+ 
+ # Skip intl402 tests when Intl isn't available.
+ skip-if(!this.hasOwnProperty("Intl")) include test262/intl402/jstests.list
+ 
+-# Skip built-ins/Simd tests when SIMD isn't available.
+-skip-if(!this.hasOwnProperty("SIMD")) include test262/built-ins/Simd/jstests.list
+-
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1415303
+-skip-if(!this.hasOwnProperty("SharedArrayBuffer")) script non262/SIMD/load-sab-buffer-compat.js
+ skip-if(!this.hasOwnProperty("Atomics")) include test262/built-ins/Atomics/jstests.list
+ skip-if(!this.hasOwnProperty("SharedArrayBuffer")) include test262/built-ins/SharedArrayBuffer/jstests.list
+ 
+ #####################################
+ # Test262 tests disabled on browser #
+ #####################################
+ 
+ # Defines a non-configurable property on the WindowProxy object.
+@@ -344,29 +340,16 @@ skip script test262/annexB/language/eval
+ skip script test262/annexB/language/eval-code/indirect/global-switch-dflt-eval-global-existing-global-init.js
+ skip script test262/annexB/language/eval-code/indirect/global-if-decl-no-else-eval-global-existing-global-init.js
+ skip script test262/annexB/language/eval-code/indirect/global-if-decl-else-stmt-eval-global-existing-global-init.js
+ skip script test262/annexB/language/eval-code/indirect/global-if-decl-else-decl-b-eval-global-existing-global-init.js
+ skip script test262/annexB/language/eval-code/indirect/global-if-decl-else-decl-a-eval-global-existing-global-init.js
+ skip script test262/annexB/language/eval-code/indirect/global-block-decl-eval-global-existing-global-init.js
+ skip script test262/annexB/language/eval-code/indirect/global-switch-case-eval-global-existing-global-init.js
+ 
+-# SIMD.
+-skip script test262/built-ins/Simd/check.js
+-skip script test262/built-ins/Simd/from.js
+-skip script test262/built-ins/Simd/operators.js
+-skip script test262/built-ins/Simd/replace_lane.js
+-skip script test262/built-ins/Simd/shuffle.js
+-skip script test262/built-ins/Simd/swizzle.js
+-
+-# https://bugzilla.mozilla.org/show_bug.cgi?id=1336991
+-skip script test262/built-ins/Simd/float_operators.js
+-skip script test262/built-ins/Simd/all_true.js
+-skip script test262/built-ins/Simd/any_true.js
+-
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1340307
+ skip script test262/language/module-code/instn-resolve-empty-export.js
+ skip script test262/language/module-code/instn-resolve-empty-import.js
+ skip script test262/language/module-code/instn-resolve-err-reference.js
+ skip script test262/language/module-code/instn-resolve-order-depth.js
+ skip script test262/language/module-code/instn-resolve-order-src.js
+ skip script test262/language/module-code/parse-err-reference.js
+ skip script test262/language/expressions/postfix-increment/target-cover-yieldexpr.js
+diff --git a/js/src/tests/non262/SIMD/ToSource.js b/js/src/tests/non262/SIMD/ToSource.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/ToSource.js
++++ /dev/null
+@@ -1,56 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-function test() {
+-    var Float32x4 = SIMD.Float32x4;
+-    var f = Float32x4(11, 22, 33, 44);
+-    assertEq(f.toSource(), "SIMD.Float32x4(11, 22, 33, 44)");
+-
+-    var Float64x2 = SIMD.Float64x2;
+-    var f = Float64x2(11, 22, 33, 44);
+-    assertEq(f.toSource(), "SIMD.Float64x2(11, 22)");
+-
+-    var Int8x16 = SIMD.Int8x16;
+-    var f = Int8x16(11, 22, 33, 44, -11, -22, -33, -44, 1, 2, 3, 4, -1, -2, -3, -4);
+-    assertEq(f.toSource(), "SIMD.Int8x16(11, 22, 33, 44, -11, -22, -33, -44, 1, 2, 3, 4, -1, -2, -3, -4)");
+-
+-    var Int16x8 = SIMD.Int16x8;
+-    var f = Int16x8(11, 22, 33, 44, -11, -22, -33, -44);
+-    assertEq(f.toSource(), "SIMD.Int16x8(11, 22, 33, 44, -11, -22, -33, -44)");
+-
+-    var Int32x4 = SIMD.Int32x4;
+-    var f = Int32x4(11, 22, 33, 44);
+-    assertEq(f.toSource(), "SIMD.Int32x4(11, 22, 33, 44)");
+-
+-    var Uint8x16 = SIMD.Uint8x16;
+-    var f = Uint8x16(11, 22, 33, 44, 245, 234, 223, 212, 1, 2, 3, 4, 255, 254, 0, 250);
+-    assertEq(f.toSource(), "SIMD.Uint8x16(11, 22, 33, 44, 245, 234, 223, 212, 1, 2, 3, 4, 255, 254, 0, 250)");
+-
+-    var Uint16x8 = SIMD.Uint16x8;
+-    var f = Uint16x8(11, 22, 33, 44, 65535, 65534, 65533, 65532);
+-    assertEq(f.toSource(), "SIMD.Uint16x8(11, 22, 33, 44, 65535, 65534, 65533, 65532)");
+-
+-    var Uint32x4 = SIMD.Uint32x4;
+-    var f = Uint32x4(11, 22, 4294967295, 4294967294);
+-    assertEq(f.toSource(), "SIMD.Uint32x4(11, 22, 4294967295, 4294967294)");
+-
+-    var Bool8x16 = SIMD.Bool8x16;
+-    var f = Bool8x16(true, true, false, false, false, true, true, false, true, true, true, true, false, false, false, false);
+-    assertEq(f.toSource(), "SIMD.Bool8x16(true, true, false, false, false, true, true, false, true, true, true, true, false, false, false, false)");
+-
+-    var Bool16x8 = SIMD.Bool16x8;
+-    var f = Bool16x8(true, true, false, false, true, false, false, true);
+-    assertEq(f.toSource(), "SIMD.Bool16x8(true, true, false, false, true, false, false, true)");
+-
+-    var Bool32x4 = SIMD.Bool32x4;
+-    var f = Bool32x4(true, true, false, false);
+-    assertEq(f.toSource(), "SIMD.Bool32x4(true, true, false, false)");
+-
+-    var Bool64x2 = SIMD.Bool64x2;
+-    var f = Bool64x2(true, false);
+-    assertEq(f.toSource(), "SIMD.Bool64x2(true, false)");
+-
+-    if (typeof reportCompare === "function")
+-        reportCompare(true, true);
+-}
+-
+-test();
+diff --git a/js/src/tests/non262/SIMD/binary-operations.js b/js/src/tests/non262/SIMD/binary-operations.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/binary-operations.js
++++ /dev/null
+@@ -1,785 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-var Float32x4 = SIMD.Float32x4;
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-var Bool8x16 = SIMD.Bool8x16;
+-var Bool16x8 = SIMD.Bool16x8;
+-var Bool32x4 = SIMD.Bool32x4;
+-var Bool64x2 = SIMD.Bool64x2;
+-
+-// Float32x4.
+-function testFloat32x4add() {
+-  function addf(a, b) {
+-    return Math.fround(Math.fround(a) + Math.fround(b));
+-  }
+-
+-  var vals = [
+-    [[1, 2, 3, 4], [10, 20, 30, 40]],
+-    [[1.57, 2.27, 3.57, 4.19], [10.31, 20.49, 30.41, 40.72]],
+-    [[NaN, -0, Infinity, -Infinity], [0, -0, -Infinity, -Infinity]]
+-  ];
+-
+-  for (var [v,w] of vals) {
+-    testBinaryFunc(Float32x4(...v), Float32x4(...w), Float32x4.add, addf);
+-  }
+-}
+-
+-function testFloat32x4div() {
+-  function divf(a, b) {
+-    return Math.fround(Math.fround(a) / Math.fround(b));
+-  }
+-
+-  var vals = [
+-    [[1, 2, 3, 4], [10, 20, 30, 40]],
+-    [[1.26, 2.03, 3.17, 4.59], [11.025, 17.3768, 29.1957, 46.4049]],
+-    [[0, -0, Infinity, -Infinity], [1, 1, -Infinity, Infinity]]
+-  ];
+-
+-  for (var [v,w] of vals) {
+-    testBinaryFunc(Float32x4(...v), Float32x4(...w), Float32x4.div, divf);
+-  }
+-}
+-
+-function testFloat32x4mul() {
+-  function mulf(a, b) {
+-    return Math.fround(Math.fround(a) * Math.fround(b));
+-  }
+-
+-  var vals = [
+-    [[1, 2, 3, 4], [10, 20, 30, 40]],
+-    [[1.66, 2.57, 3.73, 4.12], [10.67, 20.68, 30.02, 40.58]],
+-    [[NaN, -0, Infinity, -Infinity], [NaN, -0, -Infinity, 0]]
+-  ];
+-
+-  for (var [v,w] of vals) {
+-    testBinaryFunc(Float32x4(...v), Float32x4(...w), Float32x4.mul, mulf);
+-  }
+-}
+-
+-function testFloat32x4sub() {
+-  function subf(a, b) {
+-    return Math.fround(Math.fround(a) - Math.fround(b));
+-  }
+-
+-  var vals = [
+-    [[1, 2, 3, 4], [10, 20, 30, 40]],
+-    [[1.34, 2.95, 3.17, 4.29], [10.18, 20.43, 30.63, 40.38]],
+-    [[NaN, -0, -Infinity, -Infinity], [NaN, -0, Infinity, -Infinity]]
+-  ];
+-
+-  for (var [v,w] of vals) {
+-    testBinaryFunc(Float32x4(...v), Float32x4(...w), Float32x4.sub, subf);
+-  }
+-}
+-
+-// Helper for saturating arithmetic.
+-// See SIMD.js, 5.1.25 Saturate(descriptor, x)
+-function saturate(lower, upper, x) {
+-    x = x | 0;
+-    if (x > upper)
+-        return upper;
+-    if (x < lower)
+-        return lower;
+-    return x;
+-}
+-
+-var i8x16vals = [
+-  [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+-   [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]],
+-  [[INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, -2, -3, -4, -5, -6, -7, -8, -9],
+-   [1, 1, -1, -1, INT8_MAX, INT8_MAX, INT8_MIN, INT8_MIN, 8, 9, 10, 11, 12, 13, 14, 15]]
+-];
+-
+-// Int8x16.
+-function testInt8x16add() {
+-  function addi(a, b) {
+-    return (a + b) << 24 >> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Int8x16(...v), Int8x16(...w), Int8x16.add, addi);
+-  }
+-}
+-
+-function testInt8x16and() {
+-  function andi(a, b) {
+-    return (a & b) << 24 >> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Int8x16(...v), Int8x16(...w), Int8x16.and, andi);
+-  }
+-}
+-
+-function testInt8x16mul() {
+-  function muli(x, y) {
+-    return (x * y) << 24 >> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Int8x16(...v), Int8x16(...w), Int8x16.mul, muli);
+-  }
+-}
+-
+-function testInt8x16or() {
+-  function ori(a, b) {
+-    return (a | b) << 24 >> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Int8x16(...v), Int8x16(...w), Int8x16.or, ori);
+-  }
+-}
+-
+-function testInt8x16sub() {
+-  function subi(a, b) {
+-    return (a - b) << 24 >> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Int8x16(...v), Int8x16(...w), Int8x16.sub, subi);
+-  }
+-}
+-
+-function testInt8x16xor() {
+-  function xori(a, b) {
+-    return (a ^ b) << 24 >> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Int8x16(...v), Int8x16(...w), Int8x16.xor, xori);
+-  }
+-}
+-
+-function testInt8x16addSaturate() {
+-  function satadd(a, b) {
+-    return saturate(INT8_MIN, INT8_MAX, a + b);
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Int8x16(...v), Int8x16(...w), Int8x16.addSaturate, satadd);
+-  }
+-}
+-
+-function testInt8x16subSaturate() {
+-  function satsub(a, b) {
+-    return saturate(INT8_MIN, INT8_MAX, a - b);
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Int8x16(...v), Int8x16(...w), Int8x16.subSaturate, satsub);
+-  }
+-}
+-
+-// Uint8x16.
+-function testUint8x16add() {
+-  function addi(a, b) {
+-    return (a + b) << 24 >>> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Uint8x16(...v), Uint8x16(...w), Uint8x16.add, addi);
+-  }
+-}
+-
+-function testUint8x16and() {
+-  function andi(a, b) {
+-    return (a & b) << 24 >>> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Uint8x16(...v), Uint8x16(...w), Uint8x16.and, andi);
+-  }
+-}
+-
+-function testUint8x16mul() {
+-  function muli(x, y) {
+-    return (x * y) << 24 >>> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Uint8x16(...v), Uint8x16(...w), Uint8x16.mul, muli);
+-  }
+-}
+-
+-function testUint8x16or() {
+-  function ori(a, b) {
+-    return (a | b) << 24 >>> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Uint8x16(...v), Uint8x16(...w), Uint8x16.or, ori);
+-  }
+-}
+-
+-function testUint8x16sub() {
+-  function subi(a, b) {
+-    return (a - b) << 24 >>> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Uint8x16(...v), Uint8x16(...w), Uint8x16.sub, subi);
+-  }
+-}
+-
+-function testUint8x16xor() {
+-  function xori(a, b) {
+-    return (a ^ b) << 24 >>> 24;
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Uint8x16(...v), Uint8x16(...w), Uint8x16.xor, xori);
+-  }
+-}
+-
+-function testUint8x16addSaturate() {
+-  function satadd(a, b) {
+-    return saturate(0, UINT8_MAX, a + b);
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Uint8x16(...v), Uint8x16(...w), Uint8x16.addSaturate, satadd);
+-  }
+-}
+-
+-function testUint8x16subSaturate() {
+-  function satsub(a, b) {
+-    return saturate(0, UINT8_MAX, a - b);
+-  }
+-
+-  for (var [v,w] of i8x16vals) {
+-    testBinaryFunc(Uint8x16(...v), Uint8x16(...w), Uint8x16.subSaturate, satsub);
+-  }
+-}
+-
+-var i16x8vals = [
+-  [[1, 2, 3, 4, 5, 6, 7, 8],
+-   [10, 20, 30, 40, 50, 60, 70, 80]],
+-  [[INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN],
+-   [1, 1, -1, -1, INT16_MAX, INT16_MAX, INT16_MIN, INT16_MIN]]
+-];
+-
+-// Int16x8.
+-function testInt16x8add() {
+-  function addi(a, b) {
+-    return (a + b) << 16 >> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Int16x8(...v), Int16x8(...w), Int16x8.add, addi);
+-  }
+-}
+-
+-function testInt16x8and() {
+-  function andi(a, b) {
+-    return (a & b) << 16 >> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Int16x8(...v), Int16x8(...w), Int16x8.and, andi);
+-  }
+-}
+-
+-function testInt16x8mul() {
+-  function muli(x, y) {
+-    return (x * y) << 16 >> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Int16x8(...v), Int16x8(...w), Int16x8.mul, muli);
+-  }
+-}
+-
+-function testInt16x8or() {
+-  function ori(a, b) {
+-    return (a | b) << 16 >> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Int16x8(...v), Int16x8(...w), Int16x8.or, ori);
+-  }
+-}
+-
+-function testInt16x8sub() {
+-  function subi(a, b) {
+-    return (a - b) << 16 >> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Int16x8(...v), Int16x8(...w), Int16x8.sub, subi);
+-  }
+-}
+-
+-function testInt16x8xor() {
+-  function xori(a, b) {
+-    return (a ^ b) << 16 >> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Int16x8(...v), Int16x8(...w), Int16x8.xor, xori);
+-  }
+-}
+-
+-function testInt16x8addSaturate() {
+-  function satadd(a, b) {
+-    return saturate(INT16_MIN, INT16_MAX, a + b);
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Int16x8(...v), Int16x8(...w), Int16x8.addSaturate, satadd);
+-  }
+-}
+-
+-function testInt16x8subSaturate() {
+-  function satsub(a, b) {
+-    return saturate(INT16_MIN, INT16_MAX, a - b);
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Int16x8(...v), Int16x8(...w), Int16x8.subSaturate, satsub);
+-  }
+-}
+-
+-// Uint16x8.
+-function testUint16x8add() {
+-  function addi(a, b) {
+-    return (a + b) << 16 >>> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Uint16x8(...v), Uint16x8(...w), Uint16x8.add, addi);
+-  }
+-}
+-
+-function testUint16x8and() {
+-  function andi(a, b) {
+-    return (a & b) << 16 >>> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Uint16x8(...v), Uint16x8(...w), Uint16x8.and, andi);
+-  }
+-}
+-
+-function testUint16x8mul() {
+-  function muli(x, y) {
+-    return (x * y) << 16 >>> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Uint16x8(...v), Uint16x8(...w), Uint16x8.mul, muli);
+-  }
+-}
+-
+-function testUint16x8or() {
+-  function ori(a, b) {
+-    return (a | b) << 16 >>> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Uint16x8(...v), Uint16x8(...w), Uint16x8.or, ori);
+-  }
+-}
+-
+-function testUint16x8sub() {
+-  function subi(a, b) {
+-    return (a - b) << 16 >>> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Uint16x8(...v), Uint16x8(...w), Uint16x8.sub, subi);
+-  }
+-}
+-
+-function testUint16x8xor() {
+-  function xori(a, b) {
+-    return (a ^ b) << 16 >>> 16;
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Uint16x8(...v), Uint16x8(...w), Uint16x8.xor, xori);
+-  }
+-}
+-
+-function testUint16x8addSaturate() {
+-  function satadd(a, b) {
+-    return saturate(0, UINT16_MAX, a + b);
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Uint16x8(...v), Uint16x8(...w), Uint16x8.addSaturate, satadd);
+-  }
+-}
+-
+-function testUint16x8subSaturate() {
+-  function satsub(a, b) {
+-    return saturate(0, UINT16_MAX, a - b);
+-  }
+-
+-  for (var [v,w] of i16x8vals) {
+-    testBinaryFunc(Uint16x8(...v), Uint16x8(...w), Uint16x8.subSaturate, satsub);
+-  }
+-}
+-
+-var i32x4vals = [
+-  [[1, 2, 3, 4], [10, 20, 30, 40]],
+-  [[INT32_MAX, INT32_MIN, INT32_MAX, INT32_MIN], [1, -1, 0, 0]],
+-  [[INT32_MAX, INT32_MIN, INT32_MAX, INT32_MIN], [INT32_MIN, INT32_MAX, INT32_MAX, INT32_MIN]],
+-  [[INT32_MAX, INT32_MIN, INT32_MAX, INT32_MIN], [-1, -1, INT32_MIN, INT32_MIN]],
+-  [[INT32_MAX, INT32_MIN, INT32_MAX, INT32_MIN], [-1, 1, INT32_MAX, INT32_MIN]],
+-  [[UINT32_MAX, 0, UINT32_MAX, 0], [1, -1, 0, 0]],
+-  [[UINT32_MAX, 0, UINT32_MAX, 0], [-1, -1, INT32_MIN, INT32_MIN]],
+-  [[UINT32_MAX, 0, UINT32_MAX, 0], [1, -1, 0, 0]],
+-  [[UINT32_MAX, 0, UINT32_MAX, 0], [-1, 1, INT32_MAX, INT32_MIN]]
+-];
+-
+-// Int32x4.
+-function testInt32x4add() {
+-  function addi(a, b) {
+-    return (a + b) | 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Int32x4(...v), Int32x4(...w), Int32x4.add, addi);
+-  }
+-}
+-
+-function testInt32x4and() {
+-  function andi(a, b) {
+-    return (a & b) | 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Int32x4(...v), Int32x4(...w), Int32x4.and, andi);
+-  }
+-}
+-
+-function testInt32x4mul() {
+-  function muli(x, y) {
+-    // Deal with lost precision in the 53-bit double mantissa.
+-    // Compute two 48-bit products. Truncate and combine them.
+-    var hi = (x * (y >>> 16)) | 0;
+-    var lo = (x * (y & 0xffff)) | 0;
+-    return (lo + (hi << 16)) | 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Int32x4(...v), Int32x4(...w), Int32x4.mul, muli);
+-  }
+-}
+-
+-function testInt32x4or() {
+-  function ori(a, b) {
+-    return (a | b) | 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Int32x4(...v), Int32x4(...w), Int32x4.or, ori);
+-  }
+-}
+-
+-function testInt32x4sub() {
+-  function subi(a, b) {
+-    return (a - b) | 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Int32x4(...v), Int32x4(...w), Int32x4.sub, subi);
+-  }
+-}
+-
+-function testInt32x4xor() {
+-  function xori(a, b) {
+-    return (a ^ b) | 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Int32x4(...v), Int32x4(...w), Int32x4.xor, xori);
+-  }
+-}
+-
+-// Uint32x4.
+-function testUint32x4add() {
+-  function addi(a, b) {
+-    return (a + b) >>> 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Uint32x4(...v), Uint32x4(...w), Uint32x4.add, addi);
+-  }
+-}
+-
+-function testUint32x4and() {
+-  function andi(a, b) {
+-    return (a & b) >>> 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Uint32x4(...v), Uint32x4(...w), Uint32x4.and, andi);
+-  }
+-}
+-
+-function testUint32x4mul() {
+-  function muli(x, y) {
+-    // Deal with lost precision in the 53-bit double mantissa.
+-    // Compute two 48-bit products. Truncate and combine them.
+-    var hi = (x * (y >>> 16)) >>> 0;
+-    var lo = (x * (y & 0xffff)) >>> 0;
+-    return (lo + (hi << 16)) >>> 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Uint32x4(...v), Uint32x4(...w), Uint32x4.mul, muli);
+-  }
+-}
+-
+-function testUint32x4or() {
+-  function ori(a, b) {
+-    return (a | b) >>> 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Uint32x4(...v), Uint32x4(...w), Uint32x4.or, ori);
+-  }
+-}
+-
+-function testUint32x4sub() {
+-  function subi(a, b) {
+-    return (a - b) >>> 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Uint32x4(...v), Uint32x4(...w), Uint32x4.sub, subi);
+-  }
+-}
+-
+-function testUint32x4xor() {
+-  function xori(a, b) {
+-    return (a ^ b) >>> 0;
+-  }
+-
+-  for (var [v,w] of i32x4vals) {
+-    testBinaryFunc(Uint32x4(...v), Uint32x4(...w), Uint32x4.xor, xori);
+-  }
+-}
+-
+-var b8x16vals = [
+-  [[true, true, true, true, false, false, false, false, true, true, true, true, false, false, false, false],
+-   [false, true, false, true, false, true, false, true, true, true, true, true, false, false, false, false]]
+-];
+-
+-function testBool8x16and() {
+-  function andb(a, b) {
+-    return a && b;
+-  }
+-
+-  for (var [v,w] of b8x16vals) {
+-    testBinaryFunc(Bool8x16(...v), Bool8x16(...w), Bool8x16.and, andb);
+-  }
+-}
+-
+-function testBool8x16or() {
+-  function orb(a, b) {
+-    return a || b;
+-  }
+-
+-  for (var [v,w] of b8x16vals) {
+-    testBinaryFunc(Bool8x16(...v), Bool8x16(...w), Bool8x16.or, orb);
+-  }
+-}
+-
+-function testBool8x16xor() {
+-  function xorb(a, b) {
+-    return a != b;
+-  }
+-
+-  for (var [v,w] of b8x16vals) {
+-    testBinaryFunc(Bool8x16(...v), Bool8x16(...w), Bool8x16.xor, xorb);
+-  }
+-}
+-
+-var b16x8vals = [
+-  [[true, true, true, true, false, false, false, false],
+-   [false, true, false, true, false, true, false, true]]
+-];
+-
+-function testBool16x8and() {
+-  function andb(a, b) {
+-    return a && b;
+-  }
+-
+-  for (var [v,w] of b16x8vals) {
+-    testBinaryFunc(Bool16x8(...v), Bool16x8(...w), Bool16x8.and, andb);
+-  }
+-}
+-
+-function testBool16x8or() {
+-  function orb(a, b) {
+-    return a || b;
+-  }
+-
+-  for (var [v,w] of b16x8vals) {
+-    testBinaryFunc(Bool16x8(...v), Bool16x8(...w), Bool16x8.or, orb);
+-  }
+-}
+-
+-function testBool16x8xor() {
+-  function xorb(a, b) {
+-    return a != b;
+-  }
+-
+-  for (var [v,w] of b16x8vals) {
+-    testBinaryFunc(Bool16x8(...v), Bool16x8(...w), Bool16x8.xor, xorb);
+-  }
+-}
+-
+-var b32x4vals = [
+-  [[true, true, false, false], [false, true, false, true]]
+-];
+-
+-function testBool32x4and() {
+-  function andb(a, b) {
+-    return a && b;
+-  }
+-
+-  for (var [v,w] of b32x4vals) {
+-    testBinaryFunc(Bool32x4(...v), Bool32x4(...w), Bool32x4.and, andb);
+-  }
+-}
+-
+-function testBool32x4or() {
+-  function orb(a, b) {
+-    return a || b;
+-  }
+-
+-  for (var [v,w] of b32x4vals) {
+-    testBinaryFunc(Bool32x4(...v), Bool32x4(...w), Bool32x4.or, orb);
+-  }
+-}
+-
+-function testBool32x4xor() {
+-  function xorb(a, b) {
+-    return a != b;
+-  }
+-
+-  for (var [v,w] of b32x4vals) {
+-    testBinaryFunc(Bool32x4(...v), Bool32x4(...w), Bool32x4.xor, xorb);
+-  }
+-}
+-
+-var b64x2vals = [
+-  [[false, false], [false, true], [true, false], [true, true]]
+-];
+-
+-function testBool64x2and() {
+-  function andb(a, b) {
+-    return a && b;
+-  }
+-
+-  for (var [v,w] of b64x2vals) {
+-    testBinaryFunc(Bool64x2(...v), Bool64x2(...w), Bool64x2.and, andb);
+-  }
+-}
+-
+-function testBool64x2or() {
+-  function orb(a, b) {
+-    return a || b;
+-  }
+-
+-  for (var [v,w] of b64x2vals) {
+-    testBinaryFunc(Bool64x2(...v), Bool64x2(...w), Bool64x2.or, orb);
+-  }
+-}
+-
+-function testBool64x2xor() {
+-  function xorb(a, b) {
+-    return a != b;
+-  }
+-
+-  for (var [v,w] of b64x2vals) {
+-    testBinaryFunc(Bool64x2(...v), Bool64x2(...w), Bool64x2.xor, xorb);
+-  }
+-}
+-
+-function test() {
+-  testFloat32x4add();
+-  testFloat32x4div();
+-  testFloat32x4mul();
+-  testFloat32x4sub();
+-
+-  testInt8x16add();
+-  testInt8x16and();
+-  testInt8x16mul();
+-  testInt8x16or();
+-  testInt8x16sub();
+-  testInt8x16xor();
+-  testInt8x16addSaturate();
+-  testInt8x16subSaturate();
+-
+-  testUint8x16add();
+-  testUint8x16and();
+-  testUint8x16mul();
+-  testUint8x16or();
+-  testUint8x16sub();
+-  testUint8x16xor();
+-  testUint8x16addSaturate();
+-  testUint8x16subSaturate();
+-
+-  testInt16x8add();
+-  testInt16x8and();
+-  testInt16x8mul();
+-  testInt16x8or();
+-  testInt16x8sub();
+-  testInt16x8xor();
+-  testInt16x8addSaturate();
+-  testInt16x8subSaturate();
+-
+-  testUint16x8add();
+-  testUint16x8and();
+-  testUint16x8mul();
+-  testUint16x8or();
+-  testUint16x8sub();
+-  testUint16x8xor();
+-  testUint16x8addSaturate();
+-  testUint16x8subSaturate();
+-
+-  testInt32x4add();
+-  testInt32x4and();
+-  testInt32x4mul();
+-  testInt32x4or();
+-  testInt32x4sub();
+-  testInt32x4xor();
+-
+-  testUint32x4add();
+-  testUint32x4and();
+-  testUint32x4mul();
+-  testUint32x4or();
+-  testUint32x4sub();
+-  testUint32x4xor();
+-
+-  testBool8x16and();
+-  testBool8x16or();
+-  testBool8x16xor();
+-
+-  testBool16x8and();
+-  testBool16x8or();
+-  testBool16x8xor();
+-
+-  testBool32x4and();
+-  testBool32x4or();
+-  testBool32x4xor();
+-
+-  testBool64x2and();
+-  testBool64x2or();
+-  testBool64x2xor();
+-
+-  if (typeof reportCompare === "function") {
+-    reportCompare(true, true);
+-  }
+-}
+-
+-test();
+diff --git a/js/src/tests/non262/SIMD/browser.js b/js/src/tests/non262/SIMD/browser.js
+deleted file mode 100644
+diff --git a/js/src/tests/non262/SIMD/bug1023145.js b/js/src/tests/non262/SIMD/bug1023145.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/bug1023145.js
++++ /dev/null
+@@ -1,14 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * http://creativecommons.org/licenses/publicdomain/
+- */
+-
+-delete Object.prototype.__proto__;
+-var Int32x4 = SIMD.Int32x4;
+-var ar = Int32x4.array(1);
+-var array = new ar([Int32x4(1, 2, 3, 4)]);
+-
+-if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+diff --git a/js/src/tests/non262/SIMD/bug953270.js b/js/src/tests/non262/SIMD/bug953270.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/bug953270.js
++++ /dev/null
+@@ -1,24 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * http://creativecommons.org/licenses/publicdomain/
+- */
+-
+-// Check that NaN normalization is applied when extracting the x lane
+-// out, after bit conversion has occurred.
+-
+-var Int32x4 = SIMD.Int32x4;
+-var a = Int32x4((4294967295), 200, 300, 400);
+-var c = SIMD.Float32x4.fromInt32x4Bits(a);
+-
+-// NaN canonicalization occurs when extracting out x lane:
+-assertEq(SIMD.Float32x4.extractLane(c, 0), NaN);
+-
+-// but underlying bits are faithfully transmitted
+-// (though reinterpreted as a signed integer):
+-var d = SIMD.Int32x4.fromFloat32x4Bits(c);
+-assertEq(SIMD.Int32x4.extractLane(d, 0), -1);
+-
+-if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+diff --git a/js/src/tests/non262/SIMD/check.js b/js/src/tests/non262/SIMD/check.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/check.js
++++ /dev/null
+@@ -1,214 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-function test() {
+-
+-  var i4 = SIMD.Int32x4(1,2,3,4);
+-  var i8 = SIMD.Int16x8(1,2,3,4,5,6,7,8);
+-  var i16 = SIMD.Int8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);
+-  var u4 = SIMD.Uint32x4(1,2,3,4);
+-  var u8 = SIMD.Uint16x8(1,2,3,4,5,6,7,8);
+-  var u16 = SIMD.Uint8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);
+-  var f4 = SIMD.Float32x4(NaN, -0, Infinity, 13.37);
+-  var f2 = SIMD.Float64x2(-0, 13.37);
+-  var b2 = SIMD.Bool64x2(true, false);
+-  var b4 = SIMD.Bool32x4(true, true, false, false);
+-  var b8 = SIMD.Bool16x8(true, true, false, false, true, true, false, false);
+-  var b16 = SIMD.Bool8x16(true, true, false, false, true, true, false, false, true, true, false, false, true, true, false, false);
+-
+-  var ci4 = SIMD.Int32x4.check(i4);
+-  assertEqX4(ci4, simdToArray(i4));
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.check({}), TypeError);
+-
+-  var ci8 = SIMD.Int16x8.check(i8);
+-  assertEqX8(ci8, simdToArray(i8));
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(i4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.check({}), TypeError);
+-
+-  var ci16 = SIMD.Int8x16.check(i16);
+-  assertEqX16(ci16, simdToArray(i16));
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(i4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.check({}), TypeError);
+-
+-  var cu4 = SIMD.Uint32x4.check(u4);
+-  assertEqX4(cu4, simdToArray(u4));
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(i4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.check({}), TypeError);
+-
+-  var cu8 = SIMD.Uint16x8.check(u8);
+-  assertEqX8(cu8, simdToArray(u8));
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(i4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.check({}), TypeError);
+-
+-  var cu16 = SIMD.Uint8x16.check(u16);
+-  assertEqX16(cu16, simdToArray(u16));
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(i4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.check({}), TypeError);
+-
+-  var cf4 = SIMD.Float32x4.check(f4);
+-  assertEqX4(cf4, simdToArray(f4));
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(i4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float32x4.check({}), TypeError);
+-
+-  var cf2 = SIMD.Float64x2.check(f2);
+-  assertEqX2(cf2, simdToArray(f2));
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(i4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Float64x2.check({}), TypeError);
+-
+-  var cb2 = SIMD.Bool64x2.check(b2);
+-  assertEqX2(cb2, simdToArray(b2));
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool64x2.check({}), TypeError);
+-
+-  var cb4 = SIMD.Bool32x4.check(b4);
+-  assertEqX4(cb4, simdToArray(b4));
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(i4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool32x4.check({}), TypeError);
+-
+-  var cb8 = SIMD.Bool16x8.check(b8);
+-  assertEqX8(cb8, simdToArray(b8));
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(i4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check(b16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool16x8.check({}), TypeError);
+-
+-  var cb16 = SIMD.Bool8x16.check(b16);
+-  assertEqX16(cb16, simdToArray(b16));
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(f4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(f2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(i4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(i8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(i16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(u4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(u8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(u16), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(b2), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(b4), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check(b8), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check("i swear i'm a vector"), TypeError);
+-  assertThrowsInstanceOf(() => SIMD.Bool8x16.check({}), TypeError);
+-
+-  if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+-}
+-
+-test();
+-
+diff --git a/js/src/tests/non262/SIMD/comparisons.js b/js/src/tests/non262/SIMD/comparisons.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/comparisons.js
++++ /dev/null
+@@ -1,349 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var Float32x4 = SIMD.Float32x4;
+-var Float64x2 = SIMD.Float64x2;
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-var Bool8x16 = SIMD.Bool8x16;
+-var Bool16x8 = SIMD.Bool16x8;
+-var Bool32x4 = SIMD.Bool32x4;
+-var Bool64x2 = SIMD.Bool64x2;
+-
+-var fround = Math.fround;
+-
+-
+-function testEqualFloat32x4(v, w) {
+-    testBinaryCompare(v, w, Float32x4.equal, (x, y) => fround(x) == fround(y), Bool32x4);
+-}
+-function testNotEqualFloat32x4(v, w) {
+-    testBinaryCompare(v, w, Float32x4.notEqual, (x, y) => fround(x) != fround(y), Bool32x4);
+-}
+-function testLessThanFloat32x4(v, w) {
+-    testBinaryCompare(v, w, Float32x4.lessThan, (x, y) => fround(x) < fround(y), Bool32x4);
+-}
+-function testLessThanOrEqualFloat32x4(v, w) {
+-    testBinaryCompare(v, w, Float32x4.lessThanOrEqual, (x, y) => fround(x) <= fround(y), Bool32x4);
+-}
+-function testGreaterThanFloat32x4(v, w) {
+-    testBinaryCompare(v, w, Float32x4.greaterThan, (x, y) => fround(x) > fround(y), Bool32x4);
+-}
+-function testGreaterThanOrEqualFloat32x4(v, w) {
+-    testBinaryCompare(v, w, Float32x4.greaterThanOrEqual, (x, y) => fround(x) >= fround(y), Bool32x4);
+-}
+-
+-function testEqualFloat64x2(v, w) {
+-    testBinaryCompare(v, w, Float64x2.equal, (x, y) => x == y, Bool64x2);
+-}
+-function testNotEqualFloat64x2(v, w) {
+-    testBinaryCompare(v, w, Float64x2.notEqual, (x, y) => x != y, Bool64x2);
+-}
+-function testLessThanFloat64x2(v, w) {
+-    testBinaryCompare(v, w, Float64x2.lessThan, (x, y) => x < y, Bool64x2);
+-}
+-function testLessThanOrEqualFloat64x2(v, w) {
+-    testBinaryCompare(v, w, Float64x2.lessThanOrEqual, (x, y) => x <= y, Bool64x2);
+-}
+-function testGreaterThanFloat64x2(v, w) {
+-    testBinaryCompare(v, w, Float64x2.greaterThan, (x, y) => x > y, Bool64x2);
+-}
+-function testGreaterThanOrEqualFloat64x2(v, w) {
+-    testBinaryCompare(v, w, Float64x2.greaterThanOrEqual, (x, y) => x >= y, Bool64x2);
+-}
+-
+-function testEqualInt8x16(v, w) {
+-    testBinaryCompare(v, w, Int8x16.equal, (x, y) => x == y, Bool8x16);
+-}
+-function testNotEqualInt8x16(v, w) {
+-    testBinaryCompare(v, w, Int8x16.notEqual, (x, y) => x != y, Bool8x16);
+-}
+-function testLessThanInt8x16(v, w) {
+-    testBinaryCompare(v, w, Int8x16.lessThan, (x, y) => x < y, Bool8x16);
+-}
+-function testLessThanOrEqualInt8x16(v, w) {
+-    testBinaryCompare(v, w, Int8x16.lessThanOrEqual, (x, y) => x <= y, Bool8x16);
+-}
+-function testGreaterThanInt8x16(v, w) {
+-    testBinaryCompare(v, w, Int8x16.greaterThan, (x, y) => x > y, Bool8x16);
+-}
+-function testGreaterThanOrEqualInt8x16(v, w) {
+-    testBinaryCompare(v, w, Int8x16.greaterThanOrEqual, (x, y) => x >= y, Bool8x16);
+-}
+-
+-function testEqualInt16x8(v, w) {
+-    testBinaryCompare(v, w, Int16x8.equal, (x, y) => x == y, Bool16x8);
+-}
+-function testNotEqualInt16x8(v, w) {
+-    testBinaryCompare(v, w, Int16x8.notEqual, (x, y) => x != y, Bool16x8);
+-}
+-function testLessThanInt16x8(v, w) {
+-    testBinaryCompare(v, w, Int16x8.lessThan, (x, y) => x < y, Bool16x8);
+-}
+-function testLessThanOrEqualInt16x8(v, w) {
+-    testBinaryCompare(v, w, Int16x8.lessThanOrEqual, (x, y) => x <= y, Bool16x8);
+-}
+-function testGreaterThanInt16x8(v, w) {
+-    testBinaryCompare(v, w, Int16x8.greaterThan, (x, y) => x > y, Bool16x8);
+-}
+-function testGreaterThanOrEqualInt16x8(v, w) {
+-    testBinaryCompare(v, w, Int16x8.greaterThanOrEqual, (x, y) => x >= y, Bool16x8);
+-}
+-
+-function testEqualInt32x4(v, w) {
+-    testBinaryCompare(v, w, Int32x4.equal, (x, y) => x == y, Bool32x4);
+-}
+-function testNotEqualInt32x4(v, w) {
+-    testBinaryCompare(v, w, Int32x4.notEqual, (x, y) => x != y, Bool32x4);
+-}
+-function testLessThanInt32x4(v, w) {
+-    testBinaryCompare(v, w, Int32x4.lessThan, (x, y) => x < y, Bool32x4);
+-}
+-function testLessThanOrEqualInt32x4(v, w) {
+-    testBinaryCompare(v, w, Int32x4.lessThanOrEqual, (x, y) => x <= y, Bool32x4);
+-}
+-function testGreaterThanInt32x4(v, w) {
+-    testBinaryCompare(v, w, Int32x4.greaterThan, (x, y) => x > y, Bool32x4);
+-}
+-function testGreaterThanOrEqualInt32x4(v, w) {
+-    testBinaryCompare(v, w, Int32x4.greaterThanOrEqual, (x, y) => x >= y, Bool32x4);
+-}
+-
+-function testEqualUint8x16(v, w) {
+-    testBinaryCompare(v, w, Uint8x16.equal, (x, y) => x == y, Bool8x16);
+-}
+-function testNotEqualUint8x16(v, w) {
+-    testBinaryCompare(v, w, Uint8x16.notEqual, (x, y) => x != y, Bool8x16);
+-}
+-function testLessThanUint8x16(v, w) {
+-    testBinaryCompare(v, w, Uint8x16.lessThan, (x, y) => x < y, Bool8x16);
+-}
+-function testLessThanOrEqualUint8x16(v, w) {
+-    testBinaryCompare(v, w, Uint8x16.lessThanOrEqual, (x, y) => x <= y, Bool8x16);
+-}
+-function testGreaterThanUint8x16(v, w) {
+-    testBinaryCompare(v, w, Uint8x16.greaterThan, (x, y) => x > y, Bool8x16);
+-}
+-function testGreaterThanOrEqualUint8x16(v, w) {
+-    testBinaryCompare(v, w, Uint8x16.greaterThanOrEqual, (x, y) => x >= y, Bool8x16);
+-}
+-
+-function testEqualUint16x8(v, w) {
+-    testBinaryCompare(v, w, Uint16x8.equal, (x, y) => x == y, Bool16x8);
+-}
+-function testNotEqualUint16x8(v, w) {
+-    testBinaryCompare(v, w, Uint16x8.notEqual, (x, y) => x != y, Bool16x8);
+-}
+-function testLessThanUint16x8(v, w) {
+-    testBinaryCompare(v, w, Uint16x8.lessThan, (x, y) => x < y, Bool16x8);
+-}
+-function testLessThanOrEqualUint16x8(v, w) {
+-    testBinaryCompare(v, w, Uint16x8.lessThanOrEqual, (x, y) => x <= y, Bool16x8);
+-}
+-function testGreaterThanUint16x8(v, w) {
+-    testBinaryCompare(v, w, Uint16x8.greaterThan, (x, y) => x > y, Bool16x8);
+-}
+-function testGreaterThanOrEqualUint16x8(v, w) {
+-    testBinaryCompare(v, w, Uint16x8.greaterThanOrEqual, (x, y) => x >= y, Bool16x8);
+-}
+-
+-function testEqualUint32x4(v, w) {
+-    testBinaryCompare(v, w, Uint32x4.equal, (x, y) => x == y, Bool32x4);
+-}
+-function testNotEqualUint32x4(v, w) {
+-    testBinaryCompare(v, w, Uint32x4.notEqual, (x, y) => x != y, Bool32x4);
+-}
+-function testLessThanUint32x4(v, w) {
+-    testBinaryCompare(v, w, Uint32x4.lessThan, (x, y) => x < y, Bool32x4);
+-}
+-function testLessThanOrEqualUint32x4(v, w) {
+-    testBinaryCompare(v, w, Uint32x4.lessThanOrEqual, (x, y) => x <= y, Bool32x4);
+-}
+-function testGreaterThanUint32x4(v, w) {
+-    testBinaryCompare(v, w, Uint32x4.greaterThan, (x, y) => x > y, Bool32x4);
+-}
+-function testGreaterThanOrEqualUint32x4(v, w) {
+-    testBinaryCompare(v, w, Uint32x4.greaterThanOrEqual, (x, y) => x >= y, Bool32x4);
+-}
+-
+-function test() {
+-  var Float32x4val = [
+-      Float32x4(1, 20, 30, 4),
+-      Float32x4(10, 2, 3, 40),
+-      Float32x4(9.999, 2.1234, 30.4443, 4),
+-      Float32x4(10, 2.1233, 30.4444, 4.0001),
+-      Float32x4(NaN, -Infinity, +Infinity, -0),
+-      Float32x4(+Infinity, NaN, -0, -Infinity),
+-      Float32x4(13.37, 42.42, NaN, 0)
+-  ];
+-
+-  var v, w;
+-  for (v of Float32x4val) {
+-      for (w of Float32x4val) {
+-          testEqualFloat32x4(v, w);
+-          testNotEqualFloat32x4(v, w);
+-          testLessThanFloat32x4(v, w);
+-          testLessThanOrEqualFloat32x4(v, w);
+-          testGreaterThanFloat32x4(v, w);
+-          testGreaterThanOrEqualFloat32x4(v, w);
+-      }
+-  }
+-
+-  var Float64x2val = [
+-      Float64x2(1, 20),
+-      Float64x2(10, 2),
+-      Float64x2(9.999, 2.1234),
+-      Float64x2(10, 2.1233),
+-      Float64x2(30.4443, 4),
+-      Float64x2(30.4444, 4.0001),
+-      Float64x2(NaN, -Infinity),
+-      Float64x2(+Infinity, NaN),
+-      Float64x2(+Infinity, -0),
+-      Float64x2(-0, -Infinity),
+-      Float64x2(13.37, 42.42),
+-      Float64x2(NaN, 0)
+-  ];
+-
+-  for (v of Float64x2val) {
+-      for (w of Float64x2val) {
+-          testEqualFloat64x2(v, w);
+-          testNotEqualFloat64x2(v, w);
+-          testLessThanFloat64x2(v, w);
+-          testLessThanOrEqualFloat64x2(v, w);
+-          testGreaterThanFloat64x2(v, w);
+-          testGreaterThanOrEqualFloat64x2(v, w);
+-      }
+-  }
+-
+-  var Int8x16val = [
+-      Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
+-      Int8x16(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16),
+-      Int8x16(-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16),
+-      Int8x16(1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16),
+-      Int8x16(INT8_MAX, INT8_MAX, INT8_MIN, INT8_MIN, INT8_MIN + 1, INT8_MAX - 1, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16),
+-      Int8x16(INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX - 1, INT8_MIN + 1, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16)
+-  ];
+-
+-  for (v of Int8x16val) {
+-      for (w of Int8x16val) {
+-          testEqualInt8x16(v, w);
+-          testNotEqualInt8x16(v, w);
+-          testLessThanInt8x16(v, w);
+-          testLessThanOrEqualInt8x16(v, w);
+-          testGreaterThanInt8x16(v, w);
+-          testGreaterThanOrEqualInt8x16(v, w);
+-      }
+-  }
+-
+-  var Int16x8val = [
+-      Int16x8(1, 2, 3, 4, 5, 6, 7, 8),
+-      Int16x8(-1, -2, -3, -4, -5, -6, -7, -8),
+-      Int16x8(-1, 2, -3, 4, -5, 6, -7, 8),
+-      Int16x8(1, -2, 3, -4, 5, -6, 7, -8),
+-      Int16x8(INT16_MAX, INT16_MAX, INT16_MIN, INT16_MIN, INT16_MIN + 1, INT16_MAX - 1, -7, -8),
+-      Int16x8(INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX - 1, INT16_MIN + 1, 7, -8)
+-  ];
+-
+-  for (v of Int16x8val) {
+-      for (w of Int16x8val) {
+-          testEqualInt16x8(v, w);
+-          testNotEqualInt16x8(v, w);
+-          testLessThanInt16x8(v, w);
+-          testLessThanOrEqualInt16x8(v, w);
+-          testGreaterThanInt16x8(v, w);
+-          testGreaterThanOrEqualInt16x8(v, w);
+-      }
+-  }
+-
+-  var Int32x4val = [
+-      Int32x4(1, 2, 3, 4),
+-      Int32x4(-1, -2, -3, -4),
+-      Int32x4(-1, 2, -3, 4),
+-      Int32x4(1, -2, 3, -4),
+-      Int32x4(INT32_MAX, INT32_MAX, INT32_MIN, INT32_MIN),
+-      Int32x4(INT32_MAX, INT32_MIN, INT32_MAX, INT32_MIN)
+-  ];
+-
+-  for (v of Int32x4val) {
+-      for (w of Int32x4val) {
+-          testEqualInt32x4(v, w);
+-          testNotEqualInt32x4(v, w);
+-          testLessThanInt32x4(v, w);
+-          testLessThanOrEqualInt32x4(v, w);
+-          testGreaterThanInt32x4(v, w);
+-          testGreaterThanOrEqualInt32x4(v, w);
+-      }
+-  }
+-
+-  var Uint8x16val = [
+-      Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
+-      Uint8x16(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16),
+-      Uint8x16(-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16),
+-      Uint8x16(1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16),
+-      Uint8x16(UINT8_MAX, UINT8_MAX, 0, 0, 0 + 1, UINT8_MAX - 1, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16),
+-      Uint8x16(UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX - 1, 0 + 1, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16)
+-  ];
+-
+-  for (v of Uint8x16val) {
+-      for (w of Uint8x16val) {
+-          testEqualUint8x16(v, w);
+-          testNotEqualUint8x16(v, w);
+-          testLessThanUint8x16(v, w);
+-          testLessThanOrEqualUint8x16(v, w);
+-          testGreaterThanUint8x16(v, w);
+-          testGreaterThanOrEqualUint8x16(v, w);
+-      }
+-  }
+-
+-  var Uint16x8val = [
+-      Uint16x8(1, 2, 3, 4, 5, 6, 7, 8),
+-      Uint16x8(-1, -2, -3, -4, -5, -6, -7, -8),
+-      Uint16x8(-1, 2, -3, 4, -5, 6, -7, 8),
+-      Uint16x8(1, -2, 3, -4, 5, -6, 7, -8),
+-      Uint16x8(UINT16_MAX, UINT16_MAX, 0, 0, 0 + 1, UINT16_MAX - 1, -7, -8),
+-      Uint16x8(UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX - 1, 0 + 1, 7, -8)
+-  ];
+-
+-  for (v of Uint16x8val) {
+-      for (w of Uint16x8val) {
+-          testEqualUint16x8(v, w);
+-          testNotEqualUint16x8(v, w);
+-          testLessThanUint16x8(v, w);
+-          testLessThanOrEqualUint16x8(v, w);
+-          testGreaterThanUint16x8(v, w);
+-          testGreaterThanOrEqualUint16x8(v, w);
+-      }
+-  }
+-
+-  var Uint32x4val = [
+-      Uint32x4(1, 2, 3, 4),
+-      Uint32x4(-1, -2, -3, -4),
+-      Uint32x4(-1, 2, -3, 4),
+-      Uint32x4(1, -2, 3, -4),
+-      Uint32x4(UINT32_MAX, UINT32_MAX, 0, 0),
+-      Uint32x4(UINT32_MAX, 0, UINT32_MAX, 0)
+-  ];
+-
+-  for (v of Uint32x4val) {
+-      for (w of Uint32x4val) {
+-          testEqualUint32x4(v, w);
+-          testNotEqualUint32x4(v, w);
+-          testLessThanUint32x4(v, w);
+-          testLessThanOrEqualUint32x4(v, w);
+-          testGreaterThanUint32x4(v, w);
+-          testGreaterThanOrEqualUint32x4(v, w);
+-      }
+-  }
+-
+-  if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+-}
+-
+-test();
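
The hunk above removes the lane-wise comparison tests. testBinaryCompare itself is defined earlier in the deleted file, outside this excerpt; it is assumed to check each SIMD comparison operator against a per-lane scalar reference along these lines (a minimal sketch, not the harness code):

// A SIMD comparison maps two vectors to a boolean vector with the same
// lane count by applying the scalar operator lane by lane.
function referenceCompare(va, wa, scalarOp) {
    // va, wa: plain arrays holding the lane values of the two operands
    return va.map((x, i) => scalarOp(x, wa[i]));
}

console.log(referenceCompare([1, 20, 30, 4], [10, 2, 3, 40], (x, y) => x < y));
// [true, false, false, true]: the lanes Bool32x4 carries for Float32x4.lessThan.
// NaN lanes compare false under every operator except notEqual, which is why
// the value sets above mix NaN, Infinity and -0.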
+diff --git a/js/src/tests/non262/SIMD/constructors.js b/js/src/tests/non262/SIMD/constructors.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/constructors.js
++++ /dev/null
+@@ -1,226 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-var Float64x2 = SIMD.Float64x2;
+-var Float32x4 = SIMD.Float32x4;
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-var Bool8x16 = SIMD.Bool8x16;
+-var Bool16x8 = SIMD.Bool16x8;
+-var Bool32x4 = SIMD.Bool32x4;
+-var Bool64x2 = SIMD.Bool64x2;
+-
+-function TestInt8x16Ctor() {
+-    // Constructors.
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),   [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),       [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14),           [1,2,3,4,5,6,7,8,9,10,11,12,13,14,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13),               [1,2,3,4,5,6,7,8,9,10,11,12,13,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),                   [1,2,3,4,5,6,7,8,9,10,11,12,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),                       [1,2,3,4,5,6,7,8,9,10,11,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),                           [1,2,3,4,5,6,7,8,9,10,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9),                               [1,2,3,4,5,6,7,8,9,0,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8),                                  [1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7),                                     [1,2,3,4,5,6,7,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6),                                        [1,2,3,4,5,6,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5),                                           [1,2,3,4,5,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4),                                              [1,2,3,4,0,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3),                                                 [1,2,3,0,0,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2),                                                    [1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1),                                                       [1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(),                                                        [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]);
+-    assertEqX16(Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]);
+-}
+-
+-function TestInt16x8Ctor() {
+-    // Constructors.
+-    assertEqX8(Int16x8(1, 2, 3, 4, 5, 6, 7, 8),        [1,2,3,4,5,6,7,8]);
+-    assertEqX8(Int16x8(1, 2, 3, 4, 5, 6, 7),           [1,2,3,4,5,6,7,0]);
+-    assertEqX8(Int16x8(1, 2, 3, 4, 5, 6),              [1,2,3,4,5,6,0,0]);
+-    assertEqX8(Int16x8(1, 2, 3, 4, 5),                 [1,2,3,4,5,0,0,0]);
+-    assertEqX8(Int16x8(1, 2, 3, 4),                    [1,2,3,4,0,0,0,0]);
+-    assertEqX8(Int16x8(1, 2, 3),                       [1,2,3,0,0,0,0,0]);
+-    assertEqX8(Int16x8(1, 2),                          [1,2,0,0,0,0,0,0]);
+-    assertEqX8(Int16x8(1),                             [1,0,0,0,0,0,0,0]);
+-    assertEqX8(Int16x8(),                              [0,0,0,0,0,0,0,0]);
+-    assertEqX8(Int16x8(1, 2, 3, 4, 5, 6, 7, 8, 9),     [1,2,3,4,5,6,7,8]);
+-    assertEqX8(Int16x8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), [1,2,3,4,5,6,7,8]);
+-}
+-
+-function TestInt32x4Ctor() {
+-    // Constructors.
+-    assertEqX4(Int32x4(1, 2, 3, 4),         [1,2,3,4]);
+-    assertEqX4(Int32x4(1, 2, 3),            [1,2,3,0]);
+-    assertEqX4(Int32x4(1, 2),               [1,2,0,0]);
+-    assertEqX4(Int32x4(1),                  [1,0,0,0]);
+-    assertEqX4(Int32x4(),                   [0,0,0,0]);
+-    assertEqX4(Int32x4(1, 2, 3, 4, 5),      [1,2,3,4]);
+-    assertEqX4(Int32x4(1, 2, 3, 4, 5, 6),   [1,2,3,4]);
+-}
+-
+-function TestUint8x16Ctor() {
+-    // Constructors.
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),   [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),       [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14),           [1,2,3,4,5,6,7,8,9,10,11,12,13,14,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13),               [1,2,3,4,5,6,7,8,9,10,11,12,13,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),                   [1,2,3,4,5,6,7,8,9,10,11,12,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),                       [1,2,3,4,5,6,7,8,9,10,11,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),                           [1,2,3,4,5,6,7,8,9,10,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9),                               [1,2,3,4,5,6,7,8,9,0,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8),                                  [1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7),                                     [1,2,3,4,5,6,7,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6),                                        [1,2,3,4,5,6,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5),                                           [1,2,3,4,5,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4),                                              [1,2,3,4,0,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3),                                                 [1,2,3,0,0,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2),                                                    [1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1),                                                       [1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(),                                                        [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]);
+-    assertEqX16(Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]);
+-}
+-
+-function TestUint16x8Ctor() {
+-    // Constructors.
+-    assertEqX8(Uint16x8(1, 2, 3, 4, 5, 6, 7, 8),        [1,2,3,4,5,6,7,8]);
+-    assertEqX8(Uint16x8(1, 2, 3, 4, 5, 6, 7),           [1,2,3,4,5,6,7,0]);
+-    assertEqX8(Uint16x8(1, 2, 3, 4, 5, 6),              [1,2,3,4,5,6,0,0]);
+-    assertEqX8(Uint16x8(1, 2, 3, 4, 5),                 [1,2,3,4,5,0,0,0]);
+-    assertEqX8(Uint16x8(1, 2, 3, 4),                    [1,2,3,4,0,0,0,0]);
+-    assertEqX8(Uint16x8(1, 2, 3),                       [1,2,3,0,0,0,0,0]);
+-    assertEqX8(Uint16x8(1, 2),                          [1,2,0,0,0,0,0,0]);
+-    assertEqX8(Uint16x8(1),                             [1,0,0,0,0,0,0,0]);
+-    assertEqX8(Uint16x8(),                              [0,0,0,0,0,0,0,0]);
+-    assertEqX8(Uint16x8(1, 2, 3, 4, 5, 6, 7, 8, 9),     [1,2,3,4,5,6,7,8]);
+-    assertEqX8(Uint16x8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), [1,2,3,4,5,6,7,8]);
+-}
+-
+-function TestUint32x4Ctor() {
+-    // Constructors.
+-    assertEqX4(Uint32x4(1, 2, 3, 4),         [1,2,3,4]);
+-    assertEqX4(Uint32x4(1, 2, 3),            [1,2,3,0]);
+-    assertEqX4(Uint32x4(1, 2),               [1,2,0,0]);
+-    assertEqX4(Uint32x4(1),                  [1,0,0,0]);
+-    assertEqX4(Uint32x4(),                   [0,0,0,0]);
+-    assertEqX4(Uint32x4(1, 2, 3, 4, 5),      [1,2,3,4]);
+-    assertEqX4(Uint32x4(1, 2, 3, 4, 5, 6),   [1,2,3,4]);
+-}
+-
+-function TestFloat32x4Ctor() {
+-    assertEqX4(Float32x4(1, 2, 3, 4),       [1,2,3,4]);
+-    assertEqX4(Float32x4(1, 2, 3),          [1,2,3,NaN]);
+-    assertEqX4(Float32x4(1, 2),             [1,2,NaN,NaN]);
+-    assertEqX4(Float32x4(1),                [1,NaN,NaN,NaN]);
+-    assertEqX4(Float32x4(),                 [NaN,NaN,NaN,NaN]);
+-    assertEqX4(Float32x4(1, 2, 3, 4, 5),    [1,2,3,4]);
+-    assertEqX4(Float32x4(1, 2, 3, 4, 5, 6), [1,2,3,4]);
+-}
+-
+-function TestFloat64x2Ctor() {
+-    assertEqX2(Float64x2(1, 2),             [1,2]);
+-    assertEqX2(Float64x2(1),                [1,NaN]);
+-    assertEqX2(Float64x2(),                 [NaN,NaN]);
+-    assertEqX2(Float64x2(1, 2, 3),          [1,2]);
+-    assertEqX2(Float64x2(1, 2, 3, 4),       [1,2]);
+-    assertEqX2(Float64x2(1, 2, 3, 4, 5),    [1,2]);
+-    assertEqX2(Float64x2(1, 2, 3, 4, 5),    [1,2]);
+-    assertEqX2(Float64x2(1, 2, 3, 4, 5, 6), [1,2]);
+-}
+-
+-function TestBool8x16Ctor() {
+-    assertEqX16(Bool8x16(false, false, true, true, false, false, true, true, false, false, true, true, false, false, true, true),
+-                       [false, false, true, true, false, false, true, true, false, false, true, true, false, false, true, true]);
+-    assertEqX16(Bool8x16(false, true, true, false, false, true, true, false, false, true, true, false, false, true, true),
+-                       [false, true, true, false, false, true, true, false, false, true, true, false, false, true, true, false]);
+-    assertEqX16(Bool8x16(true, true, false, false, true, true, false, false, true, true, false, false, true, true),
+-                       [true, true, false, false, true, true, false, false, true, true, false, false, true, true, false, false]);
+-    assertEqX16(Bool8x16(true, false, false, true, true, false, false, true, true, false, false, true, true),
+-                       [true, false, false, true, true, false, false, true, true, false, false, true, true, false, false, false]);
+-    assertEqX16(Bool8x16(false, false, true, true, false, false, true, true, false, false, true, true),
+-                       [false, false, true, true, false, false, true, true, false, false, true, true, false, false, false, false]);
+-    assertEqX16(Bool8x16(false, true, true, false, false, true, true, false, false, true, true),
+-                       [false, true, true, false, false, true, true, false, false, true, true, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(true, true, false, false, true, true, false, false, true, true),
+-                       [true, true, false, false, true, true, false, false, true, true, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(true, false, false, true, true, false, false, true, true),
+-                       [true, false, false, true, true, false, false, true, true, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(false, false, true, true, false, false, true, true),
+-                       [false, false, true, true, false, false, true, true, false, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(false, true, true, false, false, true, true),
+-                       [false, true, true, false, false, true, true, false, false, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(true, true, false, false, true, true),
+-                       [true, true, false, false, true, true, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(true, false, false, true, true),
+-                       [true, false, false, true, true, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(false, false, true, true),
+-                       [false, false, true, true, false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(false, true, true),
+-                       [false, true, true, false, false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(true, true),
+-                       [true, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(true),
+-                       [true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(),
+-                       [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16(false, false, true, true, false, false, true, true, false, false, true, true, false, false, true, true, false),
+-                       [false, false, true, true, false, false, true, true, false, false, true, true, false, false, true, true]);
+-    assertEqX16(Bool8x16(false, false, true, true, false, false, true, true, false, false, true, true, false, false, true, true, false, true),
+-                       [false, false, true, true, false, false, true, true, false, false, true, true, false, false, true, true]);
+-}
+-
+-function TestBool16x8Ctor() {
+-    assertEqX8(Bool16x8(false, false, true, true, false, false, true, true),        [false, false, true, true, false, false, true, true]);
+-    assertEqX8(Bool16x8(false, true, true, false, false, true, true),               [false, true, true, false, false, true, true, false]);
+-    assertEqX8(Bool16x8(true, true, false, false, true, true),                      [true, true, false, false, true, true, false, false]);
+-    assertEqX8(Bool16x8(true, false, false, true, true),                            [true, false, false, true, true, false, false, false]);
+-    assertEqX8(Bool16x8(false, false, true, true),                                  [false, false, true, true, false, false, false, false]);
+-    assertEqX8(Bool16x8(false, true, true),                                         [false, true, true, false, false, false, false, false]);
+-    assertEqX8(Bool16x8(true, true),                                                [true, true, false, false, false, false, false, false]);
+-    assertEqX8(Bool16x8(true),                                                      [true, false, false, false, false, false, false, false]);
+-    assertEqX8(Bool16x8(),                                                          [false, false, false, false, false, false, false, false]);
+-    assertEqX8(Bool16x8(false, false, true, true, false, false, true, true, true),  [false, false, true, true, false, false, true, true]);
+-    assertEqX8(Bool16x8(false, false, true, true, false, false, true, true, true, true), [false, false, true, true, false, false, true, true]);
+-}
+-
+-function TestBool32x4Ctor() {
+-    assertEqX4(Bool32x4(false, false, true, true),              [false, false, true, true]);
+-    assertEqX4(Bool32x4(false, false, true),                    [false, false, true, false]);
+-    assertEqX4(Bool32x4(false, true),                           [false, true, false, false]);
+-    assertEqX4(Bool32x4(true),                                  [true, false, false, false]);
+-    assertEqX4(Bool32x4(),                                      [false, false, false, false]);
+-    assertEqX4(Bool32x4(false, false, true, true, false),       [false, false, true, true]);
+-    assertEqX4(Bool32x4(false, false, true, true, false, true), [false, false, true, true]);
+-}
+-
+-function TestBool64x2Ctor() {
+-    assertEqX2(Bool64x2(false, true),             [false, true]);
+-    assertEqX2(Bool64x2(true),                    [true, false]);
+-    assertEqX2(Bool64x2(),                        [false, false]);
+-    assertEqX2(Bool64x2(false, true, true),       [false, true]);
+-    assertEqX2(Bool64x2(false, true, true, true), [false, true]);
+-}
+-
+-function test() {
+-    TestFloat32x4Ctor();
+-    TestFloat64x2Ctor();
+-    TestInt8x16Ctor();
+-    TestInt16x8Ctor();
+-    TestInt32x4Ctor();
+-    TestUint8x16Ctor();
+-    TestUint16x8Ctor();
+-    TestUint32x4Ctor();
+-    TestBool8x16Ctor();
+-    TestBool16x8Ctor();
+-    TestBool32x4Ctor();
+-    TestBool64x2Ctor();
+-    if (typeof reportCompare === "function")
+-        reportCompare(true, true);
+-}
+-
+-test();
+-
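
The constructor tests being deleted all assert one padding rule: missing trailing arguments default to 0 for integer lanes, NaN for float lanes and false for boolean lanes, while surplus arguments are dropped. A minimal sketch of that rule (helper name hypothetical):

// Pad or truncate an argument list to the vector's lane count.
function padLanes(args, laneCount, fillValue) {
    var lanes = args.slice(0, laneCount);   // surplus arguments are ignored
    while (lanes.length < laneCount)
        lanes.push(fillValue);              // missing lanes take the default
    return lanes;
}

console.log(padLanes([1, 2, 3], 4, 0));    // [1, 2, 3, 0], as Int32x4(1, 2, 3)
console.log(padLanes([1], 2, NaN));        // [1, NaN], as Float64x2(1)
console.log(padLanes([true], 4, false));   // [true, false, false, false], as Bool32x4(true)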
+diff --git a/js/src/tests/non262/SIMD/conversions.js b/js/src/tests/non262/SIMD/conversions.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/conversions.js
++++ /dev/null
+@@ -1,1261 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-var Float32x4 = SIMD.Float32x4;
+-var Float64x2 = SIMD.Float64x2;
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-
+-function testFloat32x4FromFloat64x2Bits() {
+-  var valsExp = [
+-    [[2.000000473111868, 512.0001225471497], [1.0, 2.0, 3.0, 4.0]],
+-    [[-0, NaN], [0, -0, 0, NaN]],
+-    [[Infinity, -Infinity], [0, NaN, 0, NaN]]
+-  ];
+-
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Float32x4.fromFloat64x2Bits(Float64x2(...v)), w);
+-  }
+-}
+-
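
Every fromXxxBits test in this file builds its expectation the same way: write the source lanes through one typed-array view, then read the result back through a second view over the same buffer. A condensed sketch of the pattern (function name hypothetical):

// Reinterpret the raw 128 bits of a vector under a different lane type.
function reinterpretLanes(srcValues, SrcArray, DstArray) {
    var buf = new ArrayBuffer(16);           // one 128-bit SIMD vector
    new SrcArray(buf).set(srcValues);        // store through the source view
    return Array.from(new DstArray(buf));    // read through the target view
}

console.log(reinterpretLanes([100, 200, 300, 400], Int32Array, Float32Array));
// [1.401298464324817e-43, ...]: the same lanes Float32x4.fromInt32x4Bits
// produces in testFloat32x4FromInt32x4Bits below.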
+-function testFloat32x4FromInt8x16Bits() {
+-  function expected(v, Buffer) {
+-    var i8 = new Int8Array(new Buffer(16));
+-    var f32 = new Float32Array(i8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) i8[i] = asArr[i];
+-    return [f32[0], f32[1], f32[2], f32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+-              [INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX,
+-               INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN]];
+-  for (var v of vals) {
+-    var i = Int8x16(...v);
+-    assertEqX4(Float32x4.fromInt8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Float32x4.fromInt8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testFloat32x4FromUint8x16Bits() {
+-  function expected(v, Buffer) {
+-    var u8 = new Uint8Array(new Buffer(16));
+-    var f32 = new Float32Array(u8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) u8[i] = asArr[i];
+-    return [f32[0], f32[1], f32[2], f32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+-              [0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX,
+-               UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0]];
+-  for (var v of vals) {
+-    var i = Uint8x16(...v);
+-    assertEqX4(Float32x4.fromUint8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Float32x4.fromUint8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testFloat32x4FromInt16x8Bits() {
+-  function expected(v, Buffer) {
+-    var i16 = new Int16Array(new Buffer(16));
+-    var f32 = new Float32Array(i16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) i16[i] = asArr[i];
+-    return [f32[0], f32[1], f32[2], f32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8],
+-              [INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX]];
+-  for (var v of vals) {
+-    var i = Int16x8(...v);
+-    assertEqX4(Float32x4.fromInt16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Float32x4.fromInt16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testFloat32x4FromUint16x8Bits() {
+-  function expected(v, Buffer) {
+-    var u16 = new Uint16Array(new Buffer(16));
+-    var f32 = new Float32Array(u16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) u16[i] = asArr[i];
+-    return [f32[0], f32[1], f32[2], f32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8],
+-              [0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX]];
+-  for (var v of vals) {
+-    var i = Uint16x8(...v);
+-    assertEqX4(Float32x4.fromUint16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Float32x4.fromUint16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testFloat32x4FromInt32x4() {
+-  function expected(v) {
+-    return v.map(Math.fround);
+-  }
+-  var vals = [
+-    [1, 2, 3, 4],
+-    [INT32_MIN, INT32_MAX, Math.pow(2, 30) - 1, -Math.pow(2, 30)]
+-  ];
+-
+-  for (var v of vals) {
+-    assertEqX4(Float32x4.fromInt32x4(Int32x4(...v)), expected(v));
+-  }
+-
+-  // Check that rounding to nearest, even is applied.
+-  {
+-      var num = makeFloat(0, 150 + 2, 0);
+-      var next = makeFloat(0, 150 + 2, 1);
+-      assertEq(num + 4, next);
+-
+-      v = Float32x4.fromInt32x4(Int32x4(num, num + 1, num + 2, num + 3));
+-      assertEqX4(v, [num, num, /* even */ num, next]);
+-  }
+-
+-  {
+-      var num = makeFloat(0, 150 + 2, 1);
+-      var next = makeFloat(0, 150 + 2, 2);
+-      assertEq(num + 4, next);
+-
+-      v = Float32x4.fromInt32x4(Int32x4(num, num + 1, num + 2, num + 3));
+-      assertEqX4(v, [num, num, /* even */ next, next]);
+-  }
+-
+-  {
+-      var last = makeFloat(0, 157, 0x7fffff);
+-
+-      assertEq(last, Math.fround(last), "float");
+-      assertEq(last < Math.pow(2, 31), true, "less than 2**31");
+-      assertEq(last | 0, last, "it should be an integer, as exponent >= 150");
+-
+-      var diff = (Math.pow(2, 31) - 1) - last;
+-      v = Float32x4.fromInt32x4(Int32x4(Math.pow(2, 31) - 1,
+-                                        Math.pow(2, 30) + 1,
+-                                        last + (diff / 2) | 0,      // nearest is last
+-                                        last + (diff / 2) + 1 | 0   // nearest is Math.pow(2, 31)
+-                                ));
+-      assertEqX4(v, [Math.pow(2, 31),
+-                     Math.pow(2, 30),
+-                     last,
+-                     Math.pow(2, 31)
+-                    ]);
+-  }
+-}
+-
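
The rounding blocks above (and their duplicates in the Uint32x4 test that follows) depend on IEEE-754 round-to-nearest, ties-to-even. With a biased exponent of 150 + 2 the single-precision ULP is 4, so num + 2 sits exactly halfway between two representable floats and resolves toward the even mantissa. A scalar sketch with Math.fround, assuming the int-to-float lane conversion rounds the same way:

var num = Math.pow(2, 25);                      // makeFloat(0, 150 + 2, 0): mantissa 0
console.log(Math.fround(num + 1) === num);      // true: below the halfway point
console.log(Math.fround(num + 2) === num);      // true: tie, mantissa 0 is already even
var odd = num + 4;                              // makeFloat(0, 150 + 2, 1): mantissa 1
console.log(Math.fround(odd + 2) === odd + 4);  // true: tie, rounds up to even mantissa 2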
+-function testFloat32x4FromUint32x4() {
+-  function expected(v) {
+-    return v.map(Math.fround);
+-  }
+-  var vals = [
+-    [1, 2, 3, 4],
+-    [0, UINT32_MAX, Math.pow(2, 30) - 1, Math.pow(2, 31)]
+-  ];
+-
+-  for (var v of vals) {
+-    assertEqX4(Float32x4.fromUint32x4(Uint32x4(...v)), expected(v));
+-  }
+-
+-  // Check that rounding to nearest, even is applied.
+-  {
+-      var num = makeFloat(0, 150 + 2, 0);
+-      var next = makeFloat(0, 150 + 2, 1);
+-      assertEq(num + 4, next);
+-
+-      v = Float32x4.fromUint32x4(Uint32x4(num, num + 1, num + 2, num + 3));
+-      assertEqX4(v, [num, num, /* even */ num, next]);
+-  }
+-
+-  {
+-      var num = makeFloat(0, 150 + 2, 1);
+-      var next = makeFloat(0, 150 + 2, 2);
+-      assertEq(num + 4, next);
+-
+-      v = Float32x4.fromUint32x4(Uint32x4(num, num + 1, num + 2, num + 3));
+-      assertEqX4(v, [num, num, /* even */ next, next]);
+-  }
+-
+-  {
+-      var last = makeFloat(0, 157, 0x7fffff);
+-
+-      assertEq(last, Math.fround(last), "float");
+-      assertEq(last < Math.pow(2, 31), true, "less than 2**31");
+-      assertEq(last | 0, last, "it should be an integer, as exponent >= 150");
+-
+-      var diff = (Math.pow(2, 31) - 1) - last;
+-      v = Float32x4.fromUint32x4(Uint32x4(Math.pow(2, 31) - 1,
+-                                        Math.pow(2, 30) + 1,
+-                                        last + (diff / 2) | 0,      // nearest is last
+-                                        last + (diff / 2) + 1 | 0   // nearest is Math.pow(2, 31)
+-                                ));
+-      assertEqX4(v, [Math.pow(2, 31),
+-                     Math.pow(2, 30),
+-                     last,
+-                     Math.pow(2, 31)
+-                    ]);
+-  }
+-}
+-
+-function testFloat32x4FromInt32x4Bits() {
+-  var valsExp = [
+-    [[100, 200, 300, 400], [1.401298464324817e-43, 2.802596928649634e-43, 4.203895392974451e-43, 5.605193857299268e-43]],
+-    [[INT32_MIN, INT32_MAX, 0, 0], [-0, NaN, 0, 0]]
+-  ];
+-
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Float32x4.fromInt32x4Bits(Int32x4(...v)), w);
+-  }
+-}
+-
+-function testFloat32x4FromUint32x4Bits() {
+-  var valsExp = [
+-    [[100, 200, 300, 400], [1.401298464324817e-43, 2.802596928649634e-43, 4.203895392974451e-43, 5.605193857299268e-43]],
+-    [[INT32_MIN, INT32_MAX, 0, 0], [-0, NaN, 0, 0]]
+-  ];
+-
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Float32x4.fromUint32x4Bits(Uint32x4(...v)), w);
+-  }
+-}
+-
+-function testFloat64x2FromFloat32x4Bits() {
+-  var valsExp = [
+-    [[0, 1.875, 0, 2], [1.0, 2.0]],
+-    [[NaN, -0, Infinity, -Infinity], [-1.058925634e-314, -1.404448428688076e+306]]
+-  ];
+-
+-  for (var [v,w] of valsExp) {
+-    assertEqX2(Float64x2.fromFloat32x4Bits(Float32x4(...v)), w);
+-  }
+-}
+-
+-function testFloat64x2FromInt8x16Bits() {
+-  function expected(v, Buffer) {
+-    var i8 = new Int8Array(new Buffer(16));
+-    var f64 = new Float64Array(i8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) i8[i] = asArr[i];
+-    return [f64[0], f64[1]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+-              [INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX,
+-               INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN]];
+-
+-  for (var v of vals) {
+-    var f = Int8x16(...v);
+-    assertEqX2(Float64x2.fromInt8x16Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX2(Float64x2.fromInt8x16Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testFloat64x2FromUint8x16Bits() {
+-  function expected(v, Buffer) {
+-    var u8 = new Uint8Array(new Buffer(16));
+-    var f64 = new Float64Array(u8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) u8[i] = asArr[i];
+-    return [f64[0], f64[1]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+-              [0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX,
+-               UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0]];
+-
+-  for (var v of vals) {
+-    var f = Uint8x16(...v);
+-    assertEqX2(Float64x2.fromUint8x16Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX2(Float64x2.fromUint8x16Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testFloat64x2FromInt16x8Bits() {
+-  function expected(v, Buffer) {
+-    var i16 = new Int16Array(new Buffer(16));
+-    var f64 = new Float64Array(i16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) i16[i] = asArr[i];
+-    return [f64[0], f64[1]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8],
+-              [INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX]];
+-
+-  for (var v of vals) {
+-    var f = Int16x8(...v);
+-    assertEqX2(Float64x2.fromInt16x8Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX2(Float64x2.fromInt16x8Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testFloat64x2FromUint16x8Bits() {
+-  function expected(v, Buffer) {
+-    var u16 = new Uint16Array(new Buffer(16));
+-    var f64 = new Float64Array(u16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) u16[i] = asArr[i];
+-    return [f64[0], f64[1]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8],
+-              [0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX]];
+-
+-  for (var v of vals) {
+-    var f = Uint16x8(...v);
+-    assertEqX2(Float64x2.fromUint16x8Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX2(Float64x2.fromUint16x8Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testFloat64x2FromInt32x4Bits() {
+-  var valsExp = [
+-    [[0x00000000, 0x3ff00000, 0x0000000, 0x40000000], [1.0, 2.0]],
+-    [[0xabcdef12, 0x3ff00000, 0x21fedcba, 0x40000000], [1.0000006400213732, 2.0000002532866263]]
+-  ];
+-
+-  for (var [v,w] of valsExp) {
+-    assertEqX2(Float64x2.fromInt32x4Bits(Int32x4(...v)), w);
+-  }
+-}
+-
+-function testFloat64x2FromUint32x4Bits() {
+-  var valsExp = [
+-    [[0x00000000, 0x3ff00000, 0x0000000, 0x40000000], [1.0, 2.0]],
+-    [[0xabcdef12, 0x3ff00000, 0x21fedcba, 0x40000000], [1.0000006400213732, 2.0000002532866263]]
+-  ];
+-
+-  for (var [v,w] of valsExp) {
+-    assertEqX2(Float64x2.fromUint32x4Bits(Uint32x4(...v)), w);
+-  }
+-}
+-
+-function testInt32x4FromFloat32x4() {
+-  var d = Float32x4(1.1, 2.2, 3.3, 4.6);
+-  assertEqX4(Int32x4.fromFloat32x4(d), [1, 2, 3, 4]);
+-
+-  var d = Float32x4(NaN, 0, 0, 0);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.fromFloat32x4(d), RangeError);
+-
+-  var d = Float32x4(Infinity, 0, 0, 0);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.fromFloat32x4(d), RangeError);
+-
+-  var d = Float32x4(-Infinity, 0, 0, 0);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.fromFloat32x4(d), RangeError);
+-
+-  // Test high boundaries: float(0, 157, 0x7fffff) < INT32_MAX < float(0, 158, 0)
+-  var d = Float32x4(makeFloat(0, 127 + 31, 0), 0, 0, 0);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.fromFloat32x4(d), RangeError);
+-
+-  var lastFloat = makeFloat(0, 127 + 30, 0x7FFFFF);
+-  var d = Float32x4(lastFloat, 0, 0, 0);
+-  var e = SIMD.Int32x4.fromFloat32x4(d);
+-  assertEqX4(e, [lastFloat, 0, 0, 0]);
+-
+-  // Test low boundaries
+-  assertEq(makeFloat(1, 127 + 31, 0), INT32_MIN);
+-  var d = Float32x4(makeFloat(1, 127 + 31, 0), 0, 0, 0);
+-  var e = SIMD.Int32x4.fromFloat32x4(d);
+-  assertEqX4(e, [INT32_MIN, 0, 0, 0]);
+-
+-  var d = Float32x4(makeFloat(1, 127 + 31, 1), 0, 0, 0);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.fromFloat32x4(d), RangeError);
+-}
+-
+-function testUint32x4FromFloat32x4() {
+-  var d = Float32x4(1.1, 2.2, -0.9, 4.6);
+-  assertEqX4(Uint32x4.fromFloat32x4(d), [1, 2, 0, 4]);
+-
+-  var d = Float32x4(NaN, 0, 0, 0);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.fromFloat32x4(d), RangeError);
+-
+-  var d = Float32x4(Infinity, 0, 0, 0);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.fromFloat32x4(d), RangeError);
+-
+-  var d = Float32x4(-Infinity, 0, 0, 0);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.fromFloat32x4(d), RangeError);
+-
+-  // Test high boundaries: float(0, 158, 0x7fffff) < UINT32_MAX < float(0, 159, 0)
+-  var d = Float32x4(makeFloat(0, 127 + 32, 0), 0, 0, 0);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.fromFloat32x4(d), RangeError);
+-
+-  var lastFloat = makeFloat(0, 127 + 31, 0x7FFFFF);
+-  var d = Float32x4(lastFloat, 0, 0, 0);
+-  var e = SIMD.Uint32x4.fromFloat32x4(d);
+-  assertEqX4(e, [lastFloat, 0, 0, 0]);
+-}
+-
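
Both fromFloat32x4 tests above assert the same two rules: in-range lanes truncate toward zero, and NaN, infinities or out-of-range lanes raise a RangeError. A scalar sketch of the Int32 case, assuming the SIMD lane conversion matches scalar truncation:

// Truncate toward zero, rejecting values whose truncation leaves Int32 range.
function toInt32Lane(x) {
    if (!(x > -2147483649 && x < 2147483648))  // also rejects NaN
        throw new RangeError("value out of Int32 range");
    return Math.trunc(x);
}

console.log(toInt32Lane(4.6));    // 4
console.log(toInt32Lane(-2.9));   // -2
// toInt32Lane(NaN) and toInt32Lane(Math.pow(2, 31)) both throw RangeError.
// The Uint32 case is assumed analogous over the open interval (-1, 2**32).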
+-function testInt32x4FromFloat32x4Bits() {
+-  var valsExp = [
+-    [[1, 2, 3, 4], [0x3f800000 | 0, 0x40000000 | 0, 0x40400000 | 0, 0x40800000 | 0]],
+-    [[NaN, -0, Infinity, -Infinity], [0x7fc00000 | 0, 0x80000000 | 0, 0x7f800000 | 0, 0xff800000 | 0]]
+-  ];
+-
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Int32x4.fromFloat32x4Bits(Float32x4(...v)), w);
+-  }
+-}
+-
+-function testUint32x4FromFloat32x4Bits() {
+-  var valsExp = [
+-    [[1, 2, 3, 4], [0x3f800000, 0x40000000, 0x40400000, 0x40800000]],
+-    [[NaN, -0, Infinity, -Infinity], [0x7fc00000, 0x80000000, 0x7f800000, 0xff800000]]
+-  ];
+-
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Uint32x4.fromFloat32x4Bits(Float32x4(...v)), w);
+-  }
+-}
+-
+-function testInt32x4FromFloat64x2Bits() {
+-  var valsExp = [
+-    [[1.0, 2.0], [0x00000000, 0x3FF00000, 0x00000000, 0x40000000]],
+-    [[+Infinity, -Infinity], [0x00000000, 0x7ff00000, 0x00000000, -0x100000]],
+-    [[-0, NaN], [0x00000000, -0x80000000, 0x00000000, 0x7ff80000]],
+-    [[1.0000006400213732, 2.0000002532866263], [-0x543210ee, 0x3ff00000, 0x21fedcba, 0x40000000]]
+-  ];
+-
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Int32x4.fromFloat64x2Bits(Float64x2(...v)), w);
+-  }
+-}
+-
+-function testUint32x4FromFloat64x2Bits() {
+-  var valsExp = [
+-    [[1.0, 2.0], [0x00000000, 0x3FF00000, 0x00000000, 0x40000000]],
+-    [[+Infinity, -Infinity], [0x00000000, 0x7ff00000, 0x00000000, 0xfff00000]],
+-    [[-0, NaN], [0x00000000, 0x80000000, 0x00000000, 0x7ff80000]],
+-    [[1.0000006400213732, 2.0000002532866263], [0xabcdef12, 0x3ff00000, 0x21fedcba, 0x40000000]]
+-  ];
+-
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Uint32x4.fromFloat64x2Bits(Float64x2(...v)), w);
+-  }
+-}
+-
+-function testInt32x4FromInt8x16Bits() {
+-  function expected(v, Buffer) {
+-    var i8 = new Int8Array(new Buffer(16));
+-    var i32 = new Int32Array(i8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) i8[i] = asArr[i];
+-    return [i32[0], i32[1], i32[2], i32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+-              [INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX,
+-               INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN]];
+-
+-  for (var v of vals) {
+-    var i = Int8x16(...v);
+-    assertEqX4(Int32x4.fromInt8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Int32x4.fromInt8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt32x4FromUint8x16Bits() {
+-  function expected(v, Buffer) {
+-    var u8 = new Uint8Array(new Buffer(16));
+-    var i32 = new Int32Array(u8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) u8[i] = asArr[i];
+-    return [i32[0], i32[1], i32[2], i32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+-              [0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX,
+-               UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0]];
+-
+-  for (var v of vals) {
+-    var i = Uint8x16(...v);
+-    assertEqX4(Int32x4.fromUint8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Int32x4.fromUint8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt32x4FromInt16x8Bits() {
+-  function expected(v, Buffer) {
+-    var i16 = new Int16Array(new Buffer(16));
+-    var i32 = new Int32Array(i16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) i16[i] = asArr[i];
+-    return [i32[0], i32[1], i32[2], i32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8],
+-              [INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX]];
+-
+-  for (var v of vals) {
+-    var i = Int16x8(...v);
+-    assertEqX4(Int32x4.fromInt16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Int32x4.fromInt16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt32x4FromUint16x8Bits() {
+-  function expected(v, Buffer) {
+-    var u16 = new Uint16Array(new Buffer(16));
+-    var i32 = new Int32Array(u16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) u16[i] = asArr[i];
+-    return [i32[0], i32[1], i32[2], i32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8],
+-              [0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX]];
+-
+-  for (var v of vals) {
+-    var i = Uint16x8(...v);
+-    assertEqX4(Int32x4.fromUint16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Int32x4.fromUint16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt32x4FromUint32x4Bits() {
+-  function expected(v, Buffer) {
+-    var u32 = new Uint32Array(new Buffer(16));
+-    var i32 = new Int32Array(u32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) u32[i] = asArr[i];
+-    return [i32[0], i32[1], i32[2], i32[3]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3], [INT8_MIN, UINT32_MAX, INT32_MIN, INT32_MAX]];
+-
+-  for (var v of vals) {
+-    var i = Uint32x4(...v);
+-    assertEqX4(Int32x4.fromUint32x4Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Int32x4.fromUint32x4Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt8x16FromFloat32x4Bits() {
+-  function expected(v, Buffer) {
+-    var f32 = new Float32Array(new Buffer(16));
+-    var i8 = new Int8Array(f32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) f32[i] = asArr[i];
+-    return [i8[0], i8[1], i8[2], i8[3], i8[4], i8[5], i8[6], i8[7],
+-            i8[8], i8[9], i8[10], i8[11], i8[12], i8[13], i8[14], i8[15]];
+-  }
+-
+-  var vals = [[1, -2, 3, -4], [Infinity, -Infinity, NaN, -0]];
+-
+-  for (var v of vals) {
+-    var f = Float32x4(...v);
+-    assertEqX16(Int8x16.fromFloat32x4Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Int8x16.fromFloat32x4Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt8x16FromFloat64x2Bits() {
+-  function expected(v, Buffer) {
+-    var f64 = new Float64Array(new Buffer(16));
+-    var i8 = new Int8Array(f64.buffer);
+-    f64[0] = Float64x2.extractLane(v, 0);
+-    f64[1] = Float64x2.extractLane(v, 1);
+-    return [i8[0], i8[1], i8[2], i8[3], i8[4], i8[5], i8[6], i8[7],
+-            i8[8], i8[9], i8[10], i8[11], i8[12], i8[13], i8[14], i8[15]];
+-  }
+-  var vals = [[1, -2], [-3, 4], [Infinity, -Infinity], [NaN, -0]];
+-
+-  for (var v of vals) {
+-    var f = Float64x2(...v);
+-    assertEqX16(Int8x16.fromFloat64x2Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Int8x16.fromFloat64x2Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt8x16FromUint8x16Bits() {
+-  function expected(v, Buffer) {
+-    var u8 = new Uint8Array(new Buffer(16));
+-    var i8 = new Int8Array(u8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) u8[i] = asArr[i];
+-    return [i8[0], i8[1], i8[2], i8[3], i8[4], i8[5], i8[6], i8[7],
+-            i8[8], i8[9], i8[10], i8[11], i8[12], i8[13], i8[14], i8[15]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3, -4, 5, INT8_MIN, UINT8_MAX, -6, 7, -8, 9, -10, 11, -12, 13]];
+-
+-  for (var v of vals) {
+-    var i = Uint8x16(...v);
+-    assertEqX16(Int8x16.fromUint8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Int8x16.fromUint8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt8x16FromInt16x8Bits() {
+-  function expected(v, Buffer) {
+-    var i16 = new Int16Array(new Buffer(16));
+-    var i8 = new Int8Array(i16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) i16[i] = asArr[i];
+-    return [i8[0], i8[1], i8[2], i8[3], i8[4], i8[5], i8[6], i8[7],
+-            i8[8], i8[9], i8[10], i8[11], i8[12], i8[13], i8[14], i8[15]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3, INT8_MIN, INT8_MAX, INT16_MIN, INT16_MAX]];
+-  for (var v of vals) {
+-    var i = Int16x8(...v);
+-    assertEqX16(Int8x16.fromInt16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Int8x16.fromInt16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt8x16FromUint16x8Bits() {
+-  function expected(v, Buffer) {
+-    var u16 = new Uint16Array(new Buffer(16));
+-    var i8 = new Int8Array(u16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) u16[i] = asArr[i];
+-    return [i8[0], i8[1], i8[2], i8[3], i8[4], i8[5], i8[6], i8[7],
+-            i8[8], i8[9], i8[10], i8[11], i8[12], i8[13], i8[14], i8[15]];
+-  }
+-
+-  var vals = [[0, 1, -2, UINT16_MAX, INT8_MIN, INT8_MAX, INT16_MIN, INT16_MAX]];
+-  for (var v of vals) {
+-    var i = Uint16x8(...v);
+-    assertEqX16(Int8x16.fromUint16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Int8x16.fromUint16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt8x16FromInt32x4Bits() {
+-  function expected(v, Buffer) {
+-    var i32 = new Int32Array(new Buffer(16));
+-    var i8 = new Int8Array(i32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) i32[i] = asArr[i];
+-    return [i8[0], i8[1], i8[2], i8[3], i8[4], i8[5], i8[6], i8[7],
+-            i8[8], i8[9], i8[10], i8[11], i8[12], i8[13], i8[14], i8[15]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3], [INT8_MIN, INT8_MAX, INT32_MIN, INT32_MAX]];
+-  for (var v of vals) {
+-    var i = Int32x4(...v);
+-    assertEqX16(Int8x16.fromInt32x4Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Int8x16.fromInt32x4Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt8x16FromUint32x4Bits() {
+-  function expected(v, Buffer) {
+-    var u32 = new Uint32Array(new Buffer(16));
+-    var i8 = new Int8Array(u32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) u32[i] = asArr[i];
+-    return [i8[0], i8[1], i8[2], i8[3], i8[4], i8[5], i8[6], i8[7],
+-            i8[8], i8[9], i8[10], i8[11], i8[12], i8[13], i8[14], i8[15]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3], [INT8_MIN, INT8_MAX, INT32_MIN, INT32_MAX]];
+-  for (var v of vals) {
+-    var i = Uint32x4(...v);
+-    assertEqX16(Int8x16.fromUint32x4Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Int8x16.fromUint32x4Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt16x8FromFloat32x4Bits() {
+-  function expected(v, Buffer) {
+-    var f32 = new Float32Array(new Buffer(16));
+-    var i16 = new Int16Array(f32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) f32[i] = asArr[i];
+-    return [i16[0], i16[1], i16[2], i16[3], i16[4], i16[5], i16[6], i16[7]];
+-  }
+-
+-  var vals = [[1, -2, 3, -4], [Infinity, -Infinity, NaN, -0]];
+-
+-  for (var v of vals) {
+-    var f = Float32x4(...v);
+-    assertEqX8(Int16x8.fromFloat32x4Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Int16x8.fromFloat32x4Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt16x8FromFloat64x2Bits() {
+-  function expected(v, Buffer) {
+-    var f64 = new Float64Array(new Buffer(16));
+-    var i16 = new Int16Array(f64.buffer);
+-    f64[0] = Float64x2.extractLane(v, 0);
+-    f64[1] = Float64x2.extractLane(v, 1);
+-    return [i16[0], i16[1], i16[2], i16[3], i16[4], i16[5], i16[6], i16[7]];
+-  }
+-
+-  var vals = [[1, -2], [-3, 4], [Infinity, -Infinity], [NaN, -0]];
+-
+-  for (var v of vals) {
+-    var f = Float64x2(...v);
+-    assertEqX8(Int16x8.fromFloat64x2Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Int16x8.fromFloat64x2Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt16x8FromInt8x16Bits() {
+-  function expected(v, Buffer) {
+-    var i8 = new Int8Array(new Buffer(16));
+-    var i16 = new Int16Array(i8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) i8[i] = asArr[i];
+-    return [i16[0], i16[1], i16[2], i16[3], i16[4], i16[5], i16[6], i16[7]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3, -4, 5, INT8_MIN, INT8_MAX, -6, 7, -8, 9, -10, 11, -12, 13]];
+-
+-  for (var v of vals) {
+-    var i = Int8x16(...v);
+-    assertEqX8(Int16x8.fromInt8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Int16x8.fromInt8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt16x8FromUint8x16Bits() {
+-  function expected(v, Buffer) {
+-    var u8 = new Uint8Array(new Buffer(16));
+-    var i16 = new Int16Array(u8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) u8[i] = asArr[i];
+-    return [i16[0], i16[1], i16[2], i16[3], i16[4], i16[5], i16[6], i16[7]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3, -4, UINT8_MAX, INT8_MIN, INT8_MAX, -6, 7, -8, 9, -10, 11, -12, 13]];
+-
+-  for (var v of vals) {
+-    var i = Uint8x16(...v);
+-    assertEqX8(Int16x8.fromUint8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Int16x8.fromUint8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt16x8FromUint16x8Bits() {
+-  function expected(v, Buffer) {
+-    var u16 = new Uint16Array(new Buffer(16));
+-    var i16 = new Int16Array(u16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) u16[i] = asArr[i];
+-    return [i16[0], i16[1], i16[2], i16[3], i16[4], i16[5], i16[6], i16[7]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8],
+-              [0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX]];
+-
+-  for (var v of vals) {
+-    var i = Uint16x8(...v);
+-    assertEqX8(Int16x8.fromUint16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Int16x8.fromUint16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt16x8FromInt32x4Bits() {
+-  function expected(v, Buffer) {
+-    var i32 = new Int32Array(new Buffer(16));
+-    var i16 = new Int16Array(i32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) i32[i] = asArr[i];
+-    return [i16[0], i16[1], i16[2], i16[3], i16[4], i16[5], i16[6], i16[7]];
+-  }
+-
+-  var vals = [[1, -2, -3, 4], [INT16_MAX, INT16_MIN, INT32_MAX, INT32_MIN]];
+-
+-  for (var v of vals) {
+-    var i = Int32x4(...v);
+-    assertEqX8(Int16x8.fromInt32x4Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Int16x8.fromInt32x4Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testInt16x8FromUint32x4Bits() {
+-  function expected(v, Buffer) {
+-    var u32 = new Uint32Array(new Buffer(16));
+-    var i16 = new Int16Array(u32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) u32[i] = asArr[i];
+-    return [i16[0], i16[1], i16[2], i16[3], i16[4], i16[5], i16[6], i16[7]];
+-  }
+-
+-  var vals = [[1, -2, -3, 4], [INT16_MAX, INT16_MIN, INT32_MAX, INT32_MIN]];
+-
+-  for (var v of vals) {
+-    var i = Uint32x4(...v);
+-    assertEqX8(Int16x8.fromUint32x4Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Int16x8.fromUint32x4Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint32x4FromInt8x16Bits() {
+-  function expected(v, Buffer) {
+-    var i8 = new Int8Array(new Buffer(16));
+-    var u32 = new Uint32Array(i8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) i8[i] = asArr[i];
+-    return [u32[0], u32[1], u32[2], u32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+-              [INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX,
+-               INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN, INT8_MAX, INT8_MIN]];
+-
+-  for (var v of vals) {
+-    var i = Int8x16(...v);
+-    assertEqX4(Uint32x4.fromInt8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Uint32x4.fromInt8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint32x4FromUint8x16Bits() {
+-  function expected(v, Buffer) {
+-    var u8 = new Uint8Array(new Buffer(16));
+-    var u32 = new Uint32Array(u8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) u8[i] = asArr[i];
+-    return [u32[0], u32[1], u32[2], u32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+-              [0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX,
+-               UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0, UINT8_MAX, 0]];
+-
+-  for (var v of vals) {
+-    var i = Uint8x16(...v);
+-    assertEqX4(Uint32x4.fromUint8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Uint32x4.fromUint8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint32x4FromInt16x8Bits() {
+-  function expected(v, Buffer) {
+-    var i16 = new Int16Array(new Buffer(16));
+-    var u32 = new Uint32Array(i16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) i16[i] = asArr[i];
+-    return [u32[0], u32[1], u32[2], u32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8],
+-              [INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX]];
+-
+-  for (var v of vals) {
+-    var i = Int16x8(...v);
+-    assertEqX4(Uint32x4.fromInt16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Uint32x4.fromInt16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint32x4FromUint16x8Bits() {
+-  function expected(v, Buffer) {
+-    var u16 = new Uint16Array(new Buffer(16));
+-    var u32 = new Uint32Array(u16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) u16[i] = asArr[i];
+-    return [u32[0], u32[1], u32[2], u32[3]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8],
+-              [0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX]];
+-
+-  for (var v of vals) {
+-    var i = Uint16x8(...v);
+-    assertEqX4(Uint32x4.fromUint16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Uint32x4.fromUint16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint32x4FromInt32x4Bits() {
+-  function expected(v, Buffer) {
+-    var i32 = new Int32Array(new Buffer(16));
+-    var u32 = new Uint32Array(i32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) i32[i] = asArr[i];
+-    return [u32[0], u32[1], u32[2], u32[3]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3], [INT8_MIN, UINT32_MAX, INT32_MIN, INT32_MAX]];
+-
+-  for (var v of vals) {
+-    var i = Int32x4(...v);
+-    assertEqX4(Uint32x4.fromInt32x4Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX4(Uint32x4.fromInt32x4Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint8x16FromFloat32x4Bits() {
+-  function expected(v, Buffer) {
+-    var f32 = new Float32Array(new Buffer(16));
+-    var u8 = new Uint8Array(f32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) f32[i] = asArr[i];
+-    return [u8[0], u8[1], u8[2], u8[3], u8[4], u8[5], u8[6], u8[7],
+-            u8[8], u8[9], u8[10], u8[11], u8[12], u8[13], u8[14], u8[15]];
+-  }
+-
+-  var vals = [[1, -2, 3, -4], [Infinity, -Infinity, NaN, -0]];
+-
+-  for (var v of vals) {
+-    var f = Float32x4(...v);
+-    assertEqX16(Uint8x16.fromFloat32x4Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Uint8x16.fromFloat32x4Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint8x16FromFloat64x2Bits() {
+-  function expected(v, Buffer) {
+-    var f64 = new Float64Array(new Buffer(16));
+-    var u8 = new Uint8Array(f64.buffer);
+-    f64[0] = Float64x2.extractLane(v, 0);
+-    f64[1] = Float64x2.extractLane(v, 1);
+-    return [u8[0], u8[1], u8[2], u8[3], u8[4], u8[5], u8[6], u8[7],
+-            u8[8], u8[9], u8[10], u8[11], u8[12], u8[13], u8[14], u8[15]];
+-  }
+-  var vals = [[1, -2], [-3, 4], [Infinity, -Infinity], [NaN, -0]];
+-
+-  for (var v of vals) {
+-    var f = Float64x2(...v);
+-    assertEqX16(Uint8x16.fromFloat64x2Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Uint8x16.fromFloat64x2Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint8x16FromInt8x16Bits() {
+-  function expected(v, Buffer) {
+-    var i8 = new Int8Array(new Buffer(16));
+-    var u8 = new Uint8Array(i8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) i8[i] = asArr[i];
+-    return [u8[0], u8[1], u8[2], u8[3], u8[4], u8[5], u8[6], u8[7],
+-            u8[8], u8[9], u8[10], u8[11], u8[12], u8[13], u8[14], u8[15]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3, -4, 5, INT8_MIN, UINT8_MAX, -6, 7, INT8_MAX, 9, -10, 11, -12, 13]];
+-
+-  for (var v of vals) {
+-    var i = Int8x16(...v);
+-    assertEqX16(Uint8x16.fromInt8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Uint8x16.fromInt8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint8x16FromInt16x8Bits() {
+-  function expected(v, Buffer) {
+-    var i16 = new Int16Array(new Buffer(16));
+-    var u8 = new Uint8Array(i16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) i16[i] = asArr[i];
+-    return [u8[0], u8[1], u8[2], u8[3], u8[4], u8[5], u8[6], u8[7],
+-            u8[8], u8[9], u8[10], u8[11], u8[12], u8[13], u8[14], u8[15]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3, INT8_MIN, INT8_MAX, INT16_MIN, INT16_MAX]];
+-  for (var v of vals) {
+-    var i = Int16x8(...v);
+-    assertEqX16(Uint8x16.fromInt16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Uint8x16.fromInt16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint8x16FromUint16x8Bits() {
+-  function expected(v, Buffer) {
+-    var u16 = new Uint16Array(new Buffer(16));
+-    var u8 = new Uint8Array(u16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) u16[i] = asArr[i];
+-    return [u8[0], u8[1], u8[2], u8[3], u8[4], u8[5], u8[6], u8[7],
+-            u8[8], u8[9], u8[10], u8[11], u8[12], u8[13], u8[14], u8[15]];
+-  }
+-
+-  var vals = [[0, 1, -2, UINT16_MAX, INT8_MIN, INT8_MAX, INT16_MIN, INT16_MAX]];
+-  for (var v of vals) {
+-    var i = Uint16x8(...v);
+-    assertEqX16(Uint8x16.fromUint16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Uint8x16.fromUint16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint8x16FromInt32x4Bits() {
+-  function expected(v, Buffer) {
+-    var i32 = new Int32Array(new Buffer(16));
+-    var u8 = new Uint8Array(i32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) i32[i] = asArr[i];
+-    return [u8[0], u8[1], u8[2], u8[3], u8[4], u8[5], u8[6], u8[7],
+-            u8[8], u8[9], u8[10], u8[11], u8[12], u8[13], u8[14], u8[15]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3], [INT8_MIN, INT8_MAX, INT32_MIN, INT32_MAX]];
+-  for (var v of vals) {
+-    var i = Int32x4(...v);
+-    assertEqX16(Uint8x16.fromInt32x4Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Uint8x16.fromInt32x4Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint8x16FromUint32x4Bits() {
+-  function expected(v, Buffer) {
+-    var u32 = new Uint32Array(new Buffer(16));
+-    var u8 = new Uint8Array(u32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) u32[i] = asArr[i];
+-    return [u8[0], u8[1], u8[2], u8[3], u8[4], u8[5], u8[6], u8[7],
+-            u8[8], u8[9], u8[10], u8[11], u8[12], u8[13], u8[14], u8[15]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3], [INT8_MIN, INT8_MAX, INT32_MIN, INT32_MAX]];
+-  for (var v of vals) {
+-    var i = Uint32x4(...v);
+-    assertEqX16(Uint8x16.fromUint32x4Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX16(Uint8x16.fromUint32x4Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint16x8FromFloat32x4Bits() {
+-  function expected(v, Buffer) {
+-    var f32 = new Float32Array(new Buffer(16));
+-    var u16 = new Uint16Array(f32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) f32[i] = asArr[i];
+-    return [u16[0], u16[1], u16[2], u16[3], u16[4], u16[5], u16[6], u16[7]];
+-  }
+-
+-  var vals = [[1, -2, 3, -4], [Infinity, -Infinity, NaN, -0]];
+-
+-  for (var v of vals) {
+-    var f = Float32x4(...v);
+-    assertEqX8(Uint16x8.fromFloat32x4Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Uint16x8.fromFloat32x4Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint16x8FromFloat64x2Bits() {
+-  function expected(v, Buffer) {
+-    var f64 = new Float64Array(new Buffer(16));
+-    var u16 = new Uint16Array(f64.buffer);
+-    f64[0] = Float64x2.extractLane(v, 0);
+-    f64[1] = Float64x2.extractLane(v, 1);
+-    return [u16[0], u16[1], u16[2], u16[3], u16[4], u16[5], u16[6], u16[7]];
+-  }
+-
+-  var vals = [[1, -2], [-3, 4], [Infinity, -Infinity], [NaN, -0]];
+-
+-  for (var v of vals) {
+-    var f = Float64x2(...v);
+-    assertEqX8(Uint16x8.fromFloat64x2Bits(f), expected(f, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Uint16x8.fromFloat64x2Bits(f), expected(f, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint16x8FromInt8x16Bits() {
+-  function expected(v, Buffer) {
+-    var i8 = new Int8Array(new Buffer(16));
+-    var u16 = new Uint16Array(i8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) i8[i] = asArr[i];
+-    return [u16[0], u16[1], u16[2], u16[3], u16[4], u16[5], u16[6], u16[7]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3, -4, 5, INT8_MIN, INT8_MAX, -6, 7, -8, 9, -10, 11, -12, 13]];
+-
+-  for (var v of vals) {
+-    var i = Int8x16(...v);
+-    assertEqX8(Uint16x8.fromInt8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Uint16x8.fromInt8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint16x8FromUint8x16Bits() {
+-  function expected(v, Buffer) {
+-    var u8 = new Uint8Array(new Buffer(16));
+-    var u16 = new Uint16Array(u8.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 16; i++) u8[i] = asArr[i];
+-    return [u16[0], u16[1], u16[2], u16[3], u16[4], u16[5], u16[6], u16[7]];
+-  }
+-
+-  var vals = [[0, 1, -2, 3, -4, UINT8_MAX, INT8_MIN, INT8_MAX, -6, 7, -8, 9, -10, 11, -12, 13]];
+-
+-  for (var v of vals) {
+-    var i = Uint8x16(...v);
+-    assertEqX8(Uint16x8.fromUint8x16Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Uint16x8.fromUint8x16Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint16x8FromInt16x8Bits() {
+-  function expected(v, Buffer) {
+-    var i16 = new Int16Array(new Buffer(16));
+-    var u16 = new Uint16Array(i16.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 8; i++) i16[i] = asArr[i];
+-    return [u16[0], u16[1], u16[2], u16[3], u16[4], u16[5], u16[6], u16[7]];
+-  }
+-
+-  var vals = [[1, 2, 3, 4, 5, 6, 7, 8],
+-              [INT16_MIN, UINT16_MAX, INT16_MAX, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX]];
+-
+-  for (var v of vals) {
+-    var i = Int16x8(...v);
+-    assertEqX8(Uint16x8.fromInt16x8Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Uint16x8.fromInt16x8Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint16x8FromInt32x4Bits() {
+-  function expected(v, Buffer) {
+-    var i32 = new Int32Array(new Buffer(16));
+-    var u16 = new Uint16Array(i32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) i32[i] = asArr[i];
+-    return [u16[0], u16[1], u16[2], u16[3], u16[4], u16[5], u16[6], u16[7]];
+-  }
+-
+-  var vals = [[1, -2, -3, 4], [INT16_MAX, INT16_MIN, INT32_MAX, INT32_MIN]];
+-
+-  for (var v of vals) {
+-    var i = Int32x4(...v);
+-    assertEqX8(Uint16x8.fromInt32x4Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Uint16x8.fromInt32x4Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function testUint16x8FromUint32x4Bits() {
+-  function expected(v, Buffer) {
+-    var u32 = new Uint32Array(new Buffer(16));
+-    var u16 = new Uint16Array(u32.buffer);
+-    var asArr = simdToArray(v);
+-    for (var i = 0; i < 4; i++) u32[i] = asArr[i];
+-    return [u16[0], u16[1], u16[2], u16[3], u16[4], u16[5], u16[6], u16[7]];
+-  }
+-
+-  var vals = [[1, -2, -3, 4], [INT16_MAX, INT16_MIN, INT32_MAX, INT32_MIN]];
+-
+-  for (var v of vals) {
+-    var i = Uint32x4(...v);
+-    assertEqX8(Uint16x8.fromUint32x4Bits(i), expected(i, ArrayBuffer));
+-    if (typeof SharedArrayBuffer != "undefined")
+-      assertEqX8(Uint16x8.fromUint32x4Bits(i), expected(i, SharedArrayBuffer));
+-  }
+-}
+-
+-function test() {
+-  testFloat32x4FromFloat64x2Bits();
+-  testFloat32x4FromInt8x16Bits();
+-  testFloat32x4FromInt16x8Bits();
+-  testFloat32x4FromInt32x4();
+-  testFloat32x4FromInt32x4Bits();
+-  testFloat32x4FromUint8x16Bits();
+-  testFloat32x4FromUint16x8Bits();
+-  testFloat32x4FromUint32x4();
+-  testFloat32x4FromUint32x4Bits();
+-
+-  testFloat64x2FromFloat32x4Bits();
+-  testFloat64x2FromInt8x16Bits();
+-  testFloat64x2FromInt16x8Bits();
+-  testFloat64x2FromInt32x4Bits();
+-  testFloat64x2FromUint8x16Bits();
+-  testFloat64x2FromUint16x8Bits();
+-  testFloat64x2FromUint32x4Bits();
+-
+-  testInt8x16FromFloat32x4Bits();
+-  testInt8x16FromFloat64x2Bits();
+-  testInt8x16FromInt16x8Bits();
+-  testInt8x16FromInt32x4Bits();
+-  testInt8x16FromUint8x16Bits();
+-  testInt8x16FromUint16x8Bits();
+-  testInt8x16FromUint32x4Bits();
+-
+-  testInt16x8FromFloat32x4Bits();
+-  testInt16x8FromFloat64x2Bits();
+-  testInt16x8FromInt8x16Bits();
+-  testInt16x8FromInt32x4Bits();
+-  testInt16x8FromUint8x16Bits();
+-  testInt16x8FromUint16x8Bits();
+-  testInt16x8FromUint32x4Bits();
+-
+-  testInt32x4FromFloat32x4();
+-  testInt32x4FromFloat32x4Bits();
+-  testInt32x4FromFloat64x2Bits();
+-  testInt32x4FromInt8x16Bits();
+-  testInt32x4FromInt16x8Bits();
+-  testInt32x4FromUint8x16Bits();
+-  testInt32x4FromUint16x8Bits();
+-  testInt32x4FromUint32x4Bits();
+-
+-  testUint8x16FromFloat32x4Bits();
+-  testUint8x16FromFloat64x2Bits();
+-  testUint8x16FromInt8x16Bits();
+-  testUint8x16FromInt16x8Bits();
+-  testUint8x16FromInt32x4Bits();
+-  testUint8x16FromUint16x8Bits();
+-  testUint8x16FromUint32x4Bits();
+-
+-  testUint16x8FromFloat32x4Bits();
+-  testUint16x8FromFloat64x2Bits();
+-  testUint16x8FromInt8x16Bits();
+-  testUint16x8FromInt16x8Bits();
+-  testUint16x8FromInt32x4Bits();
+-  testUint16x8FromUint8x16Bits();
+-  testUint16x8FromUint32x4Bits();
+-
+-  testUint32x4FromFloat32x4();
+-  testUint32x4FromFloat32x4Bits();
+-  testUint32x4FromFloat64x2Bits();
+-  testUint32x4FromInt8x16Bits();
+-  testUint32x4FromInt16x8Bits();
+-  testUint32x4FromInt32x4Bits();
+-  testUint32x4FromUint8x16Bits();
+-  testUint32x4FromUint16x8Bits();
+-
+-  if (typeof reportCompare === "function") {
+-    reportCompare(true, true);
+-  }
+-}
+-
+-test();
+diff --git a/js/src/tests/non262/SIMD/float64x2-arithmetic.js b/js/src/tests/non262/SIMD/float64x2-arithmetic.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/float64x2-arithmetic.js
++++ /dev/null
+@@ -1,69 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-var Float64x2 = SIMD.Float64x2;
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-function add(a, b) { return a + b; }
+-function sub(a, b) { return a - b; }
+-function mul(a, b) { return a * b; }
+-function div(a, b) { return a / b; }
+-function neg(a) { return -a; }
+-function reciprocalApproximation(a) { return 1 / a; }
+-function reciprocalSqrtApproximation(a) { return 1 / Math.sqrt(a); }
+-
+-function testAdd(v, w) {
+-    return testBinaryFunc(v, w, Float64x2.add, add);
+-}
+-function testSub(v, w) {
+-    return testBinaryFunc(v, w, Float64x2.sub, sub);
+-}
+-function testMul(v, w) {
+-    return testBinaryFunc(v, w, Float64x2.mul, mul);
+-}
+-function testDiv(v, w) {
+-    return testBinaryFunc(v, w, Float64x2.div, div);
+-}
+-function testAbs(v) {
+-    return testUnaryFunc(v, Float64x2.abs, Math.abs);
+-}
+-function testNeg(v) {
+-    return testUnaryFunc(v, Float64x2.neg, neg);
+-}
+-function testReciprocalApproximation(v) {
+-    return testUnaryFunc(v, Float64x2.reciprocalApproximation, reciprocalApproximation);
+-}
+-function testReciprocalSqrtApproximation(v) {
+-    return testUnaryFunc(v, Float64x2.reciprocalSqrtApproximation, reciprocalSqrtApproximation);
+-}
+-function testSqrt(v) {
+-    return testUnaryFunc(v, Float64x2.sqrt, Math.sqrt);
+-}
+-
+-function test() {
+-  var v, w;
+-  for ([v, w] of [[Float64x2(1, 2), Float64x2(3, 4)],
+-                  [Float64x2(1.894, 2.8909), Float64x2(100.764, 200.987)],
+-                  [Float64x2(-1, -2), Float64x2(-14.54, 57)],
+-                  [Float64x2(+Infinity, -Infinity), Float64x2(NaN, -0)],
+-                  [Float64x2(Math.pow(2, 31), Math.pow(2, -31)), Float64x2(Math.pow(2, -1047), Math.pow(2, -149))]])
+-  {
+-      testAdd(v, w);
+-      testSub(v, w);
+-      testMul(v, w);
+-      testDiv(v, w);
+-      testAbs(v);
+-      testNeg(v);
+-      testReciprocalApproximation(v);
+-      testSqrt(v);
+-      testReciprocalSqrtApproximation(v);
+-  }
+-
+-  if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+-}
+-
+-test();
+-
+diff --git a/js/src/tests/non262/SIMD/load-floats.js b/js/src/tests/non262/SIMD/load-floats.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/load-floats.js
++++ /dev/null
+@@ -1,19 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var { testLoad } = Helpers;
+-
+-testLoad('Float32x4', new Float32Array(SIZE_32_ARRAY));
+-testLoad('Float64x2', new Float64Array(SIZE_64_ARRAY));
+-
+-if (typeof SharedArrayBuffer != "undefined") {
+-  testLoad('Float32x4', new Float32Array(new SharedArrayBuffer(SIZE_8_ARRAY)));
+-  testLoad('Float64x2', new Float64Array(new SharedArrayBuffer(SIZE_8_ARRAY)));
+-}
+-
+-if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+diff --git a/js/src/tests/non262/SIMD/load-int16x8.js b/js/src/tests/non262/SIMD/load-int16x8.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/load-int16x8.js
++++ /dev/null
+@@ -1,17 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var { testLoad } = Helpers;
+-
+-testLoad('Int16x8', new Int16Array(SIZE_16_ARRAY));
+-
+-if (typeof SharedArrayBuffer != "undefined") {
+-    testLoad('Int16x8', new Int16Array(new SharedArrayBuffer(SIZE_8_ARRAY)));
+-}
+-
+-if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+diff --git a/js/src/tests/non262/SIMD/load-int32x4.js b/js/src/tests/non262/SIMD/load-int32x4.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/load-int32x4.js
++++ /dev/null
+@@ -1,18 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var { testLoad } = Helpers;
+-
+-testLoad('Int32x4', new Int32Array(SIZE_32_ARRAY));
+-
+-if (typeof SharedArrayBuffer != "undefined") {
+-    testLoad('Int32x4', new Int32Array(new SharedArrayBuffer(SIZE_8_ARRAY)));
+-}
+-
+-if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+-
+diff --git a/js/src/tests/non262/SIMD/load-int8x16.js b/js/src/tests/non262/SIMD/load-int8x16.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/load-int8x16.js
++++ /dev/null
+@@ -1,17 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var { testLoad } = Helpers;
+-
+-testLoad('Int8x16', new Int8Array(SIZE_8_ARRAY));
+-
+-if (typeof SharedArrayBuffer != "undefined") {
+-    testLoad('Int8x16', new Int8Array(new SharedArrayBuffer(SIZE_8_ARRAY)));
+-}
+-
+-if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+diff --git a/js/src/tests/non262/SIMD/load-sab-buffer-compat.js b/js/src/tests/non262/SIMD/load-sab-buffer-compat.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/load-sab-buffer-compat.js
++++ /dev/null
+@@ -1,49 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var { MakeComparator } = Helpers;
+-
+-function testSharedArrayBufferCompat() {
+-    var TA = new Float32Array(new SharedArrayBuffer(16*4));
+-    for (var i = 0; i < 16; i++)
+-        TA[i] = i + 1;
+-
+-    for (var ta of [
+-                    new Uint8Array(TA.buffer),
+-                    new Int8Array(TA.buffer),
+-                    new Uint16Array(TA.buffer),
+-                    new Int16Array(TA.buffer),
+-                    new Uint32Array(TA.buffer),
+-                    new Int32Array(TA.buffer),
+-                    new Float32Array(TA.buffer),
+-                    new Float64Array(TA.buffer)
+-                   ])
+-    {
+-        for (var kind of ['Int32x4', 'Uint32x4', 'Float32x4', 'Float64x2']) {
+-            var comp = MakeComparator(kind, ta);
+-            comp.load(0);
+-            comp.load1(0);
+-            comp.load2(0);
+-            comp.load3(0);
+-
+-            comp.load(3);
+-            comp.load1(3);
+-            comp.load2(3);
+-            comp.load3(3);
+-        }
+-
+-        assertThrowsInstanceOf(() => SIMD.Int32x4.load(ta, 1024), RangeError);
+-        assertThrowsInstanceOf(() => SIMD.Uint32x4.load(ta, 1024), RangeError);
+-        assertThrowsInstanceOf(() => SIMD.Float32x4.load(ta, 1024), RangeError);
+-        assertThrowsInstanceOf(() => SIMD.Float64x2.load(ta, 1024), RangeError);
+-    }
+-}
+-
+-testSharedArrayBufferCompat();
+-
+-if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+diff --git a/js/src/tests/non262/SIMD/load-unsigned-integers.js b/js/src/tests/non262/SIMD/load-unsigned-integers.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/load-unsigned-integers.js
++++ /dev/null
+@@ -1,22 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var { testLoad } = Helpers;
+-
+-testLoad('Uint8x16', new Uint8Array(SIZE_8_ARRAY));
+-testLoad('Uint16x8', new Uint16Array(SIZE_16_ARRAY));
+-testLoad('Uint32x4', new Uint32Array(SIZE_32_ARRAY));
+-
+-if (typeof SharedArrayBuffer != "undefined") {
+-  testLoad('Uint8x16', new Uint8Array(new SharedArrayBuffer(SIZE_8_ARRAY)));
+-  testLoad('Uint16x8', new Uint16Array(new SharedArrayBuffer(SIZE_8_ARRAY)));
+-  testLoad('Uint32x4', new Uint32Array(new SharedArrayBuffer(SIZE_8_ARRAY)));
+-}
+-
+-if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+-
+diff --git a/js/src/tests/non262/SIMD/minmax.js b/js/src/tests/non262/SIMD/minmax.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/minmax.js
++++ /dev/null
+@@ -1,85 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var Float32x4 = SIMD.Float32x4;
+-var Float64x2 = SIMD.Float64x2;
+-
+-function testMaxFloat32(v, w) {
+-    return testBinaryFunc(v, w, Float32x4.max, (x, y) => Math.fround(Math.max(x, y)), 4);
+-}
+-function testMinFloat32(v, w) {
+-    return testBinaryFunc(v, w, Float32x4.min, (x, y) => Math.fround(Math.min(x, y)), 4);
+-}
+-
+-function testMaxFloat64(v, w) {
+-    return testBinaryFunc(v, w, Float64x2.max, (x, y) => Math.max(x, y), 2);
+-}
+-function testMinFloat64(v, w) {
+-    return testBinaryFunc(v, w, Float64x2.min, (x, y) => Math.min(x, y), 2);
+-}
+-
+-function maxNum(x, y) {
+-    if (x != x)
+-        return y;
+-    if (y != y)
+-        return x;
+-    return Math.max(x, y);
+-}
+-
+-function minNum(x, y) {
+-    if (x != x)
+-        return y;
+-    if (y != y)
+-        return x;
+-    return Math.min(x, y);
+-}
+-
+-function testMaxNumFloat32(v, w) {
+-    return testBinaryFunc(v, w, Float32x4.maxNum, maxNum, 4);
+-}
+-function testMinNumFloat32(v, w) {
+-    return testBinaryFunc(v, w, Float32x4.minNum, minNum, 4);
+-}
+-
+-function testMaxNumFloat64(v, w) {
+-    return testBinaryFunc(v, w, Float64x2.maxNum, maxNum, 2);
+-}
+-function testMinNumFloat64(v, w) {
+-    return testBinaryFunc(v, w, Float64x2.minNum, minNum, 2);
+-}
+-
+-function test() {
+-  var v, w;
+-  for ([v, w] of [[Float32x4(1, 20, 30, 4), Float32x4(10, 2, 3, 40)],
+-                  [Float32x4(9.999, 2.1234, 30.4443, 4), Float32x4(10, 2.1233, 30.4444, 4.0001)],
+-                  [Float32x4(NaN, -Infinity, +Infinity, -0), Float32x4(13.37, 42.42, NaN, 0)]])
+-  {
+-      testMinFloat32(v, w);
+-      testMaxFloat32(v, w);
+-      testMinNumFloat32(v, w);
+-      testMaxNumFloat32(v, w);
+-  }
+-
+-  for ([v, w] of [[Float64x2(1, 20), Float64x2(10, 2)],
+-                  [Float64x2(30, 4), Float64x2(3, 40)],
+-                  [Float64x2(9.999, 2.1234), Float64x2(10, 2.1233)],
+-                  [Float64x2(30.4443, 4), Float64x2(30.4444, 4.0001)],
+-                  [Float64x2(NaN, -Infinity), Float64x2(13.37, 42.42)],
+-                  [Float64x2(+Infinity, -0), Float64x2(NaN, 0)]])
+-  {
+-      testMinFloat64(v, w);
+-      testMaxFloat64(v, w);
+-      testMinNumFloat64(v, w);
+-      testMaxNumFloat64(v, w);
+-  }
+-
+-  if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+-}
+-
+-test();
+-
+diff --git a/js/src/tests/non262/SIMD/replaceLane.js b/js/src/tests/non262/SIMD/replaceLane.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/replaceLane.js
++++ /dev/null
+@@ -1,228 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-var Float32x4 = SIMD.Float32x4;
+-var Float64x2 = SIMD.Float64x2;
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-var Bool8x16 = SIMD.Bool8x16;
+-var Bool16x8 = SIMD.Bool16x8;
+-var Bool32x4 = SIMD.Bool32x4;
+-var Bool64x2 = SIMD.Bool64x2;
+-
+-function replaceLaneN(laneIndex, arr, value) {
+-    var copy = arr.slice();
+-    assertEq(laneIndex < arr.length, true);
+-    copy[laneIndex] = value;
+-    return copy;
+-}
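+-// For illustration: replaceLaneN(1, [1, 2, 3], 9) returns [1, 9, 3] and
+-// leaves the input array untouched.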
+-
+-var replaceLane0 = replaceLaneN.bind(null, 0);
+-var replaceLane1 = replaceLaneN.bind(null, 1);
+-var replaceLane2 = replaceLaneN.bind(null, 2);
+-var replaceLane3 = replaceLaneN.bind(null, 3);
+-var replaceLane4 = replaceLaneN.bind(null, 4);
+-var replaceLane5 = replaceLaneN.bind(null, 5);
+-var replaceLane6 = replaceLaneN.bind(null, 6);
+-var replaceLane7 = replaceLaneN.bind(null, 7);
+-var replaceLane8 = replaceLaneN.bind(null, 8);
+-var replaceLane9 = replaceLaneN.bind(null, 9);
+-var replaceLane10 = replaceLaneN.bind(null, 10);
+-var replaceLane11 = replaceLaneN.bind(null, 11);
+-var replaceLane12 = replaceLaneN.bind(null, 12);
+-var replaceLane13 = replaceLaneN.bind(null, 13);
+-var replaceLane14 = replaceLaneN.bind(null, 14);
+-var replaceLane15 = replaceLaneN.bind(null, 15);
+-
+-function testReplaceLane(vec, scalar, simdFunc, func) {
+-    var varr = simdToArray(vec);
+-    var observed = simdToArray(simdFunc(vec, scalar));
+-    var expected = func(varr, scalar);
+-    for (var i = 0; i < observed.length; i++)
+-        assertEq(observed[i], expected[i]);
+-}
+-
+-function test() {
+-  function testType(type, inputs) {
+-      var length = simdToArray(inputs[0][0]).length;
+-      for (var [vec, s] of inputs) {
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 0, y), replaceLane0);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 1, y), replaceLane1);
+-          if (length <= 2)
+-              continue;
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 2, y), replaceLane2);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 3, y), replaceLane3);
+-          if (length <= 4)
+-              continue;
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 4, y), replaceLane4);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 5, y), replaceLane5);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 6, y), replaceLane6);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 7, y), replaceLane7);
+-          if (length <= 8)
+-              continue;
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 8, y), replaceLane8);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 9, y), replaceLane9);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 10, y), replaceLane10);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 11, y), replaceLane11);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 12, y), replaceLane12);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 13, y), replaceLane13);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 14, y), replaceLane14);
+-          testReplaceLane(vec, s, (x,y) => SIMD[type].replaceLane(x, 15, y), replaceLane15);
+-      }
+-  }
+-
+-  function TestError() {}
+-  var good = {valueOf: () => 42};
+-  var bad = {valueOf: () => {throw new TestError(); }};
+-
+-  var Float32x4inputs = [
+-      [Float32x4(1, 2, 3, 4), 5],
+-      [Float32x4(1.87, 2.08, 3.84, 4.17), Math.fround(13.37)],
+-      [Float32x4(NaN, -0, Infinity, -Infinity), 0]
+-  ];
+-  testType('Float32x4', Float32x4inputs);
+-
+-  var v = Float32x4inputs[1][0];
+-  assertEqX4(Float32x4.replaceLane(v, 0), replaceLane0(simdToArray(v), NaN));
+-  assertEqX4(Float32x4.replaceLane(v, 0, good), replaceLane0(simdToArray(v), good | 0));
+-  assertThrowsInstanceOf(() => Float32x4.replaceLane(v, 0, bad), TestError);
+-  assertThrowsInstanceOf(() => Float32x4.replaceLane(v, 4, good), RangeError);
+-  assertThrowsInstanceOf(() => Float32x4.replaceLane(v, 1.1, good), RangeError);
+-
+-  var Float64x2inputs = [
+-      [Float64x2(1, 2), 5],
+-      [Float64x2(1.87, 2.08), Math.fround(13.37)],
+-      [Float64x2(NaN, -0), 0]
+-  ];
+-  testType('Float64x2', Float64x2inputs);
+-
+-  var v = Float64x2inputs[1][0];
+-  assertEqX2(Float64x2.replaceLane(v, 0), replaceLane0(simdToArray(v), NaN));
+-  assertEqX2(Float64x2.replaceLane(v, 0, good), replaceLane0(simdToArray(v), good | 0));
+-  assertThrowsInstanceOf(() => Float64x2.replaceLane(v, 0, bad), TestError);
+-  assertThrowsInstanceOf(() => Float64x2.replaceLane(v, 2, good), RangeError);
+-  assertThrowsInstanceOf(() => Float64x2.replaceLane(v, 1.1, good), RangeError);
+-
+-  var Int8x16inputs = [[Int8x16(0, 1, 2, 3, 4, 5, 6, 7, -1, -2, -3, -4, -5, -6, INT8_MIN, INT8_MAX), 17]];
+-  testType('Int8x16', Int8x16inputs);
+-
+-  var v = Int8x16inputs[0][0];
+-  assertEqX16(Int8x16.replaceLane(v, 0), replaceLane0(simdToArray(v), 0));
+-  assertEqX16(Int8x16.replaceLane(v, 0, good), replaceLane0(simdToArray(v), good | 0));
+-  assertThrowsInstanceOf(() => Int8x16.replaceLane(v, 0, bad), TestError);
+-  assertThrowsInstanceOf(() => Int8x16.replaceLane(v, 16, good), RangeError);
+-  assertThrowsInstanceOf(() => Int8x16.replaceLane(v, 1.1, good), RangeError);
+-
+-  var Int16x8inputs = [[Int16x8(0, 1, 2, 3, -1, -2, INT16_MIN, INT16_MAX), 9]];
+-  testType('Int16x8', Int16x8inputs);
+-
+-  var v = Int16x8inputs[0][0];
+-  assertEqX8(Int16x8.replaceLane(v, 0), replaceLane0(simdToArray(v), 0));
+-  assertEqX8(Int16x8.replaceLane(v, 0, good), replaceLane0(simdToArray(v), good | 0));
+-  assertThrowsInstanceOf(() => Int16x8.replaceLane(v, 0, bad), TestError);
+-  assertThrowsInstanceOf(() => Int16x8.replaceLane(v, 8, good), RangeError);
+-  assertThrowsInstanceOf(() => Int16x8.replaceLane(v, 1.1, good), RangeError);
+-
+-  var Int32x4inputs = [
+-      [Int32x4(1, 2, 3, 4), 5],
+-      [Int32x4(INT32_MIN, INT32_MAX, 3, 4), INT32_MIN],
+-  ];
+-  testType('Int32x4', Int32x4inputs);
+-
+-  var v = Int32x4inputs[1][0];
+-  assertEqX4(Int32x4.replaceLane(v, 0), replaceLane0(simdToArray(v), 0));
+-  assertEqX4(Int32x4.replaceLane(v, 0, good), replaceLane0(simdToArray(v), good | 0));
+-  assertThrowsInstanceOf(() => Int32x4.replaceLane(v, 0, bad), TestError);
+-  assertThrowsInstanceOf(() => Int32x4.replaceLane(v, 4, good), RangeError);
+-  assertThrowsInstanceOf(() => Int32x4.replaceLane(v, 1.1, good), RangeError);
+-
+-  var Uint8x16inputs = [[Uint8x16(0, 1, 2, 3, 4, 5, 6, 7, -1, -2, -3, -4, -5, -6, INT8_MIN, UINT8_MAX), 17]];
+-  testType('Uint8x16', Uint8x16inputs);
+-
+-  var v = Uint8x16inputs[0][0];
+-  assertEqX16(Uint8x16.replaceLane(v, 0), replaceLane0(simdToArray(v), 0));
+-  assertEqX16(Uint8x16.replaceLane(v, 0, good), replaceLane0(simdToArray(v), good | 0));
+-  assertThrowsInstanceOf(() => Uint8x16.replaceLane(v, 0, bad), TestError);
+-  assertThrowsInstanceOf(() => Uint8x16.replaceLane(v, 16, good), RangeError);
+-  assertThrowsInstanceOf(() => Uint8x16.replaceLane(v, 1.1, good), RangeError);
+-
+-  var Uint16x8inputs = [[Uint16x8(0, 1, 2, 3, -1, -2, INT16_MIN, UINT16_MAX), 9]];
+-  testType('Uint16x8', Uint16x8inputs);
+-
+-  var v = Uint16x8inputs[0][0];
+-  assertEqX8(Uint16x8.replaceLane(v, 0), replaceLane0(simdToArray(v), 0));
+-  assertEqX8(Uint16x8.replaceLane(v, 0, good), replaceLane0(simdToArray(v), good | 0));
+-  assertThrowsInstanceOf(() => Uint16x8.replaceLane(v, 0, bad), TestError);
+-  assertThrowsInstanceOf(() => Uint16x8.replaceLane(v, 8, good), RangeError);
+-  assertThrowsInstanceOf(() => Uint16x8.replaceLane(v, 1.1, good), RangeError);
+-
+-  var Uint32x4inputs = [
+-      [Uint32x4(1, 2, 3, 4), 5],
+-      [Uint32x4(INT32_MIN, UINT32_MAX, INT32_MAX, 4), UINT32_MAX],
+-  ];
+-  testType('Uint32x4', Uint32x4inputs);
+-
+-  var v = Uint32x4inputs[1][0];
+-  assertEqX4(Uint32x4.replaceLane(v, 0), replaceLane0(simdToArray(v), 0));
+-  assertEqX4(Uint32x4.replaceLane(v, 0, good), replaceLane0(simdToArray(v), good | 0));
+-  assertThrowsInstanceOf(() => Uint32x4.replaceLane(v, 0, bad), TestError);
+-  assertThrowsInstanceOf(() => Uint32x4.replaceLane(v, 4, good), RangeError);
+-  assertThrowsInstanceOf(() => Uint32x4.replaceLane(v, 1.1, good), RangeError);
+-
+-  var Bool64x2inputs = [
+-      [Bool64x2(true, true), false],
+-  ];
+-  testType('Bool64x2', Bool64x2inputs);
+-
+-  var v = Bool64x2inputs[0][0];
+-  assertEqX2(Bool64x2.replaceLane(v, 0),       replaceLane0(simdToArray(v), false));
+-  assertEqX2(Bool64x2.replaceLane(v, 0, true), replaceLane0(simdToArray(v), true));
+-  assertEqX2(Bool64x2.replaceLane(v, 0, bad),  replaceLane0(simdToArray(v), true));
+-  assertThrowsInstanceOf(() => Bool64x2.replaceLane(v, 4, true), RangeError);
+-  assertThrowsInstanceOf(() => Bool64x2.replaceLane(v, 1.1, false), RangeError);
+-
+-  var Bool32x4inputs = [
+-      [Bool32x4(true, true, true, true), false],
+-  ];
+-  testType('Bool32x4', Bool32x4inputs);
+-
+-  var v = Bool32x4inputs[0][0];
+-  assertEqX4(Bool32x4.replaceLane(v, 0),       replaceLane0(simdToArray(v), false));
+-  assertEqX4(Bool32x4.replaceLane(v, 0, true), replaceLane0(simdToArray(v), true));
+-  assertEqX4(Bool32x4.replaceLane(v, 0, bad),  replaceLane0(simdToArray(v), true));
+-  assertThrowsInstanceOf(() => Bool32x4.replaceLane(v, 4, true), RangeError);
+-  assertThrowsInstanceOf(() => Bool32x4.replaceLane(v, 1.1, false), RangeError);
+-
+-  var Bool16x8inputs = [
+-      [Bool16x8(true, true, true, true, true, true, true, true), false],
+-  ];
+-
+-  testType('Bool16x8', Bool16x8inputs);
+-  var v = Bool16x8inputs[0][0];
+-  assertEqX8(Bool16x8.replaceLane(v, 0),       replaceLane0(simdToArray(v), false));
+-  assertEqX8(Bool16x8.replaceLane(v, 0, true), replaceLane0(simdToArray(v), true));
+-  assertEqX8(Bool16x8.replaceLane(v, 0, bad),  replaceLane0(simdToArray(v), true));
+-  assertThrowsInstanceOf(() => Bool16x8.replaceLane(v, 16, true), RangeError);
+-  assertThrowsInstanceOf(() => Bool16x8.replaceLane(v, 1.1, false), RangeError);
+-
+-  var Bool8x16inputs = [
+-      [Bool8x16(true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true), false],
+-  ];
+-
+-  testType('Bool8x16', Bool8x16inputs);
+-  var v = Bool8x16inputs[0][0];
+-  assertEqX16(Bool8x16.replaceLane(v, 0),       replaceLane0(simdToArray(v), false));
+-  assertEqX16(Bool8x16.replaceLane(v, 0, true), replaceLane0(simdToArray(v), true));
+-  assertEqX16(Bool8x16.replaceLane(v, 0, bad),  replaceLane0(simdToArray(v), true));
+-  assertThrowsInstanceOf(() => Bool8x16.replaceLane(v, 16, true), RangeError);
+-  assertThrowsInstanceOf(() => Bool8x16.replaceLane(v, 1.1, false), RangeError);
+-
+-  if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+-}
+-
+-test();
+diff --git a/js/src/tests/non262/SIMD/select-bitselect.js b/js/src/tests/non262/SIMD/select-bitselect.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/select-bitselect.js
++++ /dev/null
+@@ -1,139 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var Float32x4 = SIMD.Float32x4;
+-var Float64x2 = SIMD.Float64x2;
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-var Bool8x16 = SIMD.Bool8x16;
+-var Bool16x8 = SIMD.Bool16x8;
+-var Bool32x4 = SIMD.Bool32x4;
+-var Bool64x2 = SIMD.Bool64x2;
+-
+-function getMask(i, maskLength) {
+-    var args = [];
+-    for (var j = 0; j < maskLength; j++)
+-        args.push((i >> j) & 1);
+-    if (maskLength == 2)
+-        return Bool64x2(...args);
+-    else if (maskLength == 4)
+-        return Bool32x4(...args);
+-    else if (maskLength == 8)
+-        return Bool16x8(...args);
+-    else if (maskLength == 16)
+-        return Bool8x16(...args);
+-    else
+-        throw new Error("Invalid mask length.");
+-}
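+-// Lane j of the returned mask is bit j of i; e.g. getMask(5, 4) encodes
+-// 0b0101 as Bool32x4(true, false, true, false).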
+-
+-function select(mask, ifTrue, ifFalse) {
+-    var m = simdToArray(mask);
+-    var tv = simdToArray(ifTrue);
+-    var fv = simdToArray(ifFalse);
+-    return m.map(function(v, i) {
+-        return (v ? tv : fv)[i];
+-    });
+-}
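+-// E.g. select(Bool32x4(true, false, false, true), a, b) takes lanes 0 and 3
+-// from a and lanes 1 and 2 from b.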
+-
+-/**
+- * Tests type.select on all input pairs, iterating over maskLength^2 masks.
+- * As the mask has 4 lanes for Int32x4, all 16 possible masks are covered.
+- * For Int16x8 the mask has 8 lanes, so this exercises 64 of the 256
+- * possible masks; for Int8x16 it has 16 lanes, so 256 of the 65536
+- * possible masks are exercised.
+- */
+-function testSelect(type, inputs) {
+-    var x, y;
+-    var maskLength = simdLengthType(type);
+-    for (var i = 0; i < Math.pow(maskLength, 2); i++) {
+-        var mask = getMask(i, maskLength);
+-        for ([x, y] of inputs)
+-            assertEqVec(type.select(mask, x, y), select(mask, x, y));
+-    }
+-}
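+-// With maskLength 4 the loop walks i = 0..15, so every Bool32x4 mask from
+-// (false, false, false, false) to (true, true, true, true) is exercised.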
+-
+-function test() {
+-    var inputs = [
+-        [Int8x16(0,4,9,16,25,36,49,64,81,121,-4,-9,-16,-25,-36,-49), Int8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)],
+-        [Int8x16(-1, 2, INT8_MAX, INT8_MIN, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
+-         Int8x16(INT8_MAX, -4, INT8_MIN, 42, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)]
+-    ];
+-
+-    testSelect(Int8x16, inputs);
+-
+-    inputs = [
+-        [Int16x8(0,4,9,16,25,36,49,64), Int16x8(1,2,3,4,5,6,7,8)],
+-        [Int16x8(-1, 2, INT16_MAX, INT16_MIN, 5, 6, 7, 8),
+-         Int16x8(INT16_MAX, -4, INT16_MIN, 42, 5, 6, 7, 8)]
+-    ];
+-
+-    testSelect(Int16x8, inputs);
+-
+-    inputs = [
+-        [Int32x4(0,4,9,16), Int32x4(1,2,3,4)],
+-        [Int32x4(-1, 2, INT32_MAX, INT32_MIN), Int32x4(INT32_MAX, -4, INT32_MIN, 42)]
+-    ];
+-
+-    testSelect(Int32x4, inputs);
+-
+-    inputs = [
+-        [Uint8x16(0,4,9,16,25,36,49,64,81,121,-4,-9,-16,-25,-36,-49), Uint8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)],
+-        [Uint8x16(-1, 2, INT8_MAX, INT8_MIN, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
+-         Uint8x16(INT8_MAX, -4, INT8_MIN, 42, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)]
+-    ];
+-
+-    testSelect(Uint8x16, inputs);
+-
+-    inputs = [
+-        [Uint16x8(0,4,9,16,25,36,49,64), Uint16x8(1,2,3,4,5,6,7,8)],
+-        [Uint16x8(-1, 2, INT16_MAX, INT16_MIN, 5, 6, 7, 8),
+-         Uint16x8(INT16_MAX, -4, INT16_MIN, 42, 5, 6, 7, 8)]
+-    ];
+-
+-    testSelect(Uint16x8, inputs);
+-
+-    inputs = [
+-        [Uint32x4(0,4,9,16), Uint32x4(1,2,3,4)],
+-        [Uint32x4(-1, 2, INT32_MAX, INT32_MIN), Uint32x4(INT32_MAX, -4, INT32_MIN, 42)]
+-    ];
+-
+-    testSelect(Uint32x4, inputs);
+-
+-    inputs = [
+-        [Float32x4(0.125,4.25,9.75,16.125), Float32x4(1.5,2.75,3.25,4.5)],
+-        [Float32x4(-1.5,-0,NaN,-Infinity), Float32x4(1,-2,13.37,3.13)],
+-        [Float32x4(1.5,2.75,NaN,Infinity), Float32x4(-NaN,-Infinity,9.75,16.125)]
+-    ];
+-
+-    testSelect(Float32x4, inputs);
+-
+-    inputs = [
+-        [Float64x2(0.125,4.25), Float64x2(9.75,16.125)],
+-        [Float64x2(1.5,2.75), Float64x2(3.25,4.5)],
+-        [Float64x2(-1.5,-0), Float64x2(NaN,-Infinity)],
+-        [Float64x2(1,-2), Float64x2(13.37,3.13)],
+-        [Float64x2(1.5,2.75), Float64x2(NaN,Infinity)],
+-        [Float64x2(-NaN,-Infinity), Float64x2(9.75,16.125)]
+-    ];
+-
+-    testSelect(Float64x2, inputs);
+-
+-    if (typeof reportCompare === "function")
+-        reportCompare(true, true);
+-}
+-
+-test();
+diff --git a/js/src/tests/non262/SIMD/shell.js b/js/src/tests/non262/SIMD/shell.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/shell.js
++++ /dev/null
+@@ -1,587 +0,0 @@
+-function makeFloat(sign, exp, mantissa) {
+-    assertEq(sign, sign & 0x1);
+-    assertEq(exp, exp & 0xFF);
+-    assertEq(mantissa, mantissa & 0x7FFFFF);
+-
+-    var i32 = new Int32Array(1);
+-    var f32 = new Float32Array(i32.buffer);
+-
+-    i32[0] = (sign << 31) | (exp << 23) | mantissa;
+-    return f32[0];
+-}
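+-// Sanity example: an exponent field of 127 encodes an unbiased exponent of 0,
+-// so makeFloat(0, 127, 0) === 1 and makeFloat(1, 127, 0) === -1.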
+-
+-function makeDouble(sign, exp, mantissa) {
+-    assertEq(sign, sign & 0x1);
+-    assertEq(exp, exp & 0x7FF);
+-
+-    // Can't use bitwise operations on the mantissa: JS bitwise ops truncate to 32 bits and it can be up to 52 bits wide.
+-    assertEq(mantissa <= 0xfffffffffffff, true);
+-    var highBits = (mantissa / Math.pow(2, 32)) | 0;
+-    var lowBits = mantissa - highBits * Math.pow(2, 32);
+-
+-    var i32 = new Int32Array(2);
+-    var f64 = new Float64Array(i32.buffer);
+-
+-    // Note that this assumes little-endian order, which is the case on tier-1
+-    // platforms.
+-    i32[0] = lowBits;
+-    i32[1] = (sign << 31) | (exp << 20) | highBits;
+-    return f64[0];
+-}
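+-// Likewise makeDouble(0, 1023, 0) === 1, and an all-ones exponent field
+-// (0x7FF) with a nonzero mantissa produces a NaN.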
+-
+-function GetType(v) {
+-    switch (Object.getPrototypeOf(v)) {
+-        case SIMD.Int8x16.prototype:   return SIMD.Int8x16;
+-        case SIMD.Int16x8.prototype:   return SIMD.Int16x8;
+-        case SIMD.Int32x4.prototype:   return SIMD.Int32x4;
+-        case SIMD.Uint8x16.prototype:  return SIMD.Uint8x16;
+-        case SIMD.Uint16x8.prototype:  return SIMD.Uint16x8;
+-        case SIMD.Uint32x4.prototype:  return SIMD.Uint32x4;
+-        case SIMD.Float32x4.prototype: return SIMD.Float32x4;
+-        case SIMD.Float64x2.prototype: return SIMD.Float64x2;
+-        case SIMD.Bool8x16.prototype:  return SIMD.Bool8x16;
+-        case SIMD.Bool16x8.prototype:  return SIMD.Bool16x8;
+-        case SIMD.Bool32x4.prototype:  return SIMD.Bool32x4;
+-        case SIMD.Bool64x2.prototype:  return SIMD.Bool64x2;
+-    }
+-}
+-
+-function assertEqFloat64x2(v, arr) {
+-    try {
+-        assertEq(SIMD.Float64x2.extractLane(v, 0), arr[0]);
+-        assertEq(SIMD.Float64x2.extractLane(v, 1), arr[1]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqBool64x2(v, arr) {
+-    try {
+-        assertEq(SIMD.Bool64x2.extractLane(v, 0), arr[0]);
+-        assertEq(SIMD.Bool64x2.extractLane(v, 1), arr[1]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqX2(v, arr) {
+-    var Type = GetType(v);
+-    if (Type === SIMD.Float64x2) assertEqFloat64x2(v, arr);
+-    else if (Type === SIMD.Bool64x2) assertEqBool64x2(v, arr);
+-    else throw new TypeError("Unknown SIMD kind.");
+-}
+-
+-function assertEqInt32x4(v, arr) {
+-    try {
+-        for (var i = 0; i < 4; i++)
+-            assertEq(SIMD.Int32x4.extractLane(v, i), arr[i]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqUint32x4(v, arr) {
+-    try {
+-        for (var i = 0; i < 4; i++)
+-            assertEq(SIMD.Uint32x4.extractLane(v, i), arr[i]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqFloat32x4(v, arr) {
+-    try {
+-        for (var i = 0; i < 4; i++)
+-            assertEq(SIMD.Float32x4.extractLane(v, i), arr[i]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqBool32x4(v, arr) {
+-    try {
+-        for (var i = 0; i < 4; i++)
+-            assertEq(SIMD.Bool32x4.extractLane(v, i), arr[i]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqX4(v, arr) {
+-    var Type = GetType(v);
+-    if (Type === SIMD.Int32x4) assertEqInt32x4(v, arr);
+-    else if (Type === SIMD.Uint32x4) assertEqUint32x4(v, arr);
+-    else if (Type === SIMD.Float32x4) assertEqFloat32x4(v, arr);
+-    else if (Type === SIMD.Bool32x4) assertEqBool32x4(v, arr);
+-    else throw new TypeError("Unknown SIMD kind.");
+-}
+-
+-function assertEqInt16x8(v, arr) {
+-    try {
+-        for (var i = 0; i < 8; i++)
+-            assertEq(SIMD.Int16x8.extractLane(v, i), arr[i]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqUint16x8(v, arr) {
+-    try {
+-        for (var i = 0; i < 8; i++)
+-            assertEq(SIMD.Uint16x8.extractLane(v, i), arr[i]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqBool16x8(v, arr) {
+-    try {
+-        for (var i = 0; i < 8; i++){
+-            assertEq(SIMD.Bool16x8.extractLane(v, i), arr[i]);
+-        }
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqX8(v, arr) {
+-    var Type = GetType(v);
+-    if (Type === SIMD.Int16x8) assertEqInt16x8(v, arr);
+-    else if (Type === SIMD.Uint16x8) assertEqUint16x8(v, arr);
+-    else if (Type === SIMD.Bool16x8) assertEqBool16x8(v, arr);
+-    else throw new TypeError("Unknown x8 vector.");
+-}
+-
+-function assertEqInt8x16(v, arr) {
+-    try {
+-        for (var i = 0; i < 16; i++)
+-            assertEq(SIMD.Int8x16.extractLane(v, i), arr[i]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqUint8x16(v, arr) {
+-    try {
+-        for (var i = 0; i < 16; i++)
+-            assertEq(SIMD.Uint8x16.extractLane(v, i), arr[i]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqBool8x16(v, arr) {
+-    try {
+-        for (var i = 0; i < 16; i++)
+-            assertEq(SIMD.Bool8x16.extractLane(v, i), arr[i]);
+-    } catch (e) {
+-        print("stack trace:", e.stack);
+-        throw e;
+-    }
+-}
+-
+-function assertEqX16(v, arr) {
+-    var Type = GetType(v);
+-    if (Type === SIMD.Int8x16) assertEqInt8x16(v, arr);
+-    else if (Type === SIMD.Uint8x16) assertEqUint8x16(v, arr);
+-    else if (Type === SIMD.Bool8x16) assertEqBool8x16(v, arr);
+-    else throw new TypeError("Unknown x16 vector.");
+-}
+-
+-function simdLength(v) {
+-    var pt = Object.getPrototypeOf(v);
+-    if (pt === SIMD.Int8x16.prototype || pt === SIMD.Uint8x16.prototype ||
+-            pt === SIMD.Bool8x16.prototype)
+-        return 16;
+-    if (pt === SIMD.Int16x8.prototype || pt === SIMD.Uint16x8.prototype ||
+-            pt === SIMD.Bool16x8.prototype)
+-        return 8;
+-    if (pt === SIMD.Int32x4.prototype || pt === SIMD.Uint32x4.prototype ||
+-            pt === SIMD.Float32x4.prototype || pt === SIMD.Bool32x4.prototype)
+-        return 4;
+-    if (pt === SIMD.Float64x2.prototype || pt === SIMD.Bool64x2.prototype)
+-        return 2;
+-    throw new TypeError("Unknown SIMD kind.");
+-}
+-
+-function simdLengthType(t) {
+-    if (t == SIMD.Int8x16 || t == SIMD.Uint8x16 || t == SIMD.Bool8x16)
+-        return 16;
+-    else if (t == SIMD.Int16x8 || t == SIMD.Uint16x8 || t == SIMD.Bool16x8)
+-        return 8;
+-    else if (t == SIMD.Int32x4 || t == SIMD.Uint32x4 || t == SIMD.Float32x4 || t == SIMD.Bool32x4)
+-        return 4;
+-    else if (t == SIMD.Float64x2 || t == SIMD.Bool64x2)
+-        return 2;
+-    else
+-        throw new TypeError("Unknown SIMD kind.");
+-}
+-
+-function getAssertFuncFromLength(l) {
+-    if (l == 2)
+-        return assertEqX2;
+-    else if (l == 4)
+-        return assertEqX4;
+-    else if (l == 8)
+-        return assertEqX8;
+-    else if (l == 16)
+-        return assertEqX16;
+-    else
+-        throw new TypeError("Unknown SIMD kind.");
+-}
+-
+-function assertEqVec(v, arr) {
+-    var Type = GetType(v);
+-    if (Type === SIMD.Int8x16) assertEqInt8x16(v, arr);
+-    else if (Type === SIMD.Int16x8) assertEqInt16x8(v, arr);
+-    else if (Type === SIMD.Int32x4) assertEqInt32x4(v, arr);
+-    else if (Type === SIMD.Uint8x16) assertEqUint8x16(v, arr);
+-    else if (Type === SIMD.Uint16x8) assertEqUint16x8(v, arr);
+-    else if (Type === SIMD.Uint32x4) assertEqUint32x4(v, arr);
+-    else if (Type === SIMD.Float32x4) assertEqFloat32x4(v, arr);
+-    else if (Type === SIMD.Float64x2) assertEqFloat64x2(v, arr);
+-    else if (Type === SIMD.Bool8x16) assertEqBool8x16(v, arr);
+-    else if (Type === SIMD.Bool16x8) assertEqBool16x8(v, arr);
+-    else if (Type === SIMD.Bool32x4) assertEqBool32x4(v, arr);
+-    else if (Type === SIMD.Bool64x2) assertEqBool64x2(v, arr);
+-    else throw new TypeError("Unknown SIMD Kind");
+-}
+-
+-function simdToArray(v) {
+-    var Type = GetType(v);
+-
+-    function indexes(n) {
+-        var arr = [];
+-        for (var i = 0; i < n; i++) arr.push(i);
+-        return arr;
+-    }
+-
+-    if (Type === SIMD.Bool8x16) {
+-        return indexes(16).map((i) => SIMD.Bool8x16.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Bool16x8) {
+-        return indexes(8).map((i) => SIMD.Bool16x8.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Bool32x4) {
+-        return indexes(4).map((i) => SIMD.Bool32x4.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Bool64x2) {
+-        return indexes(2).map((i) => SIMD.Bool64x2.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Int8x16) {
+-        return indexes(16).map((i) => SIMD.Int8x16.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Int16x8) {
+-        return indexes(8).map((i) => SIMD.Int16x8.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Int32x4) {
+-        return indexes(4).map((i) => SIMD.Int32x4.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Uint8x16) {
+-        return indexes(16).map((i) => SIMD.Uint8x16.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Uint16x8) {
+-        return indexes(8).map((i) => SIMD.Uint16x8.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Uint32x4) {
+-        return indexes(4).map((i) => SIMD.Uint32x4.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Float32x4) {
+-        return indexes(4).map((i) => SIMD.Float32x4.extractLane(v, i));
+-    }
+-
+-    if (Type === SIMD.Float64x2) {
+-        return indexes(2).map((i) => SIMD.Float64x2.extractLane(v, i));
+-    }
+-
+-    throw new TypeError("Unknown SIMD Kind");
+-}
+-
+-const INT8_MAX = Math.pow(2, 7) - 1;
+-const INT8_MIN = -Math.pow(2, 7);
+-assertEq((INT8_MAX + 1) << 24 >> 24, INT8_MIN);
+-const INT16_MAX = Math.pow(2, 15) - 1;
+-const INT16_MIN = -Math.pow(2, 15);
+-assertEq((INT16_MAX + 1) << 16 >> 16, INT16_MIN);
+-const INT32_MAX = Math.pow(2, 31) - 1;
+-const INT32_MIN = -Math.pow(2, 31);
+-assertEq(INT32_MAX + 1 | 0, INT32_MIN);
+-
+-const UINT8_MAX = Math.pow(2, 8) - 1;
+-const UINT16_MAX = Math.pow(2, 16) - 1;
+-const UINT32_MAX = Math.pow(2, 32) - 1;
+-
+-function testUnaryFunc(v, simdFunc, func) {
+-    var varr = simdToArray(v);
+-
+-    var observed = simdToArray(simdFunc(v));
+-    var expected = varr.map(function(v, i) { return func(varr[i]); });
+-
+-    for (var i = 0; i < observed.length; i++)
+-        assertEq(observed[i], expected[i]);
+-}
+-
+-function testBinaryFunc(v, w, simdFunc, func) {
+-    var varr = simdToArray(v);
+-    var warr = simdToArray(w);
+-
+-    var observed = simdToArray(simdFunc(v, w));
+-    var expected = varr.map(function(v, i) { return func(varr[i], warr[i]); });
+-
+-    for (var i = 0; i < observed.length; i++)
+-        assertEq(observed[i], expected[i]);
+-}
+-
+-function testBinaryCompare(v, w, simdFunc, func, outType) {
+-    var varr = simdToArray(v);
+-    var warr = simdToArray(w);
+-
+-    var inLanes = simdLength(v);
+-    var observed = simdToArray(simdFunc(v, w));
+-    var outTypeLen = simdLengthType(outType);
+-    assertEq(observed.length, outTypeLen);
+-    for (var i = 0; i < outTypeLen; i++) {
+-        var j = ((i * inLanes) / outTypeLen) | 0;
+-        assertEq(observed[i], func(varr[j], warr[j]));
+-    }
+-}
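+-// The index math maps each output lane back to the input lane it came from,
+-// so a 2-lane input compared into a 4-lane result reads each input lane
+-// twice (j = 0, 0, 1, 1).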
+-
+-function testBinaryScalarFunc(v, scalar, simdFunc, func) {
+-    var varr = simdToArray(v);
+-
+-    var observed = simdToArray(simdFunc(v, scalar));
+-    var expected = varr.map(function(v, i) { return func(varr[i], scalar); });
+-
+-    for (var i = 0; i < observed.length; i++)
+-        assertEq(observed[i], expected[i]);
+-}
+-
+-// Our array for Int32x4 and Float32x4 will have 16 elements
+-const SIZE_8_ARRAY = 64;
+-const SIZE_16_ARRAY = 32;
+-const SIZE_32_ARRAY = 16;
+-const SIZE_64_ARRAY = 8;
+-
+-const SIZE_BYTES = SIZE_32_ARRAY * 4;
+-
+-function MakeComparator(kind, arr, shared) {
+-    var bpe = arr.BYTES_PER_ELEMENT;
+-    var uint8 = (bpe != 1) ? new Uint8Array(arr.buffer) : arr;
+-
+-    // Size in bytes of a single element in the SIMD vector.
+-    var sizeOfLaneElem;
+-    // Typed array constructor corresponding to the SIMD kind.
+-    var typedArrayCtor;
+-    switch (kind) {
+-      case 'Int8x16':
+-        sizeOfLaneElem = 1;
+-        typedArrayCtor = Int8Array;
+-        break;
+-      case 'Int16x8':
+-        sizeOfLaneElem = 2;
+-        typedArrayCtor = Int16Array;
+-        break;
+-      case 'Int32x4':
+-        sizeOfLaneElem = 4;
+-        typedArrayCtor = Int32Array;
+-        break;
+-      case 'Uint8x16':
+-        sizeOfLaneElem = 1;
+-        typedArrayCtor = Uint8Array;
+-        break;
+-      case 'Uint16x8':
+-        sizeOfLaneElem = 2;
+-        typedArrayCtor = Uint16Array;
+-        break;
+-      case 'Uint32x4':
+-        sizeOfLaneElem = 4;
+-        typedArrayCtor = Uint32Array;
+-        break;
+-      case 'Float32x4':
+-        sizeOfLaneElem = 4;
+-        typedArrayCtor = Float32Array;
+-        break;
+-      case 'Float64x2':
+-        sizeOfLaneElem = 8;
+-        typedArrayCtor = Float64Array;
+-        break;
+-      default:
+-        assertEq(true, false, "unknown SIMD kind");
+-    }
+-    var lanes = 16 / sizeOfLaneElem;
+-    // Reads (numElemToRead * sizeOfLaneElem) bytes in arr, and reinterprets
+-    // these bytes as a typed array equivalent to the typed SIMD vector.
+-    var slice = function(start, numElemToRead) {
+-        // Read enough bytes
+-        var startBytes = start * bpe;
+-        var endBytes = startBytes + numElemToRead * sizeOfLaneElem;
+-        var asArray = Array.prototype.slice.call(uint8, startBytes, endBytes);
+-
+-        // If length is less than SIZE_BYTES bytes, fill with 0.
+-        // This is needed for load1, load2, load3 which do only partial
+-        // reads.
+-        for (var i = asArray.length; i < SIZE_BYTES; i++) asArray[i] = 0;
+-        assertEq(asArray.length, SIZE_BYTES);
+-
+-        return new typedArrayCtor(new Uint8Array(asArray).buffer);
+-    };
+-
+-    var assertFunc = getAssertFuncFromLength(lanes);
+-    var type = SIMD[kind];
+-    return {
+-        load1: function(index) {
+-            if (lanes >= 8) // Int8x16 and Int16x8 only support load, no load1/load2/etc.
+-                return;
+-            var v = type.load1(arr, index);
+-            assertFunc(v, slice(index, 1));
+-        },
+-
+-        load2: function(index) {
+-            if (lanes !== 4)
+-                return;
+-            var v = type.load2(arr, index);
+-            assertFunc(v, slice(index, 2));
+-        },
+-
+-        load3: function(index) {
+-            if (lanes !== 4)
+-                return;
+-            var v = type.load3(arr, index);
+-            assertFunc(v, slice(index, 3));
+-        },
+-
+-        load: function(index) {
+-            var v = type.load(arr, index);
+-            assertFunc(v, slice(index, lanes));
+-        }
+-    };
+-}
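+-
+-// Usage sketch (illustration only, assuming a SIMD-enabled shell):
+-// MakeComparator pairs a SIMD kind with a typed-array view and checks each
+-// loadN against the raw bytes, e.g.
+-//   var ta = new Int32Array(SIZE_32_ARRAY);
+-//   for (var i = 0; i < ta.length; i++) ta[i] = i;
+-//   var C = MakeComparator('Int32x4', ta);
+-//   C.load(0);   // compares SIMD.Int32x4.load(ta, 0) with elements 0..3
+-//   C.load2(1);  // compares the first two lanes starting at element 1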
+-
+-function testLoad(kind, TA) {
+-    var lanes = TA.length / 4;
+-    for (var i = TA.length; i--;)
+-        TA[i] = i;
+-
+-    for (var ta of [
+-                    new Uint8Array(TA.buffer),
+-                    new Int8Array(TA.buffer),
+-                    new Uint16Array(TA.buffer),
+-                    new Int16Array(TA.buffer),
+-                    new Uint32Array(TA.buffer),
+-                    new Int32Array(TA.buffer),
+-                    new Float32Array(TA.buffer),
+-                    new Float64Array(TA.buffer)
+-                   ])
+-    {
+-        // Invalid args
+-        assertThrowsInstanceOf(() => SIMD[kind].load(), TypeError);
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta), TypeError);
+-        assertThrowsInstanceOf(() => SIMD[kind].load("hello", 0), TypeError);
+-        // Indexes must be integers; there is no rounding.
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, 1.5), RangeError);
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, -1), RangeError);
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, "hello"), RangeError);
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, NaN), RangeError);
+-        // Try to trip up the bounds checking. Int32 is enough for everybody.
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, 0x100000000), RangeError);
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, 0x80000000), RangeError);
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, 0x40000000), RangeError);
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, 0x20000000), RangeError);
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, (1<<30) * (1<<23) - 1), RangeError);
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, (1<<30) * (1<<23)), RangeError);
+-
+-        // Valid and invalid reads
+-        var C = MakeComparator(kind, ta);
+-        var bpe = ta.BYTES_PER_ELEMENT;
+-
+-        var lastValidArgLoad1   = (SIZE_BYTES - (16 / lanes))  / bpe | 0;
+-        var lastValidArgLoad2   = (SIZE_BYTES - 8)  / bpe | 0;
+-        var lastValidArgLoad3   = (SIZE_BYTES - 12) / bpe | 0;
+-        var lastValidArgLoad    = (SIZE_BYTES - 16) / bpe | 0;
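+-        // Worked example of these bounds (illustration only): with a
+-        // Uint8Array view (bpe == 1) and a 4-lane kind, a full load reads
+-        // 16 bytes, so the last valid index is (64 - 16) / 1 == 48.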
+-
+-        C.load(0);
+-        C.load(1);
+-        C.load(2);
+-        C.load(3);
+-        C.load(lastValidArgLoad);
+-
+-        C.load1(0);
+-        C.load1(1);
+-        C.load1(2);
+-        C.load1(3);
+-        C.load1(lastValidArgLoad1);
+-
+-        C.load2(0);
+-        C.load2(1);
+-        C.load2(2);
+-        C.load2(3);
+-        C.load2(lastValidArgLoad2);
+-
+-        C.load3(0);
+-        C.load3(1);
+-        C.load3(2);
+-        C.load3(3);
+-        C.load3(lastValidArgLoad3);
+-
+-        assertThrowsInstanceOf(() => SIMD[kind].load(ta, lastValidArgLoad + 1), RangeError);
+-        if (lanes <= 4) {
+-            assertThrowsInstanceOf(() => SIMD[kind].load1(ta, lastValidArgLoad1 + 1), RangeError);
+-        }
+-        if (lanes == 4) {
+-            assertThrowsInstanceOf(() => SIMD[kind].load2(ta, lastValidArgLoad2 + 1), RangeError);
+-            assertThrowsInstanceOf(() => SIMD[kind].load3(ta, lastValidArgLoad3 + 1), RangeError);
+-        }
+-
+-        // Indexes are coerced with ToNumber. Try some strings that
+-        // CanonicalNumericIndexString() would reject.
+-        C.load("1.0e0");
+-        C.load(" 2");
+-    }
+-
+-    if (lanes == 4) {
+-        // Test ToNumber behavior.
+-        var obj = {
+-            valueOf: function() { return 12 }
+-        }
+-        var v = SIMD[kind].load(TA, obj);
+-        assertEqX4(v, [12, 13, 14, 15]);
+-    }
+-
+-    var obj = {
+-        valueOf: function() { throw new TypeError("i ain't a number"); }
+-    }
+-    assertThrowsInstanceOf(() => SIMD[kind].load(TA, obj), TypeError);
+-}
+-
+-var Helpers = {
+-    testLoad,
+-    MakeComparator
+-};
+diff --git a/js/src/tests/non262/SIMD/shifts.js b/js/src/tests/non262/SIMD/shifts.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/shifts.js
++++ /dev/null
+@@ -1,202 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-
+-// Int8 shifts.
+-function lsh8(a, b) {
+-    return (a << (b & 7)) << 24 >> 24;
+-}
+-function rsha8(a, b) {
+-    return (a >> (b & 7)) << 24 >> 24;
+-}
+-function rshl8(a, b) {
+-    return (a >>> (b & 7)) << 24 >> 24;
+-}
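+-
+-// Note on the `<< 24 >> 24` idiom above (explanatory only): JS bitwise ops
+-// produce 32-bit values, so the double shift re-narrows a result to a signed
+-// 8-bit lane; e.g. lsh8(0x40, 1) === -128, not 128.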
+-
+-// Int16 shifts.
+-function lsh16(a, b) {
+-    return (a << (b & 15)) << 16 >> 16;
+-}
+-function rsha16(a, b) {
+-    return (a >> (b & 15)) << 16 >> 16;
+-}
+-function rshl16(a, b) {
+-    return (a >>> (b & 15)) << 16 >> 16;
+-}
+-
+-// Int32 shifts.
+-function lsh32(a, b) {
+-    return (a << (b & 31)) | 0;
+-}
+-function rsha32(a, b) {
+-    return (a >> (b & 31)) | 0;
+-}
+-function rshl32(a, b) {
+-    return (a >>> (b & 31)) | 0;
+-}
+-
+-// Uint8 shifts.
+-function ulsh8(a, b) {
+-    return (a << (b & 7)) << 24 >>> 24;
+-}
+-function ursha8(a, b) {
+-    return ((a << 24 >> 24) >> (b & 7)) << 24 >>> 24;
+-}
+-function urshl8(a, b) {
+-    return (a >>> (b & 7)) << 24 >>> 24;
+-}
+-
+-// Uint16 shifts.
+-function ulsh16(a, b) {
+-    return (a << (b & 15)) << 16 >>> 16;
+-}
+-function ursha16(a, b) {
+-    return ((a << 16 >> 16) >> (b & 15)) << 16 >>> 16;
+-}
+-function urshl16(a, b) {
+-    return (a >>> (b & 15)) << 16 >>> 16;
+-}
+-
+-// Uint32 shifts.
+-function ulsh32(a, b) {
+-    return (a << (b & 31)) >>> 0;
+-}
+-function ursha32(a, b) {
+-    return ((a | 0) >> (b & 31)) >>> 0;
+-}
+-function urshl32(a, b) {
+-    return (a >>> (b & 31)) >>> 0;
+-}
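+-
+-// Worked examples of the masking convention above (illustration only): SIMD
+-// shift counts wrap modulo the lane width, so for 32-bit lanes a count of 33
+-// behaves like a count of 1:
+-//   lsh32(1, 33)  === 2           // 33 & 31 === 1
+-//   rsha32(-8, 1) === -4          // arithmetic shift keeps the sign
+-//   rshl32(-8, 1) === 2147483644  // logical shift zero-fills the sign bit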
+-
+-function test() {
+-  function TestError() {};
+-
+-  var good = {valueOf: () => 21};
+-  var bad = {valueOf: () => {throw new TestError(); }};
+-
+-  for (var v of [
+-            Int8x16(-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16),
+-            Int8x16(INT8_MAX, INT8_MIN, INT8_MAX - 1, INT8_MIN + 1)
+-       ])
+-  {
+-      for (var bits = -2; bits < 12; bits++) {
+-          testBinaryScalarFunc(v, bits, Int8x16.shiftLeftByScalar, lsh8);
+-          testBinaryScalarFunc(v, bits, Int8x16.shiftRightByScalar, rsha8);
+-      }
+-      // Test that the shift count is coerced to an int32.
+-      testBinaryScalarFunc(v, undefined, Int8x16.shiftLeftByScalar, lsh8);
+-      testBinaryScalarFunc(v, 3.5, Int8x16.shiftLeftByScalar, lsh8);
+-      testBinaryScalarFunc(v, good, Int8x16.shiftLeftByScalar, lsh8);
+-  }
+-  for (var v of [
+-            Int16x8(-1, 2, -3, 4, -5, 6, -7, 8),
+-            Int16x8(INT16_MAX, INT16_MIN, INT16_MAX - 1, INT16_MIN + 1)
+-       ])
+-  {
+-      for (var bits = -2; bits < 20; bits++) {
+-          testBinaryScalarFunc(v, bits, Int16x8.shiftLeftByScalar, lsh16);
+-          testBinaryScalarFunc(v, bits, Int16x8.shiftRightByScalar, rsha16);
+-      }
+-      // Test that the shift count is coerced to an int32.
+-      testBinaryScalarFunc(v, undefined, Int16x8.shiftLeftByScalar, lsh16);
+-      testBinaryScalarFunc(v, 3.5, Int16x8.shiftLeftByScalar, lsh16);
+-      testBinaryScalarFunc(v, good, Int16x8.shiftLeftByScalar, lsh16);
+-  }
+-  for (var v of [
+-            Int32x4(-1, 2, -3, 4),
+-            Int32x4(INT32_MAX, INT32_MIN, INT32_MAX - 1, INT32_MIN + 1)
+-       ])
+-  {
+-      for (var bits = -2; bits < 36; bits++) {
+-          testBinaryScalarFunc(v, bits, Int32x4.shiftLeftByScalar, lsh32);
+-          testBinaryScalarFunc(v, bits, Int32x4.shiftRightByScalar, rsha32);
+-      }
+-      // Test that the shift count is coerced to an int32.
+-      testBinaryScalarFunc(v, undefined, Int32x4.shiftLeftByScalar, lsh32);
+-      testBinaryScalarFunc(v, 3.5, Int32x4.shiftLeftByScalar, lsh32);
+-      testBinaryScalarFunc(v, good, Int32x4.shiftLeftByScalar, lsh32);
+-  }
+-
+-  for (var v of [
+-            Uint8x16(-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16),
+-            Uint8x16(INT8_MAX, INT8_MIN, INT8_MAX - 1, INT8_MIN + 1, UINT8_MAX, UINT8_MAX - 1)
+-       ])
+-  {
+-      for (var bits = -2; bits < 12; bits++) {
+-          testBinaryScalarFunc(v, bits, Uint8x16.shiftLeftByScalar, ulsh8);
+-          testBinaryScalarFunc(v, bits, Uint8x16.shiftRightByScalar, urshl8);
+-      }
+-      // Test that the shift count is coerced to an int32.
+-      testBinaryScalarFunc(v, undefined, Uint8x16.shiftLeftByScalar, ulsh8);
+-      testBinaryScalarFunc(v, 3.5, Uint8x16.shiftLeftByScalar, ulsh8);
+-      testBinaryScalarFunc(v, good, Uint8x16.shiftLeftByScalar, ulsh8);
+-  }
+-  for (var v of [
+-            Uint16x8(-1, 2, -3, 4, -5, 6, -7, 8),
+-            Uint16x8(INT16_MAX, INT16_MIN, INT16_MAX - 1, INT16_MIN + 1, UINT16_MAX, UINT16_MAX - 1)
+-       ])
+-  {
+-      for (var bits = -2; bits < 20; bits++) {
+-          testBinaryScalarFunc(v, bits, Uint16x8.shiftLeftByScalar, ulsh16);
+-          testBinaryScalarFunc(v, bits, Uint16x8.shiftRightByScalar, urshl16);
+-      }
+-      // Test that the shift count is coerced to an int32.
+-      testBinaryScalarFunc(v, undefined, Uint16x8.shiftLeftByScalar, ulsh16);
+-      testBinaryScalarFunc(v, 3.5, Uint16x8.shiftLeftByScalar, ulsh16);
+-      testBinaryScalarFunc(v, good, Uint16x8.shiftLeftByScalar, ulsh16);
+-  }
+-  for (var v of [
+-            Uint32x4(-1, 2, -3, 4),
+-            Uint32x4(UINT32_MAX, UINT32_MAX - 1, 0, 1),
+-            Uint32x4(INT32_MAX, INT32_MIN, INT32_MAX - 1, INT32_MIN + 1)
+-       ])
+-  {
+-      for (var bits = -2; bits < 36; bits++) {
+-          testBinaryScalarFunc(v, bits, Uint32x4.shiftLeftByScalar, ulsh32);
+-          testBinaryScalarFunc(v, bits, Uint32x4.shiftRightByScalar, urshl32);
+-      }
+-      // Test that the shift count is coerced to an int32.
+-      testBinaryScalarFunc(v, undefined, Uint32x4.shiftLeftByScalar, ulsh32);
+-      testBinaryScalarFunc(v, 3.5, Uint32x4.shiftLeftByScalar, ulsh32);
+-      testBinaryScalarFunc(v, good, Uint32x4.shiftLeftByScalar, ulsh32);
+-  }
+-
+-  var v = SIMD.Int8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.shiftLeftByScalar(v, bad), TestError);
+-  assertThrowsInstanceOf(() => SIMD.Int8x16.shiftRightByScalar(v, bad), TestError);
+-
+-  var v = SIMD.Int16x8(1,2,3,4,5,6,7,8);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.shiftLeftByScalar(v, bad), TestError);
+-  assertThrowsInstanceOf(() => SIMD.Int16x8.shiftRightByScalar(v, bad), TestError);
+-
+-  var v = SIMD.Int32x4(1,2,3,4);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.shiftLeftByScalar(v, bad), TestError);
+-  assertThrowsInstanceOf(() => SIMD.Int32x4.shiftRightByScalar(v, bad), TestError);
+-
+-  var v = SIMD.Uint8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.shiftLeftByScalar(v, bad), TestError);
+-  assertThrowsInstanceOf(() => SIMD.Uint8x16.shiftRightByScalar(v, bad), TestError);
+-
+-  var v = SIMD.Uint16x8(1,2,3,4,5,6,7,8);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.shiftLeftByScalar(v, bad), TestError);
+-  assertThrowsInstanceOf(() => SIMD.Uint16x8.shiftRightByScalar(v, bad), TestError);
+-
+-  var v = SIMD.Uint32x4(1,2,3,4);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.shiftLeftByScalar(v, bad), TestError);
+-  assertThrowsInstanceOf(() => SIMD.Uint32x4.shiftRightByScalar(v, bad), TestError);
+-
+-  if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+-}
+-
+-test();
+diff --git a/js/src/tests/non262/SIMD/splat.js b/js/src/tests/non262/SIMD/splat.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/splat.js
++++ /dev/null
+@@ -1,97 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-var Float64x2 = SIMD.Float64x2;
+-var Float32x4 = SIMD.Float32x4;
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-var Bool8x16 = SIMD.Bool8x16;
+-var Bool16x8 = SIMD.Bool16x8;
+-var Bool32x4 = SIMD.Bool32x4;
+-
+-function TestSplatX16(type, inputs, coerceFunc) {
+-    for (var x of inputs) {
+-        assertEqX16(SIMD[type].splat(x), [x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x].map(coerceFunc));
+-    }
+-}
+-
+-function TestSplatX8(type, inputs, coerceFunc) {
+-    for (var x of inputs) {
+-        assertEqX8(SIMD[type].splat(x), [x, x, x, x, x, x, x, x].map(coerceFunc));
+-    }
+-}
+-
+-function TestSplatX4(type, inputs, coerceFunc) {
+-    for (var x of inputs) {
+-        assertEqX4(SIMD[type].splat(x), [x, x, x, x].map(coerceFunc));
+-    }
+-}
+-
+-function TestSplatX2(type, inputs, coerceFunc) {
+-    for (var x of inputs) {
+-        assertEqX2(SIMD[type].splat(x), [x, x].map(coerceFunc));
+-    }
+-}
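+-
+-// What these helpers verify (illustration only): splat broadcasts a single
+-// coerced scalar to every lane, e.g.
+-//   SIMD.Int32x4.splat(3.5)    // lanes [3, 3, 3, 3], via ToInt32
+-//   SIMD.Float32x4.splat(1/3)  // every lane holds Math.fround(1/3)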
+-
+-function test() {
+-    function TestError(){};
+-
+-    var good = {valueOf: () => 19.89};
+-    var bad = {valueOf: () => { throw new TestError(); }};
+-
+-    TestSplatX16('Int8x16', [0, 1, 2, -1, -2, 3, -3, 4, -4, 5, -5, 6, INT8_MIN, INT8_MAX, INT8_MIN - 1, INT8_MAX + 1], (x) => x << 24 >> 24);
+-    assertEqX16(Int8x16.splat(), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertThrowsInstanceOf(() => SIMD.Int8x16.splat(bad), TestError);
+-
+-    TestSplatX8('Int16x8', [0, 1, 2, -1, INT16_MIN, INT16_MAX, INT16_MIN - 1, INT16_MAX + 1], (x) => x << 16 >> 16);
+-    assertEqX8(Int16x8.splat(), [0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertThrowsInstanceOf(() => SIMD.Int16x8.splat(bad), TestError);
+-
+-    TestSplatX4('Int32x4', [0, undefined, 3.5, 42, -1337, INT32_MAX, INT32_MAX + 1, good], (x) => x | 0);
+-    assertEqX4(SIMD.Int32x4.splat(), [0, 0, 0, 0]);
+-    assertThrowsInstanceOf(() => SIMD.Int32x4.splat(bad), TestError);
+-
+-    TestSplatX16('Uint8x16', [0, 1, 2, -1, -2, 3, -3, 4, -4, 5, -5, 6, INT8_MIN, INT8_MAX, INT8_MIN - 1, INT8_MAX + 1], (x) => x << 24 >>> 24);
+-    assertEqX16(Uint8x16.splat(), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertThrowsInstanceOf(() => SIMD.Uint8x16.splat(bad), TestError);
+-
+-    TestSplatX8('Uint16x8', [0, 1, 2, -1, INT16_MIN, INT16_MAX, INT16_MIN - 1, INT16_MAX + 1], (x) => x << 16 >>> 16);
+-    assertEqX8(Uint16x8.splat(), [0, 0, 0, 0, 0, 0, 0, 0]);
+-    assertThrowsInstanceOf(() => SIMD.Uint16x8.splat(bad), TestError);
+-
+-    TestSplatX4('Uint32x4', [0, undefined, 3.5, 42, INT32_MAX, INT32_MIN, UINT32_MAX, UINT32_MAX + 1, good], (x) => x >>> 0);
+-    assertEqX4(SIMD.Uint32x4.splat(), [0, 0, 0, 0]);
+-    assertThrowsInstanceOf(() => SIMD.Uint32x4.splat(bad), TestError);
+-
+-    TestSplatX4('Float32x4', [0, undefined, 3.5, 42, -13.37, Infinity, NaN, -0, good], (x) => Math.fround(x));
+-    assertEqX4(SIMD.Float32x4.splat(), [NaN, NaN, NaN, NaN]);
+-    assertThrowsInstanceOf(() => SIMD.Float32x4.splat(bad), TestError);
+-
+-    TestSplatX2('Float64x2', [0, undefined, 3.5, 42, -13.37, Infinity, NaN, -0, good], (x) => +x);
+-    assertEqX2(SIMD.Float64x2.splat(), [NaN, NaN]);
+-    assertThrowsInstanceOf(() => SIMD.Float64x2.splat(bad), TestError);
+-
+-    TestSplatX16('Bool8x16', [true, false], (x) => !!x);
+-    assertEqX16(Bool8x16.splat(),    [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
+-    assertEqX16(Bool8x16.splat(bad), [true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true]);
+-
+-    TestSplatX8('Bool16x8', [true, false], (x) => !!x);
+-    assertEqX8(Bool16x8.splat(),    [false, false, false, false, false, false, false, false]);
+-    assertEqX8(Bool16x8.splat(bad), [true, true, true, true, true, true, true, true]);
+-
+-    TestSplatX4('Bool32x4', [true, false], (x) => !!x);
+-    assertEqX4(SIMD.Bool32x4.splat(),    [false, false, false, false]);
+-    assertEqX4(SIMD.Bool32x4.splat(bad), [true, true, true, true]);
+-
+-    TestSplatX2('Bool64x2', [true, false], (x) => !!x);
+-    assertEqX2(SIMD.Bool64x2.splat(),    [false, false]);
+-    assertEqX2(SIMD.Bool64x2.splat(bad), [true, true]);
+-
+-    if (typeof reportCompare === "function")
+-        reportCompare(true, true);
+-}
+-
+-test();
+diff --git a/js/src/tests/non262/SIMD/store.js b/js/src/tests/non262/SIMD/store.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/store.js
++++ /dev/null
+@@ -1,264 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-// As SIMD.*.store is entirely symmetric to SIMD.*.load, this file only
+-// contains basic tests that store to a single TypedArray kind; load is
+-// tested exhaustively. See load.js for more details.
+-
+-const POISON = 42;
+-
+-function reset(ta) {
+-    for (var i = 0; i < ta.length; i++)
+-        ta[i] = POISON + i;
+-}
+-
+-function assertChanged(ta, from, expected) {
+-    var i = 0;
+-    for (; i < from; i++)
+-        assertEq(ta[i], POISON + i);
+-    for (; i < from + expected.length; i++)
+-        assertEq(ta[i], expected[i - from]);
+-    for (; i < ta.length; i++)
+-        assertEq(ta[i], POISON + i);
+-}
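+-
+-// Usage sketch (illustration only): testStore below relies on this poison
+-// pattern, e.g.
+-//   var ta = new Int32Array(8);
+-//   reset(ta);                                   // ta[i] === POISON + i
+-//   SIMD.Int32x4.store(ta, 2, SIMD.Int32x4(9, 9, 9, 9));
+-//   assertChanged(ta, 2, [9, 9, 9, 9]);          // only ta[2..5] changed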
+-
+-function testStore(ta, kind, i, v) {
+-    var asArr = simdToArray(v);
+-
+-    reset(ta);
+-    SIMD[kind].store(ta, i, v);
+-    assertChanged(ta, i, asArr);
+-
+-    var length = asArr.length;
+-    if (length >= 8) // Int8x16 and Int16x8 only support store, and not store1/store2/etc.
+-        return;
+-
+-    reset(ta);
+-    SIMD[kind].store1(ta, i, v);
+-    assertChanged(ta, i, [asArr[0]]);
+-    if (length > 2) {
+-        reset(ta);
+-        SIMD[kind].store2(ta, i, v);
+-        assertChanged(ta, i, [asArr[0], asArr[1]]);
+-
+-        reset(ta);
+-        SIMD[kind].store3(ta, i, v);
+-        assertChanged(ta, i, [asArr[0], asArr[1], asArr[2]]);
+-    }
+-}
+-
+-function testStoreInt8x16(Buffer) {
+-    var I8 = new Int8Array(new Buffer(32));
+-
+-    var v = SIMD.Int8x16(0, 1, INT8_MAX, INT8_MIN, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+-    testStore(I8, 'Int8x16', 0, v);
+-    testStore(I8, 'Int8x16', 1, v);
+-    testStore(I8, 'Int8x16', 2, v);
+-    testStore(I8, 'Int8x16', 16, v);
+-
+-    assertThrowsInstanceOf(() => SIMD.Int8x16.store(I8), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Int8x16.store(I8, 0), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Int16x8.store(I8, 0, v), TypeError);
+-}
+-
+-function testStoreInt16x8(Buffer) {
+-    var I16 = new Int16Array(new Buffer(64));
+-
+-    var v = SIMD.Int16x8(0, 1, INT16_MAX, INT16_MIN, 4, 5, 6, 7);
+-    testStore(I16, 'Int16x8', 0, v);
+-    testStore(I16, 'Int16x8', 1, v);
+-    testStore(I16, 'Int16x8', 2, v);
+-    testStore(I16, 'Int16x8', 24, v);
+-
+-    assertThrowsInstanceOf(() => SIMD.Int16x8.store(I16), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Int16x8.store(I16, 0), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Int8x16.store(I16, 0, v), TypeError);
+-}
+-
+-function testStoreInt32x4(Buffer) {
+-    var I32 = new Int32Array(new Buffer(64));
+-
+-    var v = SIMD.Int32x4(0, 1, Math.pow(2,31) - 1, -Math.pow(2, 31));
+-    testStore(I32, 'Int32x4', 0, v);
+-    testStore(I32, 'Int32x4', 1, v);
+-    testStore(I32, 'Int32x4', 2, v);
+-    testStore(I32, 'Int32x4', 12, v);
+-
+-    assertThrowsInstanceOf(() => SIMD.Int32x4.store(I32), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Int32x4.store(I32, 0), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Float32x4.store(I32, 0, v), TypeError);
+-}
+-
+-function testStoreUint8x16(Buffer) {
+-    var I8 = new Uint8Array(new Buffer(32));
+-
+-    var v = SIMD.Uint8x16(0, 1, INT8_MAX, INT8_MIN, UINT8_MAX, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+-    testStore(I8, 'Uint8x16', 0, v);
+-    testStore(I8, 'Uint8x16', 1, v);
+-    testStore(I8, 'Uint8x16', 2, v);
+-    testStore(I8, 'Uint8x16', 16, v);
+-
+-    assertThrowsInstanceOf(() => SIMD.Uint8x16.store(I8), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Uint8x16.store(I8, 0), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Uint16x8.store(I8, 0, v), TypeError);
+-}
+-
+-function testStoreUint16x8(Buffer) {
+-    var I16 = new Uint16Array(new Buffer(64));
+-
+-    var v = SIMD.Uint16x8(0, 1, INT16_MAX, INT16_MIN, 4, 5, 6, 7);
+-    testStore(I16, 'Uint16x8', 0, v);
+-    testStore(I16, 'Uint16x8', 1, v);
+-    testStore(I16, 'Uint16x8', 2, v);
+-    testStore(I16, 'Uint16x8', 24, v);
+-
+-    assertThrowsInstanceOf(() => SIMD.Uint16x8.store(I16), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Uint16x8.store(I16, 0), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Uint8x16.store(I16, 0, v), TypeError);
+-}
+-
+-function testStoreUint32x4(Buffer) {
+-    var I32 = new Uint32Array(new Buffer(64));
+-
+-    var v = SIMD.Uint32x4(0, 1, Math.pow(2,31) - 1, -Math.pow(2, 31));
+-    testStore(I32, 'Uint32x4', 0, v);
+-    testStore(I32, 'Uint32x4', 1, v);
+-    testStore(I32, 'Uint32x4', 2, v);
+-    testStore(I32, 'Uint32x4', 12, v);
+-
+-    assertThrowsInstanceOf(() => SIMD.Uint32x4.store(I32), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Uint32x4.store(I32, 0), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Float32x4.store(I32, 0, v), TypeError);
+-}
+-
+-function testStoreFloat32x4(Buffer) {
+-    var F32 = new Float32Array(new Buffer(64));
+-
+-    var v = SIMD.Float32x4(1,2,3,4);
+-    testStore(F32, 'Float32x4', 0, v);
+-    testStore(F32, 'Float32x4', 1, v);
+-    testStore(F32, 'Float32x4', 2, v);
+-    testStore(F32, 'Float32x4', 12, v);
+-
+-    var v = SIMD.Float32x4(NaN, -0, -Infinity, 5e-324);
+-    testStore(F32, 'Float32x4', 0, v);
+-    testStore(F32, 'Float32x4', 1, v);
+-    testStore(F32, 'Float32x4', 2, v);
+-    testStore(F32, 'Float32x4', 12, v);
+-
+-    assertThrowsInstanceOf(() => SIMD.Float32x4.store(F32), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Float32x4.store(F32, 0), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Int32x4.store(F32, 0, v), TypeError);
+-}
+-
+-function testStoreFloat64x2(Buffer) {
+-    var F64 = new Float64Array(new Buffer(128));
+-
+-    var v = SIMD.Float64x2(1, 2);
+-    testStore(F64, 'Float64x2', 0, v);
+-    testStore(F64, 'Float64x2', 1, v);
+-    testStore(F64, 'Float64x2', 14, v);
+-
+-    var v = SIMD.Float64x2(NaN, -0);
+-    testStore(F64, 'Float64x2', 0, v);
+-    testStore(F64, 'Float64x2', 1, v);
+-    testStore(F64, 'Float64x2', 14, v);
+-
+-    var v = SIMD.Float64x2(-Infinity, +Infinity);
+-    testStore(F64, 'Float64x2', 0, v);
+-    testStore(F64, 'Float64x2', 1, v);
+-    testStore(F64, 'Float64x2', 14, v);
+-
+-    assertThrowsInstanceOf(() => SIMD.Float64x2.store(F64), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Float64x2.store(F64, 0), TypeError);
+-    assertThrowsInstanceOf(() => SIMD.Float32x4.store(F64, 0, v), TypeError);
+-}
+-
+-function testSharedArrayBufferCompat() {
+-    var I32 = new Int32Array(new SharedArrayBuffer(16*4));
+-    var TA = I32;
+-
+-    var I8 = new Int8Array(TA.buffer);
+-    var I16 = new Int16Array(TA.buffer);
+-    var F32 = new Float32Array(TA.buffer);
+-    var F64 = new Float64Array(TA.buffer);
+-
+-    var Int8x16 = SIMD.Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+-    var Int16x8 = SIMD.Int16x8(1, 2, 3, 4, 5, 6, 7, 8);
+-    var Int32x4 = SIMD.Int32x4(1, 2, 3, 4);
+-    var Uint8x16 = SIMD.Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+-    var Uint16x8 = SIMD.Uint16x8(1, 2, 3, 4, 5, 6, 7, 8);
+-    var Uint32x4 = SIMD.Uint32x4(1, 2, 3, 4);
+-    var Float32x4 = SIMD.Float32x4(1, 2, 3, 4);
+-    var Float64x2 = SIMD.Float64x2(1, 2);
+-
+-    for (var ta of [
+-                    new Uint8Array(TA.buffer),
+-                    new Int8Array(TA.buffer),
+-                    new Uint16Array(TA.buffer),
+-                    new Int16Array(TA.buffer),
+-                    new Uint32Array(TA.buffer),
+-                    new Int32Array(TA.buffer),
+-                    new Float32Array(TA.buffer),
+-                    new Float64Array(TA.buffer)
+-                   ])
+-    {
+-        SIMD.Int8x16.store(ta, 0, Int8x16);
+-        for (var i = 0; i < 16; i++) assertEq(I8[i], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16][i]);
+-
+-        SIMD.Int16x8.store(ta, 0, Int16x8);
+-        for (var i = 0; i < 8; i++) assertEq(I16[i], [1, 2, 3, 4, 5, 6, 7, 8][i]);
+-
+-        SIMD.Int32x4.store(ta, 0, Int32x4);
+-        for (var i = 0; i < 4; i++) assertEq(I32[i], [1, 2, 3, 4][i]);
+-
+-        SIMD.Uint8x16.store(ta, 0, Uint8x16);
+-        for (var i = 0; i < 16; i++) assertEq(I8[i], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16][i]);
+-
+-        SIMD.Uint16x8.store(ta, 0, Uint16x8);
+-        for (var i = 0; i < 8; i++) assertEq(I16[i], [1, 2, 3, 4, 5, 6, 7, 8][i]);
+-
+-        SIMD.Uint32x4.store(ta, 0, Uint32x4);
+-        for (var i = 0; i < 4; i++) assertEq(I32[i], [1, 2, 3, 4][i]);
+-
+-        SIMD.Float32x4.store(ta, 0, Float32x4);
+-        for (var i = 0; i < 4; i++) assertEq(F32[i], [1, 2, 3, 4][i]);
+-
+-        SIMD.Float64x2.store(ta, 0, Float64x2);
+-        for (var i = 0; i < 2; i++) assertEq(F64[i], [1, 2][i]);
+-
+-        assertThrowsInstanceOf(() => SIMD.Int8x16.store(ta, 1024, Int8x16), RangeError);
+-        assertThrowsInstanceOf(() => SIMD.Int16x8.store(ta, 1024, Int16x8), RangeError);
+-        assertThrowsInstanceOf(() => SIMD.Int32x4.store(ta, 1024, Int32x4), RangeError);
+-        assertThrowsInstanceOf(() => SIMD.Float32x4.store(ta, 1024, Float32x4), RangeError);
+-        assertThrowsInstanceOf(() => SIMD.Float64x2.store(ta, 1024, Float64x2), RangeError);
+-    }
+-}
+-
+-testStoreInt8x16(ArrayBuffer);
+-testStoreInt16x8(ArrayBuffer);
+-testStoreInt32x4(ArrayBuffer);
+-testStoreUint8x16(ArrayBuffer);
+-testStoreUint16x8(ArrayBuffer);
+-testStoreUint32x4(ArrayBuffer);
+-testStoreFloat32x4(ArrayBuffer);
+-testStoreFloat64x2(ArrayBuffer);
+-
+-if (typeof SharedArrayBuffer != "undefined") {
+-  testStoreInt8x16(SharedArrayBuffer);
+-  testStoreInt16x8(SharedArrayBuffer);
+-  testStoreInt32x4(SharedArrayBuffer);
+-  testStoreUint8x16(SharedArrayBuffer);
+-  testStoreUint16x8(SharedArrayBuffer);
+-  testStoreUint32x4(SharedArrayBuffer);
+-  testStoreFloat32x4(SharedArrayBuffer);
+-  testStoreFloat64x2(SharedArrayBuffer);
+-  testSharedArrayBufferCompat();
+-}
+-
+-if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+diff --git a/js/src/tests/non262/SIMD/swizzle-shuffle.js b/js/src/tests/non262/SIMD/swizzle-shuffle.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/swizzle-shuffle.js
++++ /dev/null
+@@ -1,507 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-/*
+- * Any copyright is dedicated to the Public Domain.
+- * https://creativecommons.org/publicdomain/zero/1.0/
+- */
+-
+-var Float32x4 = SIMD.Float32x4;
+-var Float64x2 = SIMD.Float64x2;
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-
+-function swizzle2(arr, x, y) {
+-    return [arr[x], arr[y]];
+-}
+-
+-function swizzle4(arr, x, y, z, w) {
+-    return [arr[x], arr[y], arr[z], arr[w]];
+-}
+-
+-function swizzle8(arr, s0, s1, s2, s3, s4, s5, s6, s7) {
+-    return [arr[s0], arr[s1], arr[s2], arr[s3], arr[s4], arr[s5], arr[s6], arr[s7]];
+-}
+-
+-function swizzle16(arr, s0, s1, s2, s3, s4, s5, s6, s7,
+-                   s8, s9, s10, s11, s12, s13, s14, s15) {
+-    return [arr[s0], arr[s1], arr[s2], arr[s3], arr[s4], arr[s5], arr[s6], arr[s7],
+-            arr[s8], arr[s9], arr[s10], arr[s11], arr[s12], arr[s13], arr[s14], arr[s15]];
+-}
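+-
+-// Reference semantics (illustration only): swizzle reorders the lanes of one
+-// vector by index, so SIMD.Int32x4.swizzle(Int32x4(1, 2, 3, 4), 3, 2, 1, 0)
+-// yields lanes [4, 3, 2, 1], exactly what swizzle4([1, 2, 3, 4], 3, 2, 1, 0)
+-// computes on the scalar side.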
+-
+-function getNumberOfLanesFromType(type) {
+-    switch (type) {
+-      case Int8x16:
+-      case Uint8x16:
+-        return 16;
+-      case Int16x8:
+-      case Uint16x8:
+-        return 8;
+-      case Float32x4:
+-      case Int32x4:
+-      case Uint32x4:
+-        return 4;
+-      case Float64x2:
+-        return 2;
+-    }
+-    throw new TypeError("Unknown SIMD type.");
+-}
+-
+-function testSwizzleForType(type) {
+-    var lanes = getNumberOfLanesFromType(type);
+-    var v;
+-    switch (lanes) {
+-      case 2:
+-        v = type(1, 2);
+-        break;
+-      case 4:
+-        v = type(1, 2, 3, 4);
+-        break;
+-      case 8:
+-        v = type(1, 2, 3, 4, 5, 6, 7, 8);
+-        break;
+-      case 16:
+-        v = type(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+-        break;
+-    }
+-
+-    assertThrowsInstanceOf(() => type.swizzle()               , TypeError);
+-    assertThrowsInstanceOf(() => type.swizzle(v, 0)           , TypeError);
+-    assertThrowsInstanceOf(() => type.swizzle(v, 0, 1, 2)     , TypeError);
+-    assertThrowsInstanceOf(() => type.swizzle(0, 1, 2, 3, v)  , TypeError);
+-
+-    // Test all possible swizzles.
+-    if (lanes == 2) {
+-        var x, y;
+-        for (var i = 0; i < Math.pow(2, 2); i++) {
+-            [x, y] = [i & 1, (i >> 1) & 1];
+-            assertEqVec(type.swizzle(v, x, y), swizzle2(simdToArray(v), x, y));
+-        }
+-    } else if (lanes == 4) {
+-        var x, y, z, w;
+-        for (var i = 0; i < Math.pow(4, 4); i++) {
+-            [x, y, z, w] = [i & 3, (i >> 2) & 3, (i >> 4) & 3, (i >> 6) & 3];
+-            assertEqVec(type.swizzle(v, x, y, z, w), swizzle4(simdToArray(v), x, y, z, w));
+-        }
+-    } else if (lanes == 8) {
+-        var vals = [[1, 2, 1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 5, 6, 7],
+-                    [7, 6, 5, 4, 3, 2, 1, 0], [5, 3, 2, 6, 1, 7, 4, 0]];
+-        for (var t of vals) {
+-          assertEqVec(type.swizzle(v, ...t), swizzle8(simdToArray(v), ...t));
+-        }
+-    } else {
+-        assertEq(lanes, 16);
+-
+-        var vals = [[11, 2, 11, 2, 11, 2, 11, 2, 11, 2, 11, 2, 11, 2, 11, 2],
+-                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+-                    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
+-                    [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
+-                    [5, 14, 3, 2, 6, 9, 1, 10, 7, 11, 4, 0, 13, 15, 8, 12]];
+-        for (var t of vals) {
+-          assertEqVec(type.swizzle(v, ...t), swizzle16(simdToArray(v), ...t));
+-        }
+-    }
+-
+-    // Test that we throw if a lane argument doesn't coerce to an integer in bounds.
+-    if (lanes == 2) {
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0.5), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, {}), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, {valueOf: function(){return 42}}), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, "one"), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, undefined), RangeError);
+-
+-        type.swizzle(v, 0, "00.0");
+-        type.swizzle(v, 0, null);
+-        type.swizzle(v, 0, false);
+-        type.swizzle(v, 0, true);
+-
+-        // In bounds is [0, 1]
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, -1), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 2), RangeError);
+-    } else if (lanes == 4) {
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0.5), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, {}), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, {valueOf: function(){return 42}}), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, "one"), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, undefined), RangeError);
+-
+-        type.swizzle(v, 0, 0, 0, "00.0");
+-        type.swizzle(v, 0, 0, 0, null);
+-        type.swizzle(v, 0, 0, 0, false);
+-        type.swizzle(v, 0, 0, 0, true);
+-
+-        // In bounds is [0, 3]
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, -1), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 4), RangeError);
+-    } else if (lanes == 8) {
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0.5), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, {}), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, {valueOf: function(){return 42}}), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, "one"), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, undefined), RangeError);
+-
+-        type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, "00.0");
+-        type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, null);
+-        type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, false);
+-        type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, true);
+-
+-        // In bounds is [0, 7]
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, -1), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 8), RangeError);
+-    } else {
+-        assertEq(lanes, 16);
+-
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {}), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {valueOf: function(){return 42}}), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "one"), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, undefined), RangeError);
+-
+-        type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "00.0");
+-        type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null);
+-        type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, false);
+-        type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+-
+-        // In bounds is [0, 15]
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1), RangeError);
+-        assertThrowsInstanceOf(() => type.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16), RangeError);
+-    }
+-}
+-
+-function testSwizzleInt8x16() {
+-    var v = Int16x8(1, 2, 3, 4, 5, 6, 7, 8);
+-
+-    assertThrowsInstanceOf(function() {
+-        Int8x16.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testSwizzleForType(Int8x16);
+-}
+-
+-function testSwizzleInt16x8() {
+-    var v = Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+-
+-    assertThrowsInstanceOf(function() {
+-        Int16x8.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testSwizzleForType(Int16x8);
+-}
+-
+-function testSwizzleInt32x4() {
+-    var v = Int32x4(1, 2, 3, 4);
+-
+-    assertThrowsInstanceOf(function() {
+-        Float32x4.swizzle(v, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testSwizzleForType(Int32x4);
+-}
+-
+-function testSwizzleUint8x16() {
+-    var v = Uint16x8(1, 2, 3, 4, 5, 6, 7, 8);
+-
+-    assertThrowsInstanceOf(function() {
+-        Uint8x16.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testSwizzleForType(Uint8x16);
+-}
+-
+-function testSwizzleUint16x8() {
+-    var v = Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+-
+-    assertThrowsInstanceOf(function() {
+-        Uint16x8.swizzle(v, 0, 0, 0, 0, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testSwizzleForType(Uint16x8);
+-}
+-
+-function testSwizzleUint32x4() {
+-    var v = Uint32x4(1, 2, 3, 4);
+-
+-    assertThrowsInstanceOf(function() {
+-        Float32x4.swizzle(v, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testSwizzleForType(Uint32x4);
+-}
+-
+-function testSwizzleFloat32x4() {
+-    var v = Float32x4(1, 2, 3, 4);
+-
+-    assertThrowsInstanceOf(function() {
+-        Int32x4.swizzle(v, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testSwizzleForType(Float32x4);
+-}
+-
+-function testSwizzleFloat64x2() {
+-    var v = Float64x2(1, 2);
+-
+-    assertThrowsInstanceOf(function() {
+-        Float32x4.swizzle(v, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testSwizzleForType(Float64x2);
+-}
+-
+-function shuffle2(lhsa, rhsa, x, y) {
+-    return [(x < 2 ? lhsa : rhsa)[x % 2],
+-            (y < 2 ? lhsa : rhsa)[y % 2]];
+-}
+-function shuffle4(lhsa, rhsa, x, y, z, w) {
+-    return [(x < 4 ? lhsa : rhsa)[x % 4],
+-            (y < 4 ? lhsa : rhsa)[y % 4],
+-            (z < 4 ? lhsa : rhsa)[z % 4],
+-            (w < 4 ? lhsa : rhsa)[w % 4]];
+-}
+-
+-function shuffle8(lhsa, rhsa, s0, s1, s2, s3, s4, s5, s6, s7) {
+-    return [(s0 < 8 ? lhsa : rhsa)[s0 % 8],
+-            (s1 < 8 ? lhsa : rhsa)[s1 % 8],
+-            (s2 < 8 ? lhsa : rhsa)[s2 % 8],
+-            (s3 < 8 ? lhsa : rhsa)[s3 % 8],
+-            (s4 < 8 ? lhsa : rhsa)[s4 % 8],
+-            (s5 < 8 ? lhsa : rhsa)[s5 % 8],
+-            (s6 < 8 ? lhsa : rhsa)[s6 % 8],
+-            (s7 < 8 ? lhsa : rhsa)[s7 % 8]];
+-}
+-
+-function shuffle16(lhsa, rhsa, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15) {
+-    return [(s0 < 16 ? lhsa : rhsa)[s0 % 16],
+-            (s1 < 16 ? lhsa : rhsa)[s1 % 16],
+-            (s2 < 16 ? lhsa : rhsa)[s2 % 16],
+-            (s3 < 16 ? lhsa : rhsa)[s3 % 16],
+-            (s4 < 16 ? lhsa : rhsa)[s4 % 16],
+-            (s5 < 16 ? lhsa : rhsa)[s5 % 16],
+-            (s6 < 16 ? lhsa : rhsa)[s6 % 16],
+-            (s7 < 16 ? lhsa : rhsa)[s7 % 16],
+-            (s8 < 16 ? lhsa : rhsa)[s8 % 16],
+-            (s9 < 16 ? lhsa : rhsa)[s9 % 16],
+-            (s10 < 16 ? lhsa : rhsa)[s10 % 16],
+-            (s11 < 16 ? lhsa : rhsa)[s11 % 16],
+-            (s12 < 16 ? lhsa : rhsa)[s12 % 16],
+-            (s13 < 16 ? lhsa : rhsa)[s13 % 16],
+-            (s14 < 16 ? lhsa : rhsa)[s14 % 16],
+-            (s15 < 16 ? lhsa : rhsa)[s15 % 16]];
+-}
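+-
+-// Reference semantics (illustration only): shuffle draws from two vectors;
+-// indices below the lane count select from lhs, the rest from rhs, so
+-//   SIMD.Int32x4.shuffle(Int32x4(1, 2, 3, 4), Int32x4(5, 6, 7, 8), 0, 4, 1, 5)
+-// yields lanes [1, 5, 2, 6], matching shuffle4([1,2,3,4], [5,6,7,8], 0, 4, 1, 5).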
+-
+-function testShuffleForType(type) {
+-    var lanes = getNumberOfLanesFromType(type);
+-    var lhs, rhs;
+-    if (lanes == 2) {
+-        lhs = type(1, 2);
+-        rhs = type(3, 4);
+-    } else if (lanes == 4) {
+-        lhs = type(1, 2, 3, 4);
+-        rhs = type(5, 6, 7, 8);
+-    } else if (lanes == 8) {
+-        lhs = type(1, 2, 3, 4, 5, 6, 7, 8);
+-        rhs = type(9, 10, 11, 12, 13, 14, 15, 16);
+-    } else {
+-        assertEq(lanes, 16);
+-
+-        lhs = type(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+-        rhs = type(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32);
+-    }
+-
+-    assertThrowsInstanceOf(() => type.shuffle(lhs)                   , TypeError);
+-    assertThrowsInstanceOf(() => type.shuffle(lhs, rhs)              , TypeError);
+-    assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0)           , TypeError);
+-    assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 1, 2)     , TypeError);
+-    assertThrowsInstanceOf(() => type.shuffle(lhs, 0, 1, 2, 7, rhs)  , TypeError);
+-
+-    // Test all possible shuffles.
+-    var x, y, z, w;
+-    if (lanes == 2) {
+-        var x, y;
+-        for (var i = 0; i < Math.pow(4, 2); i++) {
+-            [x, y] = [i & 3, (i >> 2) & 3];
+-            assertEqVec(type.shuffle(lhs, rhs, x, y),
+-                        shuffle2(simdToArray(lhs), simdToArray(rhs), x, y));
+-        }
+-    } else if (lanes == 4) {
+-        var x, y, z, w;
+-        for (var i = 0; i < Math.pow(8, 4); i++) {
+-            [x, y, z, w] = [i & 7, (i >> 3) & 7, (i >> 6) & 7, (i >> 9) & 7];
+-            assertEqVec(type.shuffle(lhs, rhs, x, y, z, w),
+-                        shuffle4(simdToArray(lhs), simdToArray(rhs), x, y, z, w));
+-        }
+-    } else if (lanes == 8) {
+-        var s0, s1, s2, s3, s4, s5, s6, s7;
+-        var vals = [[15, 8, 15, 8, 15, 8, 15, 8], [9, 7, 9, 7, 9, 7, 9, 7],
+-                    [7, 3, 8, 9, 2, 15, 14, 6], [2, 2, 2, 2, 2, 2, 2, 2],
+-                    [8, 8, 8, 8, 8, 8, 8, 8], [11, 11, 11, 11, 11, 11, 11, 11]];
+-        for (var t of vals) {
+-            [s0, s1, s2, s3, s4, s5, s6, s7] = t;
+-            assertEqVec(type.shuffle(lhs, rhs, s0, s1, s2, s3, s4, s5, s6, s7),
+-                        shuffle8(simdToArray(lhs), simdToArray(rhs), s0, s1, s2, s3, s4, s5, s6, s7));
+-        }
+-    } else {
+-        assertEq(lanes, 16);
+-
+-        var s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
+-        var vals = [[30, 16, 30, 16, 30, 16, 30, 16, 30, 16, 30, 16, 30, 16, 30, 16],
+-                    [19, 17, 19, 17, 19, 17, 19, 17, 19, 17, 19, 17, 19, 17, 19, 17],
+-                    [7, 3, 8, 18, 9, 21, 2, 15, 14, 6, 16, 22, 29, 31, 30, 1],
+-                    [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
+-                    [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16],
+-                    [21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21]];
+-        for (var t of vals) {
+-            [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15] = t;
+-            assertEqVec(type.shuffle(lhs, rhs, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15),
+-                        shuffle16(simdToArray(lhs), simdToArray(rhs), s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15));
+-        }
+-    }
+-
+-    // Test that we throw if a lane argument isn't an int32 or isn't in bounds.
+-    if (lanes == 2) {
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0.5), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, {}), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, {valueOf: function(){return 42}}), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, "one"), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, undefined), RangeError);
+-
+-        // In bounds is [0, 3]
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, -1), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 4), RangeError);
+-    } else if (lanes == 4) {
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0.5), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, {}), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, {valueOf: function(){return 42}}), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, "one"), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, undefined), RangeError);
+-
+-        // In bounds is [0, 7]
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, -1), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 8), RangeError);
+-    } else if (lanes == 8) {
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, 0.5), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, {}), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, {valueOf: function(){return 42}}), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, "one"), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, undefined), RangeError);
+-
+-        // In bounds is [0, 15]
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, -1), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, 16), RangeError);
+-    } else {
+-        assertEq(lanes, 16);
+-
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {}), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {valueOf: function(){return 42}}), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "one"), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, undefined), RangeError);
+-
+-        // In bounds is [0, 31]
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1), RangeError);
+-        assertThrowsInstanceOf(() => type.shuffle(lhs, rhs, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32), RangeError);
+-    }
+-}
+-
+-function testShuffleInt8x16() {
+-    var v = Int16x8(1, 2, 3, 4, 5, 6, 7, 8);
+-
+-    assertThrowsInstanceOf(function() {
+-        Int8x16.shuffle(v, v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testShuffleForType(Int8x16);
+-}
+-
+-function testShuffleInt16x8() {
+-    var v = Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+-
+-    assertThrowsInstanceOf(function() {
+-        Int16x8.shuffle(v, v, 0, 0, 0, 0, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testShuffleForType(Int16x8);
+-}
+-
+-function testShuffleInt32x4() {
+-    var v = Int32x4(1, 2, 3, 4);
+-
+-    assertThrowsInstanceOf(function() {
+-        Float32x4.shuffle(v, v, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testShuffleForType(Int32x4);
+-}
+-
+-function testShuffleUint8x16() {
+-    var v = Uint16x8(1, 2, 3, 4, 5, 6, 7, 8);
+-
+-    assertThrowsInstanceOf(function() {
+-        Uint8x16.shuffle(v, v, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testShuffleForType(Uint8x16);
+-}
+-
+-function testShuffleUint16x8() {
+-    var v = Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
+-
+-    assertThrowsInstanceOf(function() {
+-        Uint16x8.shuffle(v, v, 0, 0, 0, 0, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testShuffleForType(Uint16x8);
+-}
+-
+-function testShuffleUint32x4() {
+-    var v = Uint32x4(1, 2, 3, 4);
+-
+-    assertThrowsInstanceOf(function() {
+-        Float32x4.shuffle(v, v, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testShuffleForType(Uint32x4);
+-}
+-
+-function testShuffleFloat32x4() {
+-    var v = Float32x4(1, 2, 3, 4);
+-
+-    assertThrowsInstanceOf(function() {
+-        Int32x4.shuffle(v, v, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testShuffleForType(Float32x4);
+-}
+-
+-function testShuffleFloat64x2() {
+-    var v = Float64x2(1, 2);
+-
+-    assertThrowsInstanceOf(function() {
+-        Float32x4.shuffle(v, v, 0, 0, 0, 0);
+-    }, TypeError);
+-
+-    testShuffleForType(Float64x2);
+-}
+-
+-testSwizzleInt8x16();
+-testSwizzleInt16x8();
+-testSwizzleInt32x4();
+-testSwizzleUint8x16();
+-testSwizzleUint16x8();
+-testSwizzleUint32x4();
+-testSwizzleFloat32x4();
+-testSwizzleFloat64x2();
+-testShuffleInt8x16();
+-testShuffleInt16x8();
+-testShuffleInt32x4();
+-testShuffleUint8x16();
+-testShuffleUint16x8();
+-testShuffleUint32x4();
+-testShuffleFloat32x4();
+-testShuffleFloat64x2();
+-
+-if (typeof reportCompare === "function")
+-    reportCompare(true, true);
+diff --git a/js/src/tests/non262/SIMD/toString.js b/js/src/tests/non262/SIMD/toString.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/toString.js
++++ /dev/null
+@@ -1,90 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-
+-function test() {
+-    var Float32x4 = SIMD.Float32x4;
+-    var f = Float32x4(11, 22, NaN, -44);
+-    assertEq(f.toString(), "SIMD.Float32x4(11, 22, NaN, -44)");
+-
+-    // Polyfill check should show that we already have a toString.
+-    assertEq(Float32x4.prototype.hasOwnProperty("toString"), true);
+-
+-    // This toString method type checks its argument.
+-    var ts = Float32x4.prototype.toString;
+-    assertThrowsInstanceOf(() => ts.call(5), TypeError);
+-    assertThrowsInstanceOf(() => ts.call({}), TypeError);
+-
+-    // Can't convert SIMD objects to numbers.
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Float64x2 = SIMD.Float64x2;
+-    var f = Float64x2(11, 22);
+-    assertEq(f.toString(), "SIMD.Float64x2(11, 22)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Int8x16 = SIMD.Int8x16;
+-    var f = Int8x16(11, 22, 33, 44, -11, -22, -33, -44, 1, 2, 3, 4, -1, -2, -3, -4);
+-    assertEq(f.toString(), "SIMD.Int8x16(11, 22, 33, 44, -11, -22, -33, -44, 1, 2, 3, 4, -1, -2, -3, -4)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Int16x8 = SIMD.Int16x8;
+-    var f = Int16x8(11, 22, 33, 44, -11, -22, -33, -44);
+-    assertEq(f.toString(), "SIMD.Int16x8(11, 22, 33, 44, -11, -22, -33, -44)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Int32x4 = SIMD.Int32x4;
+-    var f = Int32x4(11, 22, 33, 44);
+-    assertEq(f.toString(), "SIMD.Int32x4(11, 22, 33, 44)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Uint8x16 = SIMD.Uint8x16;
+-    var f = Uint8x16(11, 22, 33, 44, 245, 234, 223, 212, 1, 2, 3, 4, 255, 254, 0, 250);
+-    assertEq(f.toString(), "SIMD.Uint8x16(11, 22, 33, 44, 245, 234, 223, 212, 1, 2, 3, 4, 255, 254, 0, 250)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Uint16x8 = SIMD.Uint16x8;
+-    var f = Uint16x8(11, 22, 33, 44, 65535, 65534, 65533, 65532);
+-    assertEq(f.toString(), "SIMD.Uint16x8(11, 22, 33, 44, 65535, 65534, 65533, 65532)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Uint32x4 = SIMD.Uint32x4;
+-    var f = Uint32x4(11, 22, 4294967295, 4294967294);
+-    assertEq(f.toString(), "SIMD.Uint32x4(11, 22, 4294967295, 4294967294)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Bool8x16 = SIMD.Bool8x16;
+-    var f = Bool8x16(true, true, false, false, false, true, true, false, true, true, true, true, false, false, false, false);
+-    assertEq(f.toString(), "SIMD.Bool8x16(true, true, false, false, false, true, true, false, true, true, true, true, false, false, false, false)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Bool16x8 = SIMD.Bool16x8;
+-    var f = Bool16x8(true, true, false, false, true, false, false, true);
+-    assertEq(f.toString(), "SIMD.Bool16x8(true, true, false, false, true, false, false, true)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Bool32x4 = SIMD.Bool32x4;
+-    var f = Bool32x4(true, true, false, false);
+-    assertEq(f.toString(), "SIMD.Bool32x4(true, true, false, false)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    var Bool64x2 = SIMD.Bool64x2;
+-    var f = Bool64x2(true, false);
+-    assertEq(f.toString(), "SIMD.Bool64x2(true, false)");
+-    assertThrowsInstanceOf(() => +f, TypeError);
+-    assertThrowsInstanceOf(() => f.valueOf(), TypeError);
+-
+-    if (typeof reportCompare === "function")
+-        reportCompare(true, true);
+-}
+-
+-test();
+diff --git a/js/src/tests/non262/SIMD/typedobjects.js b/js/src/tests/non262/SIMD/typedobjects.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/typedobjects.js
++++ /dev/null
+@@ -1,1077 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-var Float32x4 = SIMD.Float32x4;
+-var Float64x2 = SIMD.Float64x2;
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-var Bool8x16 = SIMD.Bool8x16;
+-var Bool16x8 = SIMD.Bool16x8;
+-var Bool32x4 = SIMD.Bool32x4;
+-var Bool64x2 = SIMD.Bool64x2;
+-
+-var {StructType, Handle} = TypedObject;
+-var {float32, float64, int8, int16, int32, uint8} = TypedObject;
+-
+-function testFloat32x4Alignment() {
+-  assertEq(Float32x4.byteLength, 16);
+-  assertEq(Float32x4.byteAlignment, 16);
+-
+-  var Compound = new StructType({c: uint8, d: uint8, f: Float32x4});
+-  assertEq(Compound.fieldOffsets.c, 0);
+-  assertEq(Compound.fieldOffsets.d, 1);
+-  assertEq(Compound.fieldOffsets.f, 16);
+-}
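+-
+-// Why f lands at offset 16 (explanatory only): the uint8 fields c and d
+-// occupy bytes 0 and 1, and the Float32x4 field is rounded up to the next
+-// 16-byte boundary, so bytes 2..15 are padding.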
+-
+-function testFloat32x4Getters() {
+-  var f = Float32x4(11, 22, 33, 44);
+-  assertEq(Float32x4.extractLane(f, 0), 11);
+-  assertEq(Float32x4.extractLane(f, 1), 22);
+-  assertEq(Float32x4.extractLane(f, 2), 33);
+-  assertEq(Float32x4.extractLane(f, 3), 44);
+-
+-  assertThrowsInstanceOf(() => Float32x4.extractLane(f, 4), RangeError);
+-  assertThrowsInstanceOf(() => Float32x4.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Float32x4.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Float32x4.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Float32x4.extractLane(Int32x4(1,2,3,4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Float32x4.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Float32x4.extractLane(f, f), TypeError);
+-}
+-
+-function testFloat32x4Handles() {
+-  var Array = Float32x4.array(3);
+-  var array = new Array([Float32x4(1, 2, 3, 4),
+-                         Float32x4(5, 6, 7, 8),
+-                         Float32x4(9, 10, 11, 12)]);
+-
+-  // Test that trying to create a handle into the interior of a
+-  // Float32x4 fails.
+-  assertThrowsInstanceOf(function() {
+-    var h = float32.handle(array, 1, 0);
+-  }, TypeError, "Creating a float32 handle to elem via ctor");
+-
+-  assertThrowsInstanceOf(function() {
+-    var h = float32.handle();
+-    Handle.move(h, array, 1, 0);
+-  }, TypeError, "Creating a float32 handle to elem via move");
+-}
+-
+-function testFloat32x4Reify() {
+-  var Array = Float32x4.array(3);
+-  var array = new Array([Float32x4(1, 2, 3, 4),
+-                         Float32x4(5, 6, 7, 8),
+-                         Float32x4(9, 10, 11, 12)]);
+-
+-  // Test that reading array[1] produces a *copy* of Float32x4, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-  assertEq(Float32x4.extractLane(f, 3), 8);
+-  assertEq(Float32x4.extractLane(array[1], 3), 8);
+-  array[1] = Float32x4(15, 16, 17, 18);
+-  assertEq(Float32x4.extractLane(f, 3), 8);
+-  assertEq(Float32x4.extractLane(array[1], 3), 18);
+-}
+-
+-function testFloat32x4Setters() {
+-  var Array = Float32x4.array(3);
+-  var array = new Array([Float32x4(1, 2, 3, 4),
+-                         Float32x4(5, 6, 7, 8),
+-                         Float32x4(9, 10, 11, 12)]);
+-  assertEq(Float32x4.extractLane(array[1], 3), 8);
+-
+-  // Test that we are allowed to write Float32x4 values into array,
+-  // but not other things.
+-
+-  array[1] = Float32x4(15, 16, 17, 18);
+-  assertEq(Float32x4.extractLane(array[1], 3), 18);
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {x: 15, y: 16, z: 17, w: 18};
+-  }, TypeError, "Setting Float32x4 from an object");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [15, 16, 17, 18];
+-  }, TypeError, "Setting Float32x4 from an array");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = 22;
+-  }, TypeError, "Setting Float32x4 from a number");
+-}
+-
+-function testFloat64x2Alignment() {
+-  assertEq(Float64x2.byteLength, 16);
+-  assertEq(Float64x2.byteAlignment, 16);
+-
+-  var Compound = new StructType({c: uint8, d: uint8, f: Float64x2});
+-  assertEq(Compound.fieldOffsets.c, 0);
+-  assertEq(Compound.fieldOffsets.d, 1);
+-  assertEq(Compound.fieldOffsets.f, 16);
+-}
+-
+-function testFloat64x2Getters() {
+-  // Create a Float64x2 and check that the getters work:
+-  var f = Float64x2(11, 22);
+-  assertEq(Float64x2.extractLane(f, 0), 11);
+-  assertEq(Float64x2.extractLane(f, 1), 22);
+-
+-  assertThrowsInstanceOf(() => Float64x2.extractLane(f, 2), RangeError);
+-  assertThrowsInstanceOf(() => Float64x2.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Float64x2.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Float64x2.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Float64x2.extractLane(Float32x4(1,2,3,4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Float64x2.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Float64x2.extractLane(f, f), TypeError);
+-}
+-
+-function testFloat64x2Handles() {
+-  var Array = Float64x2.array(3);
+-  var array = new Array([Float64x2(1, 2),
+-                         Float64x2(3, 4),
+-                         Float64x2(5, 6)]);
+-
+-  // Test that trying to create a handle into the interior of a
+-  // Float64x2 fails.
+-  assertThrowsInstanceOf(function() {
+-    var h = float64.handle(array, 1, 0);
+-  }, TypeError, "Creating a float64 handle to elem via ctor");
+-
+-  assertThrowsInstanceOf(function() {
+-    var h = float64.handle();
+-    Handle.move(h, array, 1, 0);
+-  }, TypeError, "Creating a float64 handle to elem via move");
+-}
+-
+-function testFloat64x2Reify() {
+-  var Array = Float64x2.array(3);
+-  var array = new Array([Float64x2(1, 2),
+-                         Float64x2(3, 4),
+-                         Float64x2(5, 6)]);
+-
+-  // Test that reading array[1] produces a *copy* of Float64x2, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-  assertEq(Float64x2.extractLane(f, 1), 4);
+-  assertEq(Float64x2.extractLane(array[1], 1), 4);
+-  array[1] = Float64x2(7, 8);
+-  assertEq(Float64x2.extractLane(f, 1), 4);
+-  assertEq(Float64x2.extractLane(array[1], 1), 8);
+-}
+-
+-function testFloat64x2Setters() {
+-  var Array = Float64x2.array(3);
+-  var array = new Array([Float64x2(1, 2),
+-                         Float64x2(3, 4),
+-                         Float64x2(5, 6)]);
+-  assertEq(Float64x2.extractLane(array[1], 1), 4);
+-
+-  // Test that we are allowed to write Float64x2 values into array,
+-  // but not other things.
+-
+-  array[1] = Float64x2(7, 8);
+-  assertEq(Float64x2.extractLane(array[1], 1), 8);
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {x: 7, y: 8 };
+-  }, TypeError, "Setting Float64x2 from an object");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [ 7, 8 ];
+-  }, TypeError, "Setting Float64x2 from an array");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = 9;
+-  }, TypeError, "Setting Float64x2 from a number");
+-}
+-
+-function testInt8x16Alignment() {
+-  assertEq(Int8x16.byteLength, 16);
+-  assertEq(Int8x16.byteAlignment, 16);
+-
+-  var Compound = new StructType({c: uint8, d: uint8, f: Int8x16});
+-  assertEq(Compound.fieldOffsets.c, 0);
+-  assertEq(Compound.fieldOffsets.d, 1);
+-  assertEq(Compound.fieldOffsets.f, 16);
+-}
+-
+-function testInt8x16Getters() {
+-  // Create an Int8x16 and check that the getters work:
+-  var f = Int8x16(11, 22, 33, 44, 55, 66, 77, 88, 99, 10, 20, 30, 40, 50, 60, 70);
+-  assertEq(Int8x16.extractLane(f, 0), 11);
+-  assertEq(Int8x16.extractLane(f, 1), 22);
+-  assertEq(Int8x16.extractLane(f, 2), 33);
+-  assertEq(Int8x16.extractLane(f, 3), 44);
+-  assertEq(Int8x16.extractLane(f, 4), 55);
+-  assertEq(Int8x16.extractLane(f, 5), 66);
+-  assertEq(Int8x16.extractLane(f, 6), 77);
+-  assertEq(Int8x16.extractLane(f, 7), 88);
+-  assertEq(Int8x16.extractLane(f, 8), 99);
+-  assertEq(Int8x16.extractLane(f, 9), 10);
+-  assertEq(Int8x16.extractLane(f, 10), 20);
+-  assertEq(Int8x16.extractLane(f, 11), 30);
+-  assertEq(Int8x16.extractLane(f, 12), 40);
+-  assertEq(Int8x16.extractLane(f, 13), 50);
+-  assertEq(Int8x16.extractLane(f, 14), 60);
+-  assertEq(Int8x16.extractLane(f, 15), 70);
+-
+-  assertThrowsInstanceOf(() => Int8x16.extractLane(f, 16), RangeError);
+-  assertThrowsInstanceOf(() => Int8x16.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Int8x16.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Int8x16.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Int8x16.extractLane(Int32x4(1,2,3,4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Int8x16.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Int8x16.extractLane(f, f), TypeError);
+-}
+-
+-function testInt8x16Handles() {
+-  var Array = Int8x16.array(3);
+-  var array = new Array([Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
+-                         Int8x16(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32),
+-                         Int8x16(33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)]);
+-
+-  // Test that trying to create a handle into the interior of a
+-  // Int8x16 fails.
+-  assertThrowsInstanceOf(function() {
+-    var h = int8.handle(array, 1, 0);
+-  }, TypeError, "Creating an int8 handle to elem via ctor");
+-
+-  assertThrowsInstanceOf(function() {
+-    var h = int8.handle();
+-    Handle.move(h, array, 1, 0);
+-  }, TypeError, "Creating an int8 handle to elem via move");
+-}
+-
+-function testInt8x16Reify() {
+-  var Array = Int8x16.array(3);
+-  var array = new Array([Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
+-                         Int8x16(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32),
+-                         Int8x16(33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)]);
+-
+-  // Test that reading array[1] produces a *copy* of Int8x16, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-
+-  var sj1 = Int8x16.extractLane(f, 3);
+-
+-  assertEq(sj1, 20);
+-  assertEq(Int8x16.extractLane(array[1], 3), 20);
+-  array[1] = Int8x16(49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64);
+-  assertEq(Int8x16.extractLane(f, 3), 20);
+-  assertEq(Int8x16.extractLane(array[1], 3), 52);
+-}
+-
+-function testInt8x16Setters() {
+-  var Array = Int8x16.array(3);
+-  var array = new Array([Int8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
+-                         Int8x16(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32),
+-                         Int8x16(33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)]);
+-  assertEq(Int8x16.extractLane(array[1], 3), 20);
+-
+-  // Test that we are allowed to write Int8x16 values into array,
+-  // but not other things.
+-
+-  array[1] = Int8x16(49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64);
+-  assertEq(Int8x16.extractLane(array[1], 3), 52);
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {s0: 49, s1: 50, s2: 51, s3: 52, s4: 53, s5: 54, s6: 55, s7: 56,
+-                s8: 57, s9: 58, s10: 59, s11: 60, s12: 61, s13: 62, s14: 63, s15: 64};
+-  }, TypeError, "Setting Int8x16 from an object");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64];
+-  }, TypeError, "Setting Int8x16 from an array");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = 52;
+-  }, TypeError, "Setting Int8x16 from a number");
+-}
+-
+-function testInt16x8Alignment() {
+-  assertEq(Int16x8.byteLength, 16);
+-  assertEq(Int16x8.byteAlignment, 16);
+-
+-  var Compound = new StructType({c: uint8, d: uint8, f: Int16x8});
+-  assertEq(Compound.fieldOffsets.c, 0);
+-  assertEq(Compound.fieldOffsets.d, 1);
+-  assertEq(Compound.fieldOffsets.f, 16);
+-}
+-
+-function testInt16x8Getters() {
+-  // Create an Int16x8 and check that the getters work:
+-  var f = Int16x8(11, 22, 33, 44, 55, 66, 77, 88);
+-  assertEq(Int16x8.extractLane(f, 0), 11);
+-  assertEq(Int16x8.extractLane(f, 1), 22);
+-  assertEq(Int16x8.extractLane(f, 2), 33);
+-  assertEq(Int16x8.extractLane(f, 3), 44);
+-  assertEq(Int16x8.extractLane(f, 4), 55);
+-  assertEq(Int16x8.extractLane(f, 5), 66);
+-  assertEq(Int16x8.extractLane(f, 6), 77);
+-  assertEq(Int16x8.extractLane(f, 7), 88);
+-
+-  assertThrowsInstanceOf(() => Int16x8.extractLane(f, 8), RangeError);
+-  assertThrowsInstanceOf(() => Int16x8.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Int16x8.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Int16x8.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Int16x8.extractLane(Int32x4(1,2,3,4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Int16x8.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Int16x8.extractLane(f, f), TypeError);
+-}
+-
+-function testInt16x8Handles() {
+-  var Array = Int16x8.array(3);
+-  var array = new Array([Int16x8(1, 2, 3, 4, 5, 6, 7, 8),
+-                         Int16x8(9, 10, 11, 12, 13, 14, 15, 16),
+-                         Int16x8(17, 18, 19, 20, 21, 22, 23, 24)]);
+-
+-  // Test that trying to create a handle into the interior of a
+-  // Int16x8 fails.
+-  assertThrowsInstanceOf(function() {
+-    var h = int16.handle(array, 1, 0);
+-  }, TypeError, "Creating an int16 handle to elem via ctor");
+-
+-  assertThrowsInstanceOf(function() {
+-    var h = int16.handle();
+-    Handle.move(h, array, 1, 0);
+-  }, TypeError, "Creating an int16 handle to elem via move");
+-}
+-
+-function testInt16x8Reify() {
+-  var Array = Int16x8.array(3);
+-  var array = new Array([Int16x8(1, 2, 3, 4, 5, 6, 7, 8),
+-                         Int16x8(9, 10, 11, 12, 13, 14, 15, 16),
+-                         Int16x8(17, 18, 19, 20, 21, 22, 23, 24)]);
+-
+-  // Test that reading array[1] produces a *copy* of Int16x8, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-  assertEq(Int16x8.extractLane(f, 3), 12);
+-  assertEq(Int16x8.extractLane(array[1], 3), 12);
+-  array[1] = Int16x8(25, 26, 27, 28, 29, 30, 31, 32);
+-  assertEq(Int16x8.extractLane(f, 3), 12);
+-  assertEq(Int16x8.extractLane(array[1], 3), 28);
+-}
+-
+-function testInt16x8Setters() {
+-  var Array = Int16x8.array(3);
+-  var array = new Array([Int16x8(1, 2, 3, 4, 5, 6, 7, 8),
+-                         Int16x8(9, 10, 11, 12, 13, 14, 15, 16),
+-                         Int16x8(17, 18, 19, 20, 21, 22, 23, 24)]);
+-  assertEq(Int16x8.extractLane(array[1], 3), 12);
+-
+-  // Test that we are allowed to write Int16x8 values into array,
+-  // but not other things.
+-
+-  array[1] = Int16x8(25, 26, 27, 28, 29, 30, 31, 32);
+-  assertEq(Int16x8.extractLane(array[1], 3), 28);
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {s0: 25, s1: 26, s2: 27, s3: 28, s4: 29, s5: 30, s6: 31, s7: 32};
+-  }, TypeError, "Setting Int16x8 from an object");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [25, 26, 27, 28, 29, 30, 31, 32];
+-  }, TypeError, "Setting Int16x8 from an array");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = 28;
+-  }, TypeError, "Setting Int16x8 from a number");
+-}
+-
+-function testInt32x4Alignment() {
+-  assertEq(Int32x4.byteLength, 16);
+-  assertEq(Int32x4.byteAlignment, 16);
+-
+-  var Compound = new StructType({c: uint8, d: uint8, f: Int32x4});
+-  assertEq(Compound.fieldOffsets.c, 0);
+-  assertEq(Compound.fieldOffsets.d, 1);
+-  assertEq(Compound.fieldOffsets.f, 16);
+-}
+-
+-function testInt32x4Getters() {
+-  // Create an Int32x4 and check that the getters work:
+-  var f = Int32x4(11, 22, 33, 44);
+-  assertEq(Int32x4.extractLane(f, 0), 11);
+-  assertEq(Int32x4.extractLane(f, 1), 22);
+-  assertEq(Int32x4.extractLane(f, 2), 33);
+-  assertEq(Int32x4.extractLane(f, 3), 44);
+-
+-  assertThrowsInstanceOf(() => Int32x4.extractLane(f, 4), RangeError);
+-  assertThrowsInstanceOf(() => Int32x4.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Int32x4.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Int32x4.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Int32x4.extractLane(Float32x4(1,2,3,4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Int32x4.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Int32x4.extractLane(f, f), TypeError);
+-}
+-
+-function testInt32x4Handles() {
+-  var Array = Int32x4.array(3);
+-  var array = new Array([Int32x4(1, 2, 3, 4),
+-                         Int32x4(5, 6, 7, 8),
+-                         Int32x4(9, 10, 11, 12)]);
+-
+-  // Test that trying to create a handle into the interior of a
+-  // Int32x4 fails.
+-  assertThrowsInstanceOf(function() {
+-    var h = int32.handle(array, 1, 0);
+-  }, TypeError, "Creating an int32 handle to elem via ctor");
+-
+-  assertThrowsInstanceOf(function() {
+-    var h = int32.handle();
+-    Handle.move(h, array, 1, 0);
+-  }, TypeError, "Creating an int32 handle to elem via move");
+-}
+-
+-function testInt32x4Reify() {
+-  var Array = Int32x4.array(3);
+-  var array = new Array([Int32x4(1, 2, 3, 4),
+-                         Int32x4(5, 6, 7, 8),
+-                         Int32x4(9, 10, 11, 12)]);
+-
+-  // Test that reading array[1] produces a *copy* of Int32x4, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-  assertEq(Int32x4.extractLane(f, 3), 8);
+-  assertEq(Int32x4.extractLane(array[1], 3), 8);
+-  array[1] = Int32x4(15, 16, 17, 18);
+-  assertEq(Int32x4.extractLane(f, 3), 8);
+-  assertEq(Int32x4.extractLane(array[1], 3), 18);
+-}
+-
+-function testInt32x4Setters() {
+-  var Array = Int32x4.array(3);
+-  var array = new Array([Int32x4(1, 2, 3, 4),
+-                         Int32x4(5, 6, 7, 8),
+-                         Int32x4(9, 10, 11, 12)]);
+-  assertEq(Int32x4.extractLane(array[1], 3), 8);
+-
+-  // Test that we are allowed to write Int32x4 values into array,
+-  // but not other things.
+-  array[1] = Int32x4(15, 16, 17, 18);
+-  assertEq(Int32x4.extractLane(array[1], 3), 18);
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {x: 15, y: 16, z: 17, w: 18};
+-  }, TypeError, "Setting Int32x4 from an object");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [15, 16, 17, 18];
+-  }, TypeError, "Setting Int32x4 from an array");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = 22;
+-  }, TypeError, "Setting Int32x4 from a number");
+-}
+-
+-function testUint8x16Alignment() {
+-  assertEq(Uint8x16.byteLength, 16);
+-  assertEq(Uint8x16.byteAlignment, 16);
+-
+-  var Compound = new StructType({c: uint8, d: uint8, f: Uint8x16});
+-  assertEq(Compound.fieldOffsets.c, 0);
+-  assertEq(Compound.fieldOffsets.d, 1);
+-  assertEq(Compound.fieldOffsets.f, 16);
+-}
+-
+-function testUint8x16Getters() {
+-  // Create a Uint8x16 and check that the getters work:
+-  var f = Uint8x16(11, 22, 33, 44, 55, 66, 77, 88, 99, 10, 20, 30, 40, 50, 60, 70);
+-  assertEq(Uint8x16.extractLane(f, 0), 11);
+-  assertEq(Uint8x16.extractLane(f, 1), 22);
+-  assertEq(Uint8x16.extractLane(f, 2), 33);
+-  assertEq(Uint8x16.extractLane(f, 3), 44);
+-  assertEq(Uint8x16.extractLane(f, 4), 55);
+-  assertEq(Uint8x16.extractLane(f, 5), 66);
+-  assertEq(Uint8x16.extractLane(f, 6), 77);
+-  assertEq(Uint8x16.extractLane(f, 7), 88);
+-  assertEq(Uint8x16.extractLane(f, 8), 99);
+-  assertEq(Uint8x16.extractLane(f, 9), 10);
+-  assertEq(Uint8x16.extractLane(f, 10), 20);
+-  assertEq(Uint8x16.extractLane(f, 11), 30);
+-  assertEq(Uint8x16.extractLane(f, 12), 40);
+-  assertEq(Uint8x16.extractLane(f, 13), 50);
+-  assertEq(Uint8x16.extractLane(f, 14), 60);
+-  assertEq(Uint8x16.extractLane(f, 15), 70);
+-
+-  assertThrowsInstanceOf(() => Uint8x16.extractLane(f, 16), RangeError);
+-  assertThrowsInstanceOf(() => Uint8x16.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Uint8x16.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Uint8x16.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Uint8x16.extractLane(Uint32x4(1,2,3,4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Uint8x16.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Uint8x16.extractLane(f, f), TypeError);
+-}
+-
+-function testUint8x16Handles() {
+-  var Array = Uint8x16.array(3);
+-  var array = new Array([Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
+-                         Uint8x16(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32),
+-                         Uint8x16(33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)]);
+-
+-  // Test that trying to create a handle into the interior of a
+-  // Uint8x16 fails.
+-  assertThrowsInstanceOf(function() {
+-    var h = int8.handle(array, 1, 0);
+-  }, TypeError, "Creating an int8 handle to elem via ctor");
+-
+-  assertThrowsInstanceOf(function() {
+-    var h = int8.handle();
+-    Handle.move(h, array, 1, 0);
+-  }, TypeError, "Creating an int8 handle to elem via move");
+-}
+-
+-function testUint8x16Reify() {
+-  var Array = Uint8x16.array(3);
+-  var array = new Array([Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
+-                         Uint8x16(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32),
+-                         Uint8x16(33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)]);
+-
+-  // Test that reading array[1] produces a *copy* of Uint8x16, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-
+-  var sj1 = Uint8x16.extractLane(f, 3);
+-
+-  assertEq(sj1, 20);
+-  assertEq(Uint8x16.extractLane(array[1], 3), 20);
+-  array[1] = Uint8x16(49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64);
+-  assertEq(Uint8x16.extractLane(f, 3), 20);
+-  assertEq(Uint8x16.extractLane(array[1], 3), 52);
+-}
+-
+-function testUint8x16Setters() {
+-  var Array = Uint8x16.array(3);
+-  var array = new Array([Uint8x16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
+-                         Uint8x16(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32),
+-                         Uint8x16(33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)]);
+-  assertEq(Uint8x16.extractLane(array[1], 3), 20);
+-
+-  // Test that we are allowed to write Uint8x16 values into array,
+-  // but not other things.
+-
+-  array[1] = Uint8x16(49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64);
+-  assertEq(Uint8x16.extractLane(array[1], 3), 52);
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {s0: 49, s1: 50, s2: 51, s3: 52, s4: 53, s5: 54, s6: 55, s7: 56,
+-                s8: 57, s9: 58, s10: 59, s11: 60, s12: 61, s13: 62, s14: 63, s15: 64};
+-  }, TypeError, "Setting Uint8x16 from an object");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64];
+-  }, TypeError, "Setting Uint8x16 from an array");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = 52;
+-  }, TypeError, "Setting Uint8x16 from a number");
+-}
+-
+-function testUint16x8Alignment() {
+-  assertEq(Uint16x8.byteLength, 16);
+-  assertEq(Uint16x8.byteAlignment, 16);
+-
+-  var Compound = new StructType({c: uint8, d: uint8, f: Uint16x8});
+-  assertEq(Compound.fieldOffsets.c, 0);
+-  assertEq(Compound.fieldOffsets.d, 1);
+-  assertEq(Compound.fieldOffsets.f, 16);
+-}
+-
+-function testUint16x8Getters() {
+-  // Create a Uint16x8 and check that the getters work:
+-  var f = Uint16x8(11, 22, 33, 44, 55, 66, 77, 88);
+-  assertEq(Uint16x8.extractLane(f, 0), 11);
+-  assertEq(Uint16x8.extractLane(f, 1), 22);
+-  assertEq(Uint16x8.extractLane(f, 2), 33);
+-  assertEq(Uint16x8.extractLane(f, 3), 44);
+-  assertEq(Uint16x8.extractLane(f, 4), 55);
+-  assertEq(Uint16x8.extractLane(f, 5), 66);
+-  assertEq(Uint16x8.extractLane(f, 6), 77);
+-  assertEq(Uint16x8.extractLane(f, 7), 88);
+-
+-  assertThrowsInstanceOf(() => Uint16x8.extractLane(f, 8), RangeError);
+-  assertThrowsInstanceOf(() => Uint16x8.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Uint16x8.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Uint16x8.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Uint16x8.extractLane(Uint32x4(1,2,3,4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Uint16x8.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Uint16x8.extractLane(f, f), TypeError);
+-}
+-
+-function testUint16x8Handles() {
+-  var Array = Uint16x8.array(3);
+-  var array = new Array([Uint16x8(1, 2, 3, 4, 5, 6, 7, 8),
+-                         Uint16x8(9, 10, 11, 12, 13, 14, 15, 16),
+-                         Uint16x8(17, 18, 19, 20, 21, 22, 23, 24)]);
+-
+-  // Test that trying to create a handle into the interior of a
+-  // Uint16x8 fails.
+-  assertThrowsInstanceOf(function() {
+-    var h = int16.handle(array, 1, 0);
+-  }, TypeError, "Creating an int16 handle to elem via ctor");
+-
+-  assertThrowsInstanceOf(function() {
+-    var h = int16.handle();
+-    Handle.move(h, array, 1, 0);
+-  }, TypeError, "Creating an int16 handle to elem via move");
+-}
+-
+-function testUint16x8Reify() {
+-  var Array = Uint16x8.array(3);
+-  var array = new Array([Uint16x8(1, 2, 3, 4, 5, 6, 7, 8),
+-                         Uint16x8(9, 10, 11, 12, 13, 14, 15, 16),
+-                         Uint16x8(17, 18, 19, 20, 21, 22, 23, 24)]);
+-
+-  // Test that reading array[1] produces a *copy* of Uint16x8, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-  assertEq(Uint16x8.extractLane(f, 3), 12);
+-  assertEq(Uint16x8.extractLane(array[1], 3), 12);
+-  array[1] = Uint16x8(25, 26, 27, 28, 29, 30, 31, 32);
+-  assertEq(Uint16x8.extractLane(f, 3), 12);
+-  assertEq(Uint16x8.extractLane(array[1], 3), 28);
+-}
+-
+-function testUint16x8Setters() {
+-  var Array = Uint16x8.array(3);
+-  var array = new Array([Uint16x8(1, 2, 3, 4, 5, 6, 7, 8),
+-                         Uint16x8(9, 10, 11, 12, 13, 14, 15, 16),
+-                         Uint16x8(17, 18, 19, 20, 21, 22, 23, 24)]);
+-  assertEq(Uint16x8.extractLane(array[1], 3), 12);
+-
+-  // Test that we are allowed to write Uint16x8 values into array,
+-  // but not other things.
+-
+-  array[1] = Uint16x8(25, 26, 27, 28, 29, 30, 31, 32);
+-  assertEq(Uint16x8.extractLane(array[1], 3), 28);
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {s0: 25, s1: 26, s2: 27, s3: 28, s4: 29, s5: 30, s6: 31, s7: 32};
+-  }, TypeError, "Setting Uint16x8 from an object");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [25, 26, 27, 28, 29, 30, 31, 32];
+-  }, TypeError, "Setting Uint16x8 from an array");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = 28;
+-  }, TypeError, "Setting Uint16x8 from a number");
+-}
+-
+-function testUint32x4Alignment() {
+-  assertEq(Uint32x4.byteLength, 16);
+-  assertEq(Uint32x4.byteAlignment, 16);
+-
+-  var Compound = new StructType({c: uint8, d: uint8, f: Uint32x4});
+-  assertEq(Compound.fieldOffsets.c, 0);
+-  assertEq(Compound.fieldOffsets.d, 1);
+-  assertEq(Compound.fieldOffsets.f, 16);
+-}
+-
+-function testUint32x4Getters() {
+-  // Create a Uint32x4 and check that the getters work:
+-  var f = Uint32x4(11, 22, 33, 44);
+-  assertEq(Uint32x4.extractLane(f, 0), 11);
+-  assertEq(Uint32x4.extractLane(f, 1), 22);
+-  assertEq(Uint32x4.extractLane(f, 2), 33);
+-  assertEq(Uint32x4.extractLane(f, 3), 44);
+-
+-  assertThrowsInstanceOf(() => Uint32x4.extractLane(f, 4), RangeError);
+-  assertThrowsInstanceOf(() => Uint32x4.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Uint32x4.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Uint32x4.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Uint32x4.extractLane(Float32x4(1,2,3,4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Uint32x4.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Uint32x4.extractLane(f, f), TypeError);
+-}
+-
+-function testUint32x4Handles() {
+-  var Array = Uint32x4.array(3);
+-  var array = new Array([Uint32x4(1, 2, 3, 4),
+-                         Uint32x4(5, 6, 7, 8),
+-                         Uint32x4(9, 10, 11, 12)]);
+-
+-  // Test that trying to create a handle into the interior of a
+-  // Uint32x4 fails.
+-  assertThrowsInstanceOf(function() {
+-    var h = int32.handle(array, 1, 0);
+-  }, TypeError, "Creating an int32 handle to elem via ctor");
+-
+-  assertThrowsInstanceOf(function() {
+-    var h = int32.handle();
+-    Handle.move(h, array, 1, 0);
+-  }, TypeError, "Creating an int32 handle to elem via move");
+-}
+-
+-function testUint32x4Reify() {
+-  var Array = Uint32x4.array(3);
+-  var array = new Array([Uint32x4(1, 2, 3, 4),
+-                         Uint32x4(5, 6, 7, 8),
+-                         Uint32x4(9, 10, 11, 12)]);
+-
+-  // Test that reading array[1] produces a *copy* of Uint32x4, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-  assertEq(Uint32x4.extractLane(f, 3), 8);
+-  assertEq(Uint32x4.extractLane(array[1], 3), 8);
+-  array[1] = Uint32x4(15, 16, 17, 18);
+-  assertEq(Uint32x4.extractLane(f, 3), 8);
+-  assertEq(Uint32x4.extractLane(array[1], 3), 18);
+-}
+-
+-function testUint32x4Setters() {
+-  var Array = Uint32x4.array(3);
+-  var array = new Array([Uint32x4(1, 2, 3, 4),
+-                         Uint32x4(5, 6, 7, 8),
+-                         Uint32x4(9, 10, 11, 12)]);
+-  assertEq(Uint32x4.extractLane(array[1], 3), 8);
+-
+-  // Test that we are allowed to write Uint32x4 values into array,
+-  // but not other things.
+-  array[1] = Uint32x4(15, 16, 17, 18);
+-  assertEq(Uint32x4.extractLane(array[1], 3), 18);
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {x: 15, y: 16, z: 17, w: 18};
+-  }, TypeError, "Setting Uint32x4 from an object");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [15, 16, 17, 18];
+-  }, TypeError, "Setting Uint32x4 from an array");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = 22;
+-  }, TypeError, "Setting Uint32x4 from a number");
+-}
+-
+-function testBool8x16Getters() {
+-  // Create a Bool8x16 and check that the getters work:
+-  var f = Bool8x16(true, false, true, false, true, false, true, false, true, true, false, false, true, true, false, false);
+-  assertEq(Bool8x16.extractLane(f, 0), true);
+-  assertEq(Bool8x16.extractLane(f, 1), false);
+-  assertEq(Bool8x16.extractLane(f, 2), true);
+-  assertEq(Bool8x16.extractLane(f, 3), false);
+-  assertEq(Bool8x16.extractLane(f, 4), true);
+-  assertEq(Bool8x16.extractLane(f, 5), false);
+-  assertEq(Bool8x16.extractLane(f, 6), true);
+-  assertEq(Bool8x16.extractLane(f, 7), false);
+-  assertEq(Bool8x16.extractLane(f, 8), true);
+-  assertEq(Bool8x16.extractLane(f, 9), true);
+-  assertEq(Bool8x16.extractLane(f, 10), false);
+-  assertEq(Bool8x16.extractLane(f, 11), false);
+-  assertEq(Bool8x16.extractLane(f, 12), true);
+-  assertEq(Bool8x16.extractLane(f, 13), true);
+-  assertEq(Bool8x16.extractLane(f, 14), false);
+-  assertEq(Bool8x16.extractLane(f, 15), false);
+-
+-  assertThrowsInstanceOf(() => Bool8x16.extractLane(f, 16), RangeError);
+-  assertThrowsInstanceOf(() => Bool8x16.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Bool8x16.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Bool8x16.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Bool8x16.extractLane(Float32x4(1, 2, 3, 4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Bool8x16.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Bool8x16.extractLane(f, f), TypeError);
+-}
+-
+-function testBool8x16Reify() {
+-  var Array = Bool8x16.array(3);
+-  var array = new Array([Bool8x16(true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false),
+-                         Bool8x16(false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true),
+-                         Bool8x16(true, true, true, true, false, false, false, false, true, true, true, true, false, false, false, false)]);
+-
+-  // Test that reading array[1] produces a *copy* of Bool8x16, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-  assertEq(Bool8x16.extractLane(f, 2), false);
+-  assertEq(Bool8x16.extractLane(array[1], 2), false);
+-  assertEq(Bool8x16.extractLane(f, 3), true);
+-  assertEq(Bool8x16.extractLane(array[1], 3), true);
+-  array[1] = Bool8x16(true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false);
+-  assertEq(Bool8x16.extractLane(f, 3), true);
+-  assertEq(Bool8x16.extractLane(array[1], 3), false);
+-}
+-
+-function testBool8x16Setters() {
+-  var Array = Bool8x16.array(3);
+-  var array = new Array([Bool8x16(true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false),
+-                         Bool8x16(false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true),
+-                         Bool8x16(true, true, true, true, false, false, false, false, true, true, true, true, false, false, false, false)]);
+-
+-  assertEq(Bool8x16.extractLane(array[1], 3), true);
+-  // Test that we are allowed to write Bool8x16 values into array,
+-  // but not other things.
+-  array[1] = Bool8x16(true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false);
+-  assertEq(Bool8x16.extractLane(array[1], 3), false);
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {s0: true, s1: true, s2: true, s3: true, s4: true, s5: true, s6: true, s7: true,
+-                s8: false, s9: false, s10: false, s11: false, s12: false, s13: false, s14: false, s15: false};
+-  }, TypeError, "Setting Bool8x16 from an object");
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false];
+-  }, TypeError, "Setting Bool8x16 from an array");
+-  assertThrowsInstanceOf(function() {
+-    array[1] = false;
+-  }, TypeError, "Setting Bool8x16 from a boolean");
+-}
+-
+-function testBool16x8Getters() {
+-  // Create a Bool16x8 and check that the getters work:
+-  var f = Bool16x8(true, false, true, false, true, true, false, false);
+-  assertEq(Bool16x8.extractLane(f, 0), true);
+-  assertEq(Bool16x8.extractLane(f, 1), false);
+-  assertEq(Bool16x8.extractLane(f, 2), true);
+-  assertEq(Bool16x8.extractLane(f, 3), false);
+-  assertEq(Bool16x8.extractLane(f, 4), true);
+-  assertEq(Bool16x8.extractLane(f, 5), true);
+-  assertEq(Bool16x8.extractLane(f, 6), false);
+-  assertEq(Bool16x8.extractLane(f, 7), false);
+-
+-  assertThrowsInstanceOf(() => Bool16x8.extractLane(f, 8), RangeError);
+-  assertThrowsInstanceOf(() => Bool16x8.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Bool16x8.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Bool16x8.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Bool16x8.extractLane(Float32x4(1, 2, 3, 4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Bool16x8.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Bool16x8.extractLane(f, f), TypeError);
+-}
+-
+-function testBool16x8Reify() {
+-  var Array = Bool16x8.array(3);
+-  var array = new Array([Bool16x8(true, false, true, false, true, false, true, false),
+-                         Bool16x8(false, true, false, true, false, true, false, true),
+-                         Bool16x8(true, true, true, false, true, true, true, false)]);
+-  // Test that reading array[1] produces a *copy* of Bool16x8, not an
+-  // alias into the array.
+-  var f = array[1];
+-  assertEq(Bool16x8.extractLane(f, 2), false);
+-  assertEq(Bool16x8.extractLane(array[1], 2), false);
+-  assertEq(Bool16x8.extractLane(f, 3), true);
+-  assertEq(Bool16x8.extractLane(array[1], 3), true);
+-  array[1] = Bool16x8(true, false, true, false, true, false, true, false);
+-  assertEq(Bool16x8.extractLane(f, 3), true);
+-  assertEq(Bool16x8.extractLane(array[1], 3), false);
+-}
+-
+-function testBool16x8Setters() {
+-  var Array = Bool16x8.array(3);
+-  var array = new Array([Bool16x8(true, false, true, false, true, false, true, false),
+-                         Bool16x8(false, true, false, true, false, true, false, true),
+-                         Bool16x8(true, true, true, false, true, true, true, false)]);
+-
+-
+-  assertEq(Bool16x8.extractLane(array[1], 3), true);
+-  // Test that we are allowed to write Bool16x8 values into array,
+-  // but not other things.
+-  array[1] = Bool16x8(true, false, true, false, true, false, true, false);
+-  assertEq(Bool16x8.extractLane(array[1], 3), false);
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {s0: false, s1: true, s2: false, s3: true, s4: false, s5: true, s6: false, s7: true};
+-  }, TypeError, "Setting Bool16x8 from an object");
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [true, false, false, true, true, true, false, false];
+-  }, TypeError, "Setting Bool16x8 from an array");
+-  assertThrowsInstanceOf(function() {
+-    array[1] = false;
+-  }, TypeError, "Setting Bool16x8 from a boolean");
+-}
+-
+-function testBool32x4Getters() {
+-  // Create a Bool32x4 and check that the getters work:
+-  var f = Bool32x4(true, false, false, true);
+-  assertEq(Bool32x4.extractLane(f, 0), true);
+-  assertEq(Bool32x4.extractLane(f, 1), false);
+-  assertEq(Bool32x4.extractLane(f, 2), false);
+-  assertEq(Bool32x4.extractLane(f, 3), true);
+-  assertThrowsInstanceOf(() => Bool32x4.extractLane(f, 4), RangeError);
+-  assertThrowsInstanceOf(() => Bool32x4.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Bool32x4.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Bool32x4.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Bool32x4.extractLane(Float32x4(1, 2, 3, 4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Bool32x4.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Bool32x4.extractLane(f, f), TypeError);
+-}
+-
+-function testBool32x4Reify() {
+-  var Array = Bool32x4.array(3);
+-  var array = new Array([Bool32x4(true, false, false, true),
+-                         Bool32x4(true, false, true, false),
+-                         Bool32x4(true, true, true, false)]);
+-
+-  // Test that reading array[1] produces a *copy* of Bool32x4, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-  assertEq(Bool32x4.extractLane(f, 2), true);
+-  assertEq(Bool32x4.extractLane(array[1], 2), true);
+-  assertEq(Bool32x4.extractLane(f, 3), false);
+-  assertEq(Bool32x4.extractLane(array[1], 3), false);
+-  array[1] = Bool32x4(false, true, false, true);
+-  assertEq(Bool32x4.extractLane(f, 3), false);
+-  assertEq(Bool32x4.extractLane(array[1], 3), true);
+-}
+-
+-function testBool32x4Setters() {
+-  var Array = Bool32x4.array(3);
+-  var array = new Array([Bool32x4(true, false, false, true),
+-                         Bool32x4(true, false, true, false),
+-                         Bool32x4(true, true, true, false)]);
+-
+-
+-  assertEq(Bool32x4.extractLane(array[1], 3), false);
+-  // Test that we are allowed to write Bool32x4 values into array,
+-  // but not other things.
+-  array[1] = Bool32x4(false, true, false, true);
+-  assertEq(Bool32x4.extractLane(array[1], 3), true);
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {x: false, y: true, z: false, w: true};
+-  }, TypeError, "Setting Bool32x4 from an object");
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [true, false, false, true];
+-  }, TypeError, "Setting Bool32x4 from an array");
+-  assertThrowsInstanceOf(function() {
+-    array[1] = false;
+-  }, TypeError, "Setting Bool32x4 from a boolean");
+-}
+-
+-function testBool64x2Getters() {
+-  // Create a Bool64x2 and check that the getters work:
+-  var f = Bool64x2(true, false);
+-  assertEq(Bool64x2.extractLane(f, 0), true);
+-  assertEq(Bool64x2.extractLane(f, 1), false);
+-
+-  assertThrowsInstanceOf(() => Bool64x2.extractLane(f, 2), RangeError);
+-  assertThrowsInstanceOf(() => Bool64x2.extractLane(f, -1), RangeError);
+-  assertThrowsInstanceOf(() => Bool64x2.extractLane(f, 0.5), RangeError);
+-  assertThrowsInstanceOf(() => Bool64x2.extractLane(f, {}), RangeError);
+-  assertThrowsInstanceOf(() => Bool64x2.extractLane(Bool32x4(1,2,3,4), 0), TypeError);
+-  assertThrowsInstanceOf(() => Bool64x2.extractLane(1, 0), TypeError);
+-  assertThrowsInstanceOf(() => Bool64x2.extractLane(f, f), TypeError);
+-}
+-
+-function testBool64x2Reify() {
+-  var Array = Bool64x2.array(3);
+-  var array = new Array([Bool64x2(true, false),
+-                         Bool64x2(false, true),
+-                         Bool64x2(true, true)]);
+-
+-  // Test that reading array[1] produces a *copy* of Bool64x2, not an
+-  // alias into the array.
+-
+-  var f = array[1];
+-  assertEq(Bool64x2.extractLane(f, 1), true);
+-  assertEq(Bool64x2.extractLane(array[1], 1), true);
+-  array[1] = Bool64x2(false, false);
+-  assertEq(Bool64x2.extractLane(f, 1), true);
+-  assertEq(Bool64x2.extractLane(array[1], 1), false);
+-}
+-
+-function testBool64x2Setters() {
+-  var Array = Bool64x2.array(3);
+-  var array = new Array([Bool64x2(true, false),
+-                         Bool64x2(false, true),
+-                         Bool64x2(true, true)]);
+-  assertEq(Bool64x2.extractLane(array[1], 1), true);
+-
+-  // Test that we are allowed to write Bool64x2 values into array,
+-  // but not other things.
+-
+-  array[1] = Bool64x2(false, false);
+-  assertEq(Bool64x2.extractLane(array[1], 1), false);
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = {x: false, y: false };
+-  }, TypeError, "Setting Bool64x2 from an object");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = [ false, false ];
+-  }, TypeError, "Setting Bool64x2 from an array");
+-
+-  assertThrowsInstanceOf(function() {
+-    array[1] = 9;
+-  }, TypeError, "Setting Bool64x2 from a number");
+-}
+-
+-
+-function test() {
+-
+-  testFloat32x4Alignment();
+-  testFloat32x4Getters();
+-  testFloat32x4Handles();
+-  testFloat32x4Reify();
+-  testFloat32x4Setters();
+-
+-  testFloat64x2Alignment();
+-  testFloat64x2Getters();
+-  testFloat64x2Handles();
+-  testFloat64x2Reify();
+-  testFloat64x2Setters();
+-
+-  testInt8x16Alignment();
+-  testInt8x16Getters();
+-  testInt8x16Handles();
+-  testInt8x16Reify();
+-  testInt8x16Setters();
+-
+-  testInt16x8Alignment();
+-  testInt16x8Getters();
+-  testInt16x8Handles();
+-  testInt16x8Reify();
+-  testInt16x8Setters();
+-
+-  testInt32x4Alignment();
+-  testInt32x4Getters();
+-  testInt32x4Handles();
+-  testInt32x4Reify();
+-  testInt32x4Setters();
+-
+-  testUint8x16Alignment();
+-  testUint8x16Getters();
+-  testUint8x16Handles();
+-  testUint8x16Reify();
+-  testUint8x16Setters();
+-
+-  testUint16x8Alignment();
+-  testUint16x8Getters();
+-  testUint16x8Handles();
+-  testUint16x8Reify();
+-  testUint16x8Setters();
+-
+-  testUint32x4Alignment();
+-  testUint32x4Getters();
+-  testUint32x4Handles();
+-  testUint32x4Reify();
+-  testUint32x4Setters();
+-
+-  testBool8x16Getters();
+-  testBool8x16Reify();
+-  testBool8x16Setters();
+-
+-  testBool16x8Getters();
+-  testBool16x8Reify();
+-  testBool16x8Setters();
+-
+-  testBool32x4Getters();
+-  testBool32x4Reify();
+-  testBool32x4Setters();
+-
+-  testBool64x2Getters();
+-  testBool64x2Reify();
+-  testBool64x2Setters();
+-
+-  if (typeof reportCompare === "function") {
+-    reportCompare(true, true);
+-  }
+-}
+-
+-test();
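
The reify and setter tests above all exercise one invariant: reading a SIMD-typed element out of a TypedObject array yields a copy, never an alias, and only a value of the matching SIMD type may be written back. A minimal sketch of that pattern, assuming the SIMD.js-style constructors and TypedObject arrays the deleted file relied on (the variable names here are illustrative):

    var Arr = Float32x4.array(2);
    var arr = new Arr([Float32x4(1, 2, 3, 4), Float32x4(5, 6, 7, 8)]);
    var snapshot = arr[1];                            // reified copy of slot 1
    arr[1] = Float32x4(9, 10, 11, 12);                // overwrite the slot
    assertEq(Float32x4.extractLane(snapshot, 0), 5);  // the copy keeps the old lanes
    assertEq(Float32x4.extractLane(arr[1], 0), 9);    // the array sees the new lanes
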
+diff --git a/js/src/tests/non262/SIMD/unary-operations.js b/js/src/tests/non262/SIMD/unary-operations.js
+deleted file mode 100644
+--- a/js/src/tests/non262/SIMD/unary-operations.js
++++ /dev/null
+@@ -1,410 +0,0 @@
+-// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
+-var Float32x4 = SIMD.Float32x4;
+-var Int8x16 = SIMD.Int8x16;
+-var Int16x8 = SIMD.Int16x8;
+-var Int32x4 = SIMD.Int32x4;
+-var Uint8x16 = SIMD.Uint8x16;
+-var Uint16x8 = SIMD.Uint16x8;
+-var Uint32x4 = SIMD.Uint32x4;
+-var Bool8x16 = SIMD.Bool8x16;
+-var Bool16x8 = SIMD.Bool16x8;
+-var Bool32x4 = SIMD.Bool32x4;
+-var Bool64x2 = SIMD.Bool64x2;
+-
+-function testFloat32x4abs() {
+-  function absf(a) {
+-    return Math.abs(Math.fround(a));
+-  }
+-
+-  var vals = [
+-    [-1, 2, -3, 4],
+-    [-1.63, 2.46, -3.17, 4.94],
+-    [NaN, -0, Infinity, -Infinity]
+-  ];
+-  for (var v of vals) {
+-    assertEqX4(Float32x4.abs(Float32x4(...v)), v.map(absf));
+-  }
+-}
+-
+-function testFloat32x4neg() {
+-  function negf(a) {
+-    return -1 * Math.fround(a);
+-  }
+-
+-  var vals = [
+-    [1, 2, 3, 4],
+-    [0.999, -0.001, 3.78, 4.05],
+-    [NaN, -0, Infinity, -Infinity]
+-  ];
+-  for (var v of vals) {
+-    assertEqX4(Float32x4.neg(Float32x4(...v)), v.map(negf));
+-  }
+-}
+-
+-function testFloat32x4reciprocalApproximation() {
+-  function reciprocalf(a) {
+-    return Math.fround(1 / Math.fround(a));
+-  }
+-
+-  var vals = [
+-    [[1, 0.5, 0.25, 0.125], [1, 2, 4, 8]],
+-    [[1.6, 0.8, 0.4, 0.2], [1.6, 0.8, 0.4, 0.2].map(reciprocalf)],
+-    [[NaN, -0, Infinity, -Infinity], [NaN, -Infinity, 0, -0]]
+-  ];
+-
+-  for (var [v,w] of vals) {
+-    assertEqX4(Float32x4.reciprocalApproximation(Float32x4(...v)), w);
+-  }
+-}
+-
+-function testFloat32x4reciprocalSqrtApproximation() {
+-  function reciprocalsqrtf(a) {
+-    assertEq(Math.fround(a), a);
+-    return Math.fround(1 / Math.fround(Math.sqrt(a)));
+-  }
+-
+-  var vals = [
+-    [[1, 1, 0.25, 0.25], [1, 1, 2, 2]],
+-    [[25, 16, 6.25, 1.5625], [25, 16, 6.25, 1.5625].map(reciprocalsqrtf)],
+-    [[NaN, -0, Infinity, -Infinity], [NaN, -0, Infinity, -Infinity].map(reciprocalsqrtf)],
+-    [[Math.pow(2, 32), Math.pow(2, -32), +0, Math.pow(2, -148)],
+-     [Math.pow(2, -16), Math.pow(2, 16), Infinity, Math.pow(2, 74)]]
+-  ];
+-
+-  for (var [v,w] of vals) {
+-    assertEqX4(Float32x4.reciprocalSqrtApproximation(Float32x4(...v)), w);
+-  }
+-}
+-
+-function testFloat32x4sqrt() {
+-  function sqrtf(a) {
+-    return Math.fround(Math.sqrt(Math.fround(a)));
+-  }
+-
+-  var vals = [
+-    [[1, 4, 9, 16], [1, 2, 3, 4]],
+-    [[2.7225, 7.3441, 9.4249, -1], [2.7225, 7.3441, 9.4249, -1].map(sqrtf)],
+-    [[NaN, -0, Infinity, -Infinity], [NaN, -0, Infinity, NaN]]
+-  ];
+-
+-  for (var [v,w] of vals) {
+-    assertEqX4(Float32x4.sqrt(Float32x4(...v)), w);
+-  }
+-}
+-
+-function testInt8x16neg() {
+-  var vals = [
+-    [[1, 2, 3, 4, 5, 6, 7, 8, -1, -2, -3, -4, -5, INT8_MAX, INT8_MIN, 0],
+-     [-1, -2, -3, -4, -5, -6, -7, -8, 1, 2, 3, 4, 5, -INT8_MAX, INT8_MIN, 0]]
+-  ];
+-  for (var [v,w] of vals) {
+-    assertEqX16(Int8x16.neg(Int8x16(...v)), w);
+-  }
+-}
+-
+-function testInt8x16not() {
+-  var vals = [
+-    [[1, 2, 3, 4, 5, 6, 7, -1, -2, -3, -4, -5, -6, 0, INT8_MIN, INT8_MAX],
+-     [1, 2, 3, 4, 5, 6, 7, -1, -2, -3, -4, -5, -6, 0, INT8_MIN, INT8_MAX].map((x) => ~x << 24 >> 24)]
+-  ];
+-  for (var [v,w] of vals) {
+-    assertEqX16(Int8x16.not(Int8x16(...v)), w);
+-  }
+-}
+-
+-function testInt16x8neg() {
+-  var vals = [
+-    [[1, 2, 3, -1, -2, 0, INT16_MIN, INT16_MAX],
+-     [-1, -2, -3, 1, 2, 0, INT16_MIN, -INT16_MAX]]
+-  ];
+-  for (var [v,w] of vals) {
+-    assertEqX8(Int16x8.neg(Int16x8(...v)), w);
+-  }
+-}
+-
+-function testInt16x8not() {
+-  var vals = [
+-    [[1, 2, 3, -1, -2, 0, INT16_MIN, INT16_MAX],
+-     [1, 2, 3, -1, -2, 0, INT16_MIN, INT16_MAX].map((x) => ~x << 16 >> 16)]
+-  ];
+-  for (var [v,w] of vals) {
+-    assertEqX8(Int16x8.not(Int16x8(...v)), w);
+-  }
+-}
+-
+-function testInt32x4neg() {
+-  var valsExp = [
+-    [[1, 2, 3, 4], [-1, -2, -3, -4]],
+-    [[INT32_MAX, INT32_MIN, -0, 0], [-INT32_MAX | 0, -INT32_MIN | 0, 0, 0]]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Int32x4.neg(Int32x4(...v)), w);
+-  }
+-}
+-
+-function testInt32x4not() {
+-  var valsExp = [
+-    [[1, 2, 3, 4], [-2, -3, -4, -5]],
+-    [[INT32_MAX, INT32_MIN, 0, 0], [~INT32_MAX | 0, ~INT32_MIN | 0, ~0 | 0,  ~0 | 0]]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Int32x4.not(Int32x4(...v)), w);
+-  }
+-}
+-
+-function testUint8x16neg() {
+-  var vals = [
+-    [[  1,   2,   3,   4,   5,   6,   7, 0, -1, -2, -3, -4, UINT8_MAX,   INT8_MAX, 0, 0],
+-     [255, 254, 253, 252, 251, 250, 249, 0, 1,   2,  3,  4,         1, INT8_MAX+2, 0, 0]]
+-  ];
+-  for (var [v,w] of vals) {
+-    assertEqX16(Uint8x16.neg(Uint8x16(...v)), w);
+-  }
+-}
+-
+-function testUint8x16not() {
+-  var vals = [
+-    [[1, 2, 3, 4, 5, 6, 7, -1, -2, -3, -4, -5, -6, 0, INT8_MIN, INT8_MAX],
+-     [1, 2, 3, 4, 5, 6, 7, -1, -2, -3, -4, -5, -6, 0, INT8_MIN, INT8_MAX].map((x) => ~x << 24 >>> 24)]
+-  ];
+-  for (var [v,w] of vals) {
+-    assertEqX16(Uint8x16.not(Uint8x16(...v)), w);
+-  }
+-}
+-
+-function testUint16x8neg() {
+-  var vals = [
+-    [[1, 2, UINT16_MAX, -1, -2, 0, INT16_MIN, INT16_MAX],
+-     [1, 2, UINT16_MAX, -1, -2, 0, INT16_MIN, INT16_MAX].map((x) => -x << 16 >>> 16)]
+-  ];
+-  for (var [v,w] of vals) {
+-    assertEqX8(Uint16x8.neg(Uint16x8(...v)), w);
+-  }
+-}
+-
+-function testUint16x8not() {
+-  var vals = [
+-    [[1, 2, UINT16_MAX, -1, -2, 0, INT16_MIN, INT16_MAX],
+-     [1, 2, UINT16_MAX, -1, -2, 0, INT16_MIN, INT16_MAX].map((x) => ~x << 16 >>> 16)]
+-  ];
+-  for (var [v,w] of vals) {
+-    assertEqX8(Uint16x8.not(Uint16x8(...v)), w);
+-  }
+-}
+-
+-function testUint32x4neg() {
+-  var valsExp = [
+-    [[1, 2, 3, 4], [-1 >>> 0, -2 >>> 0, -3 >>> 0, -4 >>> 0]],
+-    [[INT32_MAX, INT32_MIN, -0, 0], [-INT32_MAX >>> 0, -INT32_MIN >>> 0, 0, 0]]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Uint32x4.neg(Uint32x4(...v)), w);
+-  }
+-}
+-
+-function testUint32x4not() {
+-  var valsExp = [
+-    [[1, 2, 3, 4], [~1 >>> 0, ~2 >>> 0, ~3 >>> 0, ~4 >>> 0]],
+-    [[INT32_MAX, INT32_MIN, UINT32_MAX, 0], [~INT32_MAX >>> 0, ~INT32_MIN >>> 0, 0,  ~0 >>> 0]]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Uint32x4.not(Uint32x4(...v)), w);
+-  }
+-}
+-
+-function testBool8x16not() {
+-  var valsExp = [
+-    [[true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false],
+-     [false, true, false, true, false, true, false, true, false, true, false, true, false, true, false, true]],
+-    [[true, true, false, false, true, true, false, false, true, true, false, false, true, true, false, false],
+-     [false, false, true, true, false, false, true, true, false, false, true, true, false, false, true, true]]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEqX16(Bool8x16.not(Bool8x16(...v)), w);
+-  }
+-}
+-
+-function testBool8x16allTrue() {
+-  var valsExp = [
+-    [[false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], false],
+-    [[true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true], true],
+-    [[false, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true], false],
+-    [[true, false, true, true, true, true, true, true, true, true, true, true, true, true, true, true], false],
+-    [[true, false, true, true, true, true, true, true, true, true, true, true, true, true, false, true], false],
+-    [[true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, false], false],
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEq(Bool8x16.allTrue(Bool8x16(...v)), w);
+-  }
+-}
+-
+-function testBool8x16anyTrue() {
+-  var valsExp = [
+-    [[false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], false],
+-    [[true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true], true],
+-    [[false, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true], true],
+-    [[true, false, true, true, true, true, true, true, true, true, true, true, true, true, true, true], true],
+-    [[true, false, true, true, true, true, true, true, true, true, true, true, true, true, false, true], true],
+-    [[true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, false], true],
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEq(Bool8x16.anyTrue(Bool8x16(...v)), w);
+-  }
+-}
+-
+-function testBool16x8not() {
+-  var valsExp = [
+-    [[true, false, true, false, true, false, true, false], [false, true, false, true, false, true, false, true]],
+-    [[true, true, false, false, true, true, false, false], [false, false, true, true, false, false, true, true]]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEqX8(Bool16x8.not(Bool16x8(...v)), w);
+-  }
+-}
+-
+-function testBool16x8allTrue() {
+-  var valsExp = [
+-    [[false, false, false, false, false, false, false, false], false],
+-    [[true, true, true, true, true, true, true, true], true],
+-    [[false, true, true, true, true, true, true, true], false],
+-    [[true, false, true, true, true, true, true, true], false],
+-    [[true, true, true, true, true, true, false, true], false],
+-    [[true, true, true, true, true, true, true, false], false],
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEq(Bool16x8.allTrue(Bool16x8(...v)), w);
+-  }
+-}
+-
+-function testBool16x8anyTrue() {
+-  var valsExp = [
+-    [[false, false, false, false, false, false, false, false], false],
+-    [[true, true, true, true, true, true, true, true], true],
+-    [[false, false, false, false, false, false, false, true], true],
+-    [[false, false, false, false, false, false, true, false], true],
+-    [[false, true, false, false, false, false, false, true], true],
+-    [[true, false, false, false, false, false, false, false], true],
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEq(Bool16x8.anyTrue(Bool16x8(...v)), w);
+-  }
+-}
+-
+-function testBool32x4not() {
+-  var valsExp = [
+-    [[true, false, true, false], [false, true, false, true]],
+-    [[true, true, false, false], [false, false, true, true]]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEqX4(Bool32x4.not(Bool32x4(...v)), w);
+-  }
+-}
+-
+-function testBool32x4allTrue() {
+-  var valsExp = [
+-    [[false, false, false, false], false],
+-    [[true, false, true, false], false],
+-    [[true, true, true, true], true],
+-    [[true, true, false, false], false]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEq(Bool32x4.allTrue(Bool32x4(...v)), w);
+-  }
+-}
+-
+-function testBool32x4anyTrue() {
+-  var valsExp = [
+-    [[false, false, false, false], false],
+-    [[true, false, true, false], true],
+-    [[true, true, true, true], true],
+-    [[true, true, false, false], true]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEq(Bool32x4.anyTrue(Bool32x4(...v)), w);
+-  }
+-}
+-
+-function testBool64x2not() {
+-  var valsExp = [
+-    [[false, false], [true, true]],
+-    [[false, true], [true, false]],
+-    [[true, false], [false, true]],
+-    [[true, true], [false, false]]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEqX2(Bool64x2.not(Bool64x2(...v)), w);
+-  }
+-}
+-
+-function testBool64x2allTrue() {
+-  var valsExp = [
+-    [[false, false], false],
+-    [[false, true], false],
+-    [[true, false], false],
+-    [[true, true], true]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEq(Bool64x2.allTrue(Bool64x2(...v)), w);
+-  }
+-}
+-
+-function testBool64x2anyTrue() {
+-  var valsExp = [
+-    [[false, false], false],
+-    [[false, true], true],
+-    [[true, false], true],
+-    [[true, true], true]
+-  ];
+-  for (var [v,w] of valsExp) {
+-    assertEq(Bool64x2.anyTrue(Bool64x2(...v)), w);
+-  }
+-}
+-
+-function test() {
+-  testFloat32x4abs();
+-  testFloat32x4neg();
+-  testFloat32x4reciprocalApproximation();
+-  testFloat32x4reciprocalSqrtApproximation();
+-  testFloat32x4sqrt();
+-
+-  testInt8x16neg();
+-  testInt8x16not();
+-
+-  testInt16x8neg();
+-  testInt16x8not();
+-
+-  testInt32x4neg();
+-  testInt32x4not();
+-
+-  testUint8x16neg();
+-  testUint8x16not();
+-
+-  testUint16x8neg();
+-  testUint16x8not();
+-
+-  testUint32x4neg();
+-  testUint32x4not();
+-
+-  testBool8x16not();
+-  testBool8x16allTrue();
+-  testBool8x16anyTrue();
+-
+-  testBool16x8not();
+-  testBool16x8allTrue();
+-  testBool16x8anyTrue();
+-
+-  testBool32x4not();
+-  testBool32x4allTrue();
+-  testBool32x4anyTrue();
+-
+-  testBool64x2not();
+-  testBool64x2allTrue();
+-  testBool64x2anyTrue();
+-
+-
+-  if (typeof reportCompare === "function") {
+-    reportCompare(true, true);
+-  }
+-}
+-
+-test();
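
The expected values in the integer neg/not tests above are built with a shift pair that emulates fixed-width lanes in plain JavaScript: "<< 24 >> 24" sign-extends to int8, "<< 24 >>> 24" truncates to uint8, and the 16-bit lanes use 16 in place of 24. A small sketch of the idiom (the helper names are invented here for illustration):

    function toInt8(x)  { return x << 24 >> 24; }   // wrap to signed 8-bit
    function toUint8(x) { return x << 24 >>> 24; }  // wrap to unsigned 8-bit
    toInt8(~127);   // -128: bitwise-not of INT8_MAX wraps around to INT8_MIN
    toUint8(~1);    // 254: bitwise-not confined to the low 8 bits
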
+diff --git a/js/src/tests/non262/TypedObject/method_from.js b/js/src/tests/non262/TypedObject/method_from.js
+--- a/js/src/tests/non262/TypedObject/method_from.js
++++ b/js/src/tests/non262/TypedObject/method_from.js
+@@ -200,19 +200,16 @@ function fromUntypedArrayToUint8s() {
+   assertTypedEqual(type, r1, r2);
+ }
+ 
+ function fromNonArrayTypedObjects() {
+     var type = TypedObject.uint32.array(4);
+     var myStruct = new StructType({x: uint32});
+     var r1 = type.from(new myStruct({x: 42}), j => j);
+     assertTypedEqual(type, r1, new type([0,0,0,0]));
+-
+-    var r2 = type.from(SIMD.Int32x4(0,0,0,0), j => j);
+-    assertTypedEqual(type, r1, new type([0,0,0,0]));
+ }
+ 
+ function runTests() {
+     print(BUGNUMBER + ": " + summary);
+ 
+     fromOneDimArrayOfUint8ToUint32s();
+     fromOneDimArrayOfUint32ToUint8s();
+ 
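
With the SIMD case removed, fromNonArrayTypedObjects covers only the struct input: type.from over a non-array typed object yields the zero-filled target array, as the remaining assertion checks. A sketch of the surviving behavior, assuming the TypedObject bindings used elsewhere in this test file:

    var Vec4 = TypedObject.uint32.array(4);
    var Point = new StructType({x: TypedObject.uint32});
    var v = Vec4.from(new Point({x: 42}), j => j);  // struct input: no elements
    assertTypedEqual(Vec4, v, new Vec4([0, 0, 0, 0]));
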
+diff --git a/js/src/vm/ArrayBufferObject.cpp b/js/src/vm/ArrayBufferObject.cpp
+--- a/js/src/vm/ArrayBufferObject.cpp
++++ b/js/src/vm/ArrayBufferObject.cpp
+@@ -925,18 +925,18 @@ ArrayBufferObject::prepareForAsmJS(JSCon
+     if (buffer->forInlineTypedObject())
+         return false;
+ 
+     if (needGuard) {
+         if (buffer->isWasm() && buffer->isPreparedForAsmJS())
+             return true;
+ 
+         // Non-prepared-for-asm.js wasm buffers can be detached at any time.
+-        // This error can only be triggered for SIMD.js (which isn't shipping)
+-        // on !WASM_HUGE_MEMORY so this error is only visible in testing.
++        // This error can only be triggered for Atomics on !WASM_HUGE_MEMORY
++        // so this error is only visible in testing.
+         if (buffer->isWasm() || buffer->isPreparedForAsmJS())
+             return false;
+ 
+         uint32_t length = buffer->byteLength();
+         WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(length, Some(length));
+         if (!wasmBuf) {
+             ReportOutOfMemory(cx);
+             return false;
+diff --git a/js/src/vm/GlobalObject.h b/js/src/vm/GlobalObject.h
+--- a/js/src/vm/GlobalObject.h
++++ b/js/src/vm/GlobalObject.h
+@@ -20,19 +20,16 @@
+ #include "vm/Runtime.h"
+ 
+ namespace js {
+ 
+ class Debugger;
+ class TypedObjectModuleObject;
+ class LexicalEnvironmentObject;
+ 
+-class SimdTypeDescr;
+-enum class SimdType;
+-
+ /*
+  * Global object slots are reserved as follows:
+  *
+  * [0, APPLICATION_SLOTS)
+  *   Pre-reserved slots in all global objects set aside for the embedding's
+  *   use. As with all reserved slots these start out as UndefinedValue() and
+  *   are traced for GC purposes. Apart from that the engine never touches
+  *   these slots, so the embedding can do whatever it wants with them.
+@@ -448,27 +445,16 @@ class GlobalObject : public NativeObject
+     }
+ 
+     static JSObject*
+     getOrCreateTypedObjectModule(JSContext* cx, Handle<GlobalObject*> global) {
+         return getOrCreateObject(cx, global, APPLICATION_SLOTS + JSProto_TypedObject,
+                                  initTypedObjectModule);
+     }
+ 
+-    static JSObject*
+-    getOrCreateSimdGlobalObject(JSContext* cx, Handle<GlobalObject*> global) {
+-        return getOrCreateObject(cx, global, APPLICATION_SLOTS + JSProto_SIMD, initSimdObject);
+-    }
+-
+-    // Get the type descriptor for one of the SIMD types.
+-    // simdType is one of the JS_SIMDTYPEREPR_* constants.
+-    // Implemented in builtin/SIMD.cpp.
+-    static SimdTypeDescr*
+-    getOrCreateSimdTypeDescr(JSContext* cx, Handle<GlobalObject*> global, SimdType simdType);
+-
+     TypedObjectModuleObject& getTypedObjectModule() const;
+ 
+     static JSObject*
+     getOrCreateCollatorPrototype(JSContext* cx, Handle<GlobalObject*> global) {
+         return getOrCreateObject(cx, global, COLLATOR_PROTO, initIntlObject);
+     }
+ 
+     static JSFunction*
+@@ -776,20 +762,16 @@ class GlobalObject : public NativeObject
+     static bool initModuleProto(JSContext* cx, Handle<GlobalObject*> global);
+     static bool initImportEntryProto(JSContext* cx, Handle<GlobalObject*> global);
+     static bool initExportEntryProto(JSContext* cx, Handle<GlobalObject*> global);
+     static bool initRequestedModuleProto(JSContext* cx, Handle<GlobalObject*> global);
+ 
+     // Implemented in builtin/TypedObject.cpp
+     static bool initTypedObjectModule(JSContext* cx, Handle<GlobalObject*> global);
+ 
+-    // Implemented in builtin/SIMD.cpp
+-    static bool initSimdObject(JSContext* cx, Handle<GlobalObject*> global);
+-    static bool initSimdType(JSContext* cx, Handle<GlobalObject*> global, SimdType simdType);
+-
+     static bool initStandardClasses(JSContext* cx, Handle<GlobalObject*> global);
+     static bool initSelfHostingBuiltins(JSContext* cx, Handle<GlobalObject*> global,
+                                         const JSFunctionSpec* builtins);
+ 
+     typedef js::Vector<js::ReadBarriered<js::Debugger*>, 0, js::SystemAllocPolicy> DebuggerVector;
+ 
+     /*
+      * The collection of Debugger objects debugging this global. If this global
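
The getOrCreate* accessors above (both the deleted SIMD ones and the survivors) share one pattern: a reserved slot caches the object, and a per-class init function fills it on first use. A hedged sketch of that pattern with simplified stand-in types (GlobalSlots, initFoo, and getOrCreateFoo are illustrative, not the real GlobalObject internals):

    #include <optional>
    #include <string>

    // Stand-in for a global object's reserved-slot array (assumption).
    struct GlobalSlots {
        std::optional<std::string> slots[8];
    };

    // One-time initializer for a slot; returns false on failure (e.g. OOM).
    static bool initFoo(GlobalSlots& g) {
        g.slots[3] = "Foo module object";
        return true;
    }

    // The getOrCreate* shape: return the cached value, initializing it once.
    static const std::string* getOrCreateFoo(GlobalSlots& g) {
        if (!g.slots[3] && !initFoo(g))
            return nullptr;  // initialization failed
        return &*g.slots[3];
    }

Deleting getOrCreateSimdGlobalObject therefore only removes one slot's accessor and initializer; the caching machinery itself is untouched.
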
+diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
+--- a/js/src/vm/JSContext.cpp
++++ b/js/src/vm/JSContext.cpp
+@@ -34,16 +34,22 @@
+ 
+ #include "builtin/String.h"
+ #include "gc/FreeOp.h"
+ #include "gc/Marking.h"
+ #include "jit/Ion.h"
+ #include "jit/PcScriptCache.h"
+ #include "js/CharacterEncoding.h"
+ #include "js/Printf.h"
++#ifdef JS_SIMULATOR_ARM64
++# include "jit/arm64/vixl/Simulator-vixl.h"
++#endif
++#ifdef JS_SIMULATOR_ARM
++# include "jit/arm/Simulator-arm.h"
++#endif
+ #include "util/DoubleToString.h"
+ #include "util/NativeStack.h"
+ #include "util/Windows.h"
+ #include "vm/BytecodeUtil.h"
+ #include "vm/ErrorReporting.h"
+ #include "vm/HelperThreads.h"
+ #include "vm/Iteration.h"
+ #include "vm/JSAtom.h"
+diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
+--- a/js/src/vm/SelfHosting.cpp
++++ b/js/src/vm/SelfHosting.cpp
+@@ -24,17 +24,16 @@
+ #include "builtin/intl/RelativeTimeFormat.h"
+ #include "builtin/MapObject.h"
+ #include "builtin/ModuleObject.h"
+ #include "builtin/Object.h"
+ #include "builtin/Promise.h"
+ #include "builtin/Reflect.h"
+ #include "builtin/RegExp.h"
+ #include "builtin/SelfHostingDefines.h"
+-#include "builtin/SIMD.h"
+ #include "builtin/Stream.h"
+ #include "builtin/String.h"
+ #include "builtin/TypedObject.h"
+ #include "builtin/WeakMapObject.h"
+ #include "gc/HashUtil.h"
+ #include "gc/Marking.h"
+ #include "gc/Policy.h"
+ #include "jit/AtomicOperations.h"
+@@ -2409,29 +2408,16 @@ static const JSFunctionSpec intrinsic_fu
+     JS_FN("std_String_localeCompare",            str_localeCompare,            1,0),
+ #else
+     JS_FN("std_String_normalize",                str_normalize,                0,0),
+ #endif
+     JS_FN("std_String_concat",                   str_concat,                   1,0),
+ 
+     JS_FN("std_TypedArray_buffer",               js::TypedArray_bufferGetter,  1,0),
+ 
+-    JS_FN("std_SIMD_Int8x16_extractLane",        simd_int8x16_extractLane,     2,0),
+-    JS_FN("std_SIMD_Int16x8_extractLane",        simd_int16x8_extractLane,     2,0),
+-    JS_INLINABLE_FN("std_SIMD_Int32x4_extractLane",   simd_int32x4_extractLane,  2,0, SimdInt32x4_extractLane),
+-    JS_FN("std_SIMD_Uint8x16_extractLane",       simd_uint8x16_extractLane,    2,0),
+-    JS_FN("std_SIMD_Uint16x8_extractLane",       simd_uint16x8_extractLane,    2,0),
+-    JS_FN("std_SIMD_Uint32x4_extractLane",       simd_uint32x4_extractLane,    2,0),
+-    JS_INLINABLE_FN("std_SIMD_Float32x4_extractLane", simd_float32x4_extractLane,2,0, SimdFloat32x4_extractLane),
+-    JS_FN("std_SIMD_Float64x2_extractLane",      simd_float64x2_extractLane,   2,0),
+-    JS_FN("std_SIMD_Bool8x16_extractLane",       simd_bool8x16_extractLane,    2,0),
+-    JS_FN("std_SIMD_Bool16x8_extractLane",       simd_bool16x8_extractLane,    2,0),
+-    JS_FN("std_SIMD_Bool32x4_extractLane",       simd_bool32x4_extractLane,    2,0),
+-    JS_FN("std_SIMD_Bool64x2_extractLane",       simd_bool64x2_extractLane,    2,0),
+-
+     // Helper functions after this point.
+     JS_INLINABLE_FN("ToObject",      intrinsic_ToObject,                1,0, IntrinsicToObject),
+     JS_INLINABLE_FN("IsObject",      intrinsic_IsObject,                1,0, IntrinsicIsObject),
+     JS_INLINABLE_FN("IsArray",       intrinsic_IsArray,                 1,0, ArrayIsArray),
+     JS_INLINABLE_FN("IsWrappedArrayConstructor", intrinsic_IsWrappedArrayConstructor, 1,0,
+                     IntrinsicIsWrappedArrayConstructor),
+     JS_INLINABLE_FN("ToInteger",     intrinsic_ToInteger,               1,0, IntrinsicToInteger),
+     JS_INLINABLE_FN("ToString",      intrinsic_ToString,                1,0, IntrinsicToString),
+@@ -2619,17 +2605,16 @@ static const JSFunctionSpec intrinsic_fu
+     JS_FN("NewDerivedTypedObject",          js::NewDerivedTypedObject, 3, 0),
+     JS_FN("TypedObjectBuffer",              TypedObject::GetBuffer, 1, 0),
+     JS_FN("TypedObjectByteOffset",          TypedObject::GetByteOffset, 1, 0),
+     JS_FN("AttachTypedObject",              js::AttachTypedObject, 3, 0),
+     JS_FN("TypedObjectIsAttached",          js::TypedObjectIsAttached, 1, 0),
+     JS_FN("TypedObjectTypeDescr",           js::TypedObjectTypeDescr, 1, 0),
+     JS_FN("ClampToUint8",                   js::ClampToUint8, 1, 0),
+     JS_FN("GetTypedObjectModule",           js::GetTypedObjectModule, 0, 0),
+-    JS_FN("GetSimdTypeDescr",               js::GetSimdTypeDescr, 1, 0),
+ 
+     JS_INLINABLE_FN("ObjectIsTypeDescr"    ,          js::ObjectIsTypeDescr, 1, 0,
+                     IntrinsicObjectIsTypeDescr),
+     JS_INLINABLE_FN("ObjectIsTypedObject",            js::ObjectIsTypedObject, 1, 0,
+                     IntrinsicObjectIsTypedObject),
+     JS_INLINABLE_FN("ObjectIsOpaqueTypedObject",      js::ObjectIsOpaqueTypedObject, 1, 0,
+                     IntrinsicObjectIsOpaqueTypedObject),
+     JS_INLINABLE_FN("ObjectIsTransparentTypedObject", js::ObjectIsTransparentTypedObject, 1, 0,
+diff --git a/js/src/vm/TypedArrayObject.cpp b/js/src/vm/TypedArrayObject.cpp
+--- a/js/src/vm/TypedArrayObject.cpp
++++ b/js/src/vm/TypedArrayObject.cpp
+@@ -1800,20 +1800,16 @@ TypedArrayObject::getElement(uint32_t in
+         return Uint32Array::getIndexValue(this, index);
+       case Scalar::Float32:
+         return Float32Array::getIndexValue(this, index);
+       case Scalar::Float64:
+         return Float64Array::getIndexValue(this, index);
+       case Scalar::Uint8Clamped:
+         return Uint8ClampedArray::getIndexValue(this, index);
+       case Scalar::Int64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+       case Scalar::MaxTypedArrayViewType:
+         break;
+     }
+ 
+     MOZ_CRASH("Unknown TypedArray type");
+ }
+ 
+ void
+@@ -1850,20 +1846,16 @@ TypedArrayObject::setElement(TypedArrayO
+         return;
+       case Scalar::Float32:
+         Float32Array::setIndexValue(obj, index, d);
+         return;
+       case Scalar::Float64:
+         Float64Array::setIndexValue(obj, index, d);
+         return;
+       case Scalar::Int64:
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+       case Scalar::MaxTypedArrayViewType:
+         break;
+     }
+ 
+     MOZ_CRASH("Unknown TypedArray type");
+ }
+ 
+ void
+diff --git a/js/src/vm/TypedArrayObject.h b/js/src/vm/TypedArrayObject.h
+--- a/js/src/vm/TypedArrayObject.h
++++ b/js/src/vm/TypedArrayObject.h
+@@ -396,21 +396,16 @@ TypedArrayShift(Scalar::Type viewType)
+         return 1;
+       case Scalar::Int32:
+       case Scalar::Uint32:
+       case Scalar::Float32:
+         return 2;
+       case Scalar::Int64:
+       case Scalar::Float64:
+         return 3;
+-      case Scalar::Float32x4:
+-      case Scalar::Int8x16:
+-      case Scalar::Int16x8:
+-      case Scalar::Int32x4:
+-        return 4;
+       default:;
+     }
+     MOZ_CRASH("Unexpected array type");
+ }
+ 
+ static inline unsigned
+ TypedArrayElemSize(Scalar::Type viewType)
+ {
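
TypedArrayShift returns log2 of an element's byte size, so TypedArrayElemSize (whose definition begins at the end of this hunk) is presumably just 1 << shift; dropping the SIMD cases removes the only 16-byte (shift 4) entries. A quick self-contained check of that relationship, trimmed to the scalar types visible above (illustration only, not the engine's header):

    #include <cassert>

    enum class ScalarKind { Int16, Int32, Float32, Float64 };

    static unsigned shiftFor(ScalarKind t) {
        switch (t) {
          case ScalarKind::Int16:   return 1;
          case ScalarKind::Int32:
          case ScalarKind::Float32: return 2;
          case ScalarKind::Float64: return 3;
        }
        return 0;  // unreachable for the cases above
    }

    static unsigned elemSize(ScalarKind t) { return 1u << shiftFor(t); }

    int main() {
        assert(elemSize(ScalarKind::Int16) == 2);
        assert(elemSize(ScalarKind::Float64) == 8);
    }
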
+diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
+--- a/js/src/wasm/AsmJS.cpp
++++ b/js/src/wasm/AsmJS.cpp
+@@ -23,17 +23,16 @@
+ #include "mozilla/MathAlgorithms.h"
+ #include "mozilla/Unused.h"
+ 
+ #include <new>
+ 
+ #include "jsmath.h"
+ #include "jsutil.h"
+ 
+-#include "builtin/SIMD.h"
+ #include "builtin/String.h"
+ #include "frontend/Parser.h"
+ #include "gc/Policy.h"
+ #include "jit/AtomicOperations.h"
+ #include "js/MemoryMetrics.h"
+ #include "js/Printf.h"
+ #include "js/Wrapper.h"
+ #include "util/StringBuffer.h"
+@@ -120,17 +119,17 @@ enum AsmJSAtomicsBuiltinFunction
+ };
+ 
+ 
+ // An AsmJSGlobal represents a JS global variable in the asm.js module function.
+ class AsmJSGlobal
+ {
+   public:
+     enum Which { Variable, FFI, ArrayView, ArrayViewCtor, MathBuiltinFunction,
+-                 AtomicsBuiltinFunction, Constant, SimdCtor, SimdOp };
++                 AtomicsBuiltinFunction, Constant };
+     enum VarInitKind { InitConstant, InitImport };
+     enum ConstantKind { GlobalConstant, MathConstant };
+ 
+   private:
+     struct CacheablePod {
+         Which which_;
+         union V {
+             struct {
+@@ -140,21 +139,16 @@ class AsmJSGlobal
+                     LitVal val_;
+                     U() : val_(LitVal()) {}
+                 } u;
+             } var;
+             uint32_t ffiIndex_;
+             Scalar::Type viewType_;
+             AsmJSMathBuiltinFunction mathBuiltinFunc_;
+             AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
+-            SimdType simdCtorType_;
+-            struct {
+-                SimdType type_;
+-                SimdOperation which_;
+-            } simdOp;
+             struct {
+                 ConstantKind kind_;
+                 double value_;
+             } constant;
+             V() : ffiIndex_(0) {}
+         } u;
+     } pod;
+     CacheableChars field_;
+@@ -203,28 +197,16 @@ class AsmJSGlobal
+     AsmJSMathBuiltinFunction mathBuiltinFunction() const {
+         MOZ_ASSERT(pod.which_ == MathBuiltinFunction);
+         return pod.u.mathBuiltinFunc_;
+     }
+     AsmJSAtomicsBuiltinFunction atomicsBuiltinFunction() const {
+         MOZ_ASSERT(pod.which_ == AtomicsBuiltinFunction);
+         return pod.u.atomicsBuiltinFunc_;
+     }
+-    SimdType simdCtorType() const {
+-        MOZ_ASSERT(pod.which_ == SimdCtor);
+-        return pod.u.simdCtorType_;
+-    }
+-    SimdOperation simdOperation() const {
+-        MOZ_ASSERT(pod.which_ == SimdOp);
+-        return pod.u.simdOp.which_;
+-    }
+-    SimdType simdOperationType() const {
+-        MOZ_ASSERT(pod.which_ == SimdOp);
+-        return pod.u.simdOp.type_;
+-    }
+     ConstantKind constantKind() const {
+         MOZ_ASSERT(pod.which_ == Constant);
+         return pod.u.constant.kind_;
+     }
+     double constantValue() const {
+         MOZ_ASSERT(pod.which_ == Constant);
+         return pod.u.constant.value_;
+     }
+@@ -291,17 +273,16 @@ enum class CacheResult
+ // AsmJSMetadata is built incrementally by ModuleValidator and then shared
+ // immutably between AsmJSModules.
+ 
+ struct AsmJSMetadataCacheablePod
+ {
+     uint32_t                numFFIs = 0;
+     uint32_t                srcLength = 0;
+     uint32_t                srcLengthWithRightBrace = 0;
+-    bool                    usesSimd = false;
+ 
+     AsmJSMetadataCacheablePod() = default;
+ };
+ 
+ struct js::AsmJSMetadata : Metadata, AsmJSMetadataCacheablePod
+ {
+     AsmJSGlobalVector       asmJSGlobals;
+     AsmJSImportVector       asmJSImports;
+@@ -815,107 +796,63 @@ ParseVarOrConstStatement(AsmJSParser& pa
+ // A literal is a double iff the literal contains a decimal point (even if the
+ // fractional part is 0). Otherwise, integers may be classified:
+ //  fixnum: [0, 2^31)
+ //  negative int: [-2^31, 0)
+ //  big unsigned: [2^31, 2^32)
+ //  out of range: otherwise
+ // Lastly, a literal may be a float literal which is any double or integer
+ // literal coerced with Math.fround.
+-//
+-// This class distinguishes between signed and unsigned integer SIMD types like
+-// Int32x4 and Uint32x4, and so does Type below. The wasm ValType and ExprType
+-// enums, and the wasm::LitVal class do not.
+ class NumLit
+ {
+   public:
+     enum Which {
+         Fixnum,
+         NegativeInt,
+         BigUnsigned,
+         Double,
+         Float,
+-        Int8x16,
+-        Int16x8,
+-        Int32x4,
+-        Uint8x16,
+-        Uint16x8,
+-        Uint32x4,
+-        Float32x4,
+-        Bool8x16,
+-        Bool16x8,
+-        Bool32x4,
+         OutOfRangeInt = -1
+     };
+ 
+   private:
+     Which which_;
+-    union U {
+-        JS::Value scalar_;
+-        SimdConstant simd_;
+-
+-        // |scalar_| has a non-trivial constructor and therefore MUST be
+-        // placement-new'd into existence.
+-        MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS
+-        U() {}
+-        MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS
+-    } u;
++    JS::Value value_;
+ 
+   public:
+     NumLit() = default;
+ 
+-    NumLit(Which w, const Value& v) : which_(w) {
+-        new (&u.scalar_) Value(v);
+-        MOZ_ASSERT(!isSimd());
+-    }
+-
+-    NumLit(Which w, SimdConstant c) : which_(w) {
+-        new (&u.simd_) SimdConstant(c);
+-        MOZ_ASSERT(isSimd());
+-    }
++    NumLit(Which w, const Value& v) : which_(w), value_(v) {}
+ 
+     Which which() const {
+         return which_;
+     }
+ 
+     int32_t toInt32() const {
+         MOZ_ASSERT(which_ == Fixnum || which_ == NegativeInt || which_ == BigUnsigned);
+-        return u.scalar_.toInt32();
++        return value_.toInt32();
+     }
+ 
+     uint32_t toUint32() const {
+         return (uint32_t)toInt32();
+     }
+ 
+     double toDouble() const {
+         MOZ_ASSERT(which_ == Double);
+-        return u.scalar_.toDouble();
++        return value_.toDouble();
+     }
+ 
+     float toFloat() const {
+         MOZ_ASSERT(which_ == Float);
+-        return float(u.scalar_.toDouble());
++        return float(value_.toDouble());
+     }
+ 
+     Value scalarValue() const {
+         MOZ_ASSERT(which_ != OutOfRangeInt);
+-        return u.scalar_;
+-    }
+-
+-    bool isSimd() const
+-    {
+-        return which_ == Int8x16 || which_ == Uint8x16 || which_ == Int16x8 ||
+-               which_ == Uint16x8 || which_ == Int32x4 || which_ == Uint32x4 ||
+-               which_ == Float32x4 || which_ == Bool8x16 || which_ == Bool16x8 ||
+-               which_ == Bool32x4;
+-    }
+-
+-    const SimdConstant& simdValue() const {
+-        MOZ_ASSERT(isSimd());
+-        return u.simd_;
++        return value_;
+     }
+ 
+     bool valid() const {
+         return which_ != OutOfRangeInt;
+     }
+ 
+     bool isZeroBits() const {
+         MOZ_ASSERT(valid());
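
As a worked restatement of the literal-classification comment near the top of the hunk above (classifyLiteral is a hypothetical helper, not part of the patch; note that -0 counts as a double, per the spec comment later in this file):

    #include <cmath>

    enum class LitKind { Fixnum, NegativeInt, BigUnsigned, Double, OutOfRange };

    static LitKind classifyLiteral(double d, bool hasDecimalPoint) {
        if (hasDecimalPoint || (d == 0.0 && std::signbit(d)) || d != std::floor(d))
            return LitKind::Double;                    // 1.0, -0, 0.5, NaN, ...
        if (d >= 0.0 && d < 2147483648.0)              // [0, 2^31)
            return LitKind::Fixnum;
        if (d >= -2147483648.0 && d < 0.0)             // [-2^31, 0)
            return LitKind::NegativeInt;
        if (d >= 2147483648.0 && d < 4294967296.0)     // [2^31, 2^32)
            return LitKind::BigUnsigned;
        return LitKind::OutOfRange;
    }
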
+@@ -923,145 +860,81 @@ class NumLit
+           case NumLit::Fixnum:
+           case NumLit::NegativeInt:
+           case NumLit::BigUnsigned:
+             return toInt32() == 0;
+           case NumLit::Double:
+             return IsPositiveZero(toDouble());
+           case NumLit::Float:
+             return IsPositiveZero(toFloat());
+-          case NumLit::Int8x16:
+-          case NumLit::Uint8x16:
+-          case NumLit::Bool8x16:
+-            return simdValue() == SimdConstant::SplatX16(0);
+-          case NumLit::Int16x8:
+-          case NumLit::Uint16x8:
+-          case NumLit::Bool16x8:
+-            return simdValue() == SimdConstant::SplatX8(0);
+-          case NumLit::Int32x4:
+-          case NumLit::Uint32x4:
+-          case NumLit::Bool32x4:
+-            return simdValue() == SimdConstant::SplatX4(0);
+-          case NumLit::Float32x4:
+-            return simdValue() == SimdConstant::SplatX4(0.f);
+           case NumLit::OutOfRangeInt:
+             MOZ_CRASH("can't be here because of valid() check above");
+         }
+         return false;
+     }
+ 
+     LitVal value() const {
+         switch (which_) {
+           case NumLit::Fixnum:
+           case NumLit::NegativeInt:
+           case NumLit::BigUnsigned:
+             return LitVal(toUint32());
+           case NumLit::Float:
+             return LitVal(toFloat());
+           case NumLit::Double:
+             return LitVal(toDouble());
+-          case NumLit::Int8x16:
+-          case NumLit::Uint8x16:
+-            return LitVal(simdValue().asInt8x16());
+-          case NumLit::Int16x8:
+-          case NumLit::Uint16x8:
+-            return LitVal(simdValue().asInt16x8());
+-          case NumLit::Int32x4:
+-          case NumLit::Uint32x4:
+-            return LitVal(simdValue().asInt32x4());
+-          case NumLit::Float32x4:
+-            return LitVal(simdValue().asFloat32x4());
+-          case NumLit::Bool8x16:
+-            return LitVal(simdValue().asInt8x16(), ValType::B8x16);
+-          case NumLit::Bool16x8:
+-            return LitVal(simdValue().asInt16x8(), ValType::B16x8);
+-          case NumLit::Bool32x4:
+-            return LitVal(simdValue().asInt32x4(), ValType::B32x4);
+           case NumLit::OutOfRangeInt:;
+         }
+         MOZ_CRASH("bad literal");
+     }
+ };
+ 
+ // Represents the type of a general asm.js expression.
+ //
+ // A canonical subset of types representing the coercion targets: Int, Float,
+-// Double, and the SIMD types. This is almost equivalent to wasm::ValType,
+-// except the integer SIMD types have signed/unsigned variants.
++// Double.
+ //
+ // Void is also part of the canonical subset which then maps to wasm::ExprType.
+-//
+-// Note that while the canonical subset distinguishes signed and unsigned SIMD
+-// types, it only uses |Int| to represent signed and unsigned 32-bit integers.
+-// This is because the scalar coersions x|0 and x>>>0 work with any kind of
+-// integer input, while the SIMD check functions throw a TypeError if the passed
+-// type doesn't match.
+-//
++
+ class Type
+ {
+   public:
+     enum Which {
+         Fixnum = NumLit::Fixnum,
+         Signed = NumLit::NegativeInt,
+         Unsigned = NumLit::BigUnsigned,
+         DoubleLit = NumLit::Double,
+         Float = NumLit::Float,
+-        Int8x16 = NumLit::Int8x16,
+-        Int16x8 = NumLit::Int16x8,
+-        Int32x4 = NumLit::Int32x4,
+-        Uint8x16 = NumLit::Uint8x16,
+-        Uint16x8 = NumLit::Uint16x8,
+-        Uint32x4 = NumLit::Uint32x4,
+-        Float32x4 = NumLit::Float32x4,
+-        Bool8x16 = NumLit::Bool8x16,
+-        Bool16x8 = NumLit::Bool16x8,
+-        Bool32x4 = NumLit::Bool32x4,
+         Double,
+         MaybeDouble,
+         MaybeFloat,
+         Floatish,
+         Int,
+         Intish,
+         Void
+     };
+ 
+   private:
+     Which which_;
+ 
+   public:
+     Type() = default;
+     MOZ_IMPLICIT Type(Which w) : which_(w) {}
+-    MOZ_IMPLICIT Type(SimdType type) {
+-        switch (type) {
+-          case SimdType::Int8x16:   which_ = Int8x16;   return;
+-          case SimdType::Int16x8:   which_ = Int16x8;   return;
+-          case SimdType::Int32x4:   which_ = Int32x4;   return;
+-          case SimdType::Uint8x16:  which_ = Uint8x16;  return;
+-          case SimdType::Uint16x8:  which_ = Uint16x8;  return;
+-          case SimdType::Uint32x4:  which_ = Uint32x4;  return;
+-          case SimdType::Float32x4: which_ = Float32x4; return;
+-          case SimdType::Bool8x16:  which_ = Bool8x16;  return;
+-          case SimdType::Bool16x8:  which_ = Bool16x8;  return;
+-          case SimdType::Bool32x4:  which_ = Bool32x4;  return;
+-          default:                  break;
+-        }
+-        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad SimdType");
+-    }
+ 
+     // Map an already canonicalized Type to the return type of a function call.
+     static Type ret(Type t) {
+         MOZ_ASSERT(t.isCanonical());
+         // The 32-bit external type is Signed, not Int.
+         return t.isInt() ? Signed: t;
+     }
+ 
+     static Type lit(const NumLit& lit) {
+         MOZ_ASSERT(lit.valid());
+         Which which = Type::Which(lit.which());
+-        MOZ_ASSERT(which >= Fixnum && which <= Bool32x4);
++        MOZ_ASSERT(which >= Fixnum && which <= Float);
+         Type t;
+         t.which_ = which;
+         return t;
+     }
+ 
+     // Map |t| to one of the canonical vartype representations of a
+     // wasm::ExprType.
+     static Type canonicalize(Type t) {
+@@ -1077,28 +950,16 @@ class Type
+ 
+           case DoubleLit:
+           case Double:
+             return Double;
+ 
+           case Void:
+             return Void;
+ 
+-          case Int8x16:
+-          case Int16x8:
+-          case Int32x4:
+-          case Uint8x16:
+-          case Uint16x8:
+-          case Uint32x4:
+-          case Float32x4:
+-          case Bool8x16:
+-          case Bool16x8:
+-          case Bool32x4:
+-            return t;
+-
+           case MaybeDouble:
+           case MaybeFloat:
+           case Floatish:
+           case Intish:
+             // These types need some kind of coercion, they can't be mapped
+             // to an ExprType.
+             break;
+         }
+@@ -1112,26 +973,16 @@ class Type
+ 
+     bool operator<=(Type rhs) const {
+         switch (rhs.which_) {
+           case Signed:      return isSigned();
+           case Unsigned:    return isUnsigned();
+           case DoubleLit:   return isDoubleLit();
+           case Double:      return isDouble();
+           case Float:       return isFloat();
+-          case Int8x16:     return isInt8x16();
+-          case Int16x8:     return isInt16x8();
+-          case Int32x4:     return isInt32x4();
+-          case Uint8x16:    return isUint8x16();
+-          case Uint16x8:    return isUint16x8();
+-          case Uint32x4:    return isUint32x4();
+-          case Float32x4:   return isFloat32x4();
+-          case Bool8x16:    return isBool8x16();
+-          case Bool16x8:    return isBool16x8();
+-          case Bool32x4:    return isBool32x4();
+           case MaybeDouble: return isMaybeDouble();
+           case MaybeFloat:  return isMaybeFloat();
+           case Floatish:    return isFloatish();
+           case Int:         return isInt();
+           case Intish:      return isIntish();
+           case Fixnum:      return isFixnum();
+           case Void:        return isVoid();
+         }
+@@ -1185,74 +1036,24 @@ class Type
+     bool isVoid() const {
+         return which_ == Void;
+     }
+ 
+     bool isExtern() const {
+         return isDouble() || isSigned();
+     }
+ 
+-    bool isInt8x16() const {
+-        return which_ == Int8x16;
+-    }
+-
+-    bool isInt16x8() const {
+-        return which_ == Int16x8;
+-    }
+-
+-    bool isInt32x4() const {
+-        return which_ == Int32x4;
+-    }
+-
+-    bool isUint8x16() const {
+-        return which_ == Uint8x16;
+-    }
+-
+-    bool isUint16x8() const {
+-        return which_ == Uint16x8;
+-    }
+-
+-    bool isUint32x4() const {
+-        return which_ == Uint32x4;
+-    }
+-
+-    bool isFloat32x4() const {
+-        return which_ == Float32x4;
+-    }
+-
+-    bool isBool8x16() const {
+-        return which_ == Bool8x16;
+-    }
+-
+-    bool isBool16x8() const {
+-        return which_ == Bool16x8;
+-    }
+-
+-    bool isBool32x4() const {
+-        return which_ == Bool32x4;
+-    }
+-
+-    bool isSimd() const {
+-        return isInt8x16() || isInt16x8() || isInt32x4() || isUint8x16() || isUint16x8() ||
+-               isUint32x4() || isFloat32x4() || isBool8x16() || isBool16x8() || isBool32x4();
+-    }
+-
+-    bool isUnsignedSimd() const {
+-        return isUint8x16() || isUint16x8() || isUint32x4();
+-    }
+-
+     // Check if this is one of the valid types for a function argument.
+     bool isArgType() const {
+-        return isInt() || isFloat() || isDouble() || (isSimd() && !isUnsignedSimd());
++        return isInt() || isFloat() || isDouble();
+     }
+ 
+     // Check if this is one of the valid types for a function return value.
+     bool isReturnType() const {
+-        return isSigned() || isFloat() || isDouble() || (isSimd() && !isUnsignedSimd()) ||
+-               isVoid();
++        return isSigned() || isFloat() || isDouble() || isVoid();
+     }
+ 
+     // Check if this is one of the valid types for a global variable.
+     bool isGlobalVarType() const {
+         return isArgType();
+     }
+ 
+     // Check if this is one of the canonical vartype representations of a
+@@ -1260,42 +1061,32 @@ class Type
+     bool isCanonical() const {
+         switch (which()) {
+           case Int:
+           case Float:
+           case Double:
+           case Void:
+             return true;
+           default:
+-            return isSimd();
++            return false;
+         }
+     }
+ 
+     // Check if this is a canonical representation of a wasm::ValType.
+     bool isCanonicalValType() const {
+         return !isVoid() && isCanonical();
+     }
+ 
+     // Convert this canonical type to a wasm::ExprType.
+     ExprType canonicalToExprType() const {
+         switch (which()) {
+           case Int:       return ExprType::I32;
+           case Float:     return ExprType::F32;
+           case Double:    return ExprType::F64;
+           case Void:      return ExprType::Void;
+-          case Uint8x16:
+-          case Int8x16:   return ExprType::I8x16;
+-          case Uint16x8:
+-          case Int16x8:   return ExprType::I16x8;
+-          case Uint32x4:
+-          case Int32x4:   return ExprType::I32x4;
+-          case Float32x4: return ExprType::F32x4;
+-          case Bool8x16:  return ExprType::B8x16;
+-          case Bool16x8:  return ExprType::B16x8;
+-          case Bool32x4:  return ExprType::B32x4;
+           default:        MOZ_CRASH("Need canonical type");
+         }
+     }
+ 
+     // Convert this canonical type to a wasm::ValType.
+     ValType canonicalToValType() const {
+         return NonVoidToValType(canonicalToExprType());
+     }
+@@ -1320,27 +1111,16 @@ class Type
+ 
+           case DoubleLit:
+           case Double:
+           case MaybeDouble:
+             return ExprType::F64;
+ 
+           case Void:
+             return ExprType::Void;
+-
+-          case Uint8x16:
+-          case Int8x16:   return ExprType::I8x16;
+-          case Uint16x8:
+-          case Int16x8:   return ExprType::I16x8;
+-          case Uint32x4:
+-          case Int32x4:   return ExprType::I32x4;
+-          case Float32x4: return ExprType::F32x4;
+-          case Bool8x16:  return ExprType::B8x16;
+-          case Bool16x8:  return ExprType::B16x8;
+-          case Bool32x4:  return ExprType::B32x4;
+         }
+         MOZ_CRASH("Invalid Type");
+     }
+ 
+     const char* toChars() const {
+         switch (which_) {
+           case Double:      return "double";
+           case DoubleLit:   return "doublelit";
+@@ -1348,26 +1128,16 @@ class Type
+           case Float:       return "float";
+           case Floatish:    return "floatish";
+           case MaybeFloat:  return "float?";
+           case Fixnum:      return "fixnum";
+           case Int:         return "int";
+           case Signed:      return "signed";
+           case Unsigned:    return "unsigned";
+           case Intish:      return "intish";
+-          case Int8x16:     return "int8x16";
+-          case Int16x8:     return "int16x8";
+-          case Int32x4:     return "int32x4";
+-          case Uint8x16:    return "uint8x16";
+-          case Uint16x8:    return "uint16x8";
+-          case Uint32x4:    return "uint32x4";
+-          case Float32x4:   return "float32x4";
+-          case Bool8x16:    return "bool8x16";
+-          case Bool16x8:    return "bool16x8";
+-          case Bool32x4:    return "bool32x4";
+           case Void:        return "void";
+         }
+         MOZ_CRASH("Invalid Type");
+     }
+ };
+ 
+ static const unsigned VALIDATION_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
+ 
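
With the SIMD arms gone, Type::canonicalize above collapses to a four-way mapping. A condensed restatement (enumerators abbreviated; the real method also refuses MaybeDouble/MaybeFloat/Floatish/Intish, which need an explicit coercion before they map to a wasm type):

    enum class Canon { Int, Float, Double, Void };
    enum class Ty { Fixnum, Signed, Unsigned, Int, Float, DoubleLit, Double, Void };

    // Sketch of the canonical subset; non-canonical types are omitted here.
    static Canon canonicalize(Ty t) {
        switch (t) {
          case Ty::Fixnum:
          case Ty::Signed:
          case Ty::Unsigned:
          case Ty::Int:       return Canon::Int;
          case Ty::Float:     return Canon::Float;
          case Ty::DoubleLit:
          case Ty::Double:    return Canon::Double;
          case Ty::Void:      return Canon::Void;
        }
        return Canon::Void;  // unreachable for the enum above
    }
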
+@@ -1469,19 +1239,17 @@ class MOZ_STACK_CLASS ModuleValidator
+             ConstantLiteral,
+             ConstantImport,
+             Function,
+             Table,
+             FFI,
+             ArrayView,
+             ArrayViewCtor,
+             MathBuiltinFunction,
+-            AtomicsBuiltinFunction,
+-            SimdCtor,
+-            SimdOp
++            AtomicsBuiltinFunction
+         };
+ 
+       private:
+         Which which_;
+         union U {
+             struct VarOrConst {
+                 Type::Which type_;
+                 unsigned index_;
+@@ -1510,21 +1278,16 @@ class MOZ_STACK_CLASS ModuleValidator
+                 }
+             } varOrConst;
+             uint32_t funcDefIndex_;
+             uint32_t tableIndex_;
+             uint32_t ffiIndex_;
+             Scalar::Type viewType_;
+             AsmJSMathBuiltinFunction mathBuiltinFunc_;
+             AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
+-            SimdType simdCtorType_;
+-            struct SimdTypeAndOperation {
+-                SimdType type_;
+-                SimdOperation which_;
+-            } simdOp;
+ 
+             // |varOrConst|, through |varOrConst.literalValue_|, has a
+             // non-trivial constructor and therefore MUST be placement-new'd
+             // into existence.
+             MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS
+             U() : funcDefIndex_(0) {}
+             MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS
+         } u;
+@@ -1581,34 +1344,16 @@ class MOZ_STACK_CLASS ModuleValidator
+         }
+         bool isAtomicsFunction() const {
+             return which_ == AtomicsBuiltinFunction;
+         }
+         AsmJSAtomicsBuiltinFunction atomicsBuiltinFunction() const {
+             MOZ_ASSERT(which_ == AtomicsBuiltinFunction);
+             return u.atomicsBuiltinFunc_;
+         }
+-        bool isSimdCtor() const {
+-            return which_ == SimdCtor;
+-        }
+-        SimdType simdCtorType() const {
+-            MOZ_ASSERT(which_ == SimdCtor);
+-            return u.simdCtorType_;
+-        }
+-        bool isSimdOperation() const {
+-            return which_ == SimdOp;
+-        }
+-        SimdOperation simdOperation() const {
+-            MOZ_ASSERT(which_ == SimdOp);
+-            return u.simdOp.which_;
+-        }
+-        SimdType simdOperationType() const {
+-            MOZ_ASSERT(which_ == SimdOp);
+-            return u.simdOp.type_;
+-        }
+     };
+ 
+     struct MathBuiltin
+     {
+         enum Kind { Function, Constant };
+         Kind kind;
+ 
+         union {
+@@ -1688,41 +1433,38 @@ class MOZ_STACK_CLASS ModuleValidator
+         }
+     };
+ 
+     typedef HashSet<HashableSig, HashableSig> SigSet;
+     typedef HashMap<NamedSig, uint32_t, NamedSig> FuncImportMap;
+     typedef HashMap<PropertyName*, Global*> GlobalMap;
+     typedef HashMap<PropertyName*, MathBuiltin> MathNameMap;
+     typedef HashMap<PropertyName*, AsmJSAtomicsBuiltinFunction> AtomicsNameMap;
+-    typedef HashMap<PropertyName*, SimdOperation> SimdOperationNameMap;
+     typedef Vector<ArrayView> ArrayViewVector;
+ 
+     JSContext*            cx_;
+     AsmJSParser&          parser_;
+     ParseNode*            moduleFunctionNode_;
+     PropertyName*         moduleFunctionName_;
+     PropertyName*         globalArgumentName_;
+     PropertyName*         importArgumentName_;
+     PropertyName*         bufferArgumentName_;
+     MathNameMap           standardLibraryMathNames_;
+     AtomicsNameMap        standardLibraryAtomicsNames_;
+-    SimdOperationNameMap  standardLibrarySimdOpNames_;
+     RootedFunction        dummyFunction_;
+ 
+     // Validation-internal state:
+     LifoAlloc             validationLifo_;
+     FuncVector            funcDefs_;
+     TableVector           tables_;
+     GlobalMap             globalMap_;
+     SigSet                sigSet_;
+     FuncImportMap         funcImportMap_;
+     ArrayViewVector       arrayViews_;
+     bool                  atomicsPresent_;
+-    bool                  simdPresent_;
+ 
+     // State used to build the AsmJSModule in finish():
+     ModuleEnvironment     env_;
+     MutableAsmJSMetadata  asmJSMetadata_;
+ 
+     // Error reporting:
+     UniqueChars           errorString_;
+     uint32_t              errorOffset_;
+@@ -1744,22 +1486,16 @@ class MOZ_STACK_CLASS ModuleValidator
+         return standardLibraryMathNames_.putNew(atom->asPropertyName(), builtin);
+     }
+     bool addStandardLibraryAtomicsName(const char* name, AsmJSAtomicsBuiltinFunction func) {
+         JSAtom* atom = Atomize(cx_, name, strlen(name));
+         if (!atom)
+             return false;
+         return standardLibraryAtomicsNames_.putNew(atom->asPropertyName(), func);
+     }
+-    bool addStandardLibrarySimdOpName(const char* name, SimdOperation op) {
+-        JSAtom* atom = Atomize(cx_, name, strlen(name));
+-        if (!atom)
+-            return false;
+-        return standardLibrarySimdOpNames_.putNew(atom->asPropertyName(), op);
+-    }
+     bool newSig(FuncType&& sig, uint32_t* sigIndex) {
+         if (env_.types.length() >= MaxTypes)
+             return failCurrentOffset("too many signatures");
+ 
+         *sigIndex = env_.types.length();
+         return env_.types.append(std::move(sig));
+     }
+     bool declareSig(FuncType&& sig, uint32_t* sigIndex) {
+@@ -1780,27 +1516,25 @@ class MOZ_STACK_CLASS ModuleValidator
+         parser_(parser),
+         moduleFunctionNode_(moduleFunctionNode),
+         moduleFunctionName_(FunctionName(moduleFunctionNode)),
+         globalArgumentName_(nullptr),
+         importArgumentName_(nullptr),
+         bufferArgumentName_(nullptr),
+         standardLibraryMathNames_(cx),
+         standardLibraryAtomicsNames_(cx),
+-        standardLibrarySimdOpNames_(cx),
+         dummyFunction_(cx),
+         validationLifo_(VALIDATION_LIFO_DEFAULT_CHUNK_SIZE),
+         funcDefs_(cx),
+         tables_(cx),
+         globalMap_(cx),
+         sigSet_(cx),
+         funcImportMap_(cx),
+         arrayViews_(cx),
+         atomicsPresent_(false),
+-        simdPresent_(false),
+         env_(CompileMode::Once, Tier::Ion, DebugEnabled::False, HasGcTypes::False,
+              cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()
+                ? Shareable::True
+                : Shareable::False,
+              ModuleKind::AsmJS),
+         errorString_(nullptr),
+         errorOffset_(UINT32_MAX),
+         errorOverRecursed_(false)
+@@ -1903,24 +1637,16 @@ class MOZ_STACK_CLASS ModuleValidator
+             !addStandardLibraryAtomicsName("and", AsmJSAtomicsBuiltin_and) ||
+             !addStandardLibraryAtomicsName("or", AsmJSAtomicsBuiltin_or) ||
+             !addStandardLibraryAtomicsName("xor", AsmJSAtomicsBuiltin_xor) ||
+             !addStandardLibraryAtomicsName("isLockFree", AsmJSAtomicsBuiltin_isLockFree))
+         {
+             return false;
+         }
+ 
+-#define ADDSTDLIBSIMDOPNAME(op) || !addStandardLibrarySimdOpName(#op, SimdOperation::Fn_##op)
+-        if (!standardLibrarySimdOpNames_.init()
+-            FORALL_SIMD_ASMJS_OP(ADDSTDLIBSIMDOPNAME))
+-        {
+-            return false;
+-        }
+-#undef ADDSTDLIBSIMDOPNAME
+-
+         // This flows into FunctionBox, so must be tenured.
+         dummyFunction_ = NewScriptedFunction(cx_, 0, JSFunction::INTERPRETED, nullptr,
+                                              /* proto = */ nullptr, gc::AllocKind::FUNCTION,
+                                              TenuredObject);
+         if (!dummyFunction_)
+             return false;
+ 
+         return true;
+@@ -1937,17 +1663,16 @@ class MOZ_STACK_CLASS ModuleValidator
+ 
+     auto tokenStream() const
+       -> decltype(parser_.tokenStream)&
+     {
+         return parser_.tokenStream;
+     }
+ 
+     RootedFunction& dummyFunction()          { return dummyFunction_; }
+-    bool supportsSimd() const                { return cx_->jitSupportsSimd(); }
+     bool atomicsPresent() const              { return atomicsPresent_; }
+     uint32_t minMemoryLength() const         { return env_.minMemoryLength; }
+ 
+     void initModuleFunctionName(PropertyName* name) {
+         MOZ_ASSERT(!moduleFunctionName_);
+         moduleFunctionName_ = name;
+     }
+     MOZ_MUST_USE bool initGlobalArgumentName(PropertyName* n) {
+@@ -2127,53 +1852,16 @@ class MOZ_STACK_CLASS ModuleValidator
+         new (&global->u.atomicsBuiltinFunc_) AsmJSAtomicsBuiltinFunction(func);
+         if (!globalMap_.putNew(var, global))
+             return false;
+ 
+         AsmJSGlobal g(AsmJSGlobal::AtomicsBuiltinFunction, std::move(fieldChars));
+         g.pod.u.atomicsBuiltinFunc_ = func;
+         return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+     }
+-    bool addSimdCtor(PropertyName* var, SimdType type, PropertyName* field) {
+-        simdPresent_ = true;
+-
+-        UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+-        if (!fieldChars)
+-            return false;
+-
+-        Global* global = validationLifo_.new_<Global>(Global::SimdCtor);
+-        if (!global)
+-            return false;
+-        new (&global->u.simdCtorType_) SimdType(type);
+-        if (!globalMap_.putNew(var, global))
+-            return false;
+-
+-        AsmJSGlobal g(AsmJSGlobal::SimdCtor, std::move(fieldChars));
+-        g.pod.u.simdCtorType_ = type;
+-        return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+-    }
+-    bool addSimdOperation(PropertyName* var, SimdType type, SimdOperation op, PropertyName* field) {
+-        simdPresent_ = true;
+-
+-        UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+-        if (!fieldChars)
+-            return false;
+-
+-        Global* global = validationLifo_.new_<Global>(Global::SimdOp);
+-        if (!global)
+-            return false;
+-        new (&global->u.simdOp) Global::U::SimdTypeAndOperation{ type, op };
+-        if (!globalMap_.putNew(var, global))
+-            return false;
+-
+-        AsmJSGlobal g(AsmJSGlobal::SimdOp, std::move(fieldChars));
+-        g.pod.u.simdOp.type_ = type;
+-        g.pod.u.simdOp.which_ = op;
+-        return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+-    }
+     bool addArrayViewCtor(PropertyName* var, Scalar::Type vt, PropertyName* field) {
+         UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+         if (!fieldChars)
+             return false;
+ 
+         Global* global = validationLifo_.new_<Global>(Global::ArrayViewCtor);
+         if (!global)
+             return false;
+@@ -2433,23 +2121,16 @@ class MOZ_STACK_CLASS ModuleValidator
+     }
+     bool lookupStandardLibraryAtomicsName(PropertyName* name, AsmJSAtomicsBuiltinFunction* atomicsBuiltin) const {
+         if (AtomicsNameMap::Ptr p = standardLibraryAtomicsNames_.lookup(name)) {
+             *atomicsBuiltin = p->value();
+             return true;
+         }
+         return false;
+     }
+-    bool lookupStandardSimdOpName(PropertyName* name, SimdOperation* op) const {
+-        if (SimdOperationNameMap::Ptr p = standardLibrarySimdOpNames_.lookup(name)) {
+-            *op = p->value();
+-            return true;
+-        }
+-        return false;
+-    }
+ 
+     bool startFunctionBodies() {
+         if (!arrayViews_.empty())
+             env_.memoryUsage = atomicsPresent_ ? MemoryUsage::Shared : MemoryUsage::Unshared;
+         else
+             env_.memoryUsage = MemoryUsage::None;
+         return true;
+     }
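
startFunctionBodies picks the module's memory mode from two bits of validator state; spelled out as a standalone function (the MemoryUsage names match the code above, the rest is paraphrase):

    enum class MemoryUsage { None, Unshared, Shared };

    static MemoryUsage chooseMemoryUsage(bool anyArrayViews, bool atomicsPresent) {
        if (!anyArrayViews)
            return MemoryUsage::None;  // the module never touches the heap
        return atomicsPresent ? MemoryUsage::Shared    // Atomics need shared memory
                              : MemoryUsage::Unshared;
    }
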
+@@ -2466,18 +2147,16 @@ class MOZ_STACK_CLASS ModuleValidator
+             uint32_t funcIndex = funcImportMap_.count() + func.funcDefIndex();
+             MOZ_ASSERT(!env_.funcTypes[funcIndex]);
+             env_.funcTypes[funcIndex] = &env_.types[func.sigIndex()].funcType();
+         }
+ 
+         if (!env_.funcImportGlobalDataOffsets.resize(funcImportMap_.count()))
+             return nullptr;
+ 
+-        asmJSMetadata_->usesSimd = simdPresent_;
+-
+         MOZ_ASSERT(asmJSMetadata_->asmJSFuncNames.empty());
+         if (!asmJSMetadata_->asmJSFuncNames.resize(funcImportMap_.count()))
+             return nullptr;
+         for (const Func& func : funcDefs_) {
+             CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func.name());
+             if (!funcName || !asmJSMetadata_->asmJSFuncNames.emplaceBack(std::move(funcName)))
+                 return nullptr;
+         }
+@@ -2574,21 +2253,16 @@ IsCoercionCall(ModuleValidator& m, Parse
+     if (coercedExpr)
+         *coercedExpr = CallArgList(pn);
+ 
+     if (global->isMathFunction() && global->mathBuiltinFunction() == AsmJSMathBuiltin_fround) {
+         *coerceTo = Type::Float;
+         return true;
+     }
+ 
+-    if (global->isSimdOperation() && global->simdOperation() == SimdOperation::Fn_check) {
+-        *coerceTo = global->simdOperationType();
+-        return true;
+-    }
+-
+     return false;
+ }
+ 
+ static bool
+ IsFloatLiteral(ModuleValidator& m, ParseNode* pn)
+ {
+     ParseNode* coercedExpr;
+     Type coerceTo;
+@@ -2596,87 +2270,28 @@ IsFloatLiteral(ModuleValidator& m, Parse
+         return false;
+     // Don't fold into || to avoid clang/memcheck bug (bug 1077031).
+     if (!coerceTo.isFloat())
+         return false;
+     return IsNumericNonFloatLiteral(coercedExpr);
+ }
+ 
+ static bool
+-IsSimdTuple(ModuleValidator& m, ParseNode* pn, SimdType* type)
+-{
+-    const ModuleValidator::Global* global;
+-    if (!IsCallToGlobal(m, pn, &global))
+-        return false;
+-
+-    if (!global->isSimdCtor())
+-        return false;
+-
+-    if (CallArgListLength(pn) != GetSimdLanes(global->simdCtorType()))
+-        return false;
+-
+-    *type = global->simdCtorType();
+-    return true;
+-}
+-
+-static bool
+ IsNumericLiteral(ModuleValidator& m, ParseNode* pn);
+ 
+ static NumLit
+ ExtractNumericLiteral(ModuleValidator& m, ParseNode* pn);
+ 
+ static inline bool
+ IsLiteralInt(ModuleValidator& m, ParseNode* pn, uint32_t* u32);
+ 
+ static bool
+-IsSimdLiteral(ModuleValidator& m, ParseNode* pn)
+-{
+-    SimdType type;
+-    if (!IsSimdTuple(m, pn, &type))
+-        return false;
+-
+-    ParseNode* arg = CallArgList(pn);
+-    unsigned length = GetSimdLanes(type);
+-    for (unsigned i = 0; i < length; i++) {
+-        if (!IsNumericLiteral(m, arg))
+-            return false;
+-
+-        uint32_t _;
+-        switch (type) {
+-          case SimdType::Int8x16:
+-          case SimdType::Int16x8:
+-          case SimdType::Int32x4:
+-          case SimdType::Uint8x16:
+-          case SimdType::Uint16x8:
+-          case SimdType::Uint32x4:
+-          case SimdType::Bool8x16:
+-          case SimdType::Bool16x8:
+-          case SimdType::Bool32x4:
+-            if (!IsLiteralInt(m, arg, &_))
+-                return false;
+-            break;
+-          case SimdType::Float32x4:
+-            if (!IsNumericNonFloatLiteral(arg))
+-                return false;
+-            break;
+-          default:
+-            MOZ_CRASH("unhandled simd type");
+-        }
+-
+-        arg = NextNode(arg);
+-    }
+-
+-    MOZ_ASSERT(arg == nullptr);
+-    return true;
+-}
+-
+-static bool
+ IsNumericLiteral(ModuleValidator& m, ParseNode* pn)
+ {
+-    return IsNumericNonFloatLiteral(pn) || IsFloatLiteral(m, pn) || IsSimdLiteral(m, pn);
++    return IsNumericNonFloatLiteral(pn) || IsFloatLiteral(m, pn);
+ }
+ 
+ // The JS grammar treats -42 as -(42) (i.e., with separate grammar
+ // productions) for the unary - and literal 42). However, the asm.js spec
+ // recognizes -42 (modulo parens, so -(42) and -((42))) as a single literal
+ // so fold the two potential parse nodes into a single double value.
+ static double
+ ExtractNumericNonFloatValue(ParseNode* pn, ParseNode** out = nullptr)
+@@ -2689,128 +2304,27 @@ ExtractNumericNonFloatValue(ParseNode* p
+             *out = pn;
+         return -NumberNodeValue(pn);
+     }
+ 
+     return NumberNodeValue(pn);
+ }
+ 
+ static NumLit
+-ExtractSimdValue(ModuleValidator& m, ParseNode* pn)
+-{
+-    MOZ_ASSERT(IsSimdLiteral(m, pn));
+-
+-    SimdType type = SimdType::Count;
+-    MOZ_ALWAYS_TRUE(IsSimdTuple(m, pn, &type));
+-    MOZ_ASSERT(CallArgListLength(pn) == GetSimdLanes(type));
+-
+-    ParseNode* arg = CallArgList(pn);
+-    switch (type) {
+-      case SimdType::Int8x16:
+-      case SimdType::Uint8x16: {
+-        MOZ_ASSERT(GetSimdLanes(type) == 16);
+-        int8_t val[16];
+-        for (size_t i = 0; i < 16; i++, arg = NextNode(arg)) {
+-            uint32_t u32;
+-            MOZ_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+-            val[i] = int8_t(u32);
+-        }
+-        MOZ_ASSERT(arg == nullptr);
+-        NumLit::Which w = type == SimdType::Uint8x16 ? NumLit::Uint8x16 : NumLit::Int8x16;
+-        return NumLit(w, SimdConstant::CreateX16(val));
+-      }
+-      case SimdType::Int16x8:
+-      case SimdType::Uint16x8: {
+-        MOZ_ASSERT(GetSimdLanes(type) == 8);
+-        int16_t val[8];
+-        for (size_t i = 0; i < 8; i++, arg = NextNode(arg)) {
+-            uint32_t u32;
+-            MOZ_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+-            val[i] = int16_t(u32);
+-        }
+-        MOZ_ASSERT(arg == nullptr);
+-        NumLit::Which w = type == SimdType::Uint16x8 ? NumLit::Uint16x8 : NumLit::Int16x8;
+-        return NumLit(w, SimdConstant::CreateX8(val));
+-      }
+-      case SimdType::Int32x4:
+-      case SimdType::Uint32x4: {
+-        MOZ_ASSERT(GetSimdLanes(type) == 4);
+-        int32_t val[4];
+-        for (size_t i = 0; i < 4; i++, arg = NextNode(arg)) {
+-            uint32_t u32;
+-            MOZ_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+-            val[i] = int32_t(u32);
+-        }
+-        MOZ_ASSERT(arg == nullptr);
+-        NumLit::Which w = type == SimdType::Uint32x4 ? NumLit::Uint32x4 : NumLit::Int32x4;
+-        return NumLit(w, SimdConstant::CreateX4(val));
+-      }
+-      case SimdType::Float32x4: {
+-        MOZ_ASSERT(GetSimdLanes(type) == 4);
+-        float val[4];
+-        for (size_t i = 0; i < 4; i++, arg = NextNode(arg))
+-            val[i] = float(ExtractNumericNonFloatValue(arg));
+-        MOZ_ASSERT(arg == nullptr);
+-        return NumLit(NumLit::Float32x4, SimdConstant::CreateX4(val));
+-      }
+-      case SimdType::Bool8x16: {
+-        MOZ_ASSERT(GetSimdLanes(type) == 16);
+-        int8_t val[16];
+-        for (size_t i = 0; i < 16; i++, arg = NextNode(arg)) {
+-            uint32_t u32;
+-            MOZ_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+-            val[i] = u32 ? -1 : 0;
+-        }
+-        MOZ_ASSERT(arg == nullptr);
+-        return NumLit(NumLit::Bool8x16, SimdConstant::CreateX16(val));
+-      }
+-      case SimdType::Bool16x8: {
+-        MOZ_ASSERT(GetSimdLanes(type) == 8);
+-        int16_t val[8];
+-        for (size_t i = 0; i < 8; i++, arg = NextNode(arg)) {
+-            uint32_t u32;
+-            MOZ_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+-            val[i] = u32 ? -1 : 0;
+-        }
+-        MOZ_ASSERT(arg == nullptr);
+-        return NumLit(NumLit::Bool16x8, SimdConstant::CreateX8(val));
+-      }
+-      case SimdType::Bool32x4: {
+-        MOZ_ASSERT(GetSimdLanes(type) == 4);
+-        int32_t val[4];
+-        for (size_t i = 0; i < 4; i++, arg = NextNode(arg)) {
+-            uint32_t u32;
+-            MOZ_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+-            val[i] = u32 ? -1 : 0;
+-        }
+-        MOZ_ASSERT(arg == nullptr);
+-        return NumLit(NumLit::Bool32x4, SimdConstant::CreateX4(val));
+-      }
+-      default:
+-        break;
+-    }
+-
+-    MOZ_CRASH("Unexpected SIMD type.");
+-}
+-
+-static NumLit
+ ExtractNumericLiteral(ModuleValidator& m, ParseNode* pn)
+ {
+     MOZ_ASSERT(IsNumericLiteral(m, pn));
+ 
+     if (pn->isKind(ParseNodeKind::Call)) {
+         // Float literals are explicitly coerced and thus the coerced literal may be
+         // any valid (non-float) numeric literal.
+-        if (CallArgListLength(pn) == 1) {
+-            pn = CallArgList(pn);
+-            double d = ExtractNumericNonFloatValue(pn);
+-            return NumLit(NumLit::Float, DoubleValue(d));
+-        }
+-
+-        return ExtractSimdValue(m, pn);
++        MOZ_ASSERT(CallArgListLength(pn) == 1);
++        pn = CallArgList(pn);
++        double d = ExtractNumericNonFloatValue(pn);
++        return NumLit(NumLit::Float, DoubleValue(d));
+     }
+ 
+     double d = ExtractNumericNonFloatValue(pn, &pn);
+ 
+     // The asm.js spec syntactically distinguishes any literal containing a
+     // decimal point or the literal -0 as having double type.
+     if (NumberNodeHasFrac(pn) || IsNegativeZero(d))
+         return NumLit(NumLit::Double, DoubleValue(d));
+@@ -2846,174 +2360,32 @@ IsLiteralInt(const NumLit& lit, uint32_t
+       case NumLit::Fixnum:
+       case NumLit::BigUnsigned:
+       case NumLit::NegativeInt:
+         *u32 = lit.toUint32();
+         return true;
+       case NumLit::Double:
+       case NumLit::Float:
+       case NumLit::OutOfRangeInt:
+-      case NumLit::Int8x16:
+-      case NumLit::Uint8x16:
+-      case NumLit::Int16x8:
+-      case NumLit::Uint16x8:
+-      case NumLit::Int32x4:
+-      case NumLit::Uint32x4:
+-      case NumLit::Float32x4:
+-      case NumLit::Bool8x16:
+-      case NumLit::Bool16x8:
+-      case NumLit::Bool32x4:
+         return false;
+     }
+     MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad literal type");
+ }
+ 
+ static inline bool
+ IsLiteralInt(ModuleValidator& m, ParseNode* pn, uint32_t* u32)
+ {
+     return IsNumericLiteral(m, pn) &&
+            IsLiteralInt(ExtractNumericLiteral(m, pn), u32);
+ }
+ 
+ /*****************************************************************************/
+ 
+ namespace {
+ 
+-#define CASE(TYPE, OP) case SimdOperation::Fn_##OP: return MozOp::TYPE##OP;
+-#define I8x16CASE(OP) CASE(I8x16, OP)
+-#define I16x8CASE(OP) CASE(I16x8, OP)
+-#define I32x4CASE(OP) CASE(I32x4, OP)
+-#define F32x4CASE(OP) CASE(F32x4, OP)
+-#define B8x16CASE(OP) CASE(B8x16, OP)
+-#define B16x8CASE(OP) CASE(B16x8, OP)
+-#define B32x4CASE(OP) CASE(B32x4, OP)
+-#define ENUMERATE(TYPE, FOR_ALL, DO)                                     \
+-    switch(op) {                                                         \
+-        case SimdOperation::Constructor: return MozOp::TYPE##Constructor;\
+-        FOR_ALL(DO)                                                      \
+-        default: break;                                                  \
+-    }
+-
+-static inline MozOp
+-SimdToOp(SimdType type, SimdOperation op)
+-{
+-    switch (type) {
+-      case SimdType::Uint8x16:
+-        // Handle the special unsigned opcodes, then fall through to Int8x16.
+-        switch (op) {
+-          case SimdOperation::Fn_addSaturate:        return MozOp::I8x16addSaturateU;
+-          case SimdOperation::Fn_subSaturate:        return MozOp::I8x16subSaturateU;
+-          case SimdOperation::Fn_extractLane:        return MozOp::I8x16extractLaneU;
+-          case SimdOperation::Fn_shiftRightByScalar: return MozOp::I8x16shiftRightByScalarU;
+-          case SimdOperation::Fn_lessThan:           return MozOp::I8x16lessThanU;
+-          case SimdOperation::Fn_lessThanOrEqual:    return MozOp::I8x16lessThanOrEqualU;
+-          case SimdOperation::Fn_greaterThan:        return MozOp::I8x16greaterThanU;
+-          case SimdOperation::Fn_greaterThanOrEqual: return MozOp::I8x16greaterThanOrEqualU;
+-          case SimdOperation::Fn_fromInt8x16Bits:    return MozOp::Limit;
+-          default: break;
+-        }
+-        MOZ_FALLTHROUGH;
+-      case SimdType::Int8x16:
+-        // Bitcasts Uint8x16 <--> Int8x16 become noops.
+-        switch (op) {
+-          case SimdOperation::Fn_fromUint8x16Bits: return MozOp::Limit;
+-          case SimdOperation::Fn_fromUint16x8Bits: return MozOp::I8x16fromInt16x8Bits;
+-          case SimdOperation::Fn_fromUint32x4Bits: return MozOp::I8x16fromInt32x4Bits;
+-          default: break;
+-        }
+-        ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
+-        break;
+-
+-      case SimdType::Uint16x8:
+-        // Handle the special unsigned opcodes, then fall through to Int16x8.
+-        switch(op) {
+-          case SimdOperation::Fn_addSaturate:        return MozOp::I16x8addSaturateU;
+-          case SimdOperation::Fn_subSaturate:        return MozOp::I16x8subSaturateU;
+-          case SimdOperation::Fn_extractLane:        return MozOp::I16x8extractLaneU;
+-          case SimdOperation::Fn_shiftRightByScalar: return MozOp::I16x8shiftRightByScalarU;
+-          case SimdOperation::Fn_lessThan:           return MozOp::I16x8lessThanU;
+-          case SimdOperation::Fn_lessThanOrEqual:    return MozOp::I16x8lessThanOrEqualU;
+-          case SimdOperation::Fn_greaterThan:        return MozOp::I16x8greaterThanU;
+-          case SimdOperation::Fn_greaterThanOrEqual: return MozOp::I16x8greaterThanOrEqualU;
+-          case SimdOperation::Fn_fromInt16x8Bits:    return MozOp::Limit;
+-          default: break;
+-        }
+-        MOZ_FALLTHROUGH;
+-      case SimdType::Int16x8:
+-        // Bitcasts Uint16x8 <--> Int16x8 become noops.
+-        switch (op) {
+-          case SimdOperation::Fn_fromUint8x16Bits: return MozOp::I16x8fromInt8x16Bits;
+-          case SimdOperation::Fn_fromUint16x8Bits: return MozOp::Limit;
+-          case SimdOperation::Fn_fromUint32x4Bits: return MozOp::I16x8fromInt32x4Bits;
+-          default: break;
+-        }
+-        ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
+-        break;
+-
+-      case SimdType::Uint32x4:
+-        // Handle the special unsigned opcodes, then fall through to Int32x4.
+-        switch(op) {
+-          case SimdOperation::Fn_shiftRightByScalar: return MozOp::I32x4shiftRightByScalarU;
+-          case SimdOperation::Fn_lessThan:           return MozOp::I32x4lessThanU;
+-          case SimdOperation::Fn_lessThanOrEqual:    return MozOp::I32x4lessThanOrEqualU;
+-          case SimdOperation::Fn_greaterThan:        return MozOp::I32x4greaterThanU;
+-          case SimdOperation::Fn_greaterThanOrEqual: return MozOp::I32x4greaterThanOrEqualU;
+-          case SimdOperation::Fn_fromFloat32x4:      return MozOp::I32x4fromFloat32x4U;
+-          case SimdOperation::Fn_fromInt32x4Bits:    return MozOp::Limit;
+-          default: break;
+-        }
+-        MOZ_FALLTHROUGH;
+-      case SimdType::Int32x4:
+-        // Bitcasts Uint32x4 <--> Int32x4 become noops.
+-        switch (op) {
+-          case SimdOperation::Fn_fromUint8x16Bits: return MozOp::I32x4fromInt8x16Bits;
+-          case SimdOperation::Fn_fromUint16x8Bits: return MozOp::I32x4fromInt16x8Bits;
+-          case SimdOperation::Fn_fromUint32x4Bits: return MozOp::Limit;
+-          default: break;
+-        }
+-        ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
+-        break;
+-
+-      case SimdType::Float32x4:
+-        switch (op) {
+-          case SimdOperation::Fn_fromUint8x16Bits: return MozOp::F32x4fromInt8x16Bits;
+-          case SimdOperation::Fn_fromUint16x8Bits: return MozOp::F32x4fromInt16x8Bits;
+-          case SimdOperation::Fn_fromUint32x4Bits: return MozOp::F32x4fromInt32x4Bits;
+-          default: break;
+-        }
+-        ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
+-        break;
+-
+-      case SimdType::Bool8x16:
+-        ENUMERATE(B8x16, FORALL_BOOL_SIMD_OP, B8x16CASE)
+-        break;
+-
+-      case SimdType::Bool16x8:
+-        ENUMERATE(B16x8, FORALL_BOOL_SIMD_OP, B16x8CASE)
+-        break;
+-
+-      case SimdType::Bool32x4:
+-        ENUMERATE(B32x4, FORALL_BOOL_SIMD_OP, B32x4CASE)
+-        break;
+-
+-      default: break;
+-    }
+-    MOZ_CRASH("unexpected SIMD (type, operator) combination");
+-}
+-
+-#undef CASE
+-#undef I8x16CASE
+-#undef I16x8CASE
+-#undef I32x4CASE
+-#undef F32x4CASE
+-#undef B8x16CASE
+-#undef B16x8CASE
+-#undef B32x4CASE
+-#undef ENUMERATE
+-
+ typedef Vector<PropertyName*, 4, SystemAllocPolicy> NameVector;
+ 
+ // Encapsulates the building of an asm bytecode function from an asm.js function
+ // source code, packing the asm.js code into the asm bytecode form that can
+ // be decoded and compiled with a FunctionCompiler.
+ class MOZ_STACK_CLASS FunctionValidator
+ {
+   public:
+@@ -3284,43 +2656,16 @@ class MOZ_STACK_CLASS FunctionValidator
+           case NumLit::BigUnsigned:
+             return writeInt32Lit(lit.toInt32());
+           case NumLit::Float:
+             return encoder().writeOp(Op::F32Const) &&
+                    encoder().writeFixedF32(lit.toFloat());
+           case NumLit::Double:
+             return encoder().writeOp(Op::F64Const) &&
+                    encoder().writeFixedF64(lit.toDouble());
+-          case NumLit::Int8x16:
+-          case NumLit::Uint8x16:
+-            return encoder().writeOp(MozOp::I8x16Const) &&
+-                   encoder().writeFixedI8x16(lit.simdValue().asInt8x16());
+-          case NumLit::Int16x8:
+-          case NumLit::Uint16x8:
+-            return encoder().writeOp(MozOp::I16x8Const) &&
+-                   encoder().writeFixedI16x8(lit.simdValue().asInt16x8());
+-          case NumLit::Int32x4:
+-          case NumLit::Uint32x4:
+-            return encoder().writeOp(MozOp::I32x4Const) &&
+-                   encoder().writeFixedI32x4(lit.simdValue().asInt32x4());
+-          case NumLit::Float32x4:
+-            return encoder().writeOp(MozOp::F32x4Const) &&
+-                   encoder().writeFixedF32x4(lit.simdValue().asFloat32x4());
+-          case NumLit::Bool8x16:
+-            // Boolean vectors use the Int8x16 memory representation.
+-            return encoder().writeOp(MozOp::B8x16Const) &&
+-                   encoder().writeFixedI8x16(lit.simdValue().asInt8x16());
+-          case NumLit::Bool16x8:
+-            // Boolean vectors use the Int16x8 memory representation.
+-            return encoder().writeOp(MozOp::B16x8Const) &&
+-                   encoder().writeFixedI16x8(lit.simdValue().asInt16x8());
+-          case NumLit::Bool32x4:
+-            // Boolean vectors use the Int32x4 memory representation.
+-            return encoder().writeOp(MozOp::B32x4Const) &&
+-                   encoder().writeFixedI32x4(lit.simdValue().asInt32x4());
+           case NumLit::OutOfRangeInt:
+             break;
+         }
+         MOZ_CRASH("unexpected literal type");
+     }
+     MOZ_MUST_USE bool writeCall(ParseNode* pn, Op op) {
+         if (!encoder().writeOp(op))
+             return false;
+@@ -3334,22 +2679,16 @@ class MOZ_STACK_CLASS FunctionValidator
+ 
+         TokenStreamAnyChars& anyChars = m().tokenStream().anyCharsAccess();
+         return callSiteLineNums_.append(anyChars.srcCoords.lineNum(pn->pn_pos.begin));
+     }
+     MOZ_MUST_USE bool prepareCall(ParseNode* pn) {
+         TokenStreamAnyChars& anyChars = m().tokenStream().anyCharsAccess();
+         return callSiteLineNums_.append(anyChars.srcCoords.lineNum(pn->pn_pos.begin));
+     }
+-    MOZ_MUST_USE bool writeSimdOp(SimdType simdType, SimdOperation simdOp) {
+-        MozOp op = SimdToOp(simdType, simdOp);
+-        if (op == MozOp::Limit)
+-            return true;
+-        return encoder().writeOp(op);
+-    }
+ };
+ 
+ } /* anonymous namespace */
+ 
+ /*****************************************************************************/
+ // asm.js type-checking and code-generation algorithm
+ 
+ static bool
+@@ -3502,17 +2841,17 @@ CheckTypeAnnotation(ModuleValidator& m, 
+       case ParseNodeKind::Call: {
+         if (IsCoercionCall(m, coercionNode, coerceTo, coercedExpr))
+             return true;
+         break;
+       }
+       default:;
+     }
+ 
+-    return m.fail(coercionNode, "must be of the form +x, x|0, fround(x), or a SIMD check(x)");
++    return m.fail(coercionNode, "must be of the form +x, x|0 or fround(x)");
+ }
+ 
+ static bool
+ CheckGlobalVariableInitImport(ModuleValidator& m, PropertyName* varName, ParseNode* initNode,
+                               bool isConst)
+ {
+     Type coerceTo;
+     ParseNode* coercedExpr;
+@@ -3618,107 +2957,16 @@ CheckNewArrayView(ModuleValidator& m, Pr
+ 
+     if (!CheckNewArrayViewArgs(m, ctorExpr, bufferName))
+         return false;
+ 
+     return m.addArrayView(varName, type, field);
+ }
+ 
+ static bool
+-IsSimdValidOperationType(SimdType type, SimdOperation op)
+-{
+-#define CASE(op) case SimdOperation::Fn_##op:
+-    switch(type) {
+-      case SimdType::Int8x16:
+-        switch (op) {
+-          case SimdOperation::Constructor:
+-          case SimdOperation::Fn_fromUint8x16Bits:
+-          case SimdOperation::Fn_fromUint16x8Bits:
+-          case SimdOperation::Fn_fromUint32x4Bits:
+-          FORALL_INT8X16_ASMJS_OP(CASE) return true;
+-          default: return false;
+-        }
+-        break;
+-      case SimdType::Int16x8:
+-        switch (op) {
+-          case SimdOperation::Constructor:
+-          case SimdOperation::Fn_fromUint8x16Bits:
+-          case SimdOperation::Fn_fromUint16x8Bits:
+-          case SimdOperation::Fn_fromUint32x4Bits:
+-          FORALL_INT16X8_ASMJS_OP(CASE) return true;
+-          default: return false;
+-        }
+-        break;
+-      case SimdType::Int32x4:
+-        switch (op) {
+-          case SimdOperation::Constructor:
+-          case SimdOperation::Fn_fromUint8x16Bits:
+-          case SimdOperation::Fn_fromUint16x8Bits:
+-          case SimdOperation::Fn_fromUint32x4Bits:
+-          FORALL_INT32X4_ASMJS_OP(CASE) return true;
+-          default: return false;
+-        }
+-        break;
+-      case SimdType::Uint8x16:
+-        switch (op) {
+-          case SimdOperation::Constructor:
+-          case SimdOperation::Fn_fromInt8x16Bits:
+-          case SimdOperation::Fn_fromUint16x8Bits:
+-          case SimdOperation::Fn_fromUint32x4Bits:
+-          FORALL_INT8X16_ASMJS_OP(CASE) return true;
+-          default: return false;
+-        }
+-        break;
+-      case SimdType::Uint16x8:
+-        switch (op) {
+-          case SimdOperation::Constructor:
+-          case SimdOperation::Fn_fromUint8x16Bits:
+-          case SimdOperation::Fn_fromInt16x8Bits:
+-          case SimdOperation::Fn_fromUint32x4Bits:
+-          FORALL_INT16X8_ASMJS_OP(CASE) return true;
+-          default: return false;
+-        }
+-        break;
+-      case SimdType::Uint32x4:
+-        switch (op) {
+-          case SimdOperation::Constructor:
+-          case SimdOperation::Fn_fromUint8x16Bits:
+-          case SimdOperation::Fn_fromUint16x8Bits:
+-          case SimdOperation::Fn_fromInt32x4Bits:
+-          FORALL_INT32X4_ASMJS_OP(CASE) return true;
+-          default: return false;
+-        }
+-        break;
+-      case SimdType::Float32x4:
+-        switch (op) {
+-          case SimdOperation::Constructor:
+-          case SimdOperation::Fn_fromUint8x16Bits:
+-          case SimdOperation::Fn_fromUint16x8Bits:
+-          case SimdOperation::Fn_fromUint32x4Bits:
+-          FORALL_FLOAT32X4_ASMJS_OP(CASE) return true;
+-          default: return false;
+-        }
+-        break;
+-      case SimdType::Bool8x16:
+-      case SimdType::Bool16x8:
+-      case SimdType::Bool32x4:
+-        switch (op) {
+-          case SimdOperation::Constructor:
+-          FORALL_BOOL_SIMD_OP(CASE) return true;
+-          default: return false;
+-        }
+-        break;
+-      default:
+-        // Unimplemented SIMD type.
+-        return false;
+-    }
+-#undef CASE
+-}
+-
+-static bool
+ CheckGlobalMathImport(ModuleValidator& m, ParseNode* initNode, PropertyName* varName,
+                       PropertyName* field)
+ {
+     // Math builtin, with the form glob.Math.[[builtin]]
+     ModuleValidator::MathBuiltin mathBuiltin;
+     if (!m.lookupStandardLibraryMathName(field, &mathBuiltin))
+         return m.failName(initNode, "'%s' is not a standard Math builtin", field);
+ 
+@@ -3741,80 +2989,42 @@ CheckGlobalAtomicsImport(ModuleValidator
+     AsmJSAtomicsBuiltinFunction func;
+     if (!m.lookupStandardLibraryAtomicsName(field, &func))
+         return m.failName(initNode, "'%s' is not a standard Atomics builtin", field);
+ 
+     return m.addAtomicsBuiltinFunction(varName, func, field);
+ }
+ 
+ static bool
+-CheckGlobalSimdImport(ModuleValidator& m, ParseNode* initNode, PropertyName* varName,
+-                      PropertyName* field)
+-{
+-    if (!m.supportsSimd())
+-        return m.fail(initNode, "SIMD is not supported on this platform");
+-
+-    // SIMD constructor, with the form glob.SIMD.[[type]]
+-    SimdType simdType;
+-    if (!IsSimdTypeName(m.cx()->names(), field, &simdType))
+-        return m.failName(initNode, "'%s' is not a standard SIMD type", field);
+-
+-    // IsSimdTypeName will return true for any SIMD type supported by the VM.
+-    //
+-    // Since we may not support all of those SIMD types in asm.js, use the
+-    // asm.js-specific IsSimdValidOperationType() to check if this specific
+-    // constructor is supported in asm.js.
+-    if (!IsSimdValidOperationType(simdType, SimdOperation::Constructor))
+-        return m.failName(initNode, "'%s' is not a supported SIMD type", field);
+-
+-    return m.addSimdCtor(varName, simdType, field);
+-}
+-
+-static bool
+-CheckGlobalSimdOperationImport(ModuleValidator& m, const ModuleValidator::Global* global,
+-                               ParseNode* initNode, PropertyName* varName, PropertyName* opName)
+-{
+-    SimdType simdType = global->simdCtorType();
+-    SimdOperation simdOp;
+-    if (!m.lookupStandardSimdOpName(opName, &simdOp))
+-        return m.failName(initNode, "'%s' is not a standard SIMD operation", opName);
+-    if (!IsSimdValidOperationType(simdType, simdOp))
+-        return m.failName(initNode, "'%s' is not an operation supported by the SIMD type", opName);
+-    return m.addSimdOperation(varName, simdType, simdOp, opName);
+-}
+-
+-static bool
+ CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initNode)
+ {
+     ParseNode* base = DotBase(initNode);
+     PropertyName* field = DotMember(initNode);
+ 
+     if (base->isKind(ParseNodeKind::Dot)) {
+         ParseNode* global = DotBase(base);
+-        PropertyName* mathOrAtomicsOrSimd = DotMember(base);
++        PropertyName* mathOrAtomics = DotMember(base);
+ 
+         PropertyName* globalName = m.globalArgumentName();
+         if (!globalName)
+             return m.fail(base, "import statement requires the module have a stdlib parameter");
+ 
+         if (!IsUseOfName(global, globalName)) {
+             if (global->isKind(ParseNodeKind::Dot)) {
+                 return m.failName(base, "imports can have at most two dot accesses "
+                                         "(e.g. %s.Math.sin)", globalName);
+             }
+             return m.failName(base, "expecting %s.*", globalName);
+         }
+ 
+-        if (mathOrAtomicsOrSimd == m.cx()->names().Math)
++        if (mathOrAtomics == m.cx()->names().Math)
+             return CheckGlobalMathImport(m, initNode, varName, field);
+-        if (mathOrAtomicsOrSimd == m.cx()->names().Atomics)
++        if (mathOrAtomics == m.cx()->names().Atomics)
+             return CheckGlobalAtomicsImport(m, initNode, varName, field);
+-        if (mathOrAtomicsOrSimd == m.cx()->names().SIMD)
+-            return CheckGlobalSimdImport(m, initNode, varName, field);
+-        return m.failName(base, "expecting %s.{Math|SIMD}", globalName);
++        return m.failName(base, "expecting %s.{Math|Atomics}", globalName);
+     }
+ 
+     if (!base->isKind(ParseNodeKind::Name))
+         return m.fail(base, "expected name of variable or parameter");
+ 
+     if (base->name() == m.globalArgumentName()) {
+         if (field == m.cx()->names().NaN)
+             return m.addGlobalConstant(varName, GenericNaN(), field);
+@@ -3823,27 +3033,20 @@ CheckGlobalDotImport(ModuleValidator& m,
+ 
+         Scalar::Type type;
+         if (IsArrayViewCtorName(m, field, &type))
+             return m.addArrayViewCtor(varName, type, field);
+ 
+         return m.failName(initNode, "'%s' is not a standard constant or typed array name", field);
+     }
+ 
+-    if (base->name() == m.importArgumentName())
+-        return m.addFFI(varName, field);
+-
+-    const ModuleValidator::Global* global = m.lookupGlobal(base->name());
+-    if (!global)
+-        return m.failName(initNode, "%s not found in module global scope", base->name());
+-
+-    if (!global->isSimdCtor())
+-        return m.failName(base, "expecting SIMD constructor name, got %s", field);
+-
+-    return CheckGlobalSimdOperationImport(m, global, initNode, varName, field);
++    if (base->name() != m.importArgumentName())
++        return m.fail(base, "expected global or import name");
++
++    return m.addFFI(varName, field);
+ }
+ 
+ static bool
+ CheckModuleGlobal(ModuleValidator& m, ParseNode* var, bool isConst)
+ {
+     if (!var->isKind(ParseNodeKind::Name))
+         return m.fail(var, "import variable is not a plain name");
+ 
+@@ -4132,18 +3335,16 @@ CheckVarRef(FunctionValidator& f, ParseN
+           }
+           case ModuleValidator::Global::Function:
+           case ModuleValidator::Global::FFI:
+           case ModuleValidator::Global::MathBuiltinFunction:
+           case ModuleValidator::Global::AtomicsBuiltinFunction:
+           case ModuleValidator::Global::Table:
+           case ModuleValidator::Global::ArrayView:
+           case ModuleValidator::Global::ArrayViewCtor:
+-          case ModuleValidator::Global::SimdCtor:
+-          case ModuleValidator::Global::SimdOp:
+             break;
+         }
+         return f.failName(varRef, "'%s' may not be accessed by ordinary expressions", name);
+     }
+ 
+     return f.failName(varRef, "'%s' not found in local or asm.js module scope", name);
+ }
+ 
+@@ -4153,36 +3354,34 @@ IsLiteralOrConstInt(FunctionValidator& f
+     NumLit lit;
+     if (!IsLiteralOrConst(f, pn, &lit))
+         return false;
+ 
+     return IsLiteralInt(lit, u32);
+ }
+ 
+ static const int32_t NoMask = -1;
+-static const bool YesSimd = true;
+-static const bool NoSimd = false;
+ 
+ static bool
+ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
+-                 bool isSimd, Scalar::Type* viewType)
++                 Scalar::Type* viewType)
+ {
+     if (!viewName->isKind(ParseNodeKind::Name))
+         return f.fail(viewName, "base of array access must be a typed array view name");
+ 
+     const ModuleValidator::Global* global = f.lookupGlobal(viewName->name());
+     if (!global || !global->isAnyArrayView())
+         return f.fail(viewName, "base of array access must be a typed array view name");
+ 
+     *viewType = global->viewType();
+ 
+     uint32_t index;
+     if (IsLiteralOrConstInt(f, indexExpr, &index)) {
+         uint64_t byteOffset = uint64_t(index) << TypedArrayShift(*viewType);
+-        uint64_t width = isSimd ? Simd128DataSize : TypedArrayElemSize(*viewType);
++        uint64_t width = TypedArrayElemSize(*viewType);
+         if (!f.m().tryConstantAccess(byteOffset, width))
+             return f.fail(indexExpr, "constant index out of range");
+ 
+         return f.writeInt32Lit(byteOffset);
+     }
+ 
+     // Mask off the low bits to account for the clearing effect of a right shift
+     // followed by the left shift implicit in the array access. E.g., H32[i>>2]
+@@ -4204,56 +3403,43 @@ CheckArrayAccess(FunctionValidator& f, P
+ 
+         Type pointerType;
+         if (!CheckExpr(f, pointerNode, &pointerType))
+             return false;
+ 
+         if (!pointerType.isIntish())
+             return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
+     } else {
+-        // For SIMD access, and legacy scalar access compatibility, accept
+-        // Int8/Uint8 accesses with no shift.
++        // For legacy scalar access compatibility, accept Int8/Uint8 accesses
++        // with no shift.
+         if (TypedArrayShift(*viewType) != 0)
+             return f.fail(indexExpr, "index expression isn't shifted; must be an Int8/Uint8 access");
+ 
+         MOZ_ASSERT(mask == NoMask);
+ 
+         ParseNode* pointerNode = indexExpr;
+ 
+         Type pointerType;
+         if (!CheckExpr(f, pointerNode, &pointerType))
+             return false;
+-
+-        if (isSimd) {
+-            if (!pointerType.isIntish())
+-                return f.failf(pointerNode, "%s is not a subtype of intish", pointerType.toChars());
+-        } else {
+-            if (!pointerType.isInt())
+-                return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
+-        }
+-    }
+-
+-    // Don't generate the mask op if there is no need for it which could happen for
+-    // a shift of zero or a SIMD access.
++        if (!pointerType.isInt())
++            return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
++    }
++
++    // Don't generate the mask op if there is no need for it, which could
++    // happen for a shift of zero.
+     if (mask != NoMask) {
+         return f.writeInt32Lit(mask) &&
+                f.encoder().writeOp(Op::I32And);
+     }
+ 
+     return true;
+ }
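A minimal standalone sketch (illustrative only, not patch content; all names are invented for the example) of the masking rule described in the hunk above: an asm.js access such as H32[i>>2] addresses byte (i>>2)<<2, so for an element size of 2^k bytes a single AND reproduces the shift pair.

    #include <cassert>
    #include <cstdint>

    // For 2^k-byte elements, i & ~((1<<k)-1) equals (i>>k)<<k.
    static uint32_t shiftPair(uint32_t i, uint32_t k) {
        return (i >> k) << k;                   // address the source expression denotes
    }

    static uint32_t masked(uint32_t i, uint32_t k) {
        return i & ~((uint32_t(1) << k) - 1);   // address the emitted mask yields
    }

    int main() {
        for (uint32_t i = 0; i < 1024; i++) {
            assert(shiftPair(i, 2) == masked(i, 2));  // H32: 4-byte elements
            assert(shiftPair(i, 3) == masked(i, 3));  // F64: 8-byte elements
        }
        return 0;
    }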
+ 
+ static bool
+-CheckAndPrepareArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
+-                           bool isSimd, Scalar::Type* viewType)
+-{
+-    return CheckArrayAccess(f, viewName, indexExpr, isSimd, viewType);
+-}
+-
+-static bool
+ WriteArrayAccessFlags(FunctionValidator& f, Scalar::Type viewType)
+ {
+     // asm.js only has naturally-aligned accesses.
+     size_t align = TypedArrayElemSize(viewType);
+     MOZ_ASSERT(IsPowerOfTwo(align));
+     if (!f.encoder().writeFixedU8(CeilingLog2(align)))
+         return false;
+ 
+@@ -4264,17 +3450,17 @@ WriteArrayAccessFlags(FunctionValidator&
+     return true;
+ }
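WriteArrayAccessFlags above encodes only CeilingLog2(align), relying on the stated invariant that asm.js accesses are naturally aligned. A short sketch (illustrative only, assuming nothing beyond standard C++) of the values that flag takes per view type:

    #include <cassert>
    #include <cstddef>

    // Smallest log such that 2^log >= n; for the power-of-two element
    // sizes used here this is exactly log2(n).
    static size_t ceilingLog2(size_t n) {
        size_t log = 0;
        while ((size_t(1) << log) < n)
            log++;
        return log;
    }

    int main() {
        assert(ceilingLog2(1) == 0);  // Int8/Uint8
        assert(ceilingLog2(2) == 1);  // Int16/Uint16
        assert(ceilingLog2(4) == 2);  // Int32/Uint32/Float32
        assert(ceilingLog2(8) == 3);  // Float64
        return 0;
    }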
+ 
+ static bool
+ CheckLoadArray(FunctionValidator& f, ParseNode* elem, Type* type)
+ {
+     Scalar::Type viewType;
+ 
+-    if (!CheckAndPrepareArrayAccess(f, ElemBase(elem), ElemIndex(elem), NoSimd, &viewType))
++    if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType))
+         return false;
+ 
+     switch (viewType) {
+       case Scalar::Int8:    if (!f.encoder().writeOp(Op::I32Load8S))  return false; break;
+       case Scalar::Uint8:   if (!f.encoder().writeOp(Op::I32Load8U))  return false; break;
+       case Scalar::Int16:   if (!f.encoder().writeOp(Op::I32Load16S)) return false; break;
+       case Scalar::Uint16:  if (!f.encoder().writeOp(Op::I32Load16U)) return false; break;
+       case Scalar::Uint32:
+@@ -4307,17 +3493,17 @@ CheckLoadArray(FunctionValidator& f, Par
+ 
+     return true;
+ }
+ 
+ static bool
+ CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type)
+ {
+     Scalar::Type viewType;
+-    if (!CheckAndPrepareArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), NoSimd, &viewType))
++    if (!CheckArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType))
+         return false;
+ 
+     Type rhsType;
+     if (!CheckExpr(f, rhs, &rhsType))
+         return false;
+ 
+     switch (viewType) {
+       case Scalar::Int8:
+@@ -4598,17 +3784,17 @@ CheckMathMinMax(FunctionValidator& f, Pa
+ 
+     return true;
+ }
+ 
+ static bool
+ CheckSharedArrayAtomicAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
+                              Scalar::Type* viewType)
+ {
+-    if (!CheckAndPrepareArrayAccess(f, viewName, indexExpr, NoSimd, viewType))
++    if (!CheckArrayAccess(f, viewName, indexExpr, viewType))
+         return false;
+ 
+     // The global will be sane, CheckArrayAccess checks it.
+     const ModuleValidator::Global* global = f.lookupGlobal(viewName->name());
+     if (global->which() != ModuleValidator::Global::ArrayView)
+         return f.fail(viewName, "base of array access must be a typed array view");
+ 
+     MOZ_ASSERT(f.m().atomicsPresent());
+@@ -4905,20 +4091,17 @@ CheckFunctionSignature(ModuleValidator& 
+     *func = existing;
+     return true;
+ }
+ 
+ static bool
+ CheckIsArgType(FunctionValidator& f, ParseNode* argNode, Type type)
+ {
+     if (!type.isArgType())
+-        return f.failf(argNode,
+-                       "%s is not a subtype of int, float, double, or an allowed SIMD type",
+-                       type.toChars());
+-
++        return f.failf(argNode, "%s is not a subtype of int, float, or double", type.toChars());
+     return true;
+ }
+ 
+ static bool
+ CheckInternalCall(FunctionValidator& f, ParseNode* callNode, PropertyName* calleeName,
+                   Type ret, Type* type)
+ {
+     MOZ_ASSERT(ret.isCanonical());
+@@ -5039,18 +4222,16 @@ static bool
+ CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, Type ret, Type* type)
+ {
+     MOZ_ASSERT(ret.isCanonical());
+ 
+     PropertyName* calleeName = CallCallee(callNode)->name();
+ 
+     if (ret.isFloat())
+         return f.fail(callNode, "FFI calls can't return float");
+-    if (ret.isSimd())
+-        return f.fail(callNode, "FFI calls can't return SIMD values");
+ 
+     ValTypeVector args;
+     if (!CheckCallArgs<CheckIsExternType>(f, callNode, &args))
+         return false;
+ 
+     FuncType sig(std::move(args), ret.canonicalToExprType());
+ 
+     uint32_t importIndex;
+@@ -5096,19 +4277,16 @@ CheckCoercionArg(FunctionValidator& f, P
+ 
+     Type argType;
+     if (!CheckExpr(f, arg, &argType))
+         return false;
+ 
+     if (expected.isFloat()) {
+         if (!CheckFloatCoercionArg(f, arg, argType))
+             return false;
+-    } else if (expected.isSimd()) {
+-        if (!(argType <= expected))
+-            return f.fail(arg, "argument to SIMD coercion isn't from the correct SIMD type");
+     } else {
+         MOZ_CRASH("not call coercions");
+     }
+ 
+     *type = Type::ret(expected);
+     return true;
+ }
+ 
+@@ -5202,666 +4380,33 @@ CheckMathBuiltinCall(FunctionValidator& 
+         if (!f.encoder().writeOp(f32))
+             return false;
+     }
+ 
+     *type = opIsDouble ? Type::Double : Type::Floatish;
+     return true;
+ }
+ 
+-namespace {
+-// Include CheckSimdCallArgs in unnamed namespace to avoid MSVC name lookup bug.
+-
+-template<class CheckArgOp>
+-static bool
+-CheckSimdCallArgs(FunctionValidator& f, ParseNode* call, unsigned expectedArity,
+-                  const CheckArgOp& checkArg)
+-{
+-    unsigned numArgs = CallArgListLength(call);
+-    if (numArgs != expectedArity)
+-        return f.failf(call, "expected %u arguments to SIMD call, got %u", expectedArity, numArgs);
+-
+-    ParseNode* arg = CallArgList(call);
+-    for (size_t i = 0; i < numArgs; i++, arg = NextNode(arg)) {
+-        MOZ_ASSERT(!!arg);
+-        Type argType;
+-        if (!CheckExpr(f, arg, &argType))
+-            return false;
+-        if (!checkArg(f, arg, i, argType))
+-            return false;
+-    }
+-
+-    return true;
+-}
+-
+-
+-class CheckArgIsSubtypeOf
+-{
+-    Type formalType_;
+-
+-  public:
+-    explicit CheckArgIsSubtypeOf(SimdType t) : formalType_(t) {}
+-
+-    bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
+-    {
+-        if (!(actualType <= formalType_)) {
+-            return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(),
+-                           formalType_.toChars());
+-        }
+-        return true;
+-    }
+-};
+-
+-static inline Type
+-SimdToCoercedScalarType(SimdType t)
+-{
+-    switch (t) {
+-      case SimdType::Int8x16:
+-      case SimdType::Int16x8:
+-      case SimdType::Int32x4:
+-      case SimdType::Uint8x16:
+-      case SimdType::Uint16x8:
+-      case SimdType::Uint32x4:
+-      case SimdType::Bool8x16:
+-      case SimdType::Bool16x8:
+-      case SimdType::Bool32x4:
+-        return Type::Intish;
+-      case SimdType::Float32x4:
+-        return Type::Floatish;
+-      default:
+-        break;
+-    }
+-    MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected SIMD type");
+-}
+-
+-class CheckSimdScalarArgs
+-{
+-    SimdType simdType_;
+-    Type formalType_;
+-
+-  public:
+-    explicit CheckSimdScalarArgs(SimdType simdType)
+-      : simdType_(simdType), formalType_(SimdToCoercedScalarType(simdType))
+-    {}
+-
+-    bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
+-    {
+-        if (!(actualType <= formalType_)) {
+-            // As a special case, accept doublelit arguments to float32x4 ops by
+-            // re-emitting them as float32 constants.
+-            if (simdType_ != SimdType::Float32x4 || !actualType.isDoubleLit()) {
+-                return f.failf(arg, "%s is not a subtype of %s%s",
+-                               actualType.toChars(), formalType_.toChars(),
+-                               simdType_ == SimdType::Float32x4 ? " or doublelit" : "");
+-            }
+-
+-            // We emitted a double literal and actually want a float32.
+-            return f.encoder().writeOp(Op::F32DemoteF64);
+-        }
+-
+-        return true;
+-    }
+-};
+-
+-class CheckSimdSelectArgs
+-{
+-    Type formalType_;
+-    Type maskType_;
+-
+-  public:
+-    explicit CheckSimdSelectArgs(SimdType t) : formalType_(t), maskType_(GetBooleanSimdType(t)) {}
+-
+-    bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
+-    {
+-        // The first argument is the boolean selector, the next two are the
+-        // values to choose from.
+-        Type wantedType = argIndex == 0 ? maskType_ : formalType_;
+-
+-        if (!(actualType <= wantedType)) {
+-            return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(),
+-                           wantedType.toChars());
+-        }
+-        return true;
+-    }
+-};
+-
+-class CheckSimdVectorScalarArgs
+-{
+-    SimdType formalSimdType_;
+-
+-  public:
+-    explicit CheckSimdVectorScalarArgs(SimdType t) : formalSimdType_(t) {}
+-
+-    bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
+-    {
+-        MOZ_ASSERT(argIndex < 2);
+-        if (argIndex == 0) {
+-            // First argument is the vector
+-            if (!(actualType <= Type(formalSimdType_))) {
+-                return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(),
+-                               Type(formalSimdType_).toChars());
+-            }
+-
+-            return true;
+-        }
+-
+-        // Second argument is the scalar
+-        return CheckSimdScalarArgs(formalSimdType_)(f, arg, argIndex, actualType);
+-    }
+-};
+-
+-} // namespace
+-
+-static bool
+-CheckSimdUnary(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+-               Type* type)
+-{
+-    if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType)))
+-        return false;
+-    if (!f.writeSimdOp(opType, op))
+-        return false;
+-    *type = opType;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdBinaryShift(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+-                     Type *type)
+-{
+-    if (!CheckSimdCallArgs(f, call, 2, CheckSimdVectorScalarArgs(opType)))
+-        return false;
+-    if (!f.writeSimdOp(opType, op))
+-        return false;
+-    *type = opType;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdBinaryComp(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+-                    Type *type)
+-{
+-    if (!CheckSimdCallArgs(f, call, 2, CheckArgIsSubtypeOf(opType)))
+-        return false;
+-    if (!f.writeSimdOp(opType, op))
+-        return false;
+-    *type = GetBooleanSimdType(opType);
+-    return true;
+-}
+-
+-static bool
+-CheckSimdBinary(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+-                Type* type)
+-{
+-    if (!CheckSimdCallArgs(f, call, 2, CheckArgIsSubtypeOf(opType)))
+-        return false;
+-    if (!f.writeSimdOp(opType, op))
+-        return false;
+-    *type = opType;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdExtractLane(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+-{
+-    switch (opType) {
+-      case SimdType::Int8x16:
+-      case SimdType::Int16x8:
+-      case SimdType::Int32x4:   *type = Type::Signed; break;
+-      case SimdType::Uint8x16:
+-      case SimdType::Uint16x8:
+-      case SimdType::Uint32x4:  *type = Type::Unsigned; break;
+-      case SimdType::Float32x4: *type = Type::Float; break;
+-      case SimdType::Bool8x16:
+-      case SimdType::Bool16x8:
+-      case SimdType::Bool32x4:  *type = Type::Int; break;
+-      default:                  MOZ_CRASH("unhandled simd type");
+-    }
+-
+-    unsigned numArgs = CallArgListLength(call);
+-    if (numArgs != 2)
+-        return f.failf(call, "expected 2 arguments to SIMD extract, got %u", numArgs);
+-
+-    ParseNode* arg = CallArgList(call);
+-
+-    // First argument is the vector
+-    Type vecType;
+-    if (!CheckExpr(f, arg, &vecType))
+-        return false;
+-    if (!(vecType <= Type(opType))) {
+-        return f.failf(arg, "%s is not a subtype of %s", vecType.toChars(),
+-                       Type(opType).toChars());
+-    }
+-
+-    arg = NextNode(arg);
+-
+-    // Second argument is the lane < vector length
+-    uint32_t lane;
+-    if (!IsLiteralOrConstInt(f, arg, &lane))
+-        return f.failf(arg, "lane selector should be a constant integer literal");
+-    if (lane >= GetSimdLanes(opType))
+-        return f.failf(arg, "lane selector should be in bounds");
+-
+-    if (!f.writeSimdOp(opType, SimdOperation::Fn_extractLane))
+-        return false;
+-    if (!f.encoder().writeVarU32(lane))
+-        return false;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdReplaceLane(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+-{
+-    unsigned numArgs = CallArgListLength(call);
+-    if (numArgs != 3)
+-        return f.failf(call, "expected 2 arguments to SIMD replace, got %u", numArgs);
+-
+-    ParseNode* arg = CallArgList(call);
+-
+-    // First argument is the vector
+-    Type vecType;
+-    if (!CheckExpr(f, arg, &vecType))
+-        return false;
+-    if (!(vecType <= Type(opType))) {
+-        return f.failf(arg, "%s is not a subtype of %s", vecType.toChars(),
+-                       Type(opType).toChars());
+-    }
+-
+-    arg = NextNode(arg);
+-
+-    // Second argument is the lane < vector length
+-    uint32_t lane;
+-    if (!IsLiteralOrConstInt(f, arg, &lane))
+-        return f.failf(arg, "lane selector should be a constant integer literal");
+-    if (lane >= GetSimdLanes(opType))
+-        return f.failf(arg, "lane selector should be in bounds");
+-
+-    arg = NextNode(arg);
+-
+-    // Third argument is the scalar
+-    Type scalarType;
+-    if (!CheckExpr(f, arg, &scalarType))
+-        return false;
+-    if (!(scalarType <= SimdToCoercedScalarType(opType))) {
+-        if (opType == SimdType::Float32x4 && scalarType.isDoubleLit()) {
+-            if (!f.encoder().writeOp(Op::F32DemoteF64))
+-                return false;
+-        } else {
+-            return f.failf(arg, "%s is not the correct type to replace an element of %s",
+-                           scalarType.toChars(), vecType.toChars());
+-        }
+-    }
+-
+-    if (!f.writeSimdOp(opType, SimdOperation::Fn_replaceLane))
+-        return false;
+-    if (!f.encoder().writeVarU32(lane))
+-        return false;
+-    *type = opType;
+-    return true;
+-}
+-
+-typedef bool Bitcast;
+-
+-namespace {
+-// Include CheckSimdCast in unnamed namespace to avoid MSVC name lookup bug (due to the use of Type).
+-
+-static bool
+-CheckSimdCast(FunctionValidator& f, ParseNode* call, SimdType fromType, SimdType toType,
+-              SimdOperation op, Type* type)
+-{
+-    if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(fromType)))
+-        return false;
+-    if (!f.writeSimdOp(toType, op))
+-        return false;
+-    *type = toType;
+-    return true;
+-}
+-
+-} // namespace
+-
+-static bool
+-CheckSimdShuffleSelectors(FunctionValidator& f, ParseNode* lane,
+-                          mozilla::Array<uint8_t, 16>& lanes, unsigned numLanes, unsigned maxLane)
+-{
+-    for (unsigned i = 0; i < numLanes; i++, lane = NextNode(lane)) {
+-        uint32_t u32;
+-        if (!IsLiteralInt(f.m(), lane, &u32))
+-            return f.failf(lane, "lane selector should be a constant integer literal");
+-        if (u32 >= maxLane)
+-            return f.failf(lane, "lane selector should be less than %u", maxLane);
+-        lanes[i] = uint8_t(u32);
+-    }
+-    return true;
+-}
+-
+-static bool
+-CheckSimdSwizzle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+-{
+-    const unsigned numLanes = GetSimdLanes(opType);
+-    unsigned numArgs = CallArgListLength(call);
+-    if (numArgs != 1 + numLanes)
+-        return f.failf(call, "expected %u arguments to SIMD swizzle, got %u", 1 + numLanes,
+-                       numArgs);
+-
+-    Type retType = opType;
+-    ParseNode* vec = CallArgList(call);
+-    Type vecType;
+-    if (!CheckExpr(f, vec, &vecType))
+-        return false;
+-    if (!(vecType <= retType))
+-        return f.failf(vec, "%s is not a subtype of %s", vecType.toChars(), retType.toChars());
+-
+-    if (!f.writeSimdOp(opType, SimdOperation::Fn_swizzle))
+-        return false;
+-
+-    mozilla::Array<uint8_t, 16> lanes;
+-    if (!CheckSimdShuffleSelectors(f, NextNode(vec), lanes, numLanes, numLanes))
+-        return false;
+-
+-    for (unsigned i = 0; i < numLanes; i++) {
+-        if (!f.encoder().writeFixedU8(lanes[i]))
+-            return false;
+-    }
+-
+-    *type = retType;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdShuffle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+-{
+-    const unsigned numLanes = GetSimdLanes(opType);
+-    unsigned numArgs = CallArgListLength(call);
+-    if (numArgs != 2 + numLanes)
+-        return f.failf(call, "expected %u arguments to SIMD shuffle, got %u", 2 + numLanes,
+-                       numArgs);
+-
+-    Type retType = opType;
+-    ParseNode* arg = CallArgList(call);
+-    for (unsigned i = 0; i < 2; i++, arg = NextNode(arg)) {
+-        Type type;
+-        if (!CheckExpr(f, arg, &type))
+-            return false;
+-        if (!(type <= retType))
+-            return f.failf(arg, "%s is not a subtype of %s", type.toChars(), retType.toChars());
+-    }
+-
+-    if (!f.writeSimdOp(opType, SimdOperation::Fn_shuffle))
+-        return false;
+-
+-    mozilla::Array<uint8_t, 16> lanes;
+-    if (!CheckSimdShuffleSelectors(f, arg, lanes, numLanes, 2 * numLanes))
+-        return false;
+-
+-    for (unsigned i = 0; i < numLanes; i++) {
+-        if (!f.encoder().writeFixedU8(uint8_t(lanes[i])))
+-            return false;
+-    }
+-
+-    *type = retType;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdLoadStoreArgs(FunctionValidator& f, ParseNode* call, Scalar::Type* viewType)
+-{
+-    ParseNode* view = CallArgList(call);
+-    if (!view->isKind(ParseNodeKind::Name))
+-        return f.fail(view, "expected Uint8Array view as SIMD.*.load/store first argument");
+-
+-    ParseNode* indexExpr = NextNode(view);
+-
+-    if (!CheckAndPrepareArrayAccess(f, view, indexExpr, YesSimd, viewType))
+-        return false;
+-
+-    if (*viewType != Scalar::Uint8)
+-        return f.fail(view, "expected Uint8Array view as SIMD.*.load/store first argument");
+-
+-    return true;
+-}
+-
+-static bool
+-CheckSimdLoad(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+-              Type* type)
+-{
+-    unsigned numArgs = CallArgListLength(call);
+-    if (numArgs != 2)
+-        return f.failf(call, "expected 2 arguments to SIMD load, got %u", numArgs);
+-
+-    Scalar::Type viewType;
+-    if (!CheckSimdLoadStoreArgs(f, call, &viewType))
+-        return false;
+-
+-    if (!f.writeSimdOp(opType, op))
+-        return false;
+-
+-    if (!WriteArrayAccessFlags(f, viewType))
+-        return false;
+-
+-    *type = opType;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdStore(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+-               Type* type)
+-{
+-    unsigned numArgs = CallArgListLength(call);
+-    if (numArgs != 3)
+-        return f.failf(call, "expected 3 arguments to SIMD store, got %u", numArgs);
+-
+-    Scalar::Type viewType;
+-    if (!CheckSimdLoadStoreArgs(f, call, &viewType))
+-        return false;
+-
+-    Type retType = opType;
+-    ParseNode* vecExpr = NextNode(NextNode(CallArgList(call)));
+-    Type vecType;
+-    if (!CheckExpr(f, vecExpr, &vecType))
+-        return false;
+-
+-    if (!f.writeSimdOp(opType, op))
+-        return false;
+-
+-    if (!WriteArrayAccessFlags(f, viewType))
+-        return false;
+-
+-    if (!(vecType <= retType))
+-        return f.failf(vecExpr, "%s is not a subtype of %s", vecType.toChars(), retType.toChars());
+-
+-    *type = vecType;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdSelect(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+-{
+-    if (!CheckSimdCallArgs(f, call, 3, CheckSimdSelectArgs(opType)))
+-        return false;
+-    if (!f.writeSimdOp(opType, SimdOperation::Fn_select))
+-        return false;
+-    *type = opType;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdAllTrue(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+-{
+-    if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType)))
+-        return false;
+-    if (!f.writeSimdOp(opType, SimdOperation::Fn_allTrue))
+-        return false;
+-    *type = Type::Int;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdAnyTrue(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+-{
+-    if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType)))
+-        return false;
+-    if (!f.writeSimdOp(opType, SimdOperation::Fn_anyTrue))
+-        return false;
+-    *type = Type::Int;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdCheck(FunctionValidator& f, ParseNode* call, Type* type)
+-{
+-    Type coerceTo;
+-    ParseNode* argNode;
+-    if (!IsCoercionCall(f.m(), call, &coerceTo, &argNode))
+-        return f.failf(call, "expected 1 argument in call to check");
+-    return CheckCoercionArg(f, argNode, coerceTo, type);
+-}
+-
+-static bool
+-CheckSimdSplat(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+-{
+-    if (!CheckSimdCallArgs(f, call, 1, CheckSimdScalarArgs(opType)))
+-        return false;
+-    if (!f.writeSimdOp(opType, SimdOperation::Fn_splat))
+-        return false;
+-    *type = opType;
+-    return true;
+-}
+-
+-static bool
+-CheckSimdOperationCall(FunctionValidator& f, ParseNode* call, const ModuleValidator::Global* global,
+-                       Type* type)
+-{
+-    MOZ_ASSERT(global->isSimdOperation());
+-
+-    SimdType opType = global->simdOperationType();
+-
+-    switch (SimdOperation op = global->simdOperation()) {
+-      case SimdOperation::Fn_check:
+-        return CheckSimdCheck(f, call, type);
+-
+-#define _CASE(OP) case SimdOperation::Fn_##OP:
+-      FOREACH_SHIFT_SIMD_OP(_CASE)
+-        return CheckSimdBinaryShift(f, call, opType, op, type);
+-
+-      FOREACH_COMP_SIMD_OP(_CASE)
+-        return CheckSimdBinaryComp(f, call, opType, op, type);
+-
+-      FOREACH_NUMERIC_SIMD_BINOP(_CASE)
+-      FOREACH_FLOAT_SIMD_BINOP(_CASE)
+-      FOREACH_BITWISE_SIMD_BINOP(_CASE)
+-      FOREACH_SMINT_SIMD_BINOP(_CASE)
+-        return CheckSimdBinary(f, call, opType, op, type);
+-#undef _CASE
+-
+-      case SimdOperation::Fn_extractLane:
+-        return CheckSimdExtractLane(f, call, opType, type);
+-      case SimdOperation::Fn_replaceLane:
+-        return CheckSimdReplaceLane(f, call, opType, type);
+-
+-      case SimdOperation::Fn_fromInt8x16Bits:
+-        return CheckSimdCast(f, call, SimdType::Int8x16, opType, op, type);
+-      case SimdOperation::Fn_fromUint8x16Bits:
+-        return CheckSimdCast(f, call, SimdType::Uint8x16, opType, op, type);
+-      case SimdOperation::Fn_fromInt16x8Bits:
+-        return CheckSimdCast(f, call, SimdType::Int16x8, opType, op, type);
+-      case SimdOperation::Fn_fromUint16x8Bits:
+-        return CheckSimdCast(f, call, SimdType::Uint16x8, opType, op, type);
+-      case SimdOperation::Fn_fromInt32x4:
+-      case SimdOperation::Fn_fromInt32x4Bits:
+-        return CheckSimdCast(f, call, SimdType::Int32x4, opType, op, type);
+-      case SimdOperation::Fn_fromUint32x4:
+-      case SimdOperation::Fn_fromUint32x4Bits:
+-        return CheckSimdCast(f, call, SimdType::Uint32x4, opType, op, type);
+-      case SimdOperation::Fn_fromFloat32x4:
+-      case SimdOperation::Fn_fromFloat32x4Bits:
+-        return CheckSimdCast(f, call, SimdType::Float32x4, opType, op, type);
+-
+-      case SimdOperation::Fn_abs:
+-      case SimdOperation::Fn_neg:
+-      case SimdOperation::Fn_not:
+-      case SimdOperation::Fn_sqrt:
+-      case SimdOperation::Fn_reciprocalApproximation:
+-      case SimdOperation::Fn_reciprocalSqrtApproximation:
+-        return CheckSimdUnary(f, call, opType, op, type);
+-
+-      case SimdOperation::Fn_swizzle:
+-        return CheckSimdSwizzle(f, call, opType, type);
+-      case SimdOperation::Fn_shuffle:
+-        return CheckSimdShuffle(f, call, opType, type);
+-
+-      case SimdOperation::Fn_load:
+-      case SimdOperation::Fn_load1:
+-      case SimdOperation::Fn_load2:
+-        return CheckSimdLoad(f, call, opType, op, type);
+-      case SimdOperation::Fn_store:
+-      case SimdOperation::Fn_store1:
+-      case SimdOperation::Fn_store2:
+-        return CheckSimdStore(f, call, opType, op, type);
+-
+-      case SimdOperation::Fn_select:
+-        return CheckSimdSelect(f, call, opType, type);
+-
+-      case SimdOperation::Fn_splat:
+-        return CheckSimdSplat(f, call, opType, type);
+-
+-      case SimdOperation::Fn_allTrue:
+-        return CheckSimdAllTrue(f, call, opType, type);
+-      case SimdOperation::Fn_anyTrue:
+-        return CheckSimdAnyTrue(f, call, opType, type);
+-
+-      case SimdOperation::Fn_load3:
+-      case SimdOperation::Fn_store3:
+-        return f.fail(call, "asm.js does not support 3-element SIMD loads or stores");
+-
+-      case SimdOperation::Constructor:
+-        MOZ_CRASH("constructors are handled in CheckSimdCtorCall");
+-      case SimdOperation::Fn_fromFloat64x2Bits:
+-        MOZ_CRASH("NYI");
+-    }
+-    MOZ_CRASH("unexpected simd operation in CheckSimdOperationCall");
+-}
+-
+-static bool
+-CheckSimdCtorCall(FunctionValidator& f, ParseNode* call, const ModuleValidator::Global* global,
+-                  Type* type)
+-{
+-    MOZ_ASSERT(call->isKind(ParseNodeKind::Call));
+-
+-    SimdType simdType = global->simdCtorType();
+-    unsigned length = GetSimdLanes(simdType);
+-    if (!CheckSimdCallArgs(f, call, length, CheckSimdScalarArgs(simdType)))
+-        return false;
+-
+-    if (!f.writeSimdOp(simdType, SimdOperation::Constructor))
+-        return false;
+-
+-    *type = simdType;
+-    return true;
+-}
+-
+ static bool
+ CheckUncoercedCall(FunctionValidator& f, ParseNode* expr, Type* type)
+ {
+     MOZ_ASSERT(expr->isKind(ParseNodeKind::Call));
+ 
+     const ModuleValidator::Global* global;
+     if (IsCallToGlobal(f.m(), expr, &global)) {
+         if (global->isMathFunction())
+             return CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), type);
+         if (global->isAtomicsFunction())
+             return CheckAtomicsBuiltinCall(f, expr, global->atomicsBuiltinFunction(), type);
+-        if (global->isSimdCtor())
+-            return CheckSimdCtorCall(f, expr, global, type);
+-        if (global->isSimdOperation())
+-            return CheckSimdOperationCall(f, expr, global, type);
+     }
+ 
+     return f.fail(expr, "all function calls must either be calls to standard lib math functions, "
+-                        "standard atomic functions, standard SIMD constructors or operations, "
+-                        "ignored (via f(); or comma-expression), coerced to signed (via f()|0), "
+-                        "coerced to float (via fround(f())) or coerced to double (via +f())");
++                        "standard atomic functions, ignored (via f(); or comma-expression), "
++                        "coerced to signed (via f()|0), coerced to float (via fround(f())) or "
++                        "coerced to double (via +f())");
+ }
+ 
+ static bool
+ CoerceResult(FunctionValidator& f, ParseNode* expr, Type expected, Type actual,
+              Type* type)
+ {
+     MOZ_ASSERT(expected.isCanonical());
+ 
+@@ -5894,20 +4439,17 @@ CoerceResult(FunctionValidator& f, Parse
+         } else if (actual.isUnsigned()) {
+             if (!f.encoder().writeOp(Op::F64ConvertUI32))
+                 return false;
+         } else {
+             return f.failf(expr, "%s is not a subtype of double?, float?, signed or unsigned", actual.toChars());
+         }
+         break;
+       default:
+-        MOZ_ASSERT(expected.isSimd(), "Incomplete switch");
+-        if (actual != expected)
+-            return f.failf(expr, "got type %s, expected %s", actual.toChars(), expected.toChars());
+-        break;
++        MOZ_CRASH("unexpected uncoerced result type");
+     }
+ 
+     *type = Type::ret(expected);
+     return true;
+ }
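The CoerceResult hunk above keeps separate signed and unsigned int32-to-double conversions (F64ConvertSI32 vs. F64ConvertUI32). A sketch (illustrative only, not patch code; assumes a two's-complement target) of why the distinction is load-bearing: the same 32-bit pattern converts to different doubles under the two views.

    #include <cassert>
    #include <cstdint>

    int main() {
        uint32_t bits = 0xFFFFFFFFu;
        assert(double(int32_t(bits)) == -1.0);    // signed view (F64ConvertSI32)
        assert(double(bits) == 4294967295.0);     // unsigned view (F64ConvertUI32)
        return 0;
    }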
+ 
+ static bool
+ CheckCoercedMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltinFunction func,
+@@ -5915,36 +4457,16 @@ CheckCoercedMathBuiltinCall(FunctionVali
+ {
+     Type actual;
+     if (!CheckMathBuiltinCall(f, callNode, func, &actual))
+         return false;
+     return CoerceResult(f, callNode, ret, actual, type);
+ }
+ 
+ static bool
+-CheckCoercedSimdCall(FunctionValidator& f, ParseNode* call, const ModuleValidator::Global* global,
+-                     Type ret, Type* type)
+-{
+-    MOZ_ASSERT(ret.isCanonical());
+-
+-    Type actual;
+-    if (global->isSimdCtor()) {
+-        if (!CheckSimdCtorCall(f, call, global, &actual))
+-            return false;
+-        MOZ_ASSERT(actual.isSimd());
+-    } else {
+-        MOZ_ASSERT(global->isSimdOperation());
+-        if (!CheckSimdOperationCall(f, call, global, &actual))
+-            return false;
+-    }
+-
+-    return CoerceResult(f, call, ret, actual, type);
+-}
+-
+-static bool
+ CheckCoercedAtomicsBuiltinCall(FunctionValidator& f, ParseNode* callNode,
+                                AsmJSAtomicsBuiltinFunction func, Type ret, Type* type)
+ {
+     MOZ_ASSERT(ret.isCanonical());
+ 
+     Type actual;
+     if (!CheckAtomicsBuiltinCall(f, callNode, func, &actual))
+         return false;
+@@ -5986,19 +4508,16 @@ CheckCoercedCall(FunctionValidator& f, P
+             return CheckCoercedAtomicsBuiltinCall(f, call, global->atomicsBuiltinFunction(), ret, type);
+           case ModuleValidator::Global::ConstantLiteral:
+           case ModuleValidator::Global::ConstantImport:
+           case ModuleValidator::Global::Variable:
+           case ModuleValidator::Global::Table:
+           case ModuleValidator::Global::ArrayView:
+           case ModuleValidator::Global::ArrayViewCtor:
+             return f.failName(callee, "'%s' is not callable function", callee->name());
+-          case ModuleValidator::Global::SimdCtor:
+-          case ModuleValidator::Global::SimdOp:
+-            return CheckCoercedSimdCall(f, call, global, ret, type);
+           case ModuleValidator::Global::Function:
+             break;
+         }
+     }
+ 
+     return CheckInternalCall(f, call, calleeName, ret, type);
+ }
+ 
+@@ -6173,21 +4692,19 @@ CheckConditional(FunctionValidator& f, P
+         return false;
+ 
+     if (thenType.isInt() && elseType.isInt()) {
+         *type = Type::Int;
+     } else if (thenType.isDouble() && elseType.isDouble()) {
+         *type = Type::Double;
+     } else if (thenType.isFloat() && elseType.isFloat()) {
+         *type = Type::Float;
+-    } else if (thenType.isSimd() && elseType == thenType) {
+-        *type = thenType;
+     } else {
+         return f.failf(ternary, "then/else branches of conditional must both produce int, float, "
+-                       "double or SIMD types, current types are %s and %s",
++                       "or double; current types are %s and %s",
+                        thenType.toChars(), elseType.toChars());
+     }
+ 
+     if (!f.popIf(typeAt, type->toWasmBlockSignatureType()))
+         return false;
+ 
+     return true;
+ }
+@@ -6204,26 +4721,16 @@ IsValidIntMultiplyConstant(ModuleValidat
+       case NumLit::NegativeInt:
+         if (abs(lit.toInt32()) < (1<<20))
+             return true;
+         return false;
+       case NumLit::BigUnsigned:
+       case NumLit::Double:
+       case NumLit::Float:
+       case NumLit::OutOfRangeInt:
+-      case NumLit::Int8x16:
+-      case NumLit::Uint8x16:
+-      case NumLit::Int16x8:
+-      case NumLit::Uint16x8:
+-      case NumLit::Int32x4:
+-      case NumLit::Uint32x4:
+-      case NumLit::Float32x4:
+-      case NumLit::Bool8x16:
+-      case NumLit::Bool16x8:
+-      case NumLit::Bool32x4:
+         return false;
+     }
+ 
+     MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad literal");
+ }
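IsValidIntMultiplyConstant above retains the 1<<20 bound on int multiply constants. A sketch of the arithmetic behind it (illustrative only; this is the standard asm.js exactness argument, not text from the patch): with |x| <= 2^31 and |c| < 2^20, the product stays below 2^51 in magnitude, so JavaScript's double multiply is exact and agrees with true integer multiplication.

    #include <cassert>
    #include <cstdint>

    int main() {
        int64_t x = INT32_MIN;               // largest-magnitude int32 value
        int64_t c = (int64_t(1) << 20) - 1;  // largest accepted constant
        // Both factors and their product (< 2^51) are exactly representable
        // as doubles, so the floating-point product equals the exact one.
        assert(double(x) * double(c) == double(x * c));
        return 0;
    }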
+ 
+ static bool
+ CheckMultiply(FunctionValidator& f, ParseNode* star, Type* type)
+@@ -6887,26 +5394,16 @@ CheckCaseExpr(FunctionValidator& f, Pars
+       case NumLit::NegativeInt:
+         *value = lit.toInt32();
+         break;
+       case NumLit::OutOfRangeInt:
+       case NumLit::BigUnsigned:
+         return f.fail(caseExpr, "switch case expression out of integer range");
+       case NumLit::Double:
+       case NumLit::Float:
+-      case NumLit::Int8x16:
+-      case NumLit::Uint8x16:
+-      case NumLit::Int16x8:
+-      case NumLit::Uint16x8:
+-      case NumLit::Int32x4:
+-      case NumLit::Uint32x4:
+-      case NumLit::Float32x4:
+-      case NumLit::Bool8x16:
+-      case NumLit::Bool16x8:
+-      case NumLit::Bool32x4:
+         return f.fail(caseExpr, "switch case expression must be an integer literal");
+     }
+ 
+     return true;
+ }
+ 
+ static bool
+ CheckDefaultAtEnd(FunctionValidator& f, ParseNode* stmt)
+@@ -7642,38 +6139,30 @@ HasObjectValueOfMethodPure(JSObject* obj
+         return false;
+ 
+     return IsSelfHostedFunctionWithName(fun, cx->names().Object_valueOf);
+ }
+ 
+ static bool
+ HasPureCoercion(JSContext* cx, HandleValue v)
+ {
+-    // Unsigned SIMD types are not allowed in function signatures.
+-    if (IsVectorObject<Int32x4>(v) || IsVectorObject<Int16x8>(v) ||  IsVectorObject<Int8x16>(v) ||
+-        IsVectorObject<Bool32x4>(v) || IsVectorObject<Bool16x8>(v) ||
+-        IsVectorObject<Bool8x16>(v) || IsVectorObject<Float32x4>(v)) {
+-        return true;
+-    }
+-
+-    // Ideally, we'd reject all non-SIMD non-primitives, but Emscripten has a
+-    // bug that generates code that passes functions for some imports. To avoid
+-    // breaking all the code that contains this bug, we make an exception for
+-    // functions that don't have user-defined valueOf or toString, for their
+-    // coercions are not observable and coercion via ToNumber/ToInt32
+-    // definitely produces NaN/0. We should remove this special case later once
+-    // most apps have been built with newer Emscripten.
++    // Ideally, we'd reject all non-primitives, but Emscripten has a bug that
++    // generates code that passes functions for some imports. To avoid breaking
++    // all the code that contains this bug, we make an exception for functions
++    // that don't have user-defined valueOf or toString, for their coercions
++    // are not observable and coercion via ToNumber/ToInt32 definitely produces
++    // NaN/0. We should remove this special case later once most apps have been
++    // built with newer Emscripten.
+     if (v.toObject().is<JSFunction>() &&
+         HasNoToPrimitiveMethodPure(&v.toObject(), cx) &&
+         HasObjectValueOfMethodPure(&v.toObject(), cx) &&
+         HasNativeMethodPure(&v.toObject(), cx->names().toString, fun_toString, cx))
+     {
+         return true;
+     }
+-
+     return false;
+ }
+ 
+ static bool
+ ValidateGlobalVariable(JSContext* cx, const AsmJSGlobal& global, HandleValue importVal,
+                        Maybe<LitVal>* val)
+ {
+     switch (global.varInitKind()) {
+@@ -7708,68 +6197,16 @@ ValidateGlobalVariable(JSContext* cx, co
+           }
+           case ValType::F64: {
+             double d;
+             if (!ToNumber(cx, v, &d))
+                 return false;
+             val->emplace(d);
+             return true;
+           }
+-          case ValType::I8x16: {
+-            SimdConstant simdConstant;
+-            if (!ToSimdConstant<Int8x16>(cx, v, &simdConstant))
+-                return false;
+-            val->emplace(simdConstant.asInt8x16());
+-            return true;
+-          }
+-          case ValType::I16x8: {
+-            SimdConstant simdConstant;
+-            if (!ToSimdConstant<Int16x8>(cx, v, &simdConstant))
+-                return false;
+-            val->emplace(simdConstant.asInt16x8());
+-            return true;
+-          }
+-          case ValType::I32x4: {
+-            SimdConstant simdConstant;
+-            if (!ToSimdConstant<Int32x4>(cx, v, &simdConstant))
+-                return false;
+-            val->emplace(simdConstant.asInt32x4());
+-            return true;
+-          }
+-          case ValType::F32x4: {
+-            SimdConstant simdConstant;
+-            if (!ToSimdConstant<Float32x4>(cx, v, &simdConstant))
+-                return false;
+-            val->emplace(simdConstant.asFloat32x4());
+-            return true;
+-          }
+-          case ValType::B8x16: {
+-            SimdConstant simdConstant;
+-            if (!ToSimdConstant<Bool8x16>(cx, v, &simdConstant))
+-                return false;
+-            // Bool8x16 uses the same data layout as Int8x16.
+-            val->emplace(simdConstant.asInt8x16());
+-            return true;
+-          }
+-          case ValType::B16x8: {
+-            SimdConstant simdConstant;
+-            if (!ToSimdConstant<Bool16x8>(cx, v, &simdConstant))
+-                return false;
+-            // Bool16x8 uses the same data layout as Int16x8.
+-            val->emplace(simdConstant.asInt16x8());
+-            return true;
+-          }
+-          case ValType::B32x4: {
+-            SimdConstant simdConstant;
+-            if (!ToSimdConstant<Bool32x4>(cx, v, &simdConstant))
+-                return false;
+-            // Bool32x4 uses the same data layout as Int32x4.
+-            val->emplace(simdConstant.asInt32x4());
+-            return true;
+-          }
+           case ValType::Ref:
+           case ValType::AnyRef: {
+             MOZ_CRASH("not available in asm.js");
+           }
+         }
+       }
+     }
+ 
+@@ -7843,177 +6280,16 @@ ValidateMathBuiltinFunction(JSContext* c
+ 
+     if (!IsNativeFunction(v, native))
+         return LinkFail(cx, "bad Math.* builtin function");
+ 
+     return true;
+ }
+ 
+ static bool
+-ValidateSimdType(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal,
+-                 MutableHandleValue out)
+-{
+-    RootedValue v(cx);
+-    if (!GetDataProperty(cx, globalVal, cx->names().SIMD, &v))
+-        return false;
+-
+-    SimdType type;
+-    if (global.which() == AsmJSGlobal::SimdCtor)
+-        type = global.simdCtorType();
+-    else
+-        type = global.simdOperationType();
+-
+-    RootedPropertyName simdTypeName(cx, SimdTypeToName(cx->names(), type));
+-    if (!GetDataProperty(cx, v, simdTypeName, &v))
+-        return false;
+-
+-    if (!v.isObject())
+-        return LinkFail(cx, "bad SIMD type");
+-
+-    RootedObject simdDesc(cx, &v.toObject());
+-    if (!simdDesc->is<SimdTypeDescr>())
+-        return LinkFail(cx, "bad SIMD type");
+-
+-    if (type != simdDesc->as<SimdTypeDescr>().type())
+-        return LinkFail(cx, "bad SIMD type");
+-
+-    out.set(v);
+-    return true;
+-}
+-
+-static bool
+-ValidateSimdType(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
+-{
+-    RootedValue _(cx);
+-    return ValidateSimdType(cx, global, globalVal, &_);
+-}
+-
+-static bool
+-ValidateSimdOperation(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
+-{
+-    RootedValue v(cx);
+-    MOZ_ALWAYS_TRUE(ValidateSimdType(cx, global, globalVal, &v));
+-
+-    if (!GetDataProperty(cx, v, global.field(), &v))
+-        return false;
+-
+-    Native native = nullptr;
+-    switch (global.simdOperationType()) {
+-#define SET_NATIVE_INT8X16(op) case SimdOperation::Fn_##op: native = simd_int8x16_##op; break;
+-#define SET_NATIVE_INT16X8(op) case SimdOperation::Fn_##op: native = simd_int16x8_##op; break;
+-#define SET_NATIVE_INT32X4(op) case SimdOperation::Fn_##op: native = simd_int32x4_##op; break;
+-#define SET_NATIVE_UINT8X16(op) case SimdOperation::Fn_##op: native = simd_uint8x16_##op; break;
+-#define SET_NATIVE_UINT16X8(op) case SimdOperation::Fn_##op: native = simd_uint16x8_##op; break;
+-#define SET_NATIVE_UINT32X4(op) case SimdOperation::Fn_##op: native = simd_uint32x4_##op; break;
+-#define SET_NATIVE_FLOAT32X4(op) case SimdOperation::Fn_##op: native = simd_float32x4_##op; break;
+-#define SET_NATIVE_BOOL8X16(op) case SimdOperation::Fn_##op: native = simd_bool8x16_##op; break;
+-#define SET_NATIVE_BOOL16X8(op) case SimdOperation::Fn_##op: native = simd_bool16x8_##op; break;
+-#define SET_NATIVE_BOOL32X4(op) case SimdOperation::Fn_##op: native = simd_bool32x4_##op; break;
+-#define FALLTHROUGH(op) case SimdOperation::Fn_##op:
+-      case SimdType::Int8x16:
+-        switch (global.simdOperation()) {
+-          FORALL_INT8X16_ASMJS_OP(SET_NATIVE_INT8X16)
+-          SET_NATIVE_INT8X16(fromUint8x16Bits)
+-          SET_NATIVE_INT8X16(fromUint16x8Bits)
+-          SET_NATIVE_INT8X16(fromUint32x4Bits)
+-          default: MOZ_CRASH("shouldn't have been validated in the first place");
+-        }
+-        break;
+-      case SimdType::Int16x8:
+-        switch (global.simdOperation()) {
+-          FORALL_INT16X8_ASMJS_OP(SET_NATIVE_INT16X8)
+-          SET_NATIVE_INT16X8(fromUint8x16Bits)
+-          SET_NATIVE_INT16X8(fromUint16x8Bits)
+-          SET_NATIVE_INT16X8(fromUint32x4Bits)
+-          default: MOZ_CRASH("shouldn't have been validated in the first place");
+-        }
+-        break;
+-      case SimdType::Int32x4:
+-        switch (global.simdOperation()) {
+-          FORALL_INT32X4_ASMJS_OP(SET_NATIVE_INT32X4)
+-          SET_NATIVE_INT32X4(fromUint8x16Bits)
+-          SET_NATIVE_INT32X4(fromUint16x8Bits)
+-          SET_NATIVE_INT32X4(fromUint32x4Bits)
+-          default: MOZ_CRASH("shouldn't have been validated in the first place");
+-        }
+-        break;
+-      case SimdType::Uint8x16:
+-        switch (global.simdOperation()) {
+-          FORALL_INT8X16_ASMJS_OP(SET_NATIVE_UINT8X16)
+-          SET_NATIVE_UINT8X16(fromInt8x16Bits)
+-          SET_NATIVE_UINT8X16(fromUint16x8Bits)
+-          SET_NATIVE_UINT8X16(fromUint32x4Bits)
+-          default: MOZ_CRASH("shouldn't have been validated in the first place");
+-        }
+-        break;
+-      case SimdType::Uint16x8:
+-        switch (global.simdOperation()) {
+-          FORALL_INT16X8_ASMJS_OP(SET_NATIVE_UINT16X8)
+-          SET_NATIVE_UINT16X8(fromUint8x16Bits)
+-          SET_NATIVE_UINT16X8(fromInt16x8Bits)
+-          SET_NATIVE_UINT16X8(fromUint32x4Bits)
+-          default: MOZ_CRASH("shouldn't have been validated in the first place");
+-        }
+-        break;
+-      case SimdType::Uint32x4:
+-        switch (global.simdOperation()) {
+-          FORALL_INT32X4_ASMJS_OP(SET_NATIVE_UINT32X4)
+-          SET_NATIVE_UINT32X4(fromUint8x16Bits)
+-          SET_NATIVE_UINT32X4(fromUint16x8Bits)
+-          SET_NATIVE_UINT32X4(fromInt32x4Bits)
+-          default: MOZ_CRASH("shouldn't have been validated in the first place");
+-        }
+-        break;
+-      case SimdType::Float32x4:
+-        switch (global.simdOperation()) {
+-          FORALL_FLOAT32X4_ASMJS_OP(SET_NATIVE_FLOAT32X4)
+-          SET_NATIVE_FLOAT32X4(fromUint8x16Bits)
+-          SET_NATIVE_FLOAT32X4(fromUint16x8Bits)
+-          SET_NATIVE_FLOAT32X4(fromUint32x4Bits)
+-          default: MOZ_CRASH("shouldn't have been validated in the first place");
+-        }
+-        break;
+-      case SimdType::Bool8x16:
+-        switch (global.simdOperation()) {
+-          FORALL_BOOL_SIMD_OP(SET_NATIVE_BOOL8X16)
+-          default: MOZ_CRASH("shouldn't have been validated in the first place");
+-        }
+-        break;
+-      case SimdType::Bool16x8:
+-        switch (global.simdOperation()) {
+-          FORALL_BOOL_SIMD_OP(SET_NATIVE_BOOL16X8)
+-          default: MOZ_CRASH("shouldn't have been validated in the first place");
+-        }
+-        break;
+-      case SimdType::Bool32x4:
+-        switch (global.simdOperation()) {
+-          FORALL_BOOL_SIMD_OP(SET_NATIVE_BOOL32X4)
+-          default: MOZ_CRASH("shouldn't have been validated in the first place");
+-        }
+-        break;
+-      default: MOZ_CRASH("unhandled simd type");
+-#undef FALLTHROUGH
+-#undef SET_NATIVE_INT8X16
+-#undef SET_NATIVE_INT16X8
+-#undef SET_NATIVE_INT32X4
+-#undef SET_NATIVE_UINT8X16
+-#undef SET_NATIVE_UINT16X8
+-#undef SET_NATIVE_UINT32X4
+-#undef SET_NATIVE_FLOAT32X4
+-#undef SET_NATIVE_BOOL8X16
+-#undef SET_NATIVE_BOOL16X8
+-#undef SET_NATIVE_BOOL32X4
+-#undef SET_NATIVE
+-    }
+-    if (!native || !IsNativeFunction(v, native))
+-        return LinkFail(cx, "bad SIMD.type.* operation");
+-    return true;
+-}
+-
+-static bool
+ ValidateAtomicsBuiltinFunction(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
+ {
+     RootedValue v(cx);
+     if (!GetDataProperty(cx, globalVal, cx->names().Atomics, &v))
+         return false;
+ 
+     if (!GetDataProperty(cx, v, global.field(), &v))
+         return false;
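The validators deleted above implement asm.js link-time checking: each stdlib import is re-read from the live global object and compared against the expected native, so a page that replaced SIMD.Int32x4.add would fail the link instead of running miscompiled code. A minimal sketch of that shape, reusing GetDataProperty, IsNativeFunction, and LinkFail as they appear in this hunk (the helper name is invented):

    static bool
    ValidateStdlibFunction(JSContext* cx, HandleValue globalVal,
                           HandlePropertyName name, Native expected)
    {
        // Look the import up on the real global at link time...
        RootedValue v(cx);
        if (!GetDataProperty(cx, globalVal, name, &v))
            return false;

        // ...and insist it is still the engine-provided native.
        if (!IsNativeFunction(v, expected))
            return LinkFail(cx, "stdlib function was replaced");

        return true;
    }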
+@@ -8105,22 +6381,21 @@ CheckBuffer(JSContext* cx, const AsmJSMe
+             return false;
+         return LinkFail(cx, msg.get());
+     }
+ 
+     if (buffer->is<ArrayBufferObject>()) {
+         // On 64-bit, bounds checks are statically removed so the huge guard
+         // region is always necessary. On 32-bit, allocating a guard page
+         // requires reallocating the incoming ArrayBuffer which could trigger
+-        // OOM. Thus, only ask for a guard page when SIMD is used since SIMD
+-        // allows unaligned memory access (see MaxMemoryAccessSize comment);
++        // OOM. Thus, don't ask for a guard page in this case.
+ #ifdef WASM_HUGE_MEMORY
+         bool needGuard = true;
+ #else
+-        bool needGuard = metadata.usesSimd;
++        bool needGuard = false;
+ #endif
+         Rooted<ArrayBufferObject*> arrayBuffer(cx, &buffer->as<ArrayBufferObject>());
+         if (!ArrayBufferObject::prepareForAsmJS(cx, arrayBuffer, needGuard))
+             return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
+     } else {
+         if (!buffer->as<SharedArrayBufferObject>().isPreparedForAsmJS()) {
+             if (buffer->as<SharedArrayBufferObject>().isWasm()) {
+                 return LinkFail(cx, "SharedArrayBuffer created for Wasm cannot be used for "
+@@ -8173,24 +6448,16 @@ GetImports(JSContext* cx, const AsmJSMet
+           case AsmJSGlobal::AtomicsBuiltinFunction:
+             if (!ValidateAtomicsBuiltinFunction(cx, global, globalVal))
+                 return false;
+             break;
+           case AsmJSGlobal::Constant:
+             if (!ValidateConstant(cx, global, globalVal))
+                 return false;
+             break;
+-          case AsmJSGlobal::SimdCtor:
+-            if (!ValidateSimdType(cx, global, globalVal))
+-                return false;
+-            break;
+-          case AsmJSGlobal::SimdOp:
+-            if (!ValidateSimdOperation(cx, global, globalVal))
+-                return false;
+-            break;
+         }
+     }
+ 
+     for (const AsmJSImport& import : metadata.asmJSImports) {
+         if (!funcImports.append(ffis[import.ffiIndex()]))
+             return false;
+     }
+ 
+diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
+--- a/js/src/wasm/WasmBaselineCompile.cpp
++++ b/js/src/wasm/WasmBaselineCompile.cpp
+@@ -9041,17 +9041,17 @@ BaseCompiler::emitAtomicCmpXchg(ValType 
+ 
+     if (!iter_.readAtomicCmpXchg(&addr, type, Scalar::byteSize(viewType), &unused, &unused))
+         return false;
+ 
+     if (deadCode_)
+         return true;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Full());
++                            Synchronization::Full());
+ 
+     if (Scalar::byteSize(viewType) <= 4) {
+         PopAtomicCmpXchg32Regs regs(this, type, viewType);
+ 
+         AccessCheck check;
+         RegI32 rp = popMemoryAccess(&access, &check);
+         RegI32 tls = maybeLoadTlsForAccess(check);
+ 
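From here on, every MemoryAccessDesc is built without the old /*numSimdExprs=*/ 0 argument, leaving only the scalar view type, alignment, constant offset, trap offset, and synchronization requirement. A hedged sketch of the constructor shape these call sites imply (the real class lives elsewhere in the wasm sources and may differ in members and helpers):

    class MemoryAccessDesc
    {
        Scalar::Type type_;          // element view type, e.g. Scalar::Int32
        uint32_t align_;             // declared alignment of the access
        uint32_t offset_;            // constant byte offset added to the index
        BytecodeOffset trapOffset_;  // bytecode position for trap reporting
        Synchronization sync_;       // Load/Store/Full barriers for atomics

      public:
        MemoryAccessDesc(Scalar::Type type, uint32_t align, uint32_t offset,
                         BytecodeOffset trapOffset, Synchronization sync)
          : type_(type), align_(align), offset_(offset),
            trapOffset_(trapOffset), sync_(sync)
        {}
    };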
+@@ -9097,17 +9097,17 @@ BaseCompiler::emitAtomicLoad(ValType typ
+     LinearMemoryAddress<Nothing> addr;
+     if (!iter_.readAtomicLoad(&addr, type, Scalar::byteSize(viewType)))
+         return false;
+ 
+     if (deadCode_)
+         return true;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+-                            /*numSimdElems=*/ 0, Synchronization::Load());
++                            Synchronization::Load());
+ 
+     if (Scalar::byteSize(viewType) <= sizeof(void*))
+         return loadCommon(&access, type);
+ 
+     MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+ 
+ #if defined(JS_64BIT)
+     MOZ_CRASH("Should not happen");
+@@ -9141,17 +9141,17 @@ BaseCompiler::emitAtomicRMW(ValType type
+     Nothing unused_value;
+     if (!iter_.readAtomicRMW(&addr, type, Scalar::byteSize(viewType), &unused_value))
+         return false;
+ 
+     if (deadCode_)
+         return true;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+-                            /*numSimdElems=*/ 0, Synchronization::Full());
++                            Synchronization::Full());
+ 
+     if (Scalar::byteSize(viewType) <= 4) {
+         PopAtomicRMW32Regs regs(this, type, viewType, op);
+ 
+         AccessCheck check;
+         RegI32 rp = popMemoryAccess(&access, &check);
+         RegI32 tls = maybeLoadTlsForAccess(check);
+ 
+@@ -9204,17 +9204,17 @@ BaseCompiler::emitAtomicStore(ValType ty
+     Nothing unused_value;
+     if (!iter_.readAtomicStore(&addr, type, Scalar::byteSize(viewType), &unused_value))
+         return false;
+ 
+     if (deadCode_)
+         return true;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+-                            /*numSimdElems=*/ 0, Synchronization::Store());
++                            Synchronization::Store());
+ 
+     if (Scalar::byteSize(viewType) <= sizeof(void*))
+         return storeCommon(&access, type);
+ 
+     MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+ 
+ #ifdef JS_64BIT
+     MOZ_CRASH("Should not happen");
+@@ -9232,17 +9232,17 @@ BaseCompiler::emitAtomicXchg(ValType typ
+     if (!iter_.readAtomicRMW(&addr, type, Scalar::byteSize(viewType), &unused_value))
+         return false;
+ 
+     if (deadCode_)
+         return true;
+ 
+     AccessCheck check;
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+-                            /*numSimdElems=*/ 0, Synchronization::Full());
++                            Synchronization::Full());
+ 
+     if (Scalar::byteSize(viewType) <= 4) {
+         PopAtomicXchg32Regs regs(this, type, viewType);
+         RegI32 rp = popMemoryAccess(&access, &check);
+         RegI32 tls = maybeLoadTlsForAccess(check);
+ 
+         regs.atomicXchg32(prepareAtomicMemoryAccess(&access, &check, tls, rp), viewType);
+ 
+diff --git a/js/src/wasm/WasmBaselineCompile.h b/js/src/wasm/WasmBaselineCompile.h
+--- a/js/src/wasm/WasmBaselineCompile.h
++++ b/js/src/wasm/WasmBaselineCompile.h
+@@ -20,17 +20,17 @@
+ #define asmjs_wasm_baseline_compile_h
+ 
+ #include "wasm/WasmGenerator.h"
+ 
+ namespace js {
+ namespace wasm {
+ 
+ // Return whether BaselineCompileFunction can generate code on the current device.
+-// Note: asm.js is also currently not supported due to Atomics and SIMD.
++// Note: asm.js is also currently not supported due to Atomics.
+ bool
+ BaselineCanCompile();
+ 
+ // Generate adequate code quickly.
+ MOZ_MUST_USE bool
+ BaselineCompileFunctions(const ModuleEnvironment& env, LifoAlloc& lifo,
+                          const FuncCompileInputVector& inputs, CompiledCode* code,
+                          UniqueChars* error);
+diff --git a/js/src/wasm/WasmBinaryConstants.h b/js/src/wasm/WasmBinaryConstants.h
+--- a/js/src/wasm/WasmBinaryConstants.h
++++ b/js/src/wasm/WasmBinaryConstants.h
+@@ -14,18 +14,16 @@
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ #ifndef wasm_binary_h
+ #define wasm_binary_h
+ 
+-#include "builtin/SIMDConstants.h"
+-
+ namespace js {
+ namespace wasm {
+ 
+ static const uint32_t MagicNumber        = 0x6d736100; // "\0asm"
+ static const uint32_t EncodingVersion    = 0x01;
+ 
+ enum class SectionId
+ {
+@@ -45,25 +43,16 @@ enum class SectionId
+ 
+ enum class TypeCode
+ {
+     I32                                  = 0x7f,  // SLEB128(-0x01)
+     I64                                  = 0x7e,  // SLEB128(-0x02)
+     F32                                  = 0x7d,  // SLEB128(-0x03)
+     F64                                  = 0x7c,  // SLEB128(-0x04)
+ 
+-    // Only emitted internally for asm.js, likely to get collapsed into I128
+-    I8x16                                = 0x7b,
+-    I16x8                                = 0x7a,
+-    I32x4                                = 0x79,
+-    F32x4                                = 0x78,
+-    B8x16                                = 0x77,
+-    B16x8                                = 0x76,
+-    B32x4                                = 0x75,
+-
+     // A function pointer with any signature
+     AnyFunc                              = 0x70,  // SLEB128(-0x10)
+ 
+     // A reference to any type.
+     AnyRef                               = 0x6f,
+ 
+     // Type constructor for reference types.
+     Ref                                  = 0x6e,
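The annotations pair each one-byte code with an SLEB128 value because value types are encoded as single-byte signed LEB128 negatives: for -64 <= n < 0 the encoded byte is n & 0x7f, with the continuation bit clear. A compilable check of the correspondences listed in this hunk:

    static_assert((-0x01 & 0x7f) == 0x7f, "I32");
    static_assert((-0x02 & 0x7f) == 0x7e, "I64");
    static_assert((-0x03 & 0x7f) == 0x7d, "F32");
    static_assert((-0x04 & 0x7f) == 0x7c, "F64");
    static_assert((-0x10 & 0x7f) == 0x70, "AnyFunc");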
+@@ -489,92 +478,16 @@ enum class MozOp
+ 
+     // Atomics
+     I32AtomicsCompareExchange,
+     I32AtomicsExchange,
+     I32AtomicsLoad,
+     I32AtomicsStore,
+     I32AtomicsBinOp,
+ 
+-    // SIMD
+-#define SIMD_OPCODE(TYPE, OP) TYPE##OP,
+-#define _(OP) SIMD_OPCODE(I8x16, OP)
+-    FORALL_INT8X16_ASMJS_OP(_)
+-    I8x16Constructor,
+-    I8x16Const,
+-#undef _
+-    // Unsigned I8x16 operations. These are the SIMD.Uint8x16 operations that
+-    // behave differently from their SIMD.Int8x16 counterparts.
+-    I8x16extractLaneU,
+-    I8x16addSaturateU,
+-    I8x16subSaturateU,
+-    I8x16shiftRightByScalarU,
+-    I8x16lessThanU,
+-    I8x16lessThanOrEqualU,
+-    I8x16greaterThanU,
+-    I8x16greaterThanOrEqualU,
+-
+-#define SIMD_OPCODE(TYPE, OP) TYPE##OP,
+-#define _(OP) SIMD_OPCODE(I16x8, OP)
+-    FORALL_INT16X8_ASMJS_OP(_)
+-    I16x8Constructor,
+-    I16x8Const,
+-#undef _
+-    // Unsigned I16x8 operations. These are the SIMD.Uint16x8 operations that
+-    // behave differently from their SIMD.Int16x8 counterparts.
+-    I16x8extractLaneU,
+-    I16x8addSaturateU,
+-    I16x8subSaturateU,
+-    I16x8shiftRightByScalarU,
+-    I16x8lessThanU,
+-    I16x8lessThanOrEqualU,
+-    I16x8greaterThanU,
+-    I16x8greaterThanOrEqualU,
+-
+-#define SIMD_OPCODE(TYPE, OP) TYPE##OP,
+-#define _(OP) SIMD_OPCODE(I32x4, OP)
+-    FORALL_INT32X4_ASMJS_OP(_)
+-    I32x4Constructor,
+-    I32x4Const,
+-#undef _
+-    // Unsigned I32x4 operations. These are the SIMD.Uint32x4 operations that
+-    // behave differently from their SIMD.Int32x4 counterparts.
+-    I32x4shiftRightByScalarU,
+-    I32x4lessThanU,
+-    I32x4lessThanOrEqualU,
+-    I32x4greaterThanU,
+-    I32x4greaterThanOrEqualU,
+-    I32x4fromFloat32x4U,
+-#define _(OP) SIMD_OPCODE(F32x4, OP)
+-    FORALL_FLOAT32X4_ASMJS_OP(_)
+-    F32x4Constructor,
+-    F32x4Const,
+-#undef _
+-
+-#define _(OP) SIMD_OPCODE(B8x16, OP)
+-    FORALL_BOOL_SIMD_OP(_)
+-    B8x16Constructor,
+-    B8x16Const,
+-#undef _
+-#undef OPCODE
+-
+-#define _(OP) SIMD_OPCODE(B16x8, OP)
+-    FORALL_BOOL_SIMD_OP(_)
+-    B16x8Constructor,
+-    B16x8Const,
+-#undef _
+-#undef OPCODE
+-
+-#define _(OP) SIMD_OPCODE(B32x4, OP)
+-    FORALL_BOOL_SIMD_OP(_)
+-    B32x4Constructor,
+-    B32x4Const,
+-#undef _
+-#undef OPCODE
+-
+     Limit
+ };
+ 
+ struct OpBytes
+ {
+     // The bytes of the opcode have 16-bit representations to allow for a full
+     // 256-value range plus a sentinel Limit value.
+     uint16_t b0;
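The opcode block deleted above was generated with X-macros: each FORALL_*_OP list invokes a caller-supplied macro once per operation, stamping out one enumerator per (type, operation) pair, with the unsigned-only variants then listed by hand. A self-contained illustration of the pattern (the demo names are invented, not the real lists):

    #define FORALL_DEMO_OP(_) _(add) _(sub) _(neg)

    enum class DemoOp
    {
    #define DEMO_OPCODE(OP) I32x4##OP,
        FORALL_DEMO_OP(DEMO_OPCODE)  // I32x4add, I32x4sub, I32x4neg,
    #undef DEMO_OPCODE
        Limit
    };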
+diff --git a/js/src/wasm/WasmBuiltins.cpp b/js/src/wasm/WasmBuiltins.cpp
+--- a/js/src/wasm/WasmBuiltins.cpp
++++ b/js/src/wasm/WasmBuiltins.cpp
+@@ -252,18 +252,16 @@ WasmHandleTrap()
+       case Trap::InvalidConversionToInteger:
+         return ReportError(cx, JSMSG_WASM_INVALID_CONVERSION);
+       case Trap::IntegerDivideByZero:
+         return ReportError(cx, JSMSG_WASM_INT_DIVIDE_BY_ZERO);
+       case Trap::IndirectCallToNull:
+         return ReportError(cx, JSMSG_WASM_IND_CALL_TO_NULL);
+       case Trap::IndirectCallBadSig:
+         return ReportError(cx, JSMSG_WASM_IND_CALL_BAD_SIG);
+-      case Trap::ImpreciseSimdConversion:
+-        return ReportError(cx, JSMSG_SIMD_FAILED_CONVERSION);
+       case Trap::OutOfBounds:
+         return ReportError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+       case Trap::UnalignedAccess:
+         return ReportError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
+       case Trap::CheckInterrupt:
+         return CheckInterrupt(cx, activation);
+       case Trap::StackOverflow:
+         // TlsData::setInterrupt() causes a fake stack overflow. Since
+diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
+--- a/js/src/wasm/WasmInstance.cpp
++++ b/js/src/wasm/WasmInstance.cpp
+@@ -13,17 +13,16 @@
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ #include "wasm/WasmInstance.h"
+ 
+-#include "builtin/SIMD.h"
+ #include "jit/AtomicOperations.h"
+ #include "jit/BaselineJIT.h"
+ #include "jit/InlinableNatives.h"
+ #include "jit/JitCommon.h"
+ #include "jit/JitRealm.h"
+ #include "wasm/WasmBuiltins.h"
+ #include "wasm/WasmModule.h"
+ 
+@@ -137,23 +136,16 @@ Instance::callImport(JSContext* cx, uint
+             args[i].set(JS::CanonicalizedDoubleValue(*(double*)&argv[i]));
+             break;
+           case ValType::Ref:
+           case ValType::AnyRef: {
+             args[i].set(ObjectOrNullValue(*(JSObject**)&argv[i]));
+             break;
+           }
+           case ValType::I64:
+-          case ValType::I8x16:
+-          case ValType::I16x8:
+-          case ValType::I32x4:
+-          case ValType::F32x4:
+-          case ValType::B8x16:
+-          case ValType::B16x8:
+-          case ValType::B32x4:
+             MOZ_CRASH("unhandled type in callImport");
+         }
+     }
+ 
+     FuncImportTls& import = funcImportTls(fi);
+     RootedFunction importFun(cx, &import.obj->as<JSFunction>());
+     RootedValue fval(cx, ObjectValue(*import.obj));
+     RootedValue thisv(cx, UndefinedValue());
+@@ -207,23 +199,16 @@ Instance::callImport(JSContext* cx, uint
+         TypeSet::Type type = TypeSet::UnknownType();
+         switch (importArgs[i].code()) {
+           case ValType::I32:    type = TypeSet::Int32Type(); break;
+           case ValType::F32:    type = TypeSet::DoubleType(); break;
+           case ValType::F64:    type = TypeSet::DoubleType(); break;
+           case ValType::Ref:    MOZ_CRASH("case guarded above");
+           case ValType::AnyRef: MOZ_CRASH("case guarded above");
+           case ValType::I64:    MOZ_CRASH("NYI");
+-          case ValType::I8x16:  MOZ_CRASH("NYI");
+-          case ValType::I16x8:  MOZ_CRASH("NYI");
+-          case ValType::I32x4:  MOZ_CRASH("NYI");
+-          case ValType::F32x4:  MOZ_CRASH("NYI");
+-          case ValType::B8x16:  MOZ_CRASH("NYI");
+-          case ValType::B16x8:  MOZ_CRASH("NYI");
+-          case ValType::B32x4:  MOZ_CRASH("NYI");
+         }
+         if (!TypeScript::ArgTypes(script, i)->hasType(type))
+             return true;
+     }
+ 
+     // These arguments will be filled with undefined at runtime by the
+     // arguments rectifier: check that the imported function can handle
+     // undefined there.
+@@ -793,22 +778,22 @@ Instance::callExport(JSContext* cx, uint
+ 
+     if (func.funcType().hasI64ArgOrRet()) {
+         JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64_TYPE);
+         return false;
+     }
+ 
+     // The calling convention for an external call into wasm is to pass an
+     // array of 16-byte values where each value contains either a coerced int32
+-    // (in the low word), a double value (in the low dword) or a SIMD vector
+-    // value, with the coercions specified by the wasm signature. The external
+-    // entry point unpacks this array into the system-ABI-specified registers
+-    // and stack memory and then calls into the internal entry point. The return
+-    // value is stored in the first element of the array (which, therefore, must
+-    // have length >= 1).
++    // (in the low word), or a double value (in the low dword), with the
++    // coercions specified by the wasm signature. The external entry point
++    // unpacks this array into the system-ABI-specified registers and stack
++    // memory and then calls into the internal entry point. The return value is
++    // stored in the first element of the array (which, therefore, must have
++    // length >= 1).
+     Vector<ExportArg, 8> exportArgs(cx);
+     if (!exportArgs.resize(Max<size_t>(1, func.funcType().args().length())))
+         return false;
+ 
+     RootedValue v(cx);
+     for (unsigned i = 0; i < func.funcType().args().length(); ++i) {
+         v = i < args.length() ? args[i] : UndefinedValue();
+         switch (func.funcType().arg(i).code()) {
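The comment above describes the JS-to-wasm boundary layout: one 16-byte slot per argument, an int32 in the low word or a double in the low dword, with the return value written back into slot 0 (hence the resize to at least one element just above). A standalone stand-in for the slot shape (DemoExportArg is invented; the engine's real type is ExportArg):

    #include <cstdint>

    union DemoExportArg
    {
        alignas(16) unsigned char raw[16];  // one 16-byte slot per argument
        int32_t i32;                        // coerced int32, low word
        double f64;                         // double, low dword
    };

    static_assert(sizeof(DemoExportArg) == 16, "slot size fixed by the ABI");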
+@@ -827,68 +812,16 @@ Instance::callExport(JSContext* cx, uint
+                 return false;
+             break;
+           case ValType::Ref:
+           case ValType::AnyRef: {
+             if (!ToRef(cx, v, &exportArgs[i]))
+                 return false;
+             break;
+           }
+-          case ValType::I8x16: {
+-            SimdConstant simd;
+-            if (!ToSimdConstant<Int8x16>(cx, v, &simd))
+-                return false;
+-            memcpy(&exportArgs[i], simd.asInt8x16(), Simd128DataSize);
+-            break;
+-          }
+-          case ValType::I16x8: {
+-            SimdConstant simd;
+-            if (!ToSimdConstant<Int16x8>(cx, v, &simd))
+-                return false;
+-            memcpy(&exportArgs[i], simd.asInt16x8(), Simd128DataSize);
+-            break;
+-          }
+-          case ValType::I32x4: {
+-            SimdConstant simd;
+-            if (!ToSimdConstant<Int32x4>(cx, v, &simd))
+-                return false;
+-            memcpy(&exportArgs[i], simd.asInt32x4(), Simd128DataSize);
+-            break;
+-          }
+-          case ValType::F32x4: {
+-            SimdConstant simd;
+-            if (!ToSimdConstant<Float32x4>(cx, v, &simd))
+-                return false;
+-            memcpy(&exportArgs[i], simd.asFloat32x4(), Simd128DataSize);
+-            break;
+-          }
+-          case ValType::B8x16: {
+-            SimdConstant simd;
+-            if (!ToSimdConstant<Bool8x16>(cx, v, &simd))
+-                return false;
+-            // Bool8x16 uses the same representation as Int8x16.
+-            memcpy(&exportArgs[i], simd.asInt8x16(), Simd128DataSize);
+-            break;
+-          }
+-          case ValType::B16x8: {
+-            SimdConstant simd;
+-            if (!ToSimdConstant<Bool16x8>(cx, v, &simd))
+-                return false;
+-            // Bool16x8 uses the same representation as Int16x8.
+-            memcpy(&exportArgs[i], simd.asInt16x8(), Simd128DataSize);
+-            break;
+-          }
+-          case ValType::B32x4: {
+-            SimdConstant simd;
+-            if (!ToSimdConstant<Bool32x4>(cx, v, &simd))
+-                return false;
+-            // Bool32x4 uses the same representation as Int32x4.
+-            memcpy(&exportArgs[i], simd.asInt32x4(), Simd128DataSize);
+-            break;
+-          }
+         }
+     }
+ 
+     {
+         JitActivation activation(cx);
+ 
+         void* callee;
+         if (func.hasEagerStubs())
+@@ -933,51 +866,16 @@ Instance::callExport(JSContext* cx, uint
+       case ExprType::F64:
+         args.rval().set(NumberValue(*(double*)retAddr));
+         break;
+       case ExprType::Ref:
+       case ExprType::AnyRef:
+         retObj = *(JSObject**)retAddr;
+         expectsObject = true;
+         break;
+-      case ExprType::I8x16:
+-        retObj = CreateSimd<Int8x16>(cx, (int8_t*)retAddr);
+-        if (!retObj)
+-            return false;
+-        break;
+-      case ExprType::I16x8:
+-        retObj = CreateSimd<Int16x8>(cx, (int16_t*)retAddr);
+-        if (!retObj)
+-            return false;
+-        break;
+-      case ExprType::I32x4:
+-        retObj = CreateSimd<Int32x4>(cx, (int32_t*)retAddr);
+-        if (!retObj)
+-            return false;
+-        break;
+-      case ExprType::F32x4:
+-        retObj = CreateSimd<Float32x4>(cx, (float*)retAddr);
+-        if (!retObj)
+-            return false;
+-        break;
+-      case ExprType::B8x16:
+-        retObj = CreateSimd<Bool8x16>(cx, (int8_t*)retAddr);
+-        if (!retObj)
+-            return false;
+-        break;
+-      case ExprType::B16x8:
+-        retObj = CreateSimd<Bool16x8>(cx, (int16_t*)retAddr);
+-        if (!retObj)
+-            return false;
+-        break;
+-      case ExprType::B32x4:
+-        retObj = CreateSimd<Bool32x4>(cx, (int32_t*)retAddr);
+-        if (!retObj)
+-            return false;
+-        break;
+       case ExprType::Limit:
+         MOZ_CRASH("Limit");
+     }
+ 
+     if (expectsObject)
+         args.rval().set(ObjectOrNullValue(retObj));
+     else if (retObj)
+         args.rval().set(ObjectValue(*retObj));
+diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp
+--- a/js/src/wasm/WasmIonCompile.cpp
++++ b/js/src/wasm/WasmIonCompile.cpp
+@@ -218,40 +218,16 @@ class FunctionCompiler
+                 break;
+               case ValType::F64:
+                 ins = MConstant::New(alloc(), DoubleValue(0.0), MIRType::Double);
+                 break;
+               case ValType::Ref:
+               case ValType::AnyRef:
+                 MOZ_CRASH("ion support for ref/anyref value NYI");
+                 break;
+-              case ValType::I8x16:
+-                ins = MSimdConstant::New(alloc(), SimdConstant::SplatX16(0), MIRType::Int8x16);
+-                break;
+-              case ValType::I16x8:
+-                ins = MSimdConstant::New(alloc(), SimdConstant::SplatX8(0), MIRType::Int16x8);
+-                break;
+-              case ValType::I32x4:
+-                ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0), MIRType::Int32x4);
+-                break;
+-              case ValType::F32x4:
+-                ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0.f), MIRType::Float32x4);
+-                break;
+-              case ValType::B8x16:
+-                // Bool8x16 uses the same data layout as Int8x16.
+-                ins = MSimdConstant::New(alloc(), SimdConstant::SplatX16(0), MIRType::Bool8x16);
+-                break;
+-              case ValType::B16x8:
+-                // Bool16x8 uses the same data layout as Int16x8.
+-                ins = MSimdConstant::New(alloc(), SimdConstant::SplatX8(0), MIRType::Bool16x8);
+-                break;
+-              case ValType::B32x4:
+-                // Bool32x4 uses the same data layout as Int32x4.
+-                ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0), MIRType::Bool32x4);
+-                break;
+             }
+ 
+             curBlock_->add(ins);
+             curBlock_->initSlot(info().localSlot(i), ins);
+             if (!mirGen_.ensureBallast())
+                 return false;
+         }
+ 
+@@ -286,26 +262,16 @@ class FunctionCompiler
+             return nullptr;
+         return curBlock_->getSlot(info().localSlot(slot));
+     }
+ 
+     const ValTypeVector& locals() const { return locals_; }
+ 
+     /***************************** Code generation (after local scope setup) */
+ 
+-    MDefinition* constant(const SimdConstant& v, MIRType type)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-        MInstruction* constant;
+-        constant = MSimdConstant::New(alloc(), v, type);
+-        curBlock_->add(constant);
+-        return constant;
+-    }
+-
+     MDefinition* constant(const Value& v, MIRType type)
+     {
+         if (inDeadCode())
+             return nullptr;
+         MConstant* constant = MConstant::New(alloc(), v, type);
+         curBlock_->add(constant);
+         return constant;
+     }
+@@ -398,182 +364,16 @@ class FunctionCompiler
+         if (inDeadCode())
+             return nullptr;
+ 
+         auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
+         curBlock_->add(ins);
+         return ins;
+     }
+ 
+-    MDefinition* unarySimd(MDefinition* input, MSimdUnaryArith::Operation op, MIRType type)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(IsSimdType(input->type()) && input->type() == type);
+-        MInstruction* ins = MSimdUnaryArith::New(alloc(), input, op);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, MSimdBinaryArith::Operation op,
+-                            MIRType type)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
+-        MOZ_ASSERT(lhs->type() == type);
+-        return MSimdBinaryArith::AddLegalized(alloc(), curBlock_, lhs, rhs, op);
+-    }
+-
+-    MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, MSimdBinaryBitwise::Operation op,
+-                            MIRType type)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
+-        MOZ_ASSERT(lhs->type() == type);
+-        auto* ins = MSimdBinaryBitwise::New(alloc(), lhs, rhs, op);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    MDefinition* binarySimdComp(MDefinition* lhs, MDefinition* rhs, MSimdBinaryComp::Operation op,
+-                                SimdSign sign)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        return MSimdBinaryComp::AddLegalized(alloc(), curBlock_, lhs, rhs, op, sign);
+-    }
+-
+-    MDefinition* binarySimdSaturating(MDefinition* lhs, MDefinition* rhs,
+-                                      MSimdBinarySaturating::Operation op, SimdSign sign)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        auto* ins = MSimdBinarySaturating::New(alloc(), lhs, rhs, op, sign);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    MDefinition* binarySimdShift(MDefinition* lhs, MDefinition* rhs, MSimdShift::Operation op)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        return MSimdShift::AddLegalized(alloc(), curBlock_, lhs, rhs, op);
+-    }
+-
+-    MDefinition* swizzleSimd(MDefinition* vector, const uint8_t lanes[], MIRType type)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(vector->type() == type);
+-        MSimdSwizzle* ins = MSimdSwizzle::New(alloc(), vector, lanes);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    MDefinition* shuffleSimd(MDefinition* lhs, MDefinition* rhs, const uint8_t lanes[],
+-                             MIRType type)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(lhs->type() == type);
+-        MInstruction* ins = MSimdShuffle::New(alloc(), lhs, rhs, lanes);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    MDefinition* insertElementSimd(MDefinition* vec, MDefinition* val, unsigned lane, MIRType type)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(IsSimdType(vec->type()) && vec->type() == type);
+-        MOZ_ASSERT(SimdTypeToLaneArgumentType(vec->type()) == val->type());
+-        MSimdInsertElement* ins = MSimdInsertElement::New(alloc(), vec, val, lane);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    MDefinition* selectSimd(MDefinition* mask, MDefinition* lhs, MDefinition* rhs, MIRType type)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(IsSimdType(mask->type()));
+-        MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
+-        MOZ_ASSERT(lhs->type() == type);
+-        MSimdSelect* ins = MSimdSelect::New(alloc(), mask, lhs, rhs);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    MDefinition* simdAllTrue(MDefinition* boolVector)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MSimdAllTrue* ins = MSimdAllTrue::New(alloc(), boolVector, MIRType::Int32);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    MDefinition* simdAnyTrue(MDefinition* boolVector)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MSimdAnyTrue* ins = MSimdAnyTrue::New(alloc(), boolVector, MIRType::Int32);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    // fromXXXBits()
+-    MDefinition* bitcastSimd(MDefinition* vec, MIRType from, MIRType to)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(vec->type() == from);
+-        MOZ_ASSERT(IsSimdType(from) && IsSimdType(to) && from != to);
+-        auto* ins = MSimdReinterpretCast::New(alloc(), vec, to);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    // Int <--> Float conversions.
+-    MDefinition* convertSimd(MDefinition* vec, MIRType from, MIRType to, SimdSign sign)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(IsSimdType(from) && IsSimdType(to) && from != to);
+-        return MSimdConvert::AddLegalized(alloc(), curBlock_, vec, to, sign, bytecodeOffset());
+-    }
+-
+-    MDefinition* splatSimd(MDefinition* v, MIRType type)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(IsSimdType(type));
+-        MOZ_ASSERT(SimdTypeToLaneArgumentType(type) == v->type());
+-        MSimdSplat* ins = MSimdSplat::New(alloc(), v, type);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+     MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type, bool isMax) {
+         if (inDeadCode())
+             return nullptr;
+ 
+         if (mustPreserveNaN(type)) {
+             // Convert signaling NaN to quiet NaNs.
+             MDefinition* zero = constant(DoubleValue(0.0), type);
+             lhs = sub(lhs, zero, type);
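Every builder deleted above follows one template: return nullptr when emitting dead code, assert the MIR types line up, allocate the node, and append it to the current basic block. A sketch of that shape with an invented node class (MDemoNode is not a real MIR node):

    MDefinition*
    FunctionCompiler::demoUnaryOp(MDefinition* input)
    {
        if (inDeadCode())
            return nullptr;              // unreachable code: build nothing

        auto* ins = MDemoNode::New(alloc(), input);
        curBlock_->add(ins);             // append to the current basic block
        return ins;
    }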
+@@ -886,17 +686,17 @@ class FunctionCompiler
+ 
+     MDefinition* load(MDefinition* base, MemoryAccessDesc* access, ValType result)
+     {
+         if (inDeadCode())
+             return nullptr;
+ 
+         MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
+         MInstruction* load = nullptr;
+-        if (env_.isAsmJS() && !access->isAtomic() && !access->isSimd()) {
++        if (env_.isAsmJS() && !access->isAtomic()) {
+             MOZ_ASSERT(access->offset() == 0);
+             MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
+             load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit, access->type());
+         } else {
+             checkOffsetAndAlignmentAndBounds(access, &base);
+             load = MWasmLoad::New(alloc(), memoryBase, base, *access, ToMIRType(result));
+         }
+         if (!load)
+@@ -907,17 +707,17 @@ class FunctionCompiler
+ 
+     void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v)
+     {
+         if (inDeadCode())
+             return;
+ 
+         MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
+         MInstruction* store = nullptr;
+-        if (env_.isAsmJS() && !access->isAtomic() && !access->isSimd()) {
++        if (env_.isAsmJS() && !access->isAtomic()) {
+             MOZ_ASSERT(access->offset() == 0);
+             MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
+             store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
+                                          access->type(), v);
+         } else {
+             checkOffsetAndAlignmentAndBounds(access, &base);
+             store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
+         }
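Both memory paths now branch on the same two-term predicate; previously the asm.js fast path additionally required !access->isSimd(). Restated as a standalone sketch (isAtomic() is the real accessor used above):

    static bool
    TakesAsmJSHeapPath(bool isAsmJS, const MemoryAccessDesc& access)
    {
        // asm.js non-atomic accesses have offset 0 and are bounds checked
        // against an explicit limit; everything else takes the wasm path
        // with full offset/alignment/bounds checking.
        return isAsmJS && !access.isAtomic();
    }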
+@@ -1038,41 +838,16 @@ class FunctionCompiler
+ 
+     void addInterruptCheck()
+     {
+         if (inDeadCode())
+             return;
+         curBlock_->add(MWasmInterruptCheck::New(alloc(), tlsPointer_, bytecodeOffset()));
+     }
+ 
+-    MDefinition* extractSimdElement(unsigned lane, MDefinition* base, MIRType type, SimdSign sign)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(IsSimdType(base->type()));
+-        MOZ_ASSERT(!IsSimdType(type));
+-        auto* ins = MSimdExtractElement::New(alloc(), base, type, lane, sign);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+-    template<typename T>
+-    MDefinition* constructSimd(MDefinition* x, MDefinition* y, MDefinition* z, MDefinition* w,
+-                               MIRType type)
+-    {
+-        if (inDeadCode())
+-            return nullptr;
+-
+-        MOZ_ASSERT(IsSimdType(type));
+-        T* ins = T::New(alloc(), type, x, y, z, w);
+-        curBlock_->add(ins);
+-        return ins;
+-    }
+-
+     /***************************************************************** Calls */
+ 
+     // The IonMonkey backend maintains a single stack offset (from the stack
+     // pointer to the base of the frame) by adding the total amount of spill
+     // space required plus the maximum stack required for argument passing.
+     // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
+     // manually accumulate, for the entire function, the maximum required stack
+     // space for argument passing. (This is passed to the CodeGenerator via
+@@ -1878,93 +1653,16 @@ EmitF64Const(FunctionCompiler& f)
+     if (!f.iter().readF64Const(&f64))
+         return false;
+ 
+     f.iter().setResult(f.constant(f64));
+     return true;
+ }
+ 
+ static bool
+-EmitI8x16Const(FunctionCompiler& f)
+-{
+-    I8x16 i8x16;
+-    if (!f.iter().readI8x16Const(&i8x16))
+-        return false;
+-
+-    f.iter().setResult(f.constant(SimdConstant::CreateX16(i8x16), MIRType::Int8x16));
+-    return true;
+-}
+-
+-static bool
+-EmitI16x8Const(FunctionCompiler& f)
+-{
+-    I16x8 i16x8;
+-    if (!f.iter().readI16x8Const(&i16x8))
+-        return false;
+-
+-    f.iter().setResult(f.constant(SimdConstant::CreateX8(i16x8), MIRType::Int16x8));
+-    return true;
+-}
+-
+-static bool
+-EmitI32x4Const(FunctionCompiler& f)
+-{
+-    I32x4 i32x4;
+-    if (!f.iter().readI32x4Const(&i32x4))
+-        return false;
+-
+-    f.iter().setResult(f.constant(SimdConstant::CreateX4(i32x4), MIRType::Int32x4));
+-    return true;
+-}
+-
+-static bool
+-EmitF32x4Const(FunctionCompiler& f)
+-{
+-    F32x4 f32x4;
+-    if (!f.iter().readF32x4Const(&f32x4))
+-        return false;
+-
+-    f.iter().setResult(f.constant(SimdConstant::CreateX4(f32x4), MIRType::Float32x4));
+-    return true;
+-}
+-
+-static bool
+-EmitB8x16Const(FunctionCompiler& f)
+-{
+-    I8x16 i8x16;
+-    if (!f.iter().readB8x16Const(&i8x16))
+-        return false;
+-
+-    f.iter().setResult(f.constant(SimdConstant::CreateX16(i8x16), MIRType::Bool8x16));
+-    return true;
+-}
+-
+-static bool
+-EmitB16x8Const(FunctionCompiler& f)
+-{
+-    I16x8 i16x8;
+-    if (!f.iter().readB16x8Const(&i16x8))
+-        return false;
+-
+-    f.iter().setResult(f.constant(SimdConstant::CreateX8(i16x8), MIRType::Bool16x8));
+-    return true;
+-}
+-
+-static bool
+-EmitB32x4Const(FunctionCompiler& f)
+-{
+-    I32x4 i32x4;
+-    if (!f.iter().readB32x4Const(&i32x4))
+-        return false;
+-
+-    f.iter().setResult(f.constant(SimdConstant::CreateX4(i32x4), MIRType::Bool32x4));
+-    return true;
+-}
+-
+-static bool
+ EmitBlock(FunctionCompiler& f)
+ {
+     return f.iter().readBlock() &&
+            f.startBlock();
+ }
+ 
+ static bool
+ EmitLoop(FunctionCompiler& f)
+@@ -2315,28 +2013,16 @@ EmitGetGlobal(FunctionCompiler& f)
+         result = f.constant(int64_t(value.i64()));
+         break;
+       case ValType::F32:
+         result = f.constant(value.f32());
+         break;
+       case ValType::F64:
+         result = f.constant(value.f64());
+         break;
+-      case ValType::I8x16:
+-        result = f.constant(SimdConstant::CreateX16(value.i8x16()), mirType);
+-        break;
+-      case ValType::I16x8:
+-        result = f.constant(SimdConstant::CreateX8(value.i16x8()), mirType);
+-        break;
+-      case ValType::I32x4:
+-        result = f.constant(SimdConstant::CreateX4(value.i32x4()), mirType);
+-        break;
+-      case ValType::F32x4:
+-        result = f.constant(SimdConstant::CreateX4(value.f32x4()), mirType);
+-        break;
+       default:
+         MOZ_CRASH("unexpected type in EmitGetGlobal");
+     }
+ 
+     f.iter().setResult(result);
+     return true;
+ }
+ 
+@@ -2788,17 +2474,17 @@ static bool
+ EmitOldAtomicsLoad(FunctionCompiler& f)
+ {
+     LinearMemoryAddress<MDefinition*> addr;
+     Scalar::Type viewType;
+     if (!f.iter().readOldAtomicLoad(&addr, &viewType))
+         return false;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Load());
++                            Synchronization::Load());
+ 
+     auto* ins = f.load(addr.base, &access, ValType::I32);
+     if (!f.inDeadCode() && !ins)
+         return false;
+ 
+     f.iter().setResult(ins);
+     return true;
+ }
+@@ -2808,17 +2494,17 @@ EmitOldAtomicsStore(FunctionCompiler& f)
+ {
+     LinearMemoryAddress<MDefinition*> addr;
+     Scalar::Type viewType;
+     MDefinition* value;
+     if (!f.iter().readOldAtomicStore(&addr, &viewType, &value))
+         return false;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Store());
++                            Synchronization::Store());
+ 
+     f.store(addr.base, &access, value);
+     f.iter().setResult(value);
+     return true;
+ }
+ 
+ static bool
+ EmitOldAtomicsBinOp(FunctionCompiler& f)
+@@ -2826,17 +2512,17 @@ EmitOldAtomicsBinOp(FunctionCompiler& f)
+     LinearMemoryAddress<MDefinition*> addr;
+     Scalar::Type viewType;
+     AtomicOp op;
+     MDefinition* value;
+     if (!f.iter().readOldAtomicBinOp(&addr, &viewType, &op, &value))
+         return false;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Full());
++                            Synchronization::Full());
+ 
+     auto* ins = f.atomicBinopHeap(op, addr.base, &access, ValType::I32, value);
+     if (!f.inDeadCode() && !ins)
+         return false;
+ 
+     f.iter().setResult(ins);
+     return true;
+ }
+@@ -2847,17 +2533,17 @@ EmitOldAtomicsCompareExchange(FunctionCo
+     LinearMemoryAddress<MDefinition*> addr;
+     Scalar::Type viewType;
+     MDefinition* oldValue;
+     MDefinition* newValue;
+     if (!f.iter().readOldAtomicCompareExchange(&addr, &viewType, &oldValue, &newValue))
+         return false;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Full());
++                            Synchronization::Full());
+ 
+     auto* ins = f.atomicCompareExchangeHeap(addr.base, &access, ValType::I32, oldValue, newValue);
+     if (!f.inDeadCode() && !ins)
+         return false;
+ 
+     f.iter().setResult(ins);
+     return true;
+ }
+@@ -2867,510 +2553,27 @@ EmitOldAtomicsExchange(FunctionCompiler&
+ {
+     LinearMemoryAddress<MDefinition*> addr;
+     Scalar::Type viewType;
+     MDefinition* value;
+     if (!f.iter().readOldAtomicExchange(&addr, &viewType, &value))
+         return false;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Full());
++                            Synchronization::Full());
+ 
+     auto* ins = f.atomicExchangeHeap(addr.base, &access, ValType::I32, value);
+     if (!f.inDeadCode() && !ins)
+         return false;
+ 
+     f.iter().setResult(ins);
+     return true;
+ }
+ 
+ static bool
+-EmitSimdUnary(FunctionCompiler& f, ValType type, SimdOperation simdOp)
+-{
+-    MSimdUnaryArith::Operation op;
+-    switch (simdOp) {
+-      case SimdOperation::Fn_abs:
+-        op = MSimdUnaryArith::abs;
+-        break;
+-      case SimdOperation::Fn_neg:
+-        op = MSimdUnaryArith::neg;
+-        break;
+-      case SimdOperation::Fn_not:
+-        op = MSimdUnaryArith::not_;
+-        break;
+-      case SimdOperation::Fn_sqrt:
+-        op = MSimdUnaryArith::sqrt;
+-        break;
+-      case SimdOperation::Fn_reciprocalApproximation:
+-        op = MSimdUnaryArith::reciprocalApproximation;
+-        break;
+-      case SimdOperation::Fn_reciprocalSqrtApproximation:
+-        op = MSimdUnaryArith::reciprocalSqrtApproximation;
+-        break;
+-      default:
+-        MOZ_CRASH("not a simd unary arithmetic operation");
+-    }
+-
+-    MDefinition* input;
+-    if (!f.iter().readUnary(type, &input))
+-        return false;
+-
+-    f.iter().setResult(f.unarySimd(input, op, ToMIRType(type)));
+-    return true;
+-}
+-
+-template<class OpKind>
+-inline bool
+-EmitSimdBinary(FunctionCompiler& f, ValType type, OpKind op)
+-{
+-    MDefinition* lhs;
+-    MDefinition* rhs;
+-    if (!f.iter().readBinary(type, &lhs, &rhs))
+-        return false;
+-
+-    f.iter().setResult(f.binarySimd(lhs, rhs, op, ToMIRType(type)));
+-    return true;
+-}
+-
+-static bool
+-EmitSimdBinaryComp(FunctionCompiler& f, ValType operandType, MSimdBinaryComp::Operation op,
+-                   SimdSign sign)
+-{
+-    MDefinition* lhs;
+-    MDefinition* rhs;
+-    if (!f.iter().readSimdComparison(operandType, &lhs, &rhs))
+-        return false;
+-
+-    f.iter().setResult(f.binarySimdComp(lhs, rhs, op, sign));
+-    return true;
+-}
+-
+-static bool
+-EmitSimdBinarySaturating(FunctionCompiler& f, ValType type, MSimdBinarySaturating::Operation op,
+-                         SimdSign sign)
+-{
+-    MDefinition* lhs;
+-    MDefinition* rhs;
+-    if (!f.iter().readBinary(type, &lhs, &rhs))
+-        return false;
+-
+-    f.iter().setResult(f.binarySimdSaturating(lhs, rhs, op, sign));
+-    return true;
+-}
+-
+-static bool
+-EmitSimdShift(FunctionCompiler& f, ValType operandType, MSimdShift::Operation op)
+-{
+-    MDefinition* lhs;
+-    MDefinition* rhs;
+-    if (!f.iter().readSimdShiftByScalar(operandType, &lhs, &rhs))
+-        return false;
+-
+-    f.iter().setResult(f.binarySimdShift(lhs, rhs, op));
+-    return true;
+-}
+-
+-static ValType
+-SimdToLaneType(ValType type)
+-{
+-    switch (type.code()) {
+-      case ValType::I8x16:
+-      case ValType::I16x8:
+-      case ValType::I32x4:  return ValType::I32;
+-      case ValType::F32x4:  return ValType::F32;
+-      case ValType::B8x16:
+-      case ValType::B16x8:
+-      case ValType::B32x4:  return ValType::I32; // Boolean lanes are Int32 in asm.
+-      case ValType::I32:
+-      case ValType::I64:
+-      case ValType::F32:
+-      case ValType::F64:
+-      case ValType::Ref:
+-      case ValType::AnyRef:
+-        break;
+-    }
+-    MOZ_CRASH("bad simd type");
+-}
+-
+-static bool
+-EmitExtractLane(FunctionCompiler& f, ValType operandType, SimdSign sign)
+-{
+-    uint8_t lane;
+-    MDefinition* vector;
+-    if (!f.iter().readExtractLane(operandType, &lane, &vector))
+-        return false;
+-
+-    f.iter().setResult(f.extractSimdElement(lane, vector,
+-                                            ToMIRType(SimdToLaneType(operandType)), sign));
+-    return true;
+-}
+-
+-// Emit an I32 expression and then convert it to a boolean SIMD lane value, i.e. -1 or 0.
+-static MDefinition*
+-EmitSimdBooleanLaneExpr(FunctionCompiler& f, MDefinition* i32)
+-{
+-    // Compute !i32 - 1 to force the value range into {0, -1}.
+-    MDefinition* noti32 = f.unary<MNot>(i32);
+-    return f.binary<MSub>(noti32, f.constant(Int32Value(1), MIRType::Int32), MIRType::Int32);
+-}
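// Worked example of the !i32 - 1 trick above: for i32 == 0, !0 == 1 and
// 1 - 1 == 0 (a false lane); for any nonzero i32, !i32 == 0 and 0 - 1 == -1
// (a true lane). Every int32 therefore collapses onto the boolean lane
// values {0, -1} without a branch.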
+-
+-static bool
+-EmitSimdReplaceLane(FunctionCompiler& f, ValType simdType)
+-{
+-    if (IsSimdBoolType(simdType))
+-        f.iter().setResult(EmitSimdBooleanLaneExpr(f, f.iter().getResult()));
+-
+-    uint8_t lane;
+-    MDefinition* vector;
+-    MDefinition* scalar;
+-    if (!f.iter().readReplaceLane(simdType, &lane, &vector, &scalar))
+-        return false;
+-
+-    f.iter().setResult(f.insertElementSimd(vector, scalar, lane, ToMIRType(simdType)));
+-    return true;
+-}
+-
+-inline bool
+-EmitSimdBitcast(FunctionCompiler& f, ValType fromType, ValType toType)
+-{
+-    MDefinition* input;
+-    if (!f.iter().readConversion(fromType, toType, &input))
+-        return false;
+-
+-    f.iter().setResult(f.bitcastSimd(input, ToMIRType(fromType), ToMIRType(toType)));
+-    return true;
+-}
+-
+-inline bool
+-EmitSimdConvert(FunctionCompiler& f, ValType fromType, ValType toType, SimdSign sign)
+-{
+-    MDefinition* input;
+-    if (!f.iter().readConversion(fromType, toType, &input))
+-        return false;
+-
+-    f.iter().setResult(f.convertSimd(input, ToMIRType(fromType), ToMIRType(toType), sign));
+-    return true;
+-}
+-
+-static bool
+-EmitSimdSwizzle(FunctionCompiler& f, ValType simdType)
+-{
+-    uint8_t lanes[16];
+-    MDefinition* vector;
+-    if (!f.iter().readSwizzle(simdType, &lanes, &vector))
+-        return false;
+-
+-    f.iter().setResult(f.swizzleSimd(vector, lanes, ToMIRType(simdType)));
+-    return true;
+-}
+-
+-static bool
+-EmitSimdShuffle(FunctionCompiler& f, ValType simdType)
+-{
+-    uint8_t lanes[16];
+-    MDefinition* lhs;
+-    MDefinition* rhs;
+-    if (!f.iter().readShuffle(simdType, &lanes, &lhs, &rhs))
+-        return false;
+-
+-    f.iter().setResult(f.shuffleSimd(lhs, rhs, lanes, ToMIRType(simdType)));
+-    return true;
+-}
+-
+-static inline Scalar::Type
+-SimdExprTypeToViewType(ValType type, unsigned* defaultNumElems)
+-{
+-    switch (type.code()) {
+-        case ValType::I8x16: *defaultNumElems = 16; return Scalar::Int8x16;
+-        case ValType::I16x8: *defaultNumElems = 8; return Scalar::Int16x8;
+-        case ValType::I32x4: *defaultNumElems = 4; return Scalar::Int32x4;
+-        case ValType::F32x4: *defaultNumElems = 4; return Scalar::Float32x4;
+-        default:              break;
+-    }
+-    MOZ_CRASH("type not handled in SimdExprTypeToViewType");
+-}
+-
+-static bool
+-EmitSimdLoad(FunctionCompiler& f, ValType resultType, unsigned numElems)
+-{
+-    unsigned defaultNumElems;
+-    Scalar::Type viewType = SimdExprTypeToViewType(resultType, &defaultNumElems);
+-
+-    if (!numElems)
+-        numElems = defaultNumElems;
+-
+-    LinearMemoryAddress<MDefinition*> addr;
+-    if (!f.iter().readLoad(resultType, Scalar::byteSize(viewType), &addr))
+-        return false;
+-
+-    MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(), numElems);
+-
+-    auto* ins = f.load(addr.base, &access, resultType);
+-    if (!f.inDeadCode() && !ins)
+-        return false;
+-
+-    f.iter().setResult(ins);
+-    return true;
+-}
+-
+-static bool
+-EmitSimdStore(FunctionCompiler& f, ValType resultType, unsigned numElems)
+-{
+-    unsigned defaultNumElems;
+-    Scalar::Type viewType = SimdExprTypeToViewType(resultType, &defaultNumElems);
+-
+-    if (!numElems)
+-        numElems = defaultNumElems;
+-
+-    LinearMemoryAddress<MDefinition*> addr;
+-    MDefinition* value;
+-    if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr, &value))
+-        return false;
+-
+-    MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(), numElems);
+-
+-    f.store(addr.base, &access, value);
+-    return true;
+-}
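// Note on numElems in the two functions above: callers pass 0 to mean
// "whole vector", which defaultNumElems turns into the type's lane count
// (16, 8, or 4); the partial load1/load2 and store1/store2 forms pass 1 or
// 2 to transfer only the first lane(s).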
+-
+-static bool
+-EmitSimdSelect(FunctionCompiler& f, ValType simdType)
+-{
+-    MDefinition* trueValue;
+-    MDefinition* falseValue;
+-    MDefinition* condition;
+-    if (!f.iter().readSimdSelect(simdType, &trueValue, &falseValue, &condition))
+-        return false;
+-
+-    f.iter().setResult(f.selectSimd(condition, trueValue, falseValue,
+-                                    ToMIRType(simdType)));
+-    return true;
+-}
+-
+-static bool
+-EmitSimdAllTrue(FunctionCompiler& f, ValType operandType)
+-{
+-    MDefinition* input;
+-    if (!f.iter().readSimdBooleanReduction(operandType, &input))
+-        return false;
+-
+-    f.iter().setResult(f.simdAllTrue(input));
+-    return true;
+-}
+-
+-static bool
+-EmitSimdAnyTrue(FunctionCompiler& f, ValType operandType)
+-{
+-    MDefinition* input;
+-    if (!f.iter().readSimdBooleanReduction(operandType, &input))
+-        return false;
+-
+-    f.iter().setResult(f.simdAnyTrue(input));
+-    return true;
+-}
+-
+-static bool
+-EmitSimdSplat(FunctionCompiler& f, ValType simdType)
+-{
+-    if (IsSimdBoolType(simdType))
+-        f.iter().setResult(EmitSimdBooleanLaneExpr(f, f.iter().getResult()));
+-
+-    MDefinition* input;
+-    if (!f.iter().readSplat(simdType, &input))
+-        return false;
+-
+-    f.iter().setResult(f.splatSimd(input, ToMIRType(simdType)));
+-    return true;
+-}
+-
+-// Build a SIMD vector by inserting lanes one at a time into an initial constant.
+-static bool
+-EmitSimdChainedCtor(FunctionCompiler& f, ValType valType, MIRType type, const SimdConstant& init)
+-{
+-    const unsigned length = SimdTypeToLength(type);
+-
+-    DefVector args;
+-    if (!f.iter().readSimdCtor(ValType::I32, length, valType, &args))
+-        return false;
+-
+-    MDefinition* val = f.constant(init, type);
+-    for (unsigned i = 0; i < length; i++)
+-        val = f.insertElementSimd(val, args[i], i, type);
+-
+-    f.iter().setResult(val);
+-    return true;
+-}
+-
+-// Build a boolean SIMD vector by inserting lanes one at a time into an initial constant.
+-static bool
+-EmitSimdBooleanChainedCtor(FunctionCompiler& f, ValType valType, MIRType type,
+-                           const SimdConstant& init)
+-{
+-    const unsigned length = SimdTypeToLength(type);
+-
+-    DefVector args;
+-    if (!f.iter().readSimdCtor(ValType::I32, length, valType, &args))
+-        return false;
+-
+-    MDefinition* val = f.constant(init, type);
+-    for (unsigned i = 0; i < length; i++)
+-        val = f.insertElementSimd(val, EmitSimdBooleanLaneExpr(f, args[i]), i, type);
+-
+-    f.iter().setResult(val);
+-    return true;
+-}
+-
+-static bool
+-EmitSimdCtor(FunctionCompiler& f, ValType type)
+-{
+-    switch (type.code()) {
+-      case ValType::I8x16:
+-        return EmitSimdChainedCtor(f, type, MIRType::Int8x16, SimdConstant::SplatX16(0));
+-      case ValType::I16x8:
+-        return EmitSimdChainedCtor(f, type, MIRType::Int16x8, SimdConstant::SplatX8(0));
+-      case ValType::I32x4: {
+-        DefVector args;
+-        if (!f.iter().readSimdCtor(ValType::I32, 4, type, &args))
+-            return false;
+-
+-        f.iter().setResult(f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3],
+-                                                         MIRType::Int32x4));
+-        return true;
+-      }
+-      case ValType::F32x4: {
+-        DefVector args;
+-        if (!f.iter().readSimdCtor(ValType::F32, 4, type, &args))
+-            return false;
+-
+-        f.iter().setResult(f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3],
+-                           MIRType::Float32x4));
+-        return true;
+-      }
+-      case ValType::B8x16:
+-        return EmitSimdBooleanChainedCtor(f, type, MIRType::Bool8x16, SimdConstant::SplatX16(0));
+-      case ValType::B16x8:
+-        return EmitSimdBooleanChainedCtor(f, type, MIRType::Bool16x8, SimdConstant::SplatX8(0));
+-      case ValType::B32x4: {
+-        DefVector args;
+-        if (!f.iter().readSimdCtor(ValType::I32, 4, type, &args))
+-            return false;
+-
+-        MOZ_ASSERT(args.length() == 4);
+-        for (unsigned i = 0; i < 4; i++)
+-            args[i] = EmitSimdBooleanLaneExpr(f, args[i]);
+-
+-        f.iter().setResult(f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3],
+-                           MIRType::Bool32x4));
+-        return true;
+-      }
+-      case ValType::I32:
+-      case ValType::I64:
+-      case ValType::F32:
+-      case ValType::F64:
+-      case ValType::Ref:
+-      case ValType::AnyRef:
+-        break;
+-    }
+-    MOZ_CRASH("unexpected SIMD type");
+-}
+-
+-static bool
+-EmitSimdOp(FunctionCompiler& f, ValType type, SimdOperation op, SimdSign sign)
+-{
+-    switch (op) {
+-      case SimdOperation::Constructor:
+-        return EmitSimdCtor(f, type);
+-      case SimdOperation::Fn_extractLane:
+-        return EmitExtractLane(f, type, sign);
+-      case SimdOperation::Fn_replaceLane:
+-        return EmitSimdReplaceLane(f, type);
+-      case SimdOperation::Fn_check:
+-        MOZ_CRASH("only used in asm.js' type system");
+-      case SimdOperation::Fn_splat:
+-        return EmitSimdSplat(f, type);
+-      case SimdOperation::Fn_select:
+-        return EmitSimdSelect(f, type);
+-      case SimdOperation::Fn_swizzle:
+-        return EmitSimdSwizzle(f, type);
+-      case SimdOperation::Fn_shuffle:
+-        return EmitSimdShuffle(f, type);
+-      case SimdOperation::Fn_load:
+-        return EmitSimdLoad(f, type, 0);
+-      case SimdOperation::Fn_load1:
+-        return EmitSimdLoad(f, type, 1);
+-      case SimdOperation::Fn_load2:
+-        return EmitSimdLoad(f, type, 2);
+-      case SimdOperation::Fn_store:
+-        return EmitSimdStore(f, type, 0);
+-      case SimdOperation::Fn_store1:
+-        return EmitSimdStore(f, type, 1);
+-      case SimdOperation::Fn_store2:
+-        return EmitSimdStore(f, type, 2);
+-      case SimdOperation::Fn_allTrue:
+-        return EmitSimdAllTrue(f, type);
+-      case SimdOperation::Fn_anyTrue:
+-        return EmitSimdAnyTrue(f, type);
+-      case SimdOperation::Fn_abs:
+-      case SimdOperation::Fn_neg:
+-      case SimdOperation::Fn_not:
+-      case SimdOperation::Fn_sqrt:
+-      case SimdOperation::Fn_reciprocalApproximation:
+-      case SimdOperation::Fn_reciprocalSqrtApproximation:
+-        return EmitSimdUnary(f, type, op);
+-      case SimdOperation::Fn_shiftLeftByScalar:
+-        return EmitSimdShift(f, type, MSimdShift::lsh);
+-      case SimdOperation::Fn_shiftRightByScalar:
+-        return EmitSimdShift(f, type, MSimdShift::rshForSign(sign));
+-#define _CASE(OP) \
+-      case SimdOperation::Fn_##OP: \
+-        return EmitSimdBinaryComp(f, type, MSimdBinaryComp::OP, sign);
+-        FOREACH_COMP_SIMD_OP(_CASE)
+-#undef _CASE
+-      case SimdOperation::Fn_and:
+-        return EmitSimdBinary(f, type, MSimdBinaryBitwise::and_);
+-      case SimdOperation::Fn_or:
+-        return EmitSimdBinary(f, type, MSimdBinaryBitwise::or_);
+-      case SimdOperation::Fn_xor:
+-        return EmitSimdBinary(f, type, MSimdBinaryBitwise::xor_);
+-#define _CASE(OP) \
+-      case SimdOperation::Fn_##OP: \
+-        return EmitSimdBinary(f, type, MSimdBinaryArith::Op_##OP);
+-      FOREACH_NUMERIC_SIMD_BINOP(_CASE)
+-      FOREACH_FLOAT_SIMD_BINOP(_CASE)
+-#undef _CASE
+-      case SimdOperation::Fn_addSaturate:
+-        return EmitSimdBinarySaturating(f, type, MSimdBinarySaturating::add, sign);
+-      case SimdOperation::Fn_subSaturate:
+-        return EmitSimdBinarySaturating(f, type, MSimdBinarySaturating::sub, sign);
+-      case SimdOperation::Fn_fromFloat32x4:
+-        return EmitSimdConvert(f, ValType::F32x4, type, sign);
+-      case SimdOperation::Fn_fromInt32x4:
+-        return EmitSimdConvert(f, ValType::I32x4, type, SimdSign::Signed);
+-      case SimdOperation::Fn_fromUint32x4:
+-        return EmitSimdConvert(f, ValType::I32x4, type, SimdSign::Unsigned);
+-      case SimdOperation::Fn_fromInt8x16Bits:
+-      case SimdOperation::Fn_fromUint8x16Bits:
+-        return EmitSimdBitcast(f, ValType::I8x16, type);
+-      case SimdOperation::Fn_fromUint16x8Bits:
+-      case SimdOperation::Fn_fromInt16x8Bits:
+-        return EmitSimdBitcast(f, ValType::I16x8, type);
+-      case SimdOperation::Fn_fromInt32x4Bits:
+-      case SimdOperation::Fn_fromUint32x4Bits:
+-        return EmitSimdBitcast(f, ValType::I32x4, type);
+-      case SimdOperation::Fn_fromFloat32x4Bits:
+-        return EmitSimdBitcast(f, ValType::F32x4, type);
+-      case SimdOperation::Fn_load3:
+-      case SimdOperation::Fn_store3:
+-      case SimdOperation::Fn_fromFloat64x2Bits:
+-        MOZ_CRASH("NYI");
+-    }
+-    MOZ_CRASH("unexpected opcode");
+-}
+-
+-static bool
+ EmitGrowMemory(FunctionCompiler& f)
+ {
+     uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+ 
+     CallCompileState args(f, lineOrBytecode);
+     if (!f.startCall(&args))
+         return false;
+ 
+@@ -3427,34 +2630,34 @@ EmitAtomicCmpXchg(FunctionCompiler& f, V
+ {
+     LinearMemoryAddress<MDefinition*> addr;
+     MDefinition* oldValue;
+     MDefinition* newValue;
+     if (!f.iter().readAtomicCmpXchg(&addr, type, byteSize(viewType), &oldValue, &newValue))
+         return false;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Full());
++                            Synchronization::Full());
+     auto* ins = f.atomicCompareExchangeHeap(addr.base, &access, type, oldValue, newValue);
+     if (!f.inDeadCode() && !ins)
+         return false;
+ 
+     f.iter().setResult(ins);
+     return true;
+ }
+ 
+ static bool
+ EmitAtomicLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType)
+ {
+     LinearMemoryAddress<MDefinition*> addr;
+     if (!f.iter().readAtomicLoad(&addr, type, byteSize(viewType)))
+         return false;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Load());
++                            Synchronization::Load());
+     auto* ins = f.load(addr.base, &access, type);
+     if (!f.inDeadCode() && !ins)
+         return false;
+ 
+     f.iter().setResult(ins);
+     return true;
+ }
+ 
+@@ -3462,17 +2665,17 @@ static bool
+ EmitAtomicRMW(FunctionCompiler& f, ValType type, Scalar::Type viewType, jit::AtomicOp op)
+ {
+     LinearMemoryAddress<MDefinition*> addr;
+     MDefinition* value;
+     if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value))
+         return false;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Full());
++                            Synchronization::Full());
+     auto* ins = f.atomicBinopHeap(op, addr.base, &access, type, value);
+     if (!f.inDeadCode() && !ins)
+         return false;
+ 
+     f.iter().setResult(ins);
+     return true;
+ }
+ 
+@@ -3480,17 +2683,17 @@ static bool
+ EmitAtomicStore(FunctionCompiler& f, ValType type, Scalar::Type viewType)
+ {
+     LinearMemoryAddress<MDefinition*> addr;
+     MDefinition* value;
+     if (!f.iter().readAtomicStore(&addr, type, byteSize(viewType), &value))
+         return false;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Store());
++                            Synchronization::Store());
+     f.store(addr.base, &access, value);
+     return true;
+ }
+ 
+ static bool
+ EmitWait(FunctionCompiler& f, ValType type, uint32_t byteSize)
+ {
+     uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+@@ -3584,17 +2787,17 @@ static bool
+ EmitAtomicXchg(FunctionCompiler& f, ValType type, Scalar::Type viewType)
+ {
+     LinearMemoryAddress<MDefinition*> addr;
+     MDefinition* value;
+     if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value))
+         return false;
+ 
+     MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+-                            /*numSimdExprs=*/ 0, Synchronization::Full());
++                            Synchronization::Full());
+     MDefinition* ins = f.atomicExchangeHeap(addr.base, &access, type, value);
+     if (!f.inDeadCode() && !ins)
+         return false;
+ 
+     f.iter().setResult(ins);
+     return true;
+ }
+ 
+@@ -4338,112 +3541,16 @@ EmitBodyExprs(FunctionCompiler& f)
+                 CHECK(EmitOldAtomicsStore(f));
+               case uint16_t(MozOp::I32AtomicsBinOp):
+                 CHECK(EmitOldAtomicsBinOp(f));
+               case uint16_t(MozOp::I32AtomicsCompareExchange):
+                 CHECK(EmitOldAtomicsCompareExchange(f));
+               case uint16_t(MozOp::I32AtomicsExchange):
+                 CHECK(EmitOldAtomicsExchange(f));
+ 
+-              // SIMD
+-#define CASE(TYPE, OP, SIGN)                                          \
+-              case uint16_t(MozOp::TYPE##OP):                         \
+-                CHECK(EmitSimdOp(f, ValType::TYPE, SimdOperation::Fn_##OP, SIGN));
+-#define I8x16CASE(OP) CASE(I8x16, OP, SimdSign::Signed)
+-#define I16x8CASE(OP) CASE(I16x8, OP, SimdSign::Signed)
+-#define I32x4CASE(OP) CASE(I32x4, OP, SimdSign::Signed)
+-#define F32x4CASE(OP) CASE(F32x4, OP, SimdSign::NotApplicable)
+-#define B8x16CASE(OP) CASE(B8x16, OP, SimdSign::NotApplicable)
+-#define B16x8CASE(OP) CASE(B16x8, OP, SimdSign::NotApplicable)
+-#define B32x4CASE(OP) CASE(B32x4, OP, SimdSign::NotApplicable)
+-#define ENUMERATE(TYPE, FORALL, DO)                                   \
+-              case uint16_t(MozOp::TYPE##Constructor):                \
+-                CHECK(EmitSimdOp(f, ValType::TYPE, SimdOperation::Constructor, SimdSign::NotApplicable)); \
+-                FORALL(DO)
+-
+-              ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
+-              ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
+-              ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
+-              ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
+-              ENUMERATE(B8x16, FORALL_BOOL_SIMD_OP, B8x16CASE)
+-              ENUMERATE(B16x8, FORALL_BOOL_SIMD_OP, B16x8CASE)
+-              ENUMERATE(B32x4, FORALL_BOOL_SIMD_OP, B32x4CASE)
+-
+-#undef CASE
+-#undef I8x16CASE
+-#undef I16x8CASE
+-#undef I32x4CASE
+-#undef F32x4CASE
+-#undef B8x16CASE
+-#undef B16x8CASE
+-#undef B32x4CASE
+-#undef ENUMERATE
+-
+-              case uint16_t(MozOp::I8x16Const):
+-                CHECK(EmitI8x16Const(f));
+-              case uint16_t(MozOp::I16x8Const):
+-                CHECK(EmitI16x8Const(f));
+-              case uint16_t(MozOp::I32x4Const):
+-                CHECK(EmitI32x4Const(f));
+-              case uint16_t(MozOp::F32x4Const):
+-                CHECK(EmitF32x4Const(f));
+-              case uint16_t(MozOp::B8x16Const):
+-                CHECK(EmitB8x16Const(f));
+-              case uint16_t(MozOp::B16x8Const):
+-                CHECK(EmitB16x8Const(f));
+-              case uint16_t(MozOp::B32x4Const):
+-                CHECK(EmitB32x4Const(f));
+-
+-              case uint16_t(MozOp::I8x16addSaturateU):
+-                CHECK(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_addSaturate, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I8x16subSaturateU):
+-                CHECK(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_subSaturate, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I8x16shiftRightByScalarU):
+-                CHECK(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I8x16lessThanU):
+-                CHECK(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_lessThan, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I8x16lessThanOrEqualU):
+-                CHECK(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I8x16greaterThanU):
+-                CHECK(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_greaterThan, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I8x16greaterThanOrEqualU):
+-                CHECK(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I8x16extractLaneU):
+-                CHECK(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_extractLane, SimdSign::Unsigned));
+-
+-              case uint16_t(MozOp::I16x8addSaturateU):
+-                CHECK(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_addSaturate, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I16x8subSaturateU):
+-                CHECK(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_subSaturate, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I16x8shiftRightByScalarU):
+-                CHECK(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I16x8lessThanU):
+-                CHECK(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_lessThan, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I16x8lessThanOrEqualU):
+-                CHECK(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I16x8greaterThanU):
+-                CHECK(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_greaterThan, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I16x8greaterThanOrEqualU):
+-                CHECK(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I16x8extractLaneU):
+-                CHECK(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_extractLane, SimdSign::Unsigned));
+-
+-              case uint16_t(MozOp::I32x4shiftRightByScalarU):
+-                CHECK(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I32x4lessThanU):
+-                CHECK(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThan, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I32x4lessThanOrEqualU):
+-                CHECK(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I32x4greaterThanU):
+-                CHECK(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThan, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I32x4greaterThanOrEqualU):
+-                CHECK(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned));
+-              case uint16_t(MozOp::I32x4fromFloat32x4U):
+-                CHECK(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_fromFloat32x4, SimdSign::Unsigned));
+-
+               default:
+                 return f.iter().unrecognizedOpcode(&op);
+             }
+             break;
+           }
+ 
+           default:
+             return f.iter().unrecognizedOpcode(&op);
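
A note for readers of the opcode table deleted above: each case looks as if it falls through, but the CHECK(...) macro supplies both the failure return and the break, so every opcode is handled exactly once. A minimal compilable sketch of that idiom, with invented opcode values and emitter names rather than the real WasmIonCompile interfaces:

    #include <cstdint>

    static bool EmitNop() { return true; }    // stand-in per-opcode emitter
    static bool EmitDrop() { return true; }   // stand-in per-opcode emitter

    // Evaluate an emitter, propagate failure, then break out of the switch.
    // This mirrors the shape of the CHECK macro used by EmitBodyExprs.
    #define CHECK(c) if (!(c)) return false; break

    static bool EmitBodyExpr(uint16_t op) {
        switch (op) {
          case 0x01: CHECK(EmitNop());
          case 0x1a: CHECK(EmitDrop());
          default:   return false;            // unrecognized opcode
        }
        return true;                          // common success path
    }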
+diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
+--- a/js/src/wasm/WasmJS.cpp
++++ b/js/src/wasm/WasmJS.cpp
+@@ -40,26 +40,16 @@
+ #include "wasm/WasmSignalHandlers.h"
+ #include "wasm/WasmStubs.h"
+ #include "wasm/WasmValidate.h"
+ 
+ #include "vm/ArrayBufferObject-inl.h"
+ #include "vm/JSObject-inl.h"
+ #include "vm/NativeObject-inl.h"
+ 
+-#define WASM_CRASH_IF_SIMD_TYPES \
+-    case ValType::I8x16: \
+-    case ValType::B8x16: \
+-    case ValType::I16x8: \
+-    case ValType::B16x8: \
+-    case ValType::I32x4: \
+-    case ValType::B32x4: \
+-    case ValType::F32x4: \
+-      MOZ_CRASH("unexpected SIMD type")
+-
+ using namespace js;
+ using namespace js::jit;
+ using namespace js::wasm;
+ 
+ using mozilla::CheckedInt;
+ using mozilla::Nothing;
+ using mozilla::RangedPtr;
+ 
+@@ -151,17 +141,16 @@ wasm::ToWebAssemblyValue(JSContext* cx, 
+             JSObject* obj = ToObject(cx, v);
+             if (!obj)
+                 return false;
+             MOZ_ASSERT(obj->compartment() == cx->compartment());
+             val.set(Val(obj));
+         }
+         return true;
+       }
+-      WASM_CRASH_IF_SIMD_TYPES;
+       case ValType::Ref:
+       case ValType::I64: {
+         break;
+       }
+     }
+     MOZ_CRASH("unexpected import value type, caller must guard");
+ }
+ 
+@@ -174,17 +163,16 @@ wasm::ToJSValue(const Val& val)
+       case ValType::F32:
+         return DoubleValue(JS::CanonicalizeNaN(double(val.f32())));
+       case ValType::F64:
+         return DoubleValue(JS::CanonicalizeNaN(val.f64()));
+       case ValType::AnyRef:
+         if (!val.ptr())
+             return NullValue();
+         return ObjectValue(*(JSObject*)val.ptr());
+-      WASM_CRASH_IF_SIMD_TYPES;
+       case ValType::Ref:
+       case ValType::I64:
+         break;
+     }
+     MOZ_CRASH("unexpected type when translating to a JS value");
+ }
+ 
+ // ============================================================================
+@@ -2168,17 +2156,16 @@ WasmGlobalObject::trace(JSTracer* trc, J
+         if (global->cell()->ptr)
+             TraceManuallyBarrieredEdge(trc, &global->cell()->ptr, "wasm anyref global");
+         break;
+       case ValType::I32:
+       case ValType::F32:
+       case ValType::I64:
+       case ValType::F64:
+         break;
+-      WASM_CRASH_IF_SIMD_TYPES;
+       case ValType::Ref:
+         MOZ_CRASH("Ref NYI");
+     }
+ }
+ 
+ /* static */ void
+ WasmGlobalObject::finalize(FreeOp*, JSObject* obj)
+ {
+@@ -2224,17 +2211,16 @@ WasmGlobalObject::create(JSContext* cx, 
+         cell->f64 = val.f64();
+         break;
+       case ValType::AnyRef:
+         MOZ_ASSERT(!cell->ptr, "no prebarriers needed");
+         cell->ptr = val.ptr();
+         if (cell->ptr)
+             JSObject::writeBarrierPost(&cell->ptr, nullptr, cell->ptr);
+         break;
+-      WASM_CRASH_IF_SIMD_TYPES;
+       case ValType::Ref:
+         MOZ_CRASH("Ref NYI");
+     }
+ 
+     obj->initReservedSlot(TYPE_SLOT, Int32Value(int32_t(val.type().bitsUnsafe())));
+     obj->initReservedSlot(MUTABLE_SLOT, JS::BooleanValue(isMutable));
+     obj->initReservedSlot(CELL_SLOT, PrivateValue(cell));
+ 
+@@ -2308,17 +2294,16 @@ WasmGlobalObject::construct(JSContext* c
+             return false;
+     } else {
+         switch (globalType.code()) {
+           case ValType::I32:    globalVal = Val(uint32_t(0)); break;
+           case ValType::I64:    globalVal = Val(uint64_t(0)); break;
+           case ValType::F32:    globalVal = Val(float(0.0));  break;
+           case ValType::F64:    globalVal = Val(double(0.0)); break;
+           case ValType::AnyRef: globalVal = Val(nullptr);     break;
+-          WASM_CRASH_IF_SIMD_TYPES;
+           case ValType::Ref:    MOZ_CRASH("Ref NYI");
+         }
+     }
+ 
+     WasmGlobalObject* global = WasmGlobalObject::create(cx, globalVal, isMutable);
+     if (!global)
+         return false;
+ 
+@@ -2340,17 +2325,16 @@ WasmGlobalObject::valueGetterImpl(JSCont
+       case ValType::F32:
+       case ValType::F64:
+       case ValType::AnyRef:
+         args.rval().set(args.thisv().toObject().as<WasmGlobalObject>().value(cx));
+         return true;
+       case ValType::I64:
+         JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64_TYPE);
+         return false;
+-      WASM_CRASH_IF_SIMD_TYPES;
+       case ValType::Ref:
+         MOZ_CRASH("Ref NYI");
+     }
+     MOZ_CRASH();
+ }
+ 
+ /* static */ bool
+ WasmGlobalObject::valueGetter(JSContext* cx, unsigned argc, Value* vp)
+@@ -2391,17 +2375,16 @@ WasmGlobalObject::valueSetterImpl(JSCont
+       case ValType::AnyRef: {
+         JSObject* prevPtr = cell->ptr;
+         JSObject::writeBarrierPre(prevPtr);
+         cell->ptr = val.get().ptr();
+         if (cell->ptr)
+             JSObject::writeBarrierPost(&cell->ptr, prevPtr, cell->ptr);
+         break;
+       }
+-      WASM_CRASH_IF_SIMD_TYPES;
+       case ValType::I64:
+         MOZ_CRASH("unexpected i64 when setting global's value");
+       case ValType::Ref:
+         MOZ_CRASH("Ref NYI");
+     }
+ 
+     args.rval().setUndefined();
+     return true;
+@@ -2447,17 +2430,16 @@ WasmGlobalObject::val(MutableHandleVal o
+ {
+     Cell* cell = this->cell();
+     switch (type().code()) {
+       case ValType::I32:    outval.set(Val(uint32_t(cell->i32))); return;
+       case ValType::I64:    outval.set(Val(uint64_t(cell->i64))); return;
+       case ValType::F32:    outval.set(Val(cell->f32));           return;
+       case ValType::F64:    outval.set(Val(cell->f64));           return;
+       case ValType::AnyRef: outval.set(Val(cell->ptr));           return;
+-      WASM_CRASH_IF_SIMD_TYPES;
+       case ValType::Ref:    MOZ_CRASH("Ref NYI");
+     }
+     MOZ_CRASH("unexpected Global type");
+ }
+ 
+ Value
+ WasmGlobalObject::value(JSContext* cx) const
+ {
+diff --git a/js/src/wasm/WasmOpIter.cpp b/js/src/wasm/WasmOpIter.cpp
+--- a/js/src/wasm/WasmOpIter.cpp
++++ b/js/src/wasm/WasmOpIter.cpp
+@@ -351,96 +351,25 @@ wasm::Classify(OpBytes op)
+       }
+       case Op::MozPrefix: {
+           switch (MozOp(op.b1)) {
+             case MozOp::Limit:
+               // Reject Limit for the MozPrefix encoding
+               break;
+             case MozOp::TeeGlobal:
+               return OpKind::TeeGlobal;
+-            case MozOp::I8x16Const:
+-              return OpKind::I8x16;
+-            case MozOp::I16x8Const:
+-              return OpKind::I16x8;
+-            case MozOp::I32x4Const:
+-              return OpKind::I32x4;
+-            case MozOp::B8x16Const:
+-              return OpKind::B8x16;
+-            case MozOp::B16x8Const:
+-              return OpKind::B16x8;
+-            case MozOp::B32x4Const:
+-              return OpKind::B32x4;
+-            case MozOp::F32x4Const:
+-              return OpKind::F32x4;
+             case MozOp::I32BitNot:
+             case MozOp::I32Abs:
+             case MozOp::I32Neg:
+-            case MozOp::I8x16neg:
+-            case MozOp::I8x16not:
+-            case MozOp::I16x8neg:
+-            case MozOp::I16x8not:
+-            case MozOp::I32x4neg:
+-            case MozOp::I32x4not:
+-            case MozOp::F32x4neg:
+-            case MozOp::F32x4sqrt:
+-            case MozOp::F32x4abs:
+-            case MozOp::F32x4reciprocalApproximation:
+-            case MozOp::F32x4reciprocalSqrtApproximation:
+-            case MozOp::B8x16not:
+-            case MozOp::B16x8not:
+-            case MozOp::B32x4not:
+               return OpKind::Unary;
+             case MozOp::I32Min:
+             case MozOp::I32Max:
+             case MozOp::F64Mod:
+             case MozOp::F64Pow:
+             case MozOp::F64Atan2:
+-            case MozOp::I8x16add:
+-            case MozOp::I8x16sub:
+-            case MozOp::I8x16mul:
+-            case MozOp::I8x16addSaturate:
+-            case MozOp::I8x16subSaturate:
+-            case MozOp::I8x16addSaturateU:
+-            case MozOp::I8x16subSaturateU:
+-            case MozOp::I8x16and:
+-            case MozOp::I8x16or:
+-            case MozOp::I8x16xor:
+-            case MozOp::I16x8add:
+-            case MozOp::I16x8sub:
+-            case MozOp::I16x8mul:
+-            case MozOp::I16x8addSaturate:
+-            case MozOp::I16x8subSaturate:
+-            case MozOp::I16x8addSaturateU:
+-            case MozOp::I16x8subSaturateU:
+-            case MozOp::I16x8and:
+-            case MozOp::I16x8or:
+-            case MozOp::I16x8xor:
+-            case MozOp::I32x4add:
+-            case MozOp::I32x4sub:
+-            case MozOp::I32x4mul:
+-            case MozOp::I32x4and:
+-            case MozOp::I32x4or:
+-            case MozOp::I32x4xor:
+-            case MozOp::F32x4add:
+-            case MozOp::F32x4sub:
+-            case MozOp::F32x4mul:
+-            case MozOp::F32x4div:
+-            case MozOp::F32x4min:
+-            case MozOp::F32x4max:
+-            case MozOp::F32x4minNum:
+-            case MozOp::F32x4maxNum:
+-            case MozOp::B8x16and:
+-            case MozOp::B8x16or:
+-            case MozOp::B8x16xor:
+-            case MozOp::B16x8and:
+-            case MozOp::B16x8or:
+-            case MozOp::B16x8xor:
+-            case MozOp::B32x4and:
+-            case MozOp::B32x4or:
+-            case MozOp::B32x4xor:
+               return OpKind::Binary;
+             case MozOp::F64Sin:
+             case MozOp::F64Cos:
+             case MozOp::F64Tan:
+             case MozOp::F64Asin:
+             case MozOp::F64Acos:
+             case MozOp::F64Atan:
+             case MozOp::F64Exp:
+@@ -453,178 +382,29 @@ wasm::Classify(OpBytes op)
+             case MozOp::I64TeeStore32:
+             case MozOp::I32TeeStore:
+             case MozOp::I64TeeStore:
+             case MozOp::F32TeeStore:
+             case MozOp::F64TeeStore:
+             case MozOp::F32TeeStoreF64:
+             case MozOp::F64TeeStoreF32:
+               return OpKind::TeeStore;
+-            case MozOp::I32x4fromFloat32x4:
+-            case MozOp::I32x4fromFloat32x4U:
+-            case MozOp::F32x4fromInt32x4:
+-            case MozOp::F32x4fromUint32x4:
+-            case MozOp::I32x4fromFloat32x4Bits:
+-            case MozOp::I32x4fromInt8x16Bits:
+-            case MozOp::I32x4fromInt16x8Bits:
+-            case MozOp::I16x8fromInt8x16Bits:
+-            case MozOp::I16x8fromInt32x4Bits:
+-            case MozOp::I16x8fromFloat32x4Bits:
+-            case MozOp::I8x16fromInt16x8Bits:
+-            case MozOp::I8x16fromInt32x4Bits:
+-            case MozOp::I8x16fromFloat32x4Bits:
+-            case MozOp::F32x4fromInt8x16Bits:
+-            case MozOp::F32x4fromInt16x8Bits:
+-            case MozOp::F32x4fromInt32x4Bits:
+-              return OpKind::Conversion;
+-            case MozOp::I8x16load:
+-            case MozOp::I16x8load:
+-            case MozOp::I32x4load:
+-            case MozOp::I32x4load1:
+-            case MozOp::I32x4load2:
+-            case MozOp::I32x4load3:
+-            case MozOp::F32x4load:
+-            case MozOp::F32x4load1:
+-            case MozOp::F32x4load2:
+-            case MozOp::F32x4load3:
+-              return OpKind::Load;
+-            case MozOp::I8x16store:
+-            case MozOp::I16x8store:
+-            case MozOp::I32x4store:
+-            case MozOp::I32x4store1:
+-            case MozOp::I32x4store2:
+-            case MozOp::I32x4store3:
+-            case MozOp::F32x4store:
+-            case MozOp::F32x4store1:
+-            case MozOp::F32x4store2:
+-            case MozOp::F32x4store3:
+-              return OpKind::TeeStore;
+             case MozOp::OldCallDirect:
+               return OpKind::OldCallDirect;
+             case MozOp::OldCallIndirect:
+               return OpKind::OldCallIndirect;
+             case MozOp::I32AtomicsLoad:
+               return OpKind::OldAtomicLoad;
+             case MozOp::I32AtomicsStore:
+               return OpKind::OldAtomicStore;
+             case MozOp::I32AtomicsBinOp:
+               return OpKind::OldAtomicBinOp;
+             case MozOp::I32AtomicsCompareExchange:
+               return OpKind::OldAtomicCompareExchange;
+             case MozOp::I32AtomicsExchange:
+               return OpKind::OldAtomicExchange;
+-            case MozOp::I8x16extractLane:
+-            case MozOp::I8x16extractLaneU:
+-            case MozOp::I16x8extractLane:
+-            case MozOp::I16x8extractLaneU:
+-            case MozOp::I32x4extractLane:
+-            case MozOp::F32x4extractLane:
+-            case MozOp::B8x16extractLane:
+-            case MozOp::B16x8extractLane:
+-            case MozOp::B32x4extractLane:
+-              return OpKind::ExtractLane;
+-            case MozOp::I8x16replaceLane:
+-            case MozOp::I16x8replaceLane:
+-            case MozOp::I32x4replaceLane:
+-            case MozOp::F32x4replaceLane:
+-            case MozOp::B8x16replaceLane:
+-            case MozOp::B16x8replaceLane:
+-            case MozOp::B32x4replaceLane:
+-              return OpKind::ReplaceLane;
+-            case MozOp::I8x16swizzle:
+-            case MozOp::I16x8swizzle:
+-            case MozOp::I32x4swizzle:
+-            case MozOp::F32x4swizzle:
+-              return OpKind::Swizzle;
+-            case MozOp::I8x16shuffle:
+-            case MozOp::I16x8shuffle:
+-            case MozOp::I32x4shuffle:
+-            case MozOp::F32x4shuffle:
+-              return OpKind::Shuffle;
+-            case MozOp::I16x8check:
+-            case MozOp::I16x8splat:
+-            case MozOp::I32x4check:
+-            case MozOp::I32x4splat:
+-            case MozOp::I8x16check:
+-            case MozOp::I8x16splat:
+-            case MozOp::F32x4check:
+-            case MozOp::F32x4splat:
+-            case MozOp::B16x8check:
+-            case MozOp::B16x8splat:
+-            case MozOp::B32x4check:
+-            case MozOp::B32x4splat:
+-            case MozOp::B8x16check:
+-            case MozOp::B8x16splat:
+-              return OpKind::Splat;
+-            case MozOp::I8x16select:
+-            case MozOp::I16x8select:
+-            case MozOp::I32x4select:
+-            case MozOp::F32x4select:
+-              return OpKind::SimdSelect;
+-            case MozOp::I8x16Constructor:
+-            case MozOp::I16x8Constructor:
+-            case MozOp::I32x4Constructor:
+-            case MozOp::F32x4Constructor:
+-            case MozOp::B8x16Constructor:
+-            case MozOp::B16x8Constructor:
+-            case MozOp::B32x4Constructor:
+-              return OpKind::SimdCtor;
+-            case MozOp::B8x16allTrue:
+-            case MozOp::B8x16anyTrue:
+-            case MozOp::B16x8allTrue:
+-            case MozOp::B16x8anyTrue:
+-            case MozOp::B32x4allTrue:
+-            case MozOp::B32x4anyTrue:
+-              return OpKind::SimdBooleanReduction;
+-            case MozOp::I8x16shiftLeftByScalar:
+-            case MozOp::I8x16shiftRightByScalar:
+-            case MozOp::I8x16shiftRightByScalarU:
+-            case MozOp::I16x8shiftLeftByScalar:
+-            case MozOp::I16x8shiftRightByScalar:
+-            case MozOp::I16x8shiftRightByScalarU:
+-            case MozOp::I32x4shiftLeftByScalar:
+-            case MozOp::I32x4shiftRightByScalar:
+-            case MozOp::I32x4shiftRightByScalarU:
+-              return OpKind::SimdShiftByScalar;
+-            case MozOp::I8x16equal:
+-            case MozOp::I8x16notEqual:
+-            case MozOp::I8x16greaterThan:
+-            case MozOp::I8x16greaterThanOrEqual:
+-            case MozOp::I8x16lessThan:
+-            case MozOp::I8x16lessThanOrEqual:
+-            case MozOp::I8x16greaterThanU:
+-            case MozOp::I8x16greaterThanOrEqualU:
+-            case MozOp::I8x16lessThanU:
+-            case MozOp::I8x16lessThanOrEqualU:
+-            case MozOp::I16x8equal:
+-            case MozOp::I16x8notEqual:
+-            case MozOp::I16x8greaterThan:
+-            case MozOp::I16x8greaterThanOrEqual:
+-            case MozOp::I16x8lessThan:
+-            case MozOp::I16x8lessThanOrEqual:
+-            case MozOp::I16x8greaterThanU:
+-            case MozOp::I16x8greaterThanOrEqualU:
+-            case MozOp::I16x8lessThanU:
+-            case MozOp::I16x8lessThanOrEqualU:
+-            case MozOp::I32x4equal:
+-            case MozOp::I32x4notEqual:
+-            case MozOp::I32x4greaterThan:
+-            case MozOp::I32x4greaterThanOrEqual:
+-            case MozOp::I32x4lessThan:
+-            case MozOp::I32x4lessThanOrEqual:
+-            case MozOp::I32x4greaterThanU:
+-            case MozOp::I32x4greaterThanOrEqualU:
+-            case MozOp::I32x4lessThanU:
+-            case MozOp::I32x4lessThanOrEqualU:
+-            case MozOp::F32x4equal:
+-            case MozOp::F32x4notEqual:
+-            case MozOp::F32x4greaterThan:
+-            case MozOp::F32x4greaterThanOrEqual:
+-            case MozOp::F32x4lessThan:
+-            case MozOp::F32x4lessThanOrEqual:
+-              return OpKind::SimdComparison;
+           }
+           break;
+       }
+     }
+     MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unimplemented opcode");
+ }
+ #endif
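
The Classify cases removed above feed a debug-only cross-check: every OpIter::read* method asserts that the opcode it is decoding classifies as the OpKind that method handles (the deleted readSimd* bodies each begin with exactly such a MOZ_ASSERT), so calling the wrong reader trips an assertion during validation. A toy version of the same check; the wasm opcode values are shown purely as examples:

    #include <cassert>
    #include <cstdint>

    enum class OpKind { Unary, Binary, Unknown };

    static OpKind Classify(uint16_t op) {
        switch (op) {
          case 0x45: return OpKind::Unary;   // i32.eqz
          case 0x6a: return OpKind::Binary;  // i32.add
          default:   return OpKind::Unknown;
        }
    }

    static bool ReadBinary(uint16_t op) {
        assert(Classify(op) == OpKind::Binary &&
               "caller used the wrong read function for this opcode");
        // ... pop two operands, push the result ...
        return true;
    }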
+diff --git a/js/src/wasm/WasmOpIter.h b/js/src/wasm/WasmOpIter.h
+--- a/js/src/wasm/WasmOpIter.h
++++ b/js/src/wasm/WasmOpIter.h
+@@ -46,23 +46,16 @@ class StackType
+ 
+ #ifdef DEBUG
+     bool isValidCode() {
+         switch (UnpackTypeCodeType(tc_)) {
+           case TypeCode::I32:
+           case TypeCode::I64:
+           case TypeCode::F32:
+           case TypeCode::F64:
+-          case TypeCode::I8x16:
+-          case TypeCode::I16x8:
+-          case TypeCode::I32x4:
+-          case TypeCode::F32x4:
+-          case TypeCode::B8x16:
+-          case TypeCode::B16x8:
+-          case TypeCode::B32x4:
+           case TypeCode::AnyRef:
+           case TypeCode::Ref:
+           case TypeCode::Limit:
+             return true;
+           default:
+             return false;
+         }
+     }
+@@ -70,24 +63,16 @@ class StackType
+ 
+   public:
+     enum Code {
+         I32    = uint8_t(ValType::I32),
+         I64    = uint8_t(ValType::I64),
+         F32    = uint8_t(ValType::F32),
+         F64    = uint8_t(ValType::F64),
+ 
+-        I8x16  = uint8_t(ValType::I8x16),
+-        I16x8  = uint8_t(ValType::I16x8),
+-        I32x4  = uint8_t(ValType::I32x4),
+-        F32x4  = uint8_t(ValType::F32x4),
+-        B8x16  = uint8_t(ValType::B8x16),
+-        B16x8  = uint8_t(ValType::B16x8),
+-        B32x4  = uint8_t(ValType::B32x4),
+-
+         AnyRef = uint8_t(ValType::AnyRef),
+         Ref    = uint8_t(ValType::Ref),
+ 
+         Any    = uint8_t(TypeCode::Limit),
+     };
+ 
+     StackType() : tc_(InvalidPackedTypeCode()) {}
+ 
+@@ -153,23 +138,16 @@ enum class OpKind {
+     Block,
+     Loop,
+     Unreachable,
+     Drop,
+     I32,
+     I64,
+     F32,
+     F64,
+-    I8x16,
+-    I16x8,
+-    I32x4,
+-    F32x4,
+-    B8x16,
+-    B16x8,
+-    B32x4,
+     Br,
+     BrIf,
+     BrTable,
+     Nop,
+     Unary,
+     Binary,
+     Comparison,
+     Conversion,
+@@ -204,21 +182,16 @@ enum class OpKind {
+     OldAtomicBinOp,
+     OldAtomicCompareExchange,
+     OldAtomicExchange,
+     ExtractLane,
+     ReplaceLane,
+     Swizzle,
+     Shuffle,
+     Splat,
+-    SimdSelect,
+-    SimdCtor,
+-    SimdBooleanReduction,
+-    SimdShiftByScalar,
+-    SimdComparison,
+     MemCopy,
+     MemFill,
+     RefNull,
+ };
+ 
+ // Return the OpKind for a given Op. This is used for sanity-checking that
+ // API users use the correct read function for a given Op.
+ OpKind
+@@ -400,28 +373,16 @@ class MOZ_STACK_CLASS OpIter : private P
+         return d_.readVarU64(out);
+     }
+     MOZ_MUST_USE bool readFixedF32(float* out) {
+         return d_.readFixedF32(out);
+     }
+     MOZ_MUST_USE bool readFixedF64(double* out) {
+         return d_.readFixedF64(out);
+     }
+-    MOZ_MUST_USE bool readFixedI8x16(I8x16* out) {
+-        return d_.readFixedI8x16(out);
+-    }
+-    MOZ_MUST_USE bool readFixedI16x8(I16x8* out) {
+-        return d_.readFixedI16x8(out);
+-    }
+-    MOZ_MUST_USE bool readFixedI32x4(I32x4* out) {
+-        return d_.readFixedI32x4(out);
+-    }
+-    MOZ_MUST_USE bool readFixedF32x4(F32x4* out) {
+-        return d_.readFixedF32x4(out);
+-    }
+ 
+     MOZ_MUST_USE bool readAtomicViewType(Scalar::Type* viewType) {
+         uint8_t x;
+         if (!readFixedU8(&x))
+             return fail("unable to read atomic view");
+         if (x >= Scalar::MaxTypedArrayViewType)
+             return fail("invalid atomic view type");
+         *viewType = Scalar::Type(x);
+@@ -587,23 +548,16 @@ class MOZ_STACK_CLASS OpIter : private P
+     MOZ_MUST_USE bool readTeeLocal(const ValTypeVector& locals, uint32_t* id, Value* value);
+     MOZ_MUST_USE bool readGetGlobal(uint32_t* id);
+     MOZ_MUST_USE bool readSetGlobal(uint32_t* id, Value* value);
+     MOZ_MUST_USE bool readTeeGlobal(uint32_t* id, Value* value);
+     MOZ_MUST_USE bool readI32Const(int32_t* i32);
+     MOZ_MUST_USE bool readI64Const(int64_t* i64);
+     MOZ_MUST_USE bool readF32Const(float* f32);
+     MOZ_MUST_USE bool readF64Const(double* f64);
+-    MOZ_MUST_USE bool readI8x16Const(I8x16* i8x16);
+-    MOZ_MUST_USE bool readI16x8Const(I16x8* i16x8);
+-    MOZ_MUST_USE bool readI32x4Const(I32x4* i32x4);
+-    MOZ_MUST_USE bool readF32x4Const(F32x4* f32x4);
+-    MOZ_MUST_USE bool readB8x16Const(I8x16* i8x16);
+-    MOZ_MUST_USE bool readB16x8Const(I16x8* i16x8);
+-    MOZ_MUST_USE bool readB32x4Const(I32x4* i32x4);
+     MOZ_MUST_USE bool readRefNull(ValType* type);
+     MOZ_MUST_USE bool readCall(uint32_t* calleeIndex, ValueVector* argValues);
+     MOZ_MUST_USE bool readCallIndirect(uint32_t* funcTypeIndex, Value* callee, ValueVector* argValues);
+     MOZ_MUST_USE bool readOldCallDirect(uint32_t numFuncImports, uint32_t* funcIndex,
+                                         ValueVector* argValues);
+     MOZ_MUST_USE bool readOldCallIndirect(uint32_t* funcTypeIndex, Value* callee, ValueVector* argValues);
+     MOZ_MUST_USE bool readWake(LinearMemoryAddress<Value>* addr, Value* count);
+     MOZ_MUST_USE bool readWait(LinearMemoryAddress<Value>* addr,
+@@ -638,34 +592,16 @@ class MOZ_STACK_CLASS OpIter : private P
+                                          Value* value);
+     MOZ_MUST_USE bool readOldAtomicCompareExchange(LinearMemoryAddress<Value>* addr,
+                                                    Scalar::Type* viewType,
+                                                    Value* oldValue,
+                                                    Value* newValue);
+     MOZ_MUST_USE bool readOldAtomicExchange(LinearMemoryAddress<Value>* addr,
+                                             Scalar::Type* viewType,
+                                             Value* newValue);
+-    MOZ_MUST_USE bool readSimdComparison(ValType simdType, Value* lhs,
+-                                         Value* rhs);
+-    MOZ_MUST_USE bool readSimdShiftByScalar(ValType simdType, Value* lhs,
+-                                            Value* rhs);
+-    MOZ_MUST_USE bool readSimdBooleanReduction(ValType simdType, Value* input);
+-    MOZ_MUST_USE bool readExtractLane(ValType simdType, uint8_t* lane,
+-                                      Value* vector);
+-    MOZ_MUST_USE bool readReplaceLane(ValType simdType, uint8_t* lane,
+-                                      Value* vector, Value* scalar);
+-    MOZ_MUST_USE bool readSplat(ValType simdType, Value* scalar);
+-    MOZ_MUST_USE bool readSwizzle(ValType simdType, uint8_t (* lanes)[16], Value* vector);
+-    MOZ_MUST_USE bool readShuffle(ValType simdType, uint8_t (* lanes)[16],
+-                                  Value* lhs, Value* rhs);
+-    MOZ_MUST_USE bool readSimdSelect(ValType simdType, Value* trueValue,
+-                                     Value* falseValue,
+-                                     Value* condition);
+-    MOZ_MUST_USE bool readSimdCtor(ValType elementType, uint32_t numElements, ValType simdType,
+-                                   ValueVector* argValues);
+     MOZ_MUST_USE bool readMemCopy(Value* dest, Value* src, Value* len);
+     MOZ_MUST_USE bool readMemFill(Value* start, Value* val, Value* len);
+ 
+     // At a location where readOp is allowed, peek at the next opcode
+     // without consuming it or updating any internal state.
+     // Never fails: returns uint16_t(Op::Limit) in op->b0 if it can't read.
+     void peekOp(OpBytes* op);
+ 
+@@ -1001,23 +937,16 @@ OpIter<Policy>::readBlockType(ExprType* 
+ 
+     bool known = false;
+     switch (uncheckedCode) {
+       case uint8_t(ExprType::Void):
+       case uint8_t(ExprType::I32):
+       case uint8_t(ExprType::I64):
+       case uint8_t(ExprType::F32):
+       case uint8_t(ExprType::F64):
+-      case uint8_t(ExprType::I8x16):
+-      case uint8_t(ExprType::I16x8):
+-      case uint8_t(ExprType::I32x4):
+-      case uint8_t(ExprType::F32x4):
+-      case uint8_t(ExprType::B8x16):
+-      case uint8_t(ExprType::B16x8):
+-      case uint8_t(ExprType::B32x4):
+         known = true;
+         break;
+       case uint8_t(ExprType::Ref):
+         known = env_.gcTypesEnabled == HasGcTypes::True;
+         break;
+       case uint8_t(ExprType::AnyRef):
+         known = env_.gcTypesEnabled == HasGcTypes::True;
+         break;
+@@ -1686,86 +1615,16 @@ OpIter<Policy>::readF64Const(double* f64
+     MOZ_ASSERT(Classify(op_) == OpKind::F64);
+ 
+     return readFixedF64(f64) &&
+            push(ValType::F64);
+ }
+ 
+ template <typename Policy>
+ inline bool
+-OpIter<Policy>::readI8x16Const(I8x16* i8x16)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::I8x16);
+-
+-    return readFixedI8x16(i8x16) &&
+-           push(ValType::I8x16);
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readI16x8Const(I16x8* i16x8)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::I16x8);
+-
+-    return readFixedI16x8(i16x8) &&
+-           push(ValType::I16x8);
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readI32x4Const(I32x4* i32x4)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::I32x4);
+-
+-    return readFixedI32x4(i32x4) &&
+-           push(ValType::I32x4);
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readF32x4Const(F32x4* f32x4)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::F32x4);
+-
+-    return readFixedF32x4(f32x4) &&
+-           push(ValType::F32x4);
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readB8x16Const(I8x16* i8x16)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::B8x16);
+-
+-    return readFixedI8x16(i8x16) &&
+-           push(ValType::B8x16);
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readB16x8Const(I16x8* i16x8)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::B16x8);
+-
+-    return readFixedI16x8(i16x8) &&
+-           push(ValType::B16x8);
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readB32x4Const(I32x4* i32x4)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::B32x4);
+-
+-    return readFixedI32x4(i32x4) &&
+-           push(ValType::B32x4);
+-}
+-
+-template <typename Policy>
+-inline bool
+ OpIter<Policy>::readRefNull(ValType* type)
+ {
+     MOZ_ASSERT(Classify(op_) == OpKind::RefNull);
+     uint8_t code;
+     uint32_t refTypeIndex;
+     if (!d_.readValType(&code, &refTypeIndex))
+         return fail("unknown nullref type");
+     if (code == uint8_t(TypeCode::Ref)) {
+@@ -2139,213 +1998,16 @@ OpIter<Policy>::readOldAtomicExchange(Li
+         return false;
+ 
+     infalliblePush(ValType::I32);
+     return true;
+ }
+ 
+ template <typename Policy>
+ inline bool
+-OpIter<Policy>::readSimdComparison(ValType simdType, Value* lhs, Value* rhs)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::SimdComparison);
+-
+-    if (!popWithType(simdType, rhs))
+-        return false;
+-
+-    if (!popWithType(simdType, lhs))
+-        return false;
+-
+-    infalliblePush(SimdBoolType(simdType));
+-
+-    return true;
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readSimdShiftByScalar(ValType simdType, Value* lhs, Value* rhs)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::SimdShiftByScalar);
+-
+-    if (!popWithType(ValType::I32, rhs))
+-        return false;
+-
+-    if (!popWithType(simdType, lhs))
+-        return false;
+-
+-    infalliblePush(simdType);
+-
+-    return true;
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readSimdBooleanReduction(ValType simdType, Value* input)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::SimdBooleanReduction);
+-
+-    if (!popWithType(simdType, input))
+-        return false;
+-
+-    infalliblePush(ValType::I32);
+-
+-    return true;
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readExtractLane(ValType simdType, uint8_t* lane, Value* vector)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::ExtractLane);
+-
+-    uint32_t laneBits;
+-    if (!readVarU32(&laneBits))
+-        return false;
+-
+-    if (laneBits >= NumSimdElements(simdType))
+-        return fail("simd lane out of bounds for simd type");
+-
+-    *lane = uint8_t(laneBits);
+-
+-    if (!popWithType(simdType, vector))
+-        return false;
+-
+-    infalliblePush(SimdElementType(simdType));
+-    return true;
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readReplaceLane(ValType simdType, uint8_t* lane, Value* vector, Value* scalar)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::ReplaceLane);
+-
+-    uint32_t laneBits;
+-    if (!readVarU32(&laneBits))
+-        return false;
+-
+-    if (laneBits >= NumSimdElements(simdType))
+-        return fail("simd lane out of bounds for simd type");
+-
+-    *lane = uint8_t(laneBits);
+-
+-    if (!popWithType(SimdElementType(simdType), scalar))
+-        return false;
+-
+-    if (!popWithType(simdType, vector))
+-        return false;
+-
+-    infalliblePush(simdType);
+-    return true;
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readSplat(ValType simdType, Value* scalar)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::Splat);
+-
+-    if (!popWithType(SimdElementType(simdType), scalar))
+-        return false;
+-
+-    infalliblePush(simdType);
+-
+-    return true;
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readSwizzle(ValType simdType, uint8_t (* lanes)[16], Value* vector)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::Swizzle);
+-
+-    uint32_t numSimdLanes = NumSimdElements(simdType);
+-    MOZ_ASSERT(numSimdLanes <= mozilla::ArrayLength(*lanes));
+-    for (uint32_t i = 0; i < numSimdLanes; ++i) {
+-        if (!readFixedU8(&(*lanes)[i]))
+-            return fail("unable to read swizzle lane");
+-        if ((*lanes)[i] >= numSimdLanes)
+-            return fail("swizzle index out of bounds");
+-    }
+-
+-    if (!popWithType(simdType, vector))
+-        return false;
+-
+-    infalliblePush(simdType);
+-
+-    return true;
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readShuffle(ValType simdType, uint8_t (* lanes)[16], Value* lhs, Value* rhs)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::Shuffle);
+-
+-    uint32_t numSimdLanes = NumSimdElements(simdType);
+-    MOZ_ASSERT(numSimdLanes <= mozilla::ArrayLength(*lanes));
+-    for (uint32_t i = 0; i < numSimdLanes; ++i) {
+-        if (!readFixedU8(&(*lanes)[i]))
+-            return fail("unable to read shuffle lane");
+-        if ((*lanes)[i] >= numSimdLanes * 2)
+-            return fail("shuffle index out of bounds");
+-    }
+-
+-    if (!popWithType(simdType, rhs))
+-        return false;
+-
+-    if (!popWithType(simdType, lhs))
+-        return false;
+-
+-    infalliblePush(simdType);
+-
+-    return true;
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readSimdSelect(ValType simdType, Value* trueValue, Value* falseValue,
+-                               Value* condition)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::SimdSelect);
+-
+-    if (!popWithType(simdType, falseValue))
+-        return false;
+-    if (!popWithType(simdType, trueValue))
+-        return false;
+-    if (!popWithType(SimdBoolType(simdType), condition))
+-        return false;
+-
+-    infalliblePush(simdType);
+-
+-    return true;
+-}
+-
+-template <typename Policy>
+-inline bool
+-OpIter<Policy>::readSimdCtor(ValType elementType, uint32_t numElements, ValType simdType,
+-                             ValueVector* argValues)
+-{
+-    MOZ_ASSERT(Classify(op_) == OpKind::SimdCtor);
+-
+-    if (!argValues->resize(numElements))
+-        return false;
+-
+-    for (int32_t i = numElements - 1; i >= 0; i--) {
+-        if (!popWithType(elementType, &(*argValues)[i]))
+-            return false;
+-    }
+-
+-    infalliblePush(simdType);
+-
+-    return true;
+-}
+-
+-template <typename Policy>
+-inline bool
+ OpIter<Policy>::readMemCopy(Value* dest, Value* src, Value* len)
+ {
+     MOZ_ASSERT(Classify(op_) == OpKind::MemCopy);
+ 
+     if (!env_.usesMemory())
+         return fail("can't touch memory without memory");
+ 
+     if (!popWithType(ValType::I32, len))
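
The readSimd* validators deleted above all follow OpIter's stack discipline, visible in their bodies: operands are popped in reverse order with popWithType, and only after every pop has succeeded is the result pushed with infalliblePush (infallible in the real code because the pops guarantee capacity; the std::vector below is just a stand-in). A toy type-checking stack with invented names:

    #include <vector>

    enum class ValType { I32, F32 };

    struct TypeStack {
        std::vector<ValType> stack;

        // Pop one entry, checking it against the expected type.
        bool popWithType(ValType expect) {
            if (stack.empty() || stack.back() != expect)
                return false;          // validation failure
            stack.pop_back();
            return true;
        }

        void infalliblePush(ValType t) { stack.push_back(t); }

        // A binary op such as f32.add: the rhs is on top, so it pops first.
        bool readBinary(ValType t) {
            if (!popWithType(t))       // rhs
                return false;
            if (!popWithType(t))       // lhs
                return false;
            infalliblePush(t);         // result
            return true;
        }
    };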
+diff --git a/js/src/wasm/WasmSignalHandlers.cpp b/js/src/wasm/WasmSignalHandlers.cpp
+--- a/js/src/wasm/WasmSignalHandlers.cpp
++++ b/js/src/wasm/WasmSignalHandlers.cpp
+@@ -737,19 +737,18 @@ HandleOutOfBounds(CONTEXT* context, uint
+                   uint8_t** ppc)
+ {
+     MOZ_RELEASE_ASSERT(segment->code().containsCodePC(pc));
+ 
+     Trap trap;
+     BytecodeOffset bytecode;
+     if (!segment->code().lookupTrap(pc, &trap, &bytecode)) {
+         // If there is no associated TrapSite for the faulting PC, this must be
+-        // experimental SIMD.js or Atomics. When these are converted to
+-        // non-experimental wasm features, this case, as well as outOfBoundsCode,
+-        // can be removed.
++        // an Atomics access. When these are converted to non-experimental wasm
++        // features, this case, as well as outOfBoundsCode, can be removed.
+         activation->startWasmTrap(Trap::OutOfBounds, 0, ToRegisterState(context));
+         *ppc = segment->outOfBoundsCode();
+         return true;
+     }
+ 
+     if (trap != Trap::OutOfBounds)
+         return false;
+ 
+@@ -866,19 +865,18 @@ HandleOutOfBounds(CONTEXT* context, uint
+         // We now know that this is an out-of-bounds access made by an asm.js
+         // load/store that we should handle.
+         switch (access.kind()) {
+           case Disassembler::HeapAccess::Load:
+           case Disassembler::HeapAccess::LoadSext32:
+             // Assign the JS-defined result value to the destination register
+             // (ToInt32(undefined) or ToNumber(undefined), determined by the
+             // type of the destination register). Very conveniently, we can
+-            // infer the type from the register class, since all SIMD accesses
+-            // throw on out of bounds (see above), so the only types using FP
+-            // registers are float32 and double.
++            // infer the type from the register class, so the only types using
++            // FP registers are float32 and double.
+             SetRegisterToCoercedUndefined(context, access.size(), access.otherOperand());
+             break;
+           case Disassembler::HeapAccess::Store:
+             // Do nothing.
+             break;
+           case Disassembler::HeapAccess::LoadSext64:
+             MOZ_CRASH("no int64 accesses in asm.js");
+           case Disassembler::HeapAccess::Unknown:
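
The comment rewritten in this hunk only gestures at the mechanism: on a faulting access, the handler looks up the faulting PC in the code's trap table and, if the PC belongs to wasm code, resumes execution at a stub by rewriting the saved program counter (that is the *ppc = segment->outOfBoundsCode() line above). A deliberately stripped-down illustration of that PC rewrite, assuming Linux/x86-64 with glibc; TrapStub and PcIsInWasmCode are hypothetical, and the real handler performs far more checking:

    #include <csignal>
    #include <cstdint>
    #include <ucontext.h>
    #include <unistd.h>

    extern "C" void TrapStub();                  // hypothetical recovery stub

    static bool PcIsInWasmCode(uintptr_t pc) {
        (void)pc;                                // would consult a code-segment table
        return true;
    }

    static void Handler(int, siginfo_t*, void* ctx) {
        ucontext_t* uc = static_cast<ucontext_t*>(ctx);
        uintptr_t pc = uintptr_t(uc->uc_mcontext.gregs[REG_RIP]);
        if (!PcIsInWasmCode(pc))
            _exit(1);                            // not our fault: crash for real
        // Resume at the stub instead of re-executing the faulting access.
        uc->uc_mcontext.gregs[REG_RIP] = greg_t(uintptr_t(&TrapStub));
    }

    static void Install() {
        struct sigaction sa = {};
        sa.sa_sigaction = Handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &sa, nullptr);
    }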
+diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp
+--- a/js/src/wasm/WasmStubs.cpp
++++ b/js/src/wasm/WasmStubs.cpp
+@@ -104,27 +104,16 @@ SetupABIArguments(MacroAssembler& masm, 
+             else
+                 MOZ_CRASH("wasm uses hardfp for function calls.");
+             break;
+ #endif
+           case ABIArg::FPU: {
+             static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
+                           "ExportArg must be big enough to store SIMD values");
+             switch (type) {
+-              case MIRType::Int8x16:
+-              case MIRType::Int16x8:
+-              case MIRType::Int32x4:
+-              case MIRType::Bool8x16:
+-              case MIRType::Bool16x8:
+-              case MIRType::Bool32x4:
+-                masm.loadUnalignedSimd128Int(src, iter->fpu());
+-                break;
+-              case MIRType::Float32x4:
+-                masm.loadUnalignedSimd128Float(src, iter->fpu());
+-                break;
+               case MIRType::Double:
+                 masm.loadDouble(src, iter->fpu());
+                 break;
+               case MIRType::Float32:
+                 masm.loadFloat32(src, iter->fpu());
+                 break;
+               default:
+                 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
+@@ -161,31 +150,16 @@ SetupABIArguments(MacroAssembler& masm, 
+                 masm.storeDouble(ScratchDoubleReg,
+                                  Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+                 break;
+               case MIRType::Float32:
+                 masm.loadFloat32(src, ScratchFloat32Reg);
+                 masm.storeFloat32(ScratchFloat32Reg,
+                                   Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+                 break;
+-              case MIRType::Int8x16:
+-              case MIRType::Int16x8:
+-              case MIRType::Int32x4:
+-              case MIRType::Bool8x16:
+-              case MIRType::Bool16x8:
+-              case MIRType::Bool32x4:
+-                masm.loadUnalignedSimd128Int(src, ScratchSimd128Reg);
+-                masm.storeAlignedSimd128Int(
+-                  ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+-                break;
+-              case MIRType::Float32x4:
+-                masm.loadUnalignedSimd128Float(src, ScratchSimd128Reg);
+-                masm.storeAlignedSimd128Float(
+-                  ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+-                break;
+               default:
+                 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
+             }
+             break;
+           case ABIArg::Uninitialized:
+             MOZ_CRASH("Uninitialized ABIArg kind");
+         }
+     }
+@@ -211,29 +185,16 @@ StoreABIReturn(MacroAssembler& masm, con
+       case ExprType::F64:
+         masm.canonicalizeDouble(ReturnDoubleReg);
+         masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
+         break;
+       case ExprType::Ref:
+       case ExprType::AnyRef:
+         masm.storePtr(ReturnReg, Address(argv, 0));
+         break;
+-      case ExprType::I8x16:
+-      case ExprType::I16x8:
+-      case ExprType::I32x4:
+-      case ExprType::B8x16:
+-      case ExprType::B16x8:
+-      case ExprType::B32x4:
+-        // We don't have control on argv alignment, do an unaligned access.
+-        masm.storeUnalignedSimd128Int(ReturnSimd128Reg, Address(argv, 0));
+-        break;
+-      case ExprType::F32x4:
+-        // We don't have control on argv alignment, do an unaligned access.
+-        masm.storeUnalignedSimd128Float(ReturnSimd128Reg, Address(argv, 0));
+-        break;
+       case ExprType::Limit:
+         MOZ_CRASH("Limit");
+     }
+ }
+ 
+ #if defined(JS_CODEGEN_ARM)
+ // The ARM system ABI also includes d15 & s31 in the non volatile float registers.
+ // Also exclude lr (a.k.a. r14) as we preserve it manually.
+@@ -828,23 +789,16 @@ GenerateJitEntry(MacroAssembler& masm, s
+         break;
+       case ExprType::Ref:
+         MOZ_CRASH("return ref in jitentry NYI");
+         break;
+       case ExprType::AnyRef:
+         MOZ_CRASH("return anyref in jitentry NYI");
+         break;
+       case ExprType::I64:
+-      case ExprType::I8x16:
+-      case ExprType::I16x8:
+-      case ExprType::I32x4:
+-      case ExprType::B8x16:
+-      case ExprType::B16x8:
+-      case ExprType::B32x4:
+-      case ExprType::F32x4:
+         MOZ_CRASH("unexpected return type when calling from ion to wasm");
+       case ExprType::Limit:
+         MOZ_CRASH("Limit");
+     }
+ 
+     MOZ_ASSERT(masm.framePushed() == 0);
+ #ifdef JS_CODEGEN_ARM64
+     masm.loadPtr(Address(sp, 0), lr);
+@@ -1204,24 +1158,16 @@ GenerateImportInterpExit(MacroAssembler&
+         masm.loadDouble(argv, ReturnDoubleReg);
+         break;
+       case ExprType::Ref:
+       case ExprType::AnyRef:
+         masm.call(SymbolicAddress::CallImport_Ref);
+         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+         masm.loadPtr(argv, ReturnReg);
+         break;
+-      case ExprType::I8x16:
+-      case ExprType::I16x8:
+-      case ExprType::I32x4:
+-      case ExprType::F32x4:
+-      case ExprType::B8x16:
+-      case ExprType::B16x8:
+-      case ExprType::B32x4:
+-        MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
+       case ExprType::Limit:
+         MOZ_CRASH("Limit");
+     }
+ 
+     // The native ABI preserves the TLS, heap and global registers since they
+     // are non-volatile.
+     MOZ_ASSERT(NonVolatileRegs.has(WasmTlsReg));
+ #if defined(JS_CODEGEN_X64) || \
+@@ -1385,24 +1331,16 @@ GenerateImportJitExit(MacroAssembler& ma
+         masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
+         break;
+       case ExprType::Ref:
+         MOZ_CRASH("ref returned by import (jit exit) NYI");
+         break;
+       case ExprType::AnyRef:
+         MOZ_CRASH("anyref returned by import (jit exit) NYI");
+         break;
+-      case ExprType::I8x16:
+-      case ExprType::I16x8:
+-      case ExprType::I32x4:
+-      case ExprType::F32x4:
+-      case ExprType::B8x16:
+-      case ExprType::B16x8:
+-      case ExprType::B32x4:
+-        MOZ_CRASH("SIMD types shouldn't be returned from an import");
+       case ExprType::Limit:
+         MOZ_CRASH("Limit");
+     }
+ 
+     Label done;
+     masm.bind(&done);
+ 
+     GenerateJitExitEpilogue(masm, masm.framePushed(), offsets);
+@@ -1590,21 +1528,23 @@ static const LiveRegisterSet RegsToPrese
+ static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
+ #elif defined(JS_CODEGEN_ARM64)
+ // We assume that traps do not happen while lr is live. This both ensures that
+ // the size of RegsToPreserve is a multiple of 2 (preserving WasmStackAlignment)
+ // and gives us a register to clobber in the return path.
+ static const LiveRegisterSet RegsToPreserve(
+     GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::StackPointer) |
+                                               (uint32_t(1) << Registers::lr))),
+-    FloatRegisterSet(FloatRegisters::AllMask));
++    FloatRegisterSet(FloatRegisters::AllDoubleMask));
++static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too");
+ #else
+ static const LiveRegisterSet RegsToPreserve(
+     GeneralRegisterSet(Registers::AllMask & ~(uint32_t(1) << Registers::StackPointer)),
+-    FloatRegisterSet(FloatRegisters::AllMask));
++    FloatRegisterSet(FloatRegisters::AllDoubleMask));
++static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too");
+ #endif
+ 
+ // Generate a stub which calls WasmReportTrap() and can be executed by having
+ // the signal handler redirect PC from any trapping instruction.
+ static bool
+ GenerateTrapExit(MacroAssembler& masm, Label* throwLabel, Offsets* offsets)
+ {
+     AssertExpectedSP(masm);
+@@ -1651,23 +1591,22 @@ GenerateTrapExit(MacroAssembler& masm, L
+ #else
+     masm.ret();
+ #endif
+ 
+     return FinishOffsets(masm, offsets);
+ }
+ 
+ // Generate a stub which is only used by the signal handlers to handle out of
+-// bounds access by experimental SIMD.js and Atomics and unaligned accesses on
+-// ARM. This stub is executed by direct PC transfer from the faulting memory
+-// access and thus the stack depth is unknown. Since
+-// JitActivation::packedExitFP() is not set before calling the error reporter,
+-// the current wasm activation will be lost. This stub should be removed when
+-// SIMD.js and Atomics are moved to wasm and given proper traps and when we use
+-// a non-faulting strategy for unaligned ARM access.
++// bounds access by Atomics and unaligned accesses on ARM. This stub is
++// executed by direct PC transfer from the faulting memory access and thus the
++// stack depth is unknown. Since JitActivation::packedExitFP() is not set
++// before calling the error reporter, the current wasm activation will be lost.
++// This stub should be removed when Atomics are moved to wasm and given proper
++// traps and when we use a non-faulting strategy for unaligned ARM access.
+ static bool
+ GenerateGenericMemoryAccessTrap(MacroAssembler& masm, SymbolicAddress reporter, Label* throwLabel,
+                                 Offsets* offsets)
+ {
+     AssertExpectedSP(masm);
+     masm.haltingAlign(CodeAlignment);
+ 
+     offsets->begin = masm.currentOffset();
+diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
+--- a/js/src/wasm/WasmTypes.cpp
++++ b/js/src/wasm/WasmTypes.cpp
+@@ -62,23 +62,16 @@ static_assert((MaxMemoryAccessSize & (Ma
+ Val::Val(const LitVal& val)
+ {
+     type_ = val.type();
+     switch (type_.code()) {
+       case ValType::I32: u.i32_ = val.i32(); return;
+       case ValType::F32: u.f32_ = val.f32(); return;
+       case ValType::I64: u.i64_ = val.i64(); return;
+       case ValType::F64: u.f64_ = val.f64(); return;
+-      case ValType::I8x16:
+-      case ValType::B8x16:
+-      case ValType::I16x8:
+-      case ValType::B16x8:
+-      case ValType::I32x4:
+-      case ValType::F32x4:
+-      case ValType::B32x4: memcpy(&u, val.rawSimd(), jit::Simd128DataSize); return;
+       case ValType::AnyRef: u.ptr_ = val.ptr(); return;
+       case ValType::Ref: break;
+     }
+     MOZ_CRASH();
+ }
+ 
+ void
+ Val::writePayload(uint8_t* dst) const
+@@ -87,25 +80,16 @@ Val::writePayload(uint8_t* dst) const
+       case ValType::I32:
+       case ValType::F32:
+         memcpy(dst, &u.i32_, sizeof(u.i32_));
+         return;
+       case ValType::I64:
+       case ValType::F64:
+         memcpy(dst, &u.i64_, sizeof(u.i64_));
+         return;
+-      case ValType::I8x16:
+-      case ValType::I16x8:
+-      case ValType::I32x4:
+-      case ValType::F32x4:
+-      case ValType::B8x16:
+-      case ValType::B16x8:
+-      case ValType::B32x4:
+-        memcpy(dst, &u, jit::Simd128DataSize);
+-        return;
+       case ValType::Ref:
+       case ValType::AnyRef:
+         MOZ_ASSERT(*(JSObject**)dst == nullptr, "should be null so no need for a pre-barrier");
+         memcpy(dst, &u.ptr_, sizeof(JSObject*));
+         // Either the written location is in the global data section in the
+         // WasmInstanceObject, or the Cell of a WasmGlobalObject:
+         // - WasmInstanceObjects are always tenured and u.ptr_ may point to a
+         // nursery object, so we need a post-barrier since the global data of
+@@ -231,23 +215,16 @@ IsImmediateType(ValType vt)
+ {
+     switch (vt.code()) {
+       case ValType::I32:
+       case ValType::I64:
+       case ValType::F32:
+       case ValType::F64:
+       case ValType::AnyRef:
+         return true;
+-      case ValType::I8x16:
+-      case ValType::I16x8:
+-      case ValType::I32x4:
+-      case ValType::F32x4:
+-      case ValType::B8x16:
+-      case ValType::B16x8:
+-      case ValType::B32x4:
+       case ValType::Ref:
+         return false;
+     }
+     MOZ_CRASH("bad ValType");
+ }
+ 
+ static unsigned
+ EncodeImmediateType(ValType vt)
+@@ -259,23 +236,16 @@ EncodeImmediateType(ValType vt)
+       case ValType::I64:
+         return 1;
+       case ValType::F32:
+         return 2;
+       case ValType::F64:
+         return 3;
+       case ValType::AnyRef:
+         return 4;
+-      case ValType::I8x16:
+-      case ValType::I16x8:
+-      case ValType::I32x4:
+-      case ValType::F32x4:
+-      case ValType::B8x16:
+-      case ValType::B16x8:
+-      case ValType::B32x4:
+       case ValType::Ref:
+         break;
+     }
+     MOZ_CRASH("bad ValType");
+ }
+ 
+ /* static */ bool
+ FuncTypeIdDesc::isGlobal(const FuncType& funcType)
+diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
+--- a/js/src/wasm/WasmTypes.h
++++ b/js/src/wasm/WasmTypes.h
+@@ -82,21 +82,16 @@ using mozilla::Maybe;
+ using mozilla::MallocSizeOf;
+ using mozilla::Nothing;
+ using mozilla::PodZero;
+ using mozilla::PodCopy;
+ using mozilla::PodEqual;
+ using mozilla::Some;
+ using mozilla::Unused;
+ 
+-typedef int8_t I8x16[16];
+-typedef int16_t I16x8[8];
+-typedef int32_t I32x4[4];
+-typedef float F32x4[4];
+-
+ class Code;
+ class DebugState;
+ class GeneratedSourceMap;
+ class Memory;
+ class Module;
+ class Instance;
+ class Table;
+ 
+@@ -245,23 +240,16 @@ class ExprType
+ 
+ #ifdef DEBUG
+     bool isValidCode() {
+         switch (UnpackTypeCodeType(tc_)) {
+           case TypeCode::I32:
+           case TypeCode::I64:
+           case TypeCode::F32:
+           case TypeCode::F64:
+-          case TypeCode::I8x16:
+-          case TypeCode::I16x8:
+-          case TypeCode::I32x4:
+-          case TypeCode::F32x4:
+-          case TypeCode::B8x16:
+-          case TypeCode::B16x8:
+-          case TypeCode::B32x4:
+           case TypeCode::AnyRef:
+           case TypeCode::Ref:
+           case TypeCode::BlockVoid:
+           case TypeCode::Limit:
+             return true;
+           default:
+             return false;
+         }
+@@ -274,24 +262,16 @@ class ExprType
+ 
+         I32    = uint8_t(TypeCode::I32),
+         I64    = uint8_t(TypeCode::I64),
+         F32    = uint8_t(TypeCode::F32),
+         F64    = uint8_t(TypeCode::F64),
+         AnyRef = uint8_t(TypeCode::AnyRef),
+         Ref    = uint8_t(TypeCode::Ref),
+ 
+-        I8x16  = uint8_t(TypeCode::I8x16),
+-        I16x8  = uint8_t(TypeCode::I16x8),
+-        I32x4  = uint8_t(TypeCode::I32x4),
+-        F32x4  = uint8_t(TypeCode::F32x4),
+-        B8x16  = uint8_t(TypeCode::B8x16),
+-        B16x8  = uint8_t(TypeCode::B16x8),
+-        B32x4  = uint8_t(TypeCode::B32x4),
+-
+         Limit  = uint8_t(TypeCode::Limit)
+     };
+ 
+     ExprType() : tc_() {}
+ 
+     ExprType(const ExprType& that) : tc_(that.tc_) {}
+ 
+     MOZ_IMPLICIT ExprType(Code c)
+@@ -366,23 +346,16 @@ class ValType
+ 
+ #ifdef DEBUG
+     bool isValidCode() {
+         switch (UnpackTypeCodeType(tc_)) {
+           case TypeCode::I32:
+           case TypeCode::I64:
+           case TypeCode::F32:
+           case TypeCode::F64:
+-          case TypeCode::I8x16:
+-          case TypeCode::I16x8:
+-          case TypeCode::I32x4:
+-          case TypeCode::F32x4:
+-          case TypeCode::B8x16:
+-          case TypeCode::B16x8:
+-          case TypeCode::B32x4:
+           case TypeCode::AnyRef:
+           case TypeCode::Ref:
+             return true;
+           default:
+             return false;
+         }
+     }
+ #endif
+@@ -391,24 +364,16 @@ class ValType
+     enum Code {
+         I32    = uint8_t(TypeCode::I32),
+         I64    = uint8_t(TypeCode::I64),
+         F32    = uint8_t(TypeCode::F32),
+         F64    = uint8_t(TypeCode::F64),
+ 
+         AnyRef = uint8_t(TypeCode::AnyRef),
+         Ref    = uint8_t(TypeCode::Ref),
+-
+-        I8x16  = uint8_t(TypeCode::I8x16),
+-        I16x8  = uint8_t(TypeCode::I16x8),
+-        I32x4  = uint8_t(TypeCode::I32x4),
+-        F32x4  = uint8_t(TypeCode::F32x4),
+-        B8x16  = uint8_t(TypeCode::B8x16),
+-        B16x8  = uint8_t(TypeCode::B16x8),
+-        B32x4  = uint8_t(TypeCode::B32x4)
+     };
+ 
+     ValType() : tc_(InvalidPackedTypeCode()) {}
+ 
+     MOZ_IMPLICIT ValType(Code c)
+       : tc_(PackTypeCode(TypeCode(c)))
+     {
+         MOZ_ASSERT(isValidCode());
+@@ -492,131 +457,33 @@ SizeOf(ValType vt)
+ {
+     switch (vt.code()) {
+       case ValType::I32:
+       case ValType::F32:
+         return 4;
+       case ValType::I64:
+       case ValType::F64:
+         return 8;
+-      case ValType::I8x16:
+-      case ValType::I16x8:
+-      case ValType::I32x4:
+-      case ValType::F32x4:
+-      case ValType::B8x16:
+-      case ValType::B16x8:
+-      case ValType::B32x4:
+-        return 16;
+       case ValType::AnyRef:
+       case ValType::Ref:
+         return sizeof(intptr_t);
+     }
+     MOZ_CRASH("Invalid ValType");
+ }
+ 
+-static inline bool
+-IsSimdType(ValType vt)
+-{
+-    switch (vt.code()) {
+-      case ValType::I8x16:
+-      case ValType::I16x8:
+-      case ValType::I32x4:
+-      case ValType::F32x4:
+-      case ValType::B8x16:
+-      case ValType::B16x8:
+-      case ValType::B32x4:
+-        return true;
+-      default:
+-        return false;
+-    }
+-}
+-
+-static inline uint32_t
+-NumSimdElements(ValType vt)
+-{
+-    MOZ_ASSERT(IsSimdType(vt));
+-    switch (vt.code()) {
+-      case ValType::I8x16:
+-      case ValType::B8x16:
+-        return 16;
+-      case ValType::I16x8:
+-      case ValType::B16x8:
+-        return 8;
+-      case ValType::I32x4:
+-      case ValType::F32x4:
+-      case ValType::B32x4:
+-        return 4;
+-     default:
+-        MOZ_CRASH("Unhandled SIMD type");
+-    }
+-}
+-
+-static inline ValType
+-SimdElementType(ValType vt)
+-{
+-    MOZ_ASSERT(IsSimdType(vt));
+-    switch (vt.code()) {
+-      case ValType::I8x16:
+-      case ValType::I16x8:
+-      case ValType::I32x4:
+-        return ValType::I32;
+-      case ValType::F32x4:
+-        return ValType::F32;
+-      case ValType::B8x16:
+-      case ValType::B16x8:
+-      case ValType::B32x4:
+-        return ValType::I32;
+-     default:
+-        MOZ_CRASH("Unhandled SIMD type");
+-    }
+-}
+-
+-static inline ValType
+-SimdBoolType(ValType vt)
+-{
+-    MOZ_ASSERT(IsSimdType(vt));
+-    switch (vt.code()) {
+-      case ValType::I8x16:
+-      case ValType::B8x16:
+-        return ValType::B8x16;
+-      case ValType::I16x8:
+-      case ValType::B16x8:
+-        return ValType::B16x8;
+-      case ValType::I32x4:
+-      case ValType::F32x4:
+-      case ValType::B32x4:
+-        return ValType::B32x4;
+-     default:
+-        MOZ_CRASH("Unhandled SIMD type");
+-    }
+-}
+-
+-static inline bool
+-IsSimdBoolType(ValType vt)
+-{
+-    return vt == ValType::B8x16 || vt == ValType::B16x8 || vt == ValType::B32x4;
+-}
+-
+ static inline jit::MIRType
+ ToMIRType(ValType vt)
+ {
+     switch (vt.code()) {
+       case ValType::I32:    return jit::MIRType::Int32;
+       case ValType::I64:    return jit::MIRType::Int64;
+       case ValType::F32:    return jit::MIRType::Float32;
+       case ValType::F64:    return jit::MIRType::Double;
+       case ValType::Ref:    return jit::MIRType::Pointer;
+       case ValType::AnyRef: return jit::MIRType::Pointer;
+-      case ValType::I8x16:  return jit::MIRType::Int8x16;
+-      case ValType::I16x8:  return jit::MIRType::Int16x8;
+-      case ValType::I32x4:  return jit::MIRType::Int32x4;
+-      case ValType::F32x4:  return jit::MIRType::Float32x4;
+-      case ValType::B8x16:  return jit::MIRType::Bool8x16;
+-      case ValType::B16x8:  return jit::MIRType::Bool16x8;
+-      case ValType::B32x4:  return jit::MIRType::Bool32x4;
+     }
+     MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
+ }
+ 
+ static inline bool
+ IsNumberType(ValType vt)
+ {
+     return !vt.isRefOrAnyRef();
+@@ -637,22 +504,16 @@ IsVoid(ExprType et)
+ 
+ static inline ValType
+ NonVoidToValType(ExprType et)
+ {
+     MOZ_ASSERT(!IsVoid(et));
+     return ValType(et);
+ }
+ 
+-static inline bool
+-IsSimdType(ExprType et)
+-{
+-    return IsVoid(et) ? false : IsSimdType(ValType(et));
+-}
+-
+ static inline jit::MIRType
+ ToMIRType(ExprType et)
+ {
+     return IsVoid(et) ? jit::MIRType::None : ToMIRType(ValType(et));
+ }
+ 
+ static inline const char*
+ ToCString(ExprType type)
+@@ -660,23 +521,16 @@ ToCString(ExprType type)
+     switch (type.code()) {
+       case ExprType::Void:    return "void";
+       case ExprType::I32:     return "i32";
+       case ExprType::I64:     return "i64";
+       case ExprType::F32:     return "f32";
+       case ExprType::F64:     return "f64";
+       case ExprType::AnyRef:  return "anyref";
+       case ExprType::Ref:     return "ref";
+-      case ExprType::I8x16:   return "i8x16";
+-      case ExprType::I16x8:   return "i16x8";
+-      case ExprType::I32x4:   return "i32x4";
+-      case ExprType::F32x4:   return "f32x4";
+-      case ExprType::B8x16:   return "b8x16";
+-      case ExprType::B16x8:   return "b16x8";
+-      case ExprType::B32x4:   return "b32x4";
+       case ExprType::Limit:;
+     }
+     MOZ_CRASH("bad expression type");
+ }
+ 
+ static inline const char*
+ ToCString(ValType type)
+ {
+@@ -777,20 +631,16 @@ class LitVal
+ {
+   protected:
+     ValType type_;
+     union U {
+         uint32_t  i32_;
+         uint64_t  i64_;
+         float     f32_;
+         double    f64_;
+-        I8x16     i8x16_;
+-        I16x8     i16x8_;
+-        I32x4     i32x4_;
+-        F32x4     f32x4_;
+         JSObject* ptr_;
+     } u;
+ 
+   public:
+     LitVal() : type_(), u{} {}
+ 
+     explicit LitVal(uint32_t i32) : type_(ValType::I32) { u.i32_ = i32; }
+     explicit LitVal(uint64_t i64) : type_(ValType::I64) { u.i64_ = i64; }
+@@ -799,60 +649,24 @@ class LitVal
+     explicit LitVal(double f64) : type_(ValType::F64) { u.f64_ = f64; }
+ 
+     explicit LitVal(ValType refType, JSObject* ptr) : type_(refType) {
+         MOZ_ASSERT(refType.isRefOrAnyRef());
+         MOZ_ASSERT(ptr == nullptr, "use Val for non-nullptr ref types to get tracing");
+         u.ptr_ = ptr;
+     }
+ 
+-    explicit LitVal(const I8x16& i8x16, ValType type = ValType::I8x16) : type_(type) {
+-        MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
+-        memcpy(u.i8x16_, i8x16, sizeof(u.i8x16_));
+-    }
+-    explicit LitVal(const I16x8& i16x8, ValType type = ValType::I16x8) : type_(type) {
+-        MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
+-        memcpy(u.i16x8_, i16x8, sizeof(u.i16x8_));
+-    }
+-    explicit LitVal(const I32x4& i32x4, ValType type = ValType::I32x4) : type_(type) {
+-        MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
+-        memcpy(u.i32x4_, i32x4, sizeof(u.i32x4_));
+-    }
+-    explicit LitVal(const F32x4& f32x4) : type_(ValType::F32x4) {
+-        memcpy(u.f32x4_, f32x4, sizeof(u.f32x4_));
+-    }
+-
+     ValType type() const { return type_; }
+-    bool isSimd() const { return IsSimdType(type()); }
+     static constexpr size_t sizeofLargestValue() { return sizeof(u); }
+ 
+     uint32_t i32() const { MOZ_ASSERT(type_ == ValType::I32); return u.i32_; }
+     uint64_t i64() const { MOZ_ASSERT(type_ == ValType::I64); return u.i64_; }
+     const float& f32() const { MOZ_ASSERT(type_ == ValType::F32); return u.f32_; }
+     const double& f64() const { MOZ_ASSERT(type_ == ValType::F64); return u.f64_; }
+     JSObject* ptr() const { MOZ_ASSERT(type_.isRefOrAnyRef()); return u.ptr_; }
+-
+-    const I8x16& i8x16() const {
+-        MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
+-        return u.i8x16_;
+-    }
+-    const I16x8& i16x8() const {
+-        MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
+-        return u.i16x8_;
+-    }
+-    const I32x4& i32x4() const {
+-        MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
+-        return u.i32x4_;
+-    }
+-    const F32x4& f32x4() const {
+-        MOZ_ASSERT(type_ == ValType::F32x4);
+-        return u.f32x4_;
+-    }
+-    // To be used only by Val.
+-    const void* rawSimd() const { return &u.i32x4_; }
+ };
+ 
+ typedef Vector<LitVal, 0, SystemAllocPolicy> LitValVector;
+ 
+ // A Val is a LitVal that can contain pointers to JSObjects, thanks to their
+ // trace implementation. Since a Val is able to store a pointer to a JSObject,
+ // it needs to be traced during compilation in case the pointee is moved.
+ // The classic shorthands for Rooted things are defined after this class, for
+@@ -1475,30 +1289,26 @@ enum class Trap
+     // The Unreachable opcode has been executed.
+     Unreachable,
+     // An integer arithmetic operation led to an overflow.
+     IntegerOverflow,
+     // Trying to coerce NaN to an integer.
+     InvalidConversionToInteger,
+     // Integer division by zero.
+     IntegerDivideByZero,
+-    // Out of bounds on wasm memory accesses and asm.js SIMD/atomic accesses.
++    // Out of bounds on wasm memory accesses and asm.js atomic accesses.
+     OutOfBounds,
+     // Unaligned on wasm atomic accesses; also used for non-standard ARM
+     // unaligned access faults.
+     UnalignedAccess,
+     // call_indirect to null.
+     IndirectCallToNull,
+     // call_indirect signature mismatch.
+     IndirectCallBadSig,
+ 
+-    // (asm.js only) SIMD float to int conversion failed because the input
+-    // wasn't in bounds.
+-    ImpreciseSimdConversion,
+-
+     // The internal stack space was exhausted. For compatibility, this throws
+     // the same over-recursed error as JS.
+     StackOverflow,
+ 
+     // The wasm execution has potentially run too long and the engine must call
+     // CheckForInterrupt(). This trap is resumable.
+     CheckInterrupt,
+ 
+@@ -1631,17 +1441,17 @@ class CodeRange
+         InterpEntry,       // calls into wasm from C++
+         JitEntry,          // calls into wasm from jit code
+         ImportInterpExit,  // slow-path calling from wasm into C++ interp
+         ImportJitExit,     // fast-path calling from wasm into jit code
+         BuiltinThunk,      // fast-path calling from wasm into a C++ native
+         TrapExit,          // calls C++ to report and jumps to throw stub
+         DebugTrap,         // calls C++ to handle debug event
+         FarJumpIsland,     // inserted to connect otherwise out-of-range insns
+-        OutOfBoundsExit,   // stub jumped to by non-standard asm.js SIMD/Atomics
++        OutOfBoundsExit,   // stub jumped to by non-standard asm.js Atomics
+         UnalignedExit,     // stub jumped to by wasm Atomics and non-standard
+                            // ARM unaligned trap
+         Throw              // special stack-unwinding stub jumped to by other stubs
+     };
+ 
+   private:
+     // All fields are treated as cacheable POD:
+     uint32_t begin_;
+diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
+--- a/js/src/wasm/WasmValidate.cpp
++++ b/js/src/wasm/WasmValidate.cpp
+@@ -404,27 +404,16 @@ DecodeValType(Decoder& d, ModuleKind kin
+         if (gcTypesEnabled == HasGcTypes::False)
+             break;
+         if (uncheckedRefTypeIndex >= numTypes)
+             return d.fail("ref index out of range");
+         // We further validate ref types in the caller.
+         *type = ValType(ValType::Code(uncheckedCode), uncheckedRefTypeIndex);
+         return true;
+       }
+-      case uint8_t(ValType::I8x16):
+-      case uint8_t(ValType::I16x8):
+-      case uint8_t(ValType::I32x4):
+-      case uint8_t(ValType::F32x4):
+-      case uint8_t(ValType::B8x16):
+-      case uint8_t(ValType::B16x8):
+-      case uint8_t(ValType::B32x4):
+-        if (kind != ModuleKind::AsmJS)
+-            return d.fail("bad type");
+-        *type = ValType(ValType::Code(uncheckedCode));
+-        return true;
+       default:
+         break;
+     }
+     return d.fail("bad type");
+ }
+ 
+ static bool
+ ValidateRefType(Decoder& d, const TypeDefVector& types, ValType type)
+diff --git a/js/src/wasm/WasmValidate.h b/js/src/wasm/WasmValidate.h
+--- a/js/src/wasm/WasmValidate.h
++++ b/js/src/wasm/WasmValidate.h
+@@ -232,28 +232,16 @@ class Encoder
+         return write<uint32_t>(i);
+     }
+     MOZ_MUST_USE bool writeFixedF32(float f) {
+         return write<float>(f);
+     }
+     MOZ_MUST_USE bool writeFixedF64(double d) {
+         return write<double>(d);
+     }
+-    MOZ_MUST_USE bool writeFixedI8x16(const I8x16& i8x16) {
+-        return write<I8x16>(i8x16);
+-    }
+-    MOZ_MUST_USE bool writeFixedI16x8(const I16x8& i16x8) {
+-        return write<I16x8>(i16x8);
+-    }
+-    MOZ_MUST_USE bool writeFixedI32x4(const I32x4& i32x4) {
+-        return write<I32x4>(i32x4);
+-    }
+-    MOZ_MUST_USE bool writeFixedF32x4(const F32x4& f32x4) {
+-        return write<F32x4>(f32x4);
+-    }
+ 
+     // Variable-length encodings that all use LEB128.
+ 
+     MOZ_MUST_USE bool writeVarU32(uint32_t i) {
+         return writeVarU<uint32_t>(i);
+     }
+     MOZ_MUST_USE bool writeVarS32(int32_t i) {
+         return writeVarS<int32_t>(i);
+@@ -520,28 +508,16 @@ class Decoder
+         return read<uint32_t>(u);
+     }
+     MOZ_MUST_USE bool readFixedF32(float* f) {
+         return read<float>(f);
+     }
+     MOZ_MUST_USE bool readFixedF64(double* d) {
+         return read<double>(d);
+     }
+-    MOZ_MUST_USE bool readFixedI8x16(I8x16* i8x16) {
+-        return read<I8x16>(i8x16);
+-    }
+-    MOZ_MUST_USE bool readFixedI16x8(I16x8* i16x8) {
+-        return read<I16x8>(i16x8);
+-    }
+-    MOZ_MUST_USE bool readFixedI32x4(I32x4* i32x4) {
+-        return read<I32x4>(i32x4);
+-    }
+-    MOZ_MUST_USE bool readFixedF32x4(F32x4* f32x4) {
+-        return read<F32x4>(f32x4);
+-    }
+ 
+     // Variable-length encodings that all use LEB128.
+ 
+     MOZ_MUST_USE bool readVarU32(uint32_t* out) {
+         return readVarU<uint32_t>(out);
+     }
+     MOZ_MUST_USE bool readVarS32(int32_t* out) {
+         return readVarS<int32_t>(out);
+@@ -694,36 +670,16 @@ class Decoder
+     }
+     Op uncheckedReadOp() {
+         static_assert(size_t(Op::Limit) == 256, "fits");
+         uint8_t u8 = uncheckedReadFixedU8();
+         return u8 != UINT8_MAX
+                ? Op(u8)
+                : Op(uncheckedReadFixedU8() + UINT8_MAX);
+     }
+-    void uncheckedReadFixedI8x16(I8x16* i8x16) {
+-        struct T { I8x16 v; };
+-        T t = uncheckedRead<T>();
+-        memcpy(i8x16, &t, sizeof(t));
+-    }
+-    void uncheckedReadFixedI16x8(I16x8* i16x8) {
+-        struct T { I16x8 v; };
+-        T t = uncheckedRead<T>();
+-        memcpy(i16x8, &t, sizeof(t));
+-    }
+-    void uncheckedReadFixedI32x4(I32x4* i32x4) {
+-        struct T { I32x4 v; };
+-        T t = uncheckedRead<T>();
+-        memcpy(i32x4, &t, sizeof(t));
+-    }
+-    void uncheckedReadFixedF32x4(F32x4* f32x4) {
+-        struct T { F32x4 v; };
+-        T t = uncheckedRead<T>();
+-        memcpy(f32x4, &t, sizeof(t));
+-    }
+ };
+ 
+ // The local entries are part of function bodies and thus serialized by both
+ // wasm and asm.js and decoded as part of both validation and compilation.
+ 
+ MOZ_MUST_USE bool
+ EncodeLocalEntries(Encoder& d, const ValTypeVector& locals);
+ 
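
Note on the large SIMD-removal patch excerpted above: it is largely mechanical, deleting
the seven SIMD cases (I8x16 .. B32x4) from every switch over ValType/ExprType. Those
switches are exhaustive, with no default clause and a trailing MOZ_CRASH for invalid
codes, so the compiler itself flags each site that still names a removed enumerator. A
minimal standalone sketch of the pattern (simplified enum, not the actual SpiderMonkey
definitions):

#include <cstddef>
#include <cstdint>
#include <cstdlib>

enum class ValType : uint8_t { I32, I64, F32, F64, AnyRef, Ref };

// Exhaustive switch, no default: deleting an enumerator (as the patch does for
// the SIMD types) turns every stale `case` into a compile error, and -Wswitch
// flags any value left unhandled.
static size_t SizeOf(ValType vt)
{
    switch (vt) {
      case ValType::I32:
      case ValType::F32:
        return 4;
      case ValType::I64:
      case ValType::F64:
        return 8;
      case ValType::AnyRef:
      case ValType::Ref:
        return sizeof(intptr_t);
    }
    abort(); // stands in for MOZ_CRASH("Invalid ValType") on out-of-range codes
}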

+ 11 - 9
frg/work-js/mozilla-release/patches/1447591-1-63a1.patch

@@ -3,13 +3,13 @@
 # Date 1529510179 -7200
 #      Wed Jun 20 17:56:19 2018 +0200
 # Node ID 38d2f921a918b7fd0269e269f48030798044438d
-# Parent  63b21854d8006576e83847cff297cc975ecb8b71
+# Parent  0850024c73c1b64fd4637267a1605c00987da688
 Bug 1447591: Stub out a few Debugger APIs for wasm; r=yury
 
 diff --git a/devtools/server/actors/source.js b/devtools/server/actors/source.js
 --- a/devtools/server/actors/source.js
 +++ b/devtools/server/actors/source.js
-@@ -787,18 +787,20 @@ const SourceActor = ActorClassWithSpec(s
+@@ -759,18 +759,20 @@ let SourceActor = ActorClassWithSpec(sou
    * @returns A Promise that resolves to the given BreakpointActor.
    */
   _setBreakpoint: function(actor, noSliding) {
@@ -1787,7 +1787,7 @@ diff --git a/js/src/wasm/WasmDebug.h b/js/src/wasm/WasmDebug.h
 diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp
 --- a/js/src/wasm/WasmTextToBinary.cpp
 +++ b/js/src/wasm/WasmTextToBinary.cpp
-@@ -5539,35 +5539,39 @@ EncodeTableSection(Encoder& e, AstModule
+@@ -5678,37 +5678,41 @@ EncodeTableSection(Encoder& e, AstModule
              return false;
      }
  
@@ -1806,8 +1806,10 @@ diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp
     size_t beforeBody = e.currentOffset();
 
     ValTypeVector varTypes;
-     if (!varTypes.appendAll(func.vars()))
-         return false;
+     for (const AstValType& vt : func.vars()) {
+         if (!varTypes.append(vt.type()))
+             return false;
+     }
     if (!EncodeLocalEntries(e, varTypes))
         return false;
 
@@ -1828,7 +1830,7 @@ diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp
  }
  
  static bool
-@@ -5583,30 +5587,30 @@ EncodeStartSection(Encoder& e, AstModule
+@@ -5724,30 +5728,30 @@ EncodeStartSection(Encoder& e, AstModule
     if (!e.writeVarU32(module.startFunc().func().index()))
         return false;
 
@@ -1861,7 +1863,7 @@ diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp
  }
  
  static bool
-@@ -5703,17 +5707,17 @@ EncodeElemSection(Encoder& e, AstModule&
+@@ -5844,17 +5848,17 @@ EncodeElemSection(Encoder& e, AstModule&
             return false;
     }
 
@@ -1880,7 +1882,7 @@ diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp
 
     if (!e.writeFixedU32(EncodingVersion))
         return false;
-@@ -5740,17 +5744,17 @@ EncodeModule(AstModule& module, Bytes* b
+@@ -5881,17 +5885,17 @@ EncodeModule(AstModule& module, Bytes* b
         return false;
 
     if (!EncodeStartSection(e, module))
@@ -1899,7 +1901,7 @@ diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp
     return true;
 }
 
-@@ -5774,25 +5778,26 @@ EncodeBinaryModule(const AstModule& modu
+@@ -5915,25 +5919,26 @@ EncodeBinaryModule(const AstModule& modu
     }
 
     return true;
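
The interesting hunk here is the local-variable encoding: a bulk appendAll() no longer
typechecks once AstFunc::vars() yields AstValType wrappers instead of raw ValTypes (see
the follow-up patch below), so each entry is unwrapped explicitly before it reaches
EncodeLocalEntries. The shape of the change, using simplified stand-in types rather
than the real AstValType/ValTypeVector:

#include <cstdint>
#include <vector>

struct ValType { uint8_t code; };

// Stand-in for AstValType: an AST-side wrapper that may carry extra state (such
// as an unresolved type reference) and exposes the core type via type().
struct AstValType {
    ValType resolved;
    ValType type() const { return resolved; }
};

static bool EncodeLocalTypes(const std::vector<AstValType>& vars,
                             std::vector<ValType>* varTypes)
{
    // Element types differ, so unwrap one entry at a time. The real code's
    // append() is fallible (OOM), hence the bool return kept here.
    for (const AstValType& vt : vars)
        varTypes->push_back(vt.type());
    return true;
}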

+ 6 - 5
frg/work-js/mozilla-release/patches/1447591-2-63a1.patch

@@ -3,7 +3,7 @@
 # Date 1529574587 -7200
 #      Thu Jun 21 11:49:47 2018 +0200
 # Node ID e7a694ff10044ca9d52f3ed05f61b93cae1a4620
-# Parent  31d0d765dee3100991dbce7653615474b5a31411
+# Parent  b5623e601b8df0615204c1a5c2d6555bfebb3de5
 Bug 1447591: Remove wasm::BinaryToText; r=luke
 
 diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp
@@ -606,7 +606,7 @@ diff --git a/js/src/moz.build b/js/src/moz.build
 diff --git a/js/src/wasm/WasmAST.h b/js/src/wasm/WasmAST.h
 --- a/js/src/wasm/WasmAST.h
 +++ b/js/src/wasm/WasmAST.h
-@@ -121,20 +121,27 @@ typedef AstVector<AstRef> AstRefVector;
+@@ -284,20 +284,27 @@ typedef AstVector<AstRef> AstRefVector;
 
 struct AstBase
 {
@@ -635,7 +635,7 @@ diff --git a/js/src/wasm/WasmAST.h b/js/src/wasm/WasmAST.h
     Which which_;
 
   public:
-@@ -253,29 +260,16 @@ AstTypeDef::asFuncType() const
+@@ -441,29 +448,16 @@ AstTypeDef::asFuncType() const
 
 inline const AstStructType&
 AstTypeDef::asStructType() const
@@ -665,7 +665,7 @@ diff --git a/js/src/wasm/WasmAST.h b/js/src/wasm/WasmAST.h
     AtomicStore,
     BinaryOperator,
     Block,
-@@ -857,35 +851,31 @@ class AstBranchTable : public AstExpr
+@@ -1045,36 +1039,32 @@ class AstBranchTable : public AstExpr
 
 class AstFunc : public AstNode
 {
@@ -689,6 +689,7 @@ diff --git a/js/src/wasm/WasmAST.h b/js/src/wasm/WasmAST.h
     {}
     AstRef& funcType() { return funcType_; }
     const AstValTypeVector& vars() const { return vars_; }
+     AstValTypeVector& vars() { return vars_; }
     const AstNameVector& locals() const { return localNames_; }
     const AstExprVector& body() const { return body_; }
     AstName name() const { return name_; }
@@ -700,7 +701,7 @@ diff --git a/js/src/wasm/WasmAST.h b/js/src/wasm/WasmAST.h
 {
     AstName name_;
     bool isMutable_;
-     ValType type_;
+     AstValType type_;
     Maybe<AstExpr*> init_;
 diff --git a/js/src/wasm/WasmBinaryToAST.cpp b/js/src/wasm/WasmBinaryToAST.cpp
 deleted file mode 100644
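
The one-line addition to AstFunc is the usual const/non-const accessor pair: the const
overload keeps read-only callers (validation, encoding) cheap, while the new mutable
overload lets a later pass rewrite the locals in place, presumably to resolve the new
AstValType entries, instead of copying the vector. A minimal sketch with a simplified
element type:

#include <vector>

class AstFunc {
    std::vector<int> vars_;  // stand-in for AstValTypeVector

  public:
    // Read-only view for encoders and validators.
    const std::vector<int>& vars() const { return vars_; }

    // Mutable view: callers holding a non-const AstFunc can patch entries
    // in place without copying.
    std::vector<int>& vars() { return vars_; }
};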

+ 166 - 0
frg/work-js/mozilla-release/patches/1450261-1-62a1.patch

@@ -0,0 +1,166 @@
+# HG changeset patch
+# User Benjamin Bouvier <benj@benj.me>
+# Date 1529584347 -7200
+# Node ID d5a1643bd46e0e9da278a0ba7aeb2457ac59878e
+# Parent  c451084c1e34d82d1f6529171f8be7dfac8b4743
+Bug 1450261: Make To{WebAssembly,JS}Value private and inline GetGlobalExport; r=lth
+
+diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
+--- a/js/src/wasm/WasmJS.cpp
++++ b/js/src/wasm/WasmJS.cpp
+@@ -104,18 +104,18 @@ HasAvailableCompilerTier(JSContext* cx)
+ bool
+ wasm::HasSupport(JSContext* cx)
+ {
+     return cx->options().wasm() &&
+            HasCompilerSupport(cx) &&
+            HasAvailableCompilerTier(cx);
+ }
+ 
+-bool
+-wasm::ToWebAssemblyValue(JSContext* cx, ValType targetType, HandleValue v, Val* val)
++static bool
++ToWebAssemblyValue(JSContext* cx, ValType targetType, HandleValue v, Val* val)
+ {
+     switch (targetType.code()) {
+       case ValType::I32: {
+         int32_t i32;
+         if (!ToInt32(cx, v, &i32))
+             return false;
+         *val = Val(uint32_t(i32));
+         return true;
+@@ -135,18 +135,18 @@ wasm::ToWebAssemblyValue(JSContext* cx, 
+         return true;
+       }
+       default: {
+         MOZ_CRASH("unexpected import value type, caller must guard");
+       }
+     }
+ }
+ 
+-Value
+-wasm::ToJSValue(const Val& val)
++static Value
++ToJSValue(const Val& val)
+ {
+     switch (val.type().code()) {
+       case ValType::I32:
+         return Int32Value(val.i32());
+       case ValType::F32:
+         return DoubleValue(JS::CanonicalizeNaN(double(val.f32())));
+       case ValType::F64:
+         return DoubleValue(JS::CanonicalizeNaN(val.f64()));
+diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h
+--- a/js/src/wasm/WasmJS.h
++++ b/js/src/wasm/WasmJS.h
+@@ -39,25 +39,16 @@ namespace wasm {
+ bool
+ HasCompilerSupport(JSContext* cx);
+ 
+ // Return whether WebAssembly is enabled on this platform.
+ 
+ bool
+ HasSupport(JSContext* cx);
+ 
+-// ToWebAssemblyValue and ToJSValue are conversion functions defined in
+-// the Wasm JS API spec.
+-
+-bool
+-ToWebAssemblyValue(JSContext* cx, ValType targetType, HandleValue v, Val* val);
+-
+-Value
+-ToJSValue(const Val& val);
+-
+ // Compiles the given binary wasm module given the ArrayBufferObject
+ // and links the module's imports with the given import object.
+ 
+ MOZ_MUST_USE bool
+ Eval(JSContext* cx, Handle<TypedArrayObject*> code, HandleObject importObj,
+      MutableHandleWasmInstanceObject instanceObj);
+ 
+ // These accessors can be used to probe JS values for being an exported wasm
+diff --git a/js/src/wasm/WasmModule.cpp b/js/src/wasm/WasmModule.cpp
+--- a/js/src/wasm/WasmModule.cpp
++++ b/js/src/wasm/WasmModule.cpp
+@@ -1115,34 +1115,21 @@ GetFunctionExport(JSContext* cx,
+     if (!instanceObj->getExportedFunction(cx, instanceObj, exp.funcIndex(), &fun))
+         return false;
+ 
+     val.setObject(*fun);
+     return true;
+ }
+ 
+ static bool
+-GetGlobalExport(JSContext* cx,
+-                const GlobalDescVector& globals,
+-                uint32_t globalIndex,
+-                const ValVector& globalImportValues,
+-                const WasmGlobalObjectVector& globalObjs,
+-                MutableHandleValue jsval)
+-{
+-    jsval.setObject(*globalObjs[globalIndex]);
+-    return true;
+-}
+-
+-static bool
+ CreateExportObject(JSContext* cx,
+                    HandleWasmInstanceObject instanceObj,
+                    Handle<FunctionVector> funcImports,
+                    HandleWasmTableObject tableObj,
+                    HandleWasmMemoryObject memoryObj,
+-                   const ValVector& globalImportValues,
+                    const WasmGlobalObjectVector& globalObjs,
+                    const ExportVector& exports)
+ {
+     const Instance& instance = instanceObj->instance();
+     const Metadata& metadata = instance.metadata();
+ 
+     if (metadata.isAsmJS() && exports.length() == 1 && strlen(exports[0].fieldName()) == 0) {
+         RootedValue val(cx);
+@@ -1174,21 +1161,17 @@ CreateExportObject(JSContext* cx,
+             break;
+           case DefinitionKind::Table:
+             val = ObjectValue(*tableObj);
+             break;
+           case DefinitionKind::Memory:
+             val = ObjectValue(*memoryObj);
+             break;
+           case DefinitionKind::Global:
+-            if (!GetGlobalExport(cx, metadata.globals, exp.globalIndex(), globalImportValues,
+-                                 globalObjs, &val))
+-            {
+-                return false;
+-            }
++            val.setObject(*globalObjs[exp.globalIndex()]);
+             break;
+         }
+ 
+         if (!JS_DefinePropertyById(cx, exportObj, id, val, JSPROP_ENUMERATE))
+             return false;
+     }
+ 
+     if (!metadata.isAsmJS()) {
+@@ -1301,21 +1284,18 @@ Module::instantiate(JSContext* cx,
+                                             funcImports,
+                                             metadata().globals,
+                                             globalImportValues,
+                                             globalObjs,
+                                             instanceProto));
+     if (!instance)
+         return false;
+ 
+-    if (!CreateExportObject(cx, instance, funcImports, table, memory, globalImportValues,
+-                            globalObjs, exports_))
+-    {
++    if (!CreateExportObject(cx, instance, funcImports, table, memory, globalObjs, exports_))
+         return false;
+-    }
+ 
+     // Register the instance with the Realm so that it can find out about global
+     // events like profiling being enabled in the realm. Registration does not
+     // require a fully-initialized instance and must precede initSegments as the
+     // final pre-requisite for a live instance.
+ 
+     if (!cx->realm()->wasm.registerInstance(cx, instance))
+         return false;
+
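
Patch 1450261-1 above is a visibility cleanup: ToWebAssemblyValue and ToJSValue lose
their wasm:: declarations in WasmJS.h and become file-static in WasmJS.cpp, and the
trivial GetGlobalExport helper is folded into its only caller, dropping the unused
globalImportValues argument along the way. Internal linkage keeps the conversion
routines out of the header so no other translation unit can grow a dependency on them;
the general shape (illustrative names and signatures, not the real API):

// WasmJS.cpp (sketch)

// `static` gives these helpers internal linkage: they are invisible outside
// this translation unit, and the header no longer declares them.
static bool ToWebAssemblyValueSketch(double in, int* out)
{
    *out = int(in);
    return true;
}

static double ToJSValueSketch(int in)
{
    return double(in);
}

// Public entry points keep calling the helpers exactly as before.
bool SetGlobalFromJS(double v, int* slot)
{
    return ToWebAssemblyValueSketch(v, slot);
}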

+ 1564 - 0
frg/work-js/mozilla-release/patches/1450261-2-63a1.patch

@@ -0,0 +1,1564 @@
+# HG changeset patch
+# User Benjamin Bouvier <benj@benj.me>
+# Date 1522158013 -7200
+# Node ID 2ffe4a57033c1a2bba3ad967109101f83ce89022
+# Parent  6171649fb3a8027731ce1f9f5b0ba7d74b893f44
+Bug 1450261: Implement support of anyref in wasm globals; r=lth
+
+diff --git a/js/src/gc/Nursery.h b/js/src/gc/Nursery.h
+--- a/js/src/gc/Nursery.h
++++ b/js/src/gc/Nursery.h
+@@ -49,18 +49,16 @@ class PlainObject;
+ class NativeObject;
+ class Nursery;
+ struct NurseryChunk;
+ class HeapSlot;
+ class JSONPrinter;
+ class MapObject;
+ class SetObject;
+ 
+-void SetGCZeal(JSRuntime*, uint8_t, uint32_t);
+-
+ namespace gc {
+ class AutoMaybeStartBackgroundAllocation;
+ class AutoTraceSession;
+ struct Cell;
+ class MinorCollectionTracer;
+ class RelocationOverlay;
+ struct TenureCountCache;
+ enum class AllocKind : uint8_t;
+diff --git a/js/src/jit-test/tests/wasm/gc/anyref-global-object.js b/js/src/jit-test/tests/wasm/gc/anyref-global-object.js
+new file mode 100644
+--- /dev/null
++++ b/js/src/jit-test/tests/wasm/gc/anyref-global-object.js
+@@ -0,0 +1,94 @@
++if (!wasmGcEnabled() || typeof WebAssembly.Global !== 'function') {
++    quit(0);
++}
++
++// Dummy object.
++function Baguette(calories) {
++    this.calories = calories;
++}
++
++assertEq(new WebAssembly.Global({value: "anyref"}) instanceof WebAssembly.Global, true);
++
++(function() {
++    // Test initialization without a value.
++    let g = new WebAssembly.Global({value: "anyref"});
++    assertEq(g.value, null);
++    assertErrorMessage(() => g.value = 42, TypeError, /immutable global/);
++})();
++
++(function() {
++    // Test initialization with a value.
++    let g = new WebAssembly.Global({value: "anyref"}, null);
++    assertEq(g.value, null);
++    assertErrorMessage(() => g.value = 42, TypeError, /immutable global/);
++
++    let obj = {};
++    g = new WebAssembly.Global({value: "anyref"}, obj);
++    assertEq(g.value, obj);
++    assertErrorMessage(() => g.value = 42, TypeError, /immutable global/);
++
++    g = new WebAssembly.Global({value: "anyref"}, 1337);
++    assertEq(g.value instanceof Number, true);
++    assertEq(+g.value, 1337);
++
++    g = new WebAssembly.Global({value: "anyref"}, 13.37);
++    assertEq(g.value instanceof Number, true);
++    assertEq(+g.value, 13.37);
++
++    g = new WebAssembly.Global({value: "anyref"}, "string");
++    assertEq(g.value instanceof String, true);
++    assertEq(g.value.toString(), "string");
++
++    g = new WebAssembly.Global({value: "anyref"}, true);
++    assertEq(g.value instanceof Boolean, true);
++    assertEq(!!g.value, true);
++
++    g = new WebAssembly.Global({value: "anyref"}, Symbol("status"));
++    assertEq(g.value instanceof Symbol, true);
++    assertEq(g.value.toString(), "Symbol(status)");
++
++    assertErrorMessage(() => new WebAssembly.Global({value: "anyref"}, undefined),
++                       TypeError,
++                       "can't convert undefined to object");
++})();
++
++(function() {
++    // Test mutable property and assignment.
++    let g = new WebAssembly.Global({value: "anyref", mutable: true}, null);
++    assertEq(g.value, null);
++
++    let obj = { x: 42 };
++    g.value = obj;
++    assertEq(g.value, obj);
++    assertEq(g.value.x, 42);
++
++    obj = null;
++    assertEq(g.value.x, 42);
++
++    let otherObj = { y : 35 };
++    g.value = otherObj;
++    assertEq(g.value, otherObj);
++})();
++
++(function() {
++    // Test tracing.
++    let nom = new Baguette(1);
++    let g = new WebAssembly.Global({value: "anyref"}, nom);
++    nom = null;
++    gc();
++    assertEq(g.value.calories, 1);
++})();
++
++var global = new WebAssembly.Global({ value: "anyref", mutable: true }, null);
++
++// GCZeal mode 2 implies that every allocation (second parameter = every single
++// allocation) will trigger a full GC.
++gczeal(2, 1);
++
++{
++    let nomnom = new Baguette(42);
++    global.value = nomnom;
++    nomnom = null;
++}
++new Baguette();
++assertEq(global.value.calories, 42);
+diff --git a/js/src/jit-test/tests/wasm/gc/anyref-global-postbarrier.js b/js/src/jit-test/tests/wasm/gc/anyref-global-postbarrier.js
+new file mode 100644
+--- /dev/null
++++ b/js/src/jit-test/tests/wasm/gc/anyref-global-postbarrier.js
+@@ -0,0 +1,72 @@
++if (!wasmGcEnabled()) {
++    quit(0);
++}
++
++const { startProfiling, endProfiling, assertEqPreciseStacks, isSingleStepProfilingEnabled } = WasmHelpers;
++
++// Dummy constructor.
++function Baguette(calories) {
++    this.calories = calories;
++}
++
++let exportsPlain = wasmEvalText(`(module
++    (global i32 (i32.const 42))
++    (global $g (mut anyref) (ref.null anyref))
++    (func (export "set") (param anyref) get_local 0 set_global $g)
++    (func (export "get") (result anyref) get_global $g)
++)`).exports;
++
++let exportsObj = wasmEvalText(`(module
++    (global $g (export "g") (mut anyref) (ref.null anyref))
++    (func (export "set") (param anyref) get_local 0 set_global $g)
++    (func (export "get") (result anyref) get_global $g)
++)`).exports;
++
++// 7 => Generational GC zeal.
++gczeal(7, 1);
++
++for (var i = 0; i < 100; i++) {
++    new Baguette(i);
++}
++
++function test(exports) {
++    // Test post-write barrier in wasm code.
++    {
++        let nomnom = new Baguette(15);
++        exports.set(nomnom);
++        nomnom = null;
++    }
++    new Baguette();
++    assertEq(exports.get().calories, 15);
++}
++
++test(exportsPlain);
++test(exportsObj);
++
++// Test stacks reported in profiling mode in a separate way, to not perturb
++// the behavior of the tested functions.
++if (!isSingleStepProfilingEnabled)
++    quit(0);
++
++enableGeckoProfiling();
++
++const EXPECTED_STACKS = [
++    ['', '!>', '0,!>', '<,0,!>', 'GC postbarrier,0,!>', '<,0,!>', '0,!>', '!>', ''],
++    ['', '!>', '0,!>', '!>', ''],
++];
++
++function testStacks(exports) {
++    // Test post-write barrier in wasm code.
++    {
++        let nomnom = new Baguette(15);
++        startProfiling();
++        exports.set(nomnom);
++        assertEqPreciseStacks(endProfiling(), EXPECTED_STACKS);
++        nomnom = null;
++    }
++    new Baguette();
++    assertEq(exports.get().calories, 15);
++}
++
++testStacks(exportsPlain);
++testStacks(exportsObj);
+diff --git a/js/src/jit-test/tests/wasm/gc/anyref-global-prebarrier.js b/js/src/jit-test/tests/wasm/gc/anyref-global-prebarrier.js
+new file mode 100644
+--- /dev/null
++++ b/js/src/jit-test/tests/wasm/gc/anyref-global-prebarrier.js
+@@ -0,0 +1,39 @@
++if (!wasmGcEnabled()) {
++    quit(0);
++}
++
++const { startProfiling, endProfiling, assertEqPreciseStacks, isSingleStepProfilingEnabled } = WasmHelpers;
++
++let e = wasmEvalText(`(module
++    (global $g (mut anyref) (ref.null anyref))
++    (func (export "set") (param anyref) get_local 0 set_global $g)
++)`).exports;
++
++let obj = { field: null };
++
++// GCZeal mode 4 implies that prebarriers are being verified at many
++// locations in the interpreter, during interrupt checks, etc. It can be ultra
++// slow, so disable it with gczeal(0) when it's not strictly needed.
++gczeal(4, 1);
++e.set(obj);
++e.set(null);
++gczeal(0);
++
++if (!isSingleStepProfilingEnabled) {
++    quit(0);
++}
++
++enableGeckoProfiling();
++startProfiling();
++gczeal(4, 1);
++e.set(obj);
++gczeal(0);
++assertEqPreciseStacks(endProfiling(), [['', '!>', '0,!>', '!>', '']]);
++
++startProfiling();
++gczeal(4, 1);
++e.set(null);
++gczeal(0);
++
++// We're losing stack info in the prebarrier code.
++assertEqPreciseStacks(endProfiling(), [['', '!>', '0,!>', '', '0,!>', '!>', '']]);
+diff --git a/js/src/jit-test/tests/wasm/gc/anyref.js b/js/src/jit-test/tests/wasm/gc/anyref.js
+--- a/js/src/jit-test/tests/wasm/gc/anyref.js
++++ b/js/src/jit-test/tests/wasm/gc/anyref.js
+@@ -44,16 +44,18 @@ let simpleTests = [
+     "(module (func (drop (ref.null anyref))))",
+     "(module (func $test (local anyref)))",
+     "(module (func $test (param anyref)))",
+     "(module (func $test (result anyref) (ref.null anyref)))",
+     "(module (func $test (block anyref (unreachable)) unreachable))",
+     "(module (func $test (local anyref) (result i32) (ref.is_null (get_local 0))))",
+     `(module (import "a" "b" (param anyref)))`,
+     `(module (import "a" "b" (result anyref)))`,
++    `(module (global anyref (ref.null anyref)))`,
++    `(module (global (mut anyref) (ref.null anyref)))`,
+ ];
+ 
+ for (let src of simpleTests) {
+     wasmEvalText(src, {a:{b(){}}});
+     assertEq(validate(wasmTextToBinary(src)), true);
+ }
+ 
+ // Basic behavioral tests.
+@@ -389,8 +391,83 @@ assertEq(exports.count_f(), 1);
+ assertEq(exports.count_g(), 1);
+ 
+ x = { i: 23 };
+ assertEq(exports.table.get(3)(x), x);
+ assertEq(x.i, 24);
+ assertEq(x.newProp, "hello");
+ assertEq(exports.count_f(), 1);
+ assertEq(exports.count_g(), 1);
++
++// Globals.
++
++// Anyref globals in wasm modules.
++
++assertErrorMessage(() => wasmEvalText(`(module (global (import "glob" "anyref") anyref))`, { glob: { anyref: 42 } }),
++    WebAssembly.LinkError,
++    /import object field 'anyref' is not a Object-or-null/);
++
++assertErrorMessage(() => wasmEvalText(`(module (global (import "glob" "anyref") anyref))`, { glob: { anyref: new WebAssembly.Global({ value: 'i32' }, 42) } }),
++    WebAssembly.LinkError,
++    /imported global type mismatch/);
++
++assertErrorMessage(() => wasmEvalText(`(module (global (import "glob" "i32") i32))`, { glob: { i32: {} } }),
++    WebAssembly.LinkError,
++    /import object field 'i32' is not a Number/);
++
++imports = {
++    constants: {
++        imm_null: null,
++        imm_bread: new Baguette(321),
++        mut_null: new WebAssembly.Global({ value: "anyref", mutable: true }, null),
++        mut_bread: new WebAssembly.Global({ value: "anyref", mutable: true }, new Baguette(123))
++    }
++};
++
++exports = wasmEvalText(`(module
++    (global $g_imp_imm_null  (import "constants" "imm_null") anyref)
++    (global $g_imp_imm_bread (import "constants" "imm_bread") anyref)
++
++    (global $g_imp_mut_null   (import "constants" "mut_null") (mut anyref))
++    (global $g_imp_mut_bread  (import "constants" "mut_bread") (mut anyref))
++
++    (global $g_imm_null     anyref (ref.null anyref))
++    (global $g_imm_getglob  anyref (get_global $g_imp_imm_bread))
++    (global $g_mut         (mut anyref) (ref.null anyref))
++
++    (func (export "imm_null")      (result anyref) get_global $g_imm_null)
++    (func (export "imm_getglob")   (result anyref) get_global $g_imm_getglob)
++
++    (func (export "imp_imm_null")  (result anyref) get_global $g_imp_imm_null)
++    (func (export "imp_imm_bread") (result anyref) get_global $g_imp_imm_bread)
++    (func (export "imp_mut_null")  (result anyref) get_global $g_imp_mut_null)
++    (func (export "imp_mut_bread") (result anyref) get_global $g_imp_mut_bread)
++
++    (func (export "set_imp_null")  (param anyref) get_local 0 set_global $g_imp_mut_null)
++    (func (export "set_imp_bread") (param anyref) get_local 0 set_global $g_imp_mut_bread)
++
++    (func (export "set_mut") (param anyref) get_local 0 set_global $g_mut)
++    (func (export "get_mut") (result anyref) get_global $g_mut)
++)`, imports).exports;
++
++assertEq(exports.imp_imm_null(), imports.constants.imm_null);
++assertEq(exports.imp_imm_bread(), imports.constants.imm_bread);
++
++assertEq(exports.imm_null(), null);
++assertEq(exports.imm_getglob(), imports.constants.imm_bread);
++
++assertEq(exports.imp_mut_null(), imports.constants.mut_null.value);
++assertEq(exports.imp_mut_bread(), imports.constants.mut_bread.value);
++
++let brandNewBaguette = new Baguette(1000);
++exports.set_imp_null(brandNewBaguette);
++assertEq(exports.imp_mut_null(), brandNewBaguette);
++assertEq(exports.imp_mut_bread(), imports.constants.mut_bread.value);
++
++exports.set_imp_bread(null);
++assertEq(exports.imp_mut_null(), brandNewBaguette);
++assertEq(exports.imp_mut_bread(), null);
++
++assertEq(exports.get_mut(), null);
++let glutenFreeBaguette = new Baguette("calories-free bread");
++exports.set_mut(glutenFreeBaguette);
++assertEq(exports.get_mut(), glutenFreeBaguette);
++assertEq(exports.get_mut().calories, "calories-free bread");
+diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
+--- a/js/src/wasm/WasmBaselineCompile.cpp
++++ b/js/src/wasm/WasmBaselineCompile.cpp
+@@ -134,16 +134,17 @@
+ # include "jit/mips32/Assembler-mips32.h"
+ #endif
+ #if defined(JS_CODEGEN_MIPS64)
+ # include "jit/mips-shared/Assembler-mips-shared.h"
+ # include "jit/mips64/Assembler-mips64.h"
+ #endif
+ 
+ #include "wasm/WasmGenerator.h"
++#include "wasm/WasmInstance.h"
+ #include "wasm/WasmOpIter.h"
+ #include "wasm/WasmSignalHandlers.h"
+ #include "wasm/WasmValidate.h"
+ 
+ #include "jit/MacroAssembler-inl.h"
+ 
+ using mozilla::DebugOnly;
+ using mozilla::FloorLog2;
+@@ -4386,17 +4387,16 @@ class BaseCompiler final : public BaseCo
+ 
+     //////////////////////////////////////////////////////////////////////
+     //
+     // Global variable access.
+ 
+     Address addressOfGlobalVar(const GlobalDesc& global, RegI32 tmp)
+     {
+         uint32_t globalToTlsOffset = offsetof(TlsData, globalArea) + global.offset();
+-
+         masm.loadWasmTlsRegFromFrame(tmp);
+         if (global.isIndirect()) {
+             masm.loadPtr(Address(tmp, globalToTlsOffset), tmp);
+             return Address(tmp, 0);
+         }
+         return Address(tmp, globalToTlsOffset);
+     }
+ 
+@@ -5712,16 +5712,91 @@ class BaseCompiler final : public BaseCo
+     void branchTo(Assembler::Condition c, RegI64 lhs, RegI64 rhs, Label* l) {
+         masm.branch64(c, lhs, rhs, l);
+     }
+ 
+     void branchTo(Assembler::Condition c, RegI64 lhs, Imm64 rhs, Label* l) {
+         masm.branch64(c, lhs, rhs, l);
+     }
+ 
++#ifdef ENABLE_WASM_GC
++    // The following couple of functions emit a GC pre-write barrier. This is
++    // needed when we replace a member field with a new value, and the previous
++    // field value might have no other referents. The field might belong to an
++    // object or be a stack slot or a register or a heap allocated value.
++    //
++    // let obj = { field: previousValue };
++    // obj.field = newValue; // previousValue must be marked with a pre-barrier.
++    //
++    // Implementing a pre-barrier looks like this:
++    // - call `testNeedPreBarrier` with a fresh label.
++    // - user code must put the address of the field we're about to clobber in
++    // PreBarrierReg (to avoid explicit pushing/popping).
++    // - call `emitPreBarrier`, which binds the label.
++
++    void testNeedPreBarrier(Label* skipBarrier) {
++        MOZ_ASSERT(!skipBarrier->used());
++        MOZ_ASSERT(!skipBarrier->bound());
++
++        // If no incremental GC has started, we don't need the barrier.
++        ScratchPtr scratch(*this);
++        masm.loadWasmTlsRegFromFrame(scratch);
++        masm.loadPtr(Address(scratch, offsetof(TlsData, addressOfNeedsIncrementalBarrier)), scratch);
++        masm.branchTest32(Assembler::Zero, Address(scratch, 0), Imm32(0x1), skipBarrier);
++    }
++
++    void emitPreBarrier(RegPtr valueAddr, Label* skipBarrier) {
++        MOZ_ASSERT(valueAddr == PreBarrierReg);
++
++        // If the previous value is null, we don't need the barrier.
++        ScratchPtr scratch(*this);
++        masm.loadPtr(Address(valueAddr, 0), scratch);
++        masm.branchTestPtr(Assembler::Zero, scratch, scratch, skipBarrier);
++
++        // Call the barrier. This assumes PreBarrierReg contains the address of
++        // the stored value.
++        masm.loadWasmTlsRegFromFrame(scratch);
++        masm.loadPtr(Address(scratch, offsetof(TlsData, instance)), scratch);
++        masm.loadPtr(Address(scratch, Instance::offsetOfPreBarrierCode()), scratch);
++        masm.call(scratch);
++
++        masm.bind(skipBarrier);
++    }
++
++    // This emits a GC post-write barrier. This is needed to ensure that the GC
++    // is aware of slots of tenured things containing references to nursery
++    // values. Pass None for object when the field's owner object is known to
++    // be tenured or heap-allocated.
++
++    void emitPostBarrier(const Maybe<RegPtr>& object, RegPtr setValue, PostBarrierArg arg) {
++        Label skipBarrier;
++
++        // If the set value is null, no barrier.
++        masm.branchTestPtr(Assembler::Zero, setValue, setValue, &skipBarrier);
++
++        RegPtr scratch = needRef();
++        if (object) {
++            // If the object value isn't tenured, no barrier.
++            masm.branchPtrInNurseryChunk(Assembler::Equal, *object, scratch, &skipBarrier);
++        }
++
++        // If the set value is tenured, no barrier.
++        masm.branchPtrInNurseryChunk(Assembler::NotEqual, setValue, scratch, &skipBarrier);
++
++        freeRef(scratch);
++
++        // Need a barrier.
++        uint32_t bytecodeOffset = iter_.lastOpcodeOffset();
++        pushI32(arg.rawPayload());
++        emitInstanceCall(bytecodeOffset, SigPI_, ExprType::Void, SymbolicAddress::PostBarrier);
++
++        masm.bind(&skipBarrier);
++    }
++#endif
++
+     // Emit a conditional branch that optionally and optimally cleans up the CPU
+     // stack before we branch.
+     //
+     // Cond is either Assembler::Condition or Assembler::DoubleCondition.
+     //
+     // Lhs is RegI32, RegI64, or RegF32, or RegF64.
+     //
+     // Rhs is either the same as Lhs, or an immediate expression compatible with
+@@ -8258,16 +8333,19 @@ BaseCompiler::emitGetGlobal()
+             pushI64(value.i64());
+             break;
+           case ValType::F32:
+             pushF32(value.f32());
+             break;
+           case ValType::F64:
+             pushF64(value.f64());
+             break;
++          case ValType::AnyRef:
++            pushRef(value.ptr());
++            break;
+           default:
+             MOZ_CRASH("Global constant type");
+         }
+         return true;
+     }
+ 
+     switch (global.type().code()) {
+       case ValType::I32: {
+@@ -8293,16 +8371,23 @@ BaseCompiler::emitGetGlobal()
+       }
+       case ValType::F64: {
+         RegF64 rv = needF64();
+         ScratchI32 tmp(*this);
+         masm.loadDouble(addressOfGlobalVar(global, tmp), rv);
+         pushF64(rv);
+         break;
+       }
++      case ValType::AnyRef: {
++        RegPtr rv = needRef();
++        ScratchI32 tmp(*this);
++        masm.loadPtr(addressOfGlobalVar(global, tmp), rv);
++        pushRef(rv);
++        break;
++      }
+       default:
+         MOZ_CRASH("Global variable type");
+         break;
+     }
+     return true;
+ }
+ 
+ bool
+@@ -8342,16 +8427,43 @@ BaseCompiler::emitSetGlobal()
+       }
+       case ValType::F64: {
+         RegF64 rv = popF64();
+         ScratchI32 tmp(*this);
+         masm.storeDouble(rv, addressOfGlobalVar(global, tmp));
+         freeF64(rv);
+         break;
+       }
++#ifdef ENABLE_WASM_GC
++      case ValType::AnyRef: {
++        Label skipBarrier;
++        testNeedPreBarrier(&skipBarrier);
++
++        RegPtr valueAddr(PreBarrierReg);
++        needRef(valueAddr);
++        {
++            ScratchI32 tmp(*this);
++            masm.computeEffectiveAddress(addressOfGlobalVar(global, tmp), valueAddr);
++        }
++        emitPreBarrier(valueAddr, &skipBarrier);
++        freeRef(valueAddr);
++
++        RegPtr rv = popRef();
++        {
++            // Actual store.
++            ScratchI32 tmp(*this);
++            masm.storePtr(rv, addressOfGlobalVar(global, tmp));
++        }
++
++        emitPostBarrier(Nothing(), rv, PostBarrierArg::Global(id));
++
++        freeRef(rv);
++        break;
++      }
++#endif
+       default:
+         MOZ_CRASH("Global variable type");
+         break;
+     }
+     return true;
+ }
+ 
+ // Bounds check elimination.
+diff --git a/js/src/wasm/WasmBuiltins.cpp b/js/src/wasm/WasmBuiltins.cpp
+--- a/js/src/wasm/WasmBuiltins.cpp
++++ b/js/src/wasm/WasmBuiltins.cpp
+@@ -668,16 +668,20 @@ AddressOf(SymbolicAddress imm, ABIFuncti
+         *abiType = Args_General3;
+         return FuncCast(Instance::wake, *abiType);
+       case SymbolicAddress::MemCopy:
+         *abiType = Args_General4;
+         return FuncCast(Instance::memCopy, *abiType);
+       case SymbolicAddress::MemFill:
+         *abiType = Args_General4;
+         return FuncCast(Instance::memFill, *abiType);
++      case SymbolicAddress::PostBarrier:
++        *abiType = Args_General2;
++        static_assert(sizeof(PostBarrierArg) == sizeof(uint32_t), "passed arg is a u32");
++        return FuncCast(Instance::postBarrier, *abiType);
+ #if defined(JS_CODEGEN_MIPS32)
+       case SymbolicAddress::js_jit_gAtomic64Lock:
+         return &js::jit::gAtomic64Lock;
+ #endif
+       case SymbolicAddress::Limit:
+         break;
+     }
+ 
+@@ -746,16 +750,17 @@ wasm::NeedsBuiltinThunk(SymbolicAddress 
+       case SymbolicAddress::CurrentMemory:
+       case SymbolicAddress::WaitI32:
+       case SymbolicAddress::WaitI64:
+       case SymbolicAddress::Wake:
+       case SymbolicAddress::CoerceInPlace_JitEntry:
+       case SymbolicAddress::ReportInt64JSCall:
+       case SymbolicAddress::MemCopy:
+       case SymbolicAddress::MemFill:
++      case SymbolicAddress::PostBarrier:
+         return true;
+       case SymbolicAddress::Limit:
+         break;
+     }
+ 
+     MOZ_CRASH("unexpected symbolic address");
+ }
+ 
+diff --git a/js/src/wasm/WasmFrameIter.cpp b/js/src/wasm/WasmFrameIter.cpp
+--- a/js/src/wasm/WasmFrameIter.cpp
++++ b/js/src/wasm/WasmFrameIter.cpp
+@@ -1262,16 +1262,18 @@ ThunkedNativeToDescription(SymbolicAddre
+       case SymbolicAddress::CoerceInPlace_JitEntry:
+         return "out-of-line coercion for jit entry arguments (in wasm)";
+       case SymbolicAddress::ReportInt64JSCall:
+         return "jit call to int64 wasm function";
+       case SymbolicAddress::MemCopy:
+         return "call to native memory.copy function";
+       case SymbolicAddress::MemFill:
+         return "call to native memory.fill function";
++      case SymbolicAddress::PostBarrier:
++        return "call to native GC postbarrier (in wasm)";
+ #if defined(JS_CODEGEN_MIPS32)
+       case SymbolicAddress::js_jit_gAtomic64Lock:
+         MOZ_CRASH();
+ #endif
+       case SymbolicAddress::Limit:
+         break;
+     }
+     return "?";
+diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
+--- a/js/src/wasm/WasmInstance.cpp
++++ b/js/src/wasm/WasmInstance.cpp
+@@ -20,16 +20,17 @@
+ 
+ #include "jit/AtomicOperations.h"
+ #include "jit/BaselineJIT.h"
+ #include "jit/InlinableNatives.h"
+ #include "jit/JitCommon.h"
+ #include "wasm/WasmBuiltins.h"
+ #include "wasm/WasmModule.h"
+ 
++#include "gc/StoreBuffer-inl.h"
+ #include "vm/ArrayBufferObject-inl.h"
+ #include "vm/JSObject-inl.h"
+ 
+ using namespace js;
+ using namespace js::jit;
+ using namespace js::wasm;
+ using mozilla::BitwiseCast;
+ 
+@@ -471,16 +472,38 @@ Instance::memFill(Instance* instance, ui
+ 
+     }
+ 
+     JSContext* cx = TlsContext.get();
+     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_OUT_OF_BOUNDS);
+     return -1;
+ }
+ 
++/* static */ void
++Instance::postBarrier(Instance* instance, PostBarrierArg arg)
++{
++    gc::Cell** cell = nullptr;
++    switch (arg.type()) {
++      case PostBarrierArg::Type::Global: {
++        const GlobalDesc& global = instance->metadata().globals[arg.globalIndex()];
++        MOZ_ASSERT(!global.isConstant());
++        MOZ_ASSERT(global.type().isRefOrAnyRef());
++        uint8_t* globalAddr = instance->globalData() + global.offset();
++        if (global.isIndirect())
++            globalAddr = *(uint8_t**)globalAddr;
++        MOZ_ASSERT(*(JSObject**)globalAddr, "shouldn't call postbarrier if null");
++        cell = (gc::Cell**) globalAddr;
++        break;
++      }
++    }
++
++    MOZ_ASSERT(cell);
++    TlsContext.get()->runtime()->gc.storeBuffer().putCell(cell);
++}
++
+ Instance::Instance(JSContext* cx,
+                    Handle<WasmInstanceObject*> object,
+                    SharedCode code,
+                    UniqueDebugState debug,
+                    UniqueTlsData tlsDataIn,
+                    HandleWasmMemoryObject memory,
+                    SharedTableVector&& tables,
+                    Handle<FunctionVector> funcImports,
+@@ -504,16 +527,20 @@ Instance::Instance(JSContext* cx,
+     tlsData()->memoryBase = memory ? memory->buffer().dataPointerEither().unwrap() : nullptr;
+ #ifndef WASM_HUGE_MEMORY
+     tlsData()->boundsCheckLimit = memory ? memory->buffer().wasmBoundsCheckLimit() : 0;
+ #endif
+     tlsData()->instance = this;
+     tlsData()->cx = cx;
+     tlsData()->resetInterrupt(cx);
+     tlsData()->jumpTable = code_->tieringJumpTable();
++#ifdef ENABLE_WASM_GC
++    tlsData()->addressOfNeedsIncrementalBarrier =
++        (uint8_t*)cx->compartment()->zone()->addressOfNeedsIncrementalBarrier();
++#endif
+ 
+     Tier callerTier = code_->bestTier();
+ 
+     for (size_t i = 0; i < metadata(callerTier).funcImports.length(); i++) {
+         HandleFunction f = funcImports[i];
+         const FuncImport& fi = metadata(callerTier).funcImports[i];
+         FuncImportTls& import = funcImportTls(fi);
+         if (!isAsmJS() && IsExportedWasmFunction(f)) {
+@@ -624,16 +651,17 @@ Instance::init(JSContext* cx)
+         }
+     }
+ 
+     JitRuntime* jitRuntime = cx->runtime()->getJitRuntime(cx);
+     if (!jitRuntime)
+         return false;
+     jsJitArgsRectifier_ = jitRuntime->getArgumentsRectifier();
+     jsJitExceptionHandler_ = jitRuntime->getExceptionTail();
++    preBarrierCode_ = jitRuntime->preBarrier(MIRType::Object);
+     return true;
+ }
+ 
+ Instance::~Instance()
+ {
+     realm_->wasm.unregisterInstance(*this);
+ 
+     const FuncImportVector& funcImports = metadata(code().stableTier()).funcImports;
+@@ -691,16 +719,26 @@ Instance::tracePrivate(JSTracer* trc)
+     // OK to just do one tier here; though the tiers have different funcImports
+     // tables, they share the tls object.
+     for (const FuncImport& fi : metadata(code().stableTier()).funcImports)
+         TraceNullableEdge(trc, &funcImportTls(fi).obj, "wasm import");
+ 
+     for (const SharedTable& table : tables_)
+         table->trace(trc);
+ 
++#ifdef ENABLE_WASM_GC
++    for (const GlobalDesc& global : code().metadata().globals) {
++        // Indirect anyref globals are traced by the owning WebAssembly.Global.
++        if (global.type() != ValType::AnyRef || global.isConstant() || global.isIndirect())
++            continue;
++        GCPtrObject* obj = (GCPtrObject*)(globalData() + global.offset());
++        TraceNullableEdge(trc, obj, "wasm anyref global");
++    }
++#endif
++
+     TraceNullableEdge(trc, &memory_, "wasm buffer");
+ }
+ 
+ void
+ Instance::trace(JSTracer* trc)
+ {
+     // Technically, instead of having this method, the caller could use
+     // Instance::object() to get the owning WasmInstanceObject to mark,
+diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
+--- a/js/src/wasm/WasmInstance.h
++++ b/js/src/wasm/WasmInstance.h
+@@ -42,16 +42,19 @@ namespace wasm {
+ // their code.
+ 
+ class Instance
+ {
+     JS::Realm* const                realm_;
+     ReadBarrieredWasmInstanceObject object_;
+     jit::TrampolinePtr              jsJitArgsRectifier_;
+     jit::TrampolinePtr              jsJitExceptionHandler_;
++#ifdef ENABLE_WASM_GC
++    jit::TrampolinePtr              preBarrierCode_;
++#endif
+     const SharedCode                code_;
+     const UniqueDebugState          debug_;
+     const UniqueTlsData             tlsData_;
+     GCPtrWasmMemoryObject           memory_;
+     SharedTableVector               tables_;
+     bool                            enterFrameTrapsEnabled_;
+ 
+     // Internal helpers:
+@@ -103,16 +106,21 @@ class Instance
+ #endif
+ 
+     static constexpr size_t offsetOfJSJitArgsRectifier() {
+         return offsetof(Instance, jsJitArgsRectifier_);
+     }
+     static constexpr size_t offsetOfJSJitExceptionHandler() {
+         return offsetof(Instance, jsJitExceptionHandler_);
+     }
++#ifdef ENABLE_WASM_GC
++    static constexpr size_t offsetOfPreBarrierCode() {
++        return offsetof(Instance, preBarrierCode_);
++    }
++#endif
+ 
+     // This method returns a pointer to the GC object that owns this Instance.
+     // Instances may be reached via weak edges (e.g., Compartment::instances_)
+     // so this performs a read-barrier on the returned object unless the barrier
+     // is explicitly waived.
+ 
+     WasmInstanceObject* object() const;
+     WasmInstanceObject* objectUnbarriered() const;
+@@ -166,16 +174,17 @@ class Instance
+     static int32_t callImport_ref(Instance*, int32_t, int32_t, uint64_t*);
+     static uint32_t growMemory_i32(Instance* instance, uint32_t delta);
+     static uint32_t currentMemory_i32(Instance* instance);
+     static int32_t wait_i32(Instance* instance, uint32_t byteOffset, int32_t value, int64_t timeout);
+     static int32_t wait_i64(Instance* instance, uint32_t byteOffset, int64_t value, int64_t timeout);
+     static int32_t wake(Instance* instance, uint32_t byteOffset, int32_t count);
+     static int32_t memCopy(Instance* instance, uint32_t destByteOffset, uint32_t srcByteOffset, uint32_t len);
+     static int32_t memFill(Instance* instance, uint32_t byteOffset, uint32_t value, uint32_t len);
++    static void postBarrier(Instance* instance, PostBarrierArg arg);
+ };
+ 
+ typedef UniquePtr<Instance> UniqueInstance;
+ 
+ } // namespace wasm
+ } // namespace js
+ 
+ #endif // wasm_instance_h
+diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
+--- a/js/src/wasm/WasmJS.cpp
++++ b/js/src/wasm/WasmJS.cpp
+@@ -129,32 +129,47 @@ ToWebAssemblyValue(JSContext* cx, ValTyp
+       }
+       case ValType::F64: {
+         double d;
+         if (!ToNumber(cx, v, &d))
+             return false;
+         *val = Val(d);
+         return true;
+       }
++      case ValType::AnyRef: {
++        if (v.isNull()) {
++            *val = Val(ValType::AnyRef, nullptr);
++        } else {
++            JSObject* obj = ToObject(cx, v);
++            if (!obj)
++                return false;
++            *val = Val(ValType::AnyRef, obj);
++        }
++        return true;
++      }
+       default: {
+         MOZ_CRASH("unexpected import value type, caller must guard");
+       }
+     }
+ }
+ 
+ static Value
+ ToJSValue(const Val& val)
+ {
+     switch (val.type().code()) {
+       case ValType::I32:
+         return Int32Value(val.i32());
+       case ValType::F32:
+         return DoubleValue(JS::CanonicalizeNaN(double(val.f32())));
+       case ValType::F64:
+         return DoubleValue(JS::CanonicalizeNaN(val.f64()));
++      case ValType::AnyRef:
++        if (!val.ptr())
++            return NullValue();
++        return ObjectValue(*(JSObject*)val.ptr());
+       default:
+         MOZ_CRASH("unexpected type when translating to a JS value");
+     }
+ }
+ 
+ // ============================================================================
+ // Imports
+ 
+@@ -261,32 +276,38 @@ GetImports(JSContext* cx,
+                 }
+ 
+                 if (globalObjs.length() <= index && !globalObjs.resize(index + 1)) {
+                     ReportOutOfMemory(cx);
+                     return false;
+                 }
+                 globalObjs[index] = obj;
+                 val = obj->val();
+-            } else
+-            if (v.isNumber()) {
++            } else {
++                if (IsNumberType(global.type())) {
++                    if (!v.isNumber())
++                        return ThrowBadImportType(cx, import.field.get(), "Number");
++                } else {
++                    MOZ_ASSERT(global.type().isRefOrAnyRef());
++                    if (!v.isNull() && !v.isObject())
++                        return ThrowBadImportType(cx, import.field.get(), "Object-or-null");
++                }
++
+                 if (global.type() == ValType::I64) {
+                     JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64_LINK);
+                     return false;
+                 }
+ 
+                 if (global.isMutable()) {
+                     JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_MUT_LINK);
+                     return false;
+                 }
+ 
+                 if (!ToWebAssemblyValue(cx, global.type(), v, &val))
+                     return false;
+-            } else {
+-                return ThrowBadImportType(cx, import.field.get(), "Number");
+             }
+ 
+             if (!globalImportValues->append(val))
+                 return false;
+ 
+             break;
+           }
+         }
+@@ -2097,56 +2118,72 @@ const ClassOps WasmGlobalObject::classOp
+     nullptr, /* enumerate */
+     nullptr, /* newEnumerate */
+     nullptr, /* resolve */
+     nullptr, /* mayResolve */
+     WasmGlobalObject::finalize,
+     nullptr, /* call */
+     nullptr, /* hasInstance */
+     nullptr, /* construct */
+-    nullptr  /* trace */
++    WasmGlobalObject::trace
+ };
+ 
+ const Class WasmGlobalObject::class_ =
+ {
+     "WebAssembly.Global",
+     JSCLASS_HAS_RESERVED_SLOTS(WasmGlobalObject::RESERVED_SLOTS) |
+     JSCLASS_BACKGROUND_FINALIZE,
+     &WasmGlobalObject::classOps_
+ };
+ 
+ /* static */ void
++WasmGlobalObject::trace(JSTracer* trc, JSObject* obj)
++{
++    WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
++    switch (global->type().code()) {
++      case ValType::AnyRef:
++        TraceNullableEdge(trc, &global->cell()->ptr, "wasm anyref global");
++        break;
++      default:
++        break;
++    }
++}
++
++/* static */ void
+ WasmGlobalObject::finalize(FreeOp*, JSObject* obj)
+ {
+     WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
+     js_delete(global->cell());
+ }
+ 
+ /* static */ WasmGlobalObject*
+ WasmGlobalObject::create(JSContext* cx, const Val& val, bool isMutable)
+ {
+     UniquePtr<Cell> cell = js::MakeUnique<Cell>();
+     if (!cell)
+         return nullptr;
+ 
+     switch (val.type().code()) {
+-      case ValType::I32: cell->i32 = val.i32(); break;
+-      case ValType::I64: cell->i64 = val.i64(); break;
+-      case ValType::F32: cell->f32 = val.f32(); break;
+-      case ValType::F64: cell->f64 = val.f64(); break;
+-      default:           MOZ_CRASH();
++      case ValType::I32:    cell->i32 = val.i32(); break;
++      case ValType::I64:    cell->i64 = val.i64(); break;
++      case ValType::F32:    cell->f32 = val.f32(); break;
++      case ValType::F64:    cell->f64 = val.f64(); break;
++      case ValType::AnyRef: cell->ptr = (JSObject*)val.ptr(); break;
++      default:              MOZ_CRASH();
+     }
+ 
+     RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmGlobal).toObject());
+ 
+     AutoSetNewObjectMetadata metadata(cx);
+     RootedWasmGlobalObject obj(cx, NewObjectWithGivenProto<WasmGlobalObject>(cx, proto));
+     if (!obj)
+         return nullptr;
+ 
++    MOZ_ASSERT(obj->isTenured(), "assumed by set_global post barriers");
++
+     obj->initReservedSlot(TYPE_SLOT, Int32Value(int32_t(val.type().bitsUnsafe())));
+     obj->initReservedSlot(MUTABLE_SLOT, JS::BooleanValue(isMutable));
+     obj->initReservedSlot(CELL_SLOT, PrivateValue(cell.release()));
+ 
+     return obj;
+ }
+ 
+ /* static */ bool
+@@ -2185,16 +2222,20 @@ WasmGlobalObject::construct(JSContext* c
+     } else if (args.length() == 1 && StringEqualsAscii(typeLinearStr, "i64")) {
+         // For the time being, i64 is allowed only if there is not an
+         // initializing value.
+         globalType = ValType::I64;
+     } else if (StringEqualsAscii(typeLinearStr, "f32")) {
+         globalType = ValType::F32;
+     } else if (StringEqualsAscii(typeLinearStr, "f64")) {
+         globalType = ValType::F64;
++#ifdef ENABLE_WASM_GC
++    } else if (cx->options().wasmGc() && StringEqualsAscii(typeLinearStr, "anyref")) {
++        globalType = ValType::AnyRef;
++#endif
+     } else {
+         JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_GLOBAL_TYPE);
+         return false;
+     }
+ 
+     RootedValue mutableVal(cx);
+     if (!JS_GetProperty(cx, obj, "mutable", &mutableVal))
+         return false;
+@@ -2206,20 +2247,21 @@ WasmGlobalObject::construct(JSContext* c
+     Val globalVal = Val(uint32_t(0));
+     if (args.length() >= 2) {
+         RootedValue valueVal(cx, args.get(1));
+ 
+         if (!ToWebAssemblyValue(cx, globalType, valueVal, &globalVal))
+             return false;
+     } else {
+         switch (globalType.code()) {
+-          case ValType::I32: /* set above */               break;
++          case ValType::I32:    /* set above */ break;
+           case ValType::I64: globalVal = Val(uint64_t(0)); break;
+-          case ValType::F32: globalVal = Val(float(0.0));  break;
+-          case ValType::F64: globalVal = Val(double(0.0)); break;
++          case ValType::F32:    globalVal = Val(float(0.0)); break;
++          case ValType::F64:    globalVal = Val(double(0.0)); break;
++          case ValType::AnyRef: globalVal = Val(ValType::AnyRef, nullptr); break;
+           default: MOZ_CRASH();
+         }
+     }
+ 
+     WasmGlobalObject* global = WasmGlobalObject::create(cx, globalVal, isMutable);
+     if (!global)
+         return false;
+ 
+@@ -2235,16 +2277,17 @@ IsGlobal(HandleValue v)
+ 
+ /* static */ bool
+ WasmGlobalObject::valueGetterImpl(JSContext* cx, const CallArgs& args)
+ {
+     switch (args.thisv().toObject().as<WasmGlobalObject>().type().code()) {
+       case ValType::I32:
+       case ValType::F32:
+       case ValType::F64:
++      case ValType::AnyRef:
+         args.rval().set(args.thisv().toObject().as<WasmGlobalObject>().value());
+         return true;
+       case ValType::I64:
+         JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64_TYPE);
+         return false;
+       default:
+         MOZ_CRASH();
+     }
+@@ -2272,20 +2315,21 @@ WasmGlobalObject::valueSetterImpl(JSCont
+     }
+ 
+     Val val;
+     if (!ToWebAssemblyValue(cx, global->type(), args.get(0), &val))
+         return false;
+ 
+     Cell* cell = global->cell();
+     switch (global->type().code()) {
+-      case ValType::I32: cell->i32 = val.i32(); break;
+-      case ValType::F32: cell->f32 = val.f32(); break;
+-      case ValType::F64: cell->f64 = val.f64(); break;
+-      default:           MOZ_CRASH();
++      case ValType::I32:    cell->i32 = val.i32(); break;
++      case ValType::F32:    cell->f32 = val.f32(); break;
++      case ValType::F64:    cell->f64 = val.f64(); break;
++      case ValType::AnyRef: cell->ptr = (JSObject*)val.ptr(); break;
++      default:              MOZ_CRASH();
+     }
+ 
+     args.rval().setUndefined();
+     return true;
+ }
+ 
+ /* static */ bool
+ WasmGlobalObject::valueSetter(JSContext* cx, unsigned argc, Value* vp)
+@@ -2323,21 +2367,22 @@ WasmGlobalObject::isMutable() const
+ }
+ 
+ Val
+ WasmGlobalObject::val() const
+ {
+     Cell* cell = this->cell();
+     Val val;
+     switch (type().code()) {
+-      case ValType::I32: val = Val(uint32_t(cell->i32)); break;
+-      case ValType::I64: val = Val(uint64_t(cell->i64)); break;
+-      case ValType::F32: val = Val(cell->f32); break;
+-      case ValType::F64: val = Val(cell->f64); break;
+-      default:           MOZ_CRASH();
++      case ValType::I32:    val = Val(uint32_t(cell->i32)); break;
++      case ValType::I64:    val = Val(uint64_t(cell->i64)); break;
++      case ValType::F32:    val = Val(cell->f32); break;
++      case ValType::F64:    val = Val(cell->f64); break;
++      case ValType::AnyRef: val = Val(ValType::AnyRef, (void*)cell->ptr); break;
++      default:              MOZ_CRASH();
+     }
+     return val;
+ }
+ 
+ Value
+ WasmGlobalObject::value() const
+ {
+     // ToJSValue crashes on I64; this is desirable.
+diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h
+--- a/js/src/wasm/WasmJS.h
++++ b/js/src/wasm/WasmJS.h
+@@ -121,30 +121,35 @@ class WasmModuleObject : public NativeOb
+ class WasmGlobalObject : public NativeObject
+ {
+     static const unsigned TYPE_SLOT = 0;
+     static const unsigned MUTABLE_SLOT = 1;
+     static const unsigned CELL_SLOT = 2;
+ 
+     static const ClassOps classOps_;
+     static void finalize(FreeOp*, JSObject* obj);
++    static void trace(JSTracer* trc, JSObject* obj);
+ 
+     static bool valueGetterImpl(JSContext* cx, const CallArgs& args);
+     static bool valueGetter(JSContext* cx, unsigned argc, Value* vp);
+     static bool valueSetterImpl(JSContext* cx, const CallArgs& args);
+     static bool valueSetter(JSContext* cx, unsigned argc, Value* vp);
+ 
+   public:
+     // For exposed globals the Cell holds the value of the global; the
+     // instance's global area holds a pointer to the Cell.
+     union Cell {
+-        int32_t i32;
+-        int64_t i64;
+-        float   f32;
+-        double  f64;
++        int32_t     i32;
++        int64_t     i64;
++        float       f32;
++        double      f64;
++        GCPtrObject ptr;
++
++        Cell() : i64(0) {}
++        ~Cell() {}
+     };
+ 
+     static const unsigned RESERVED_SLOTS = 3;
+     static const Class class_;
+     static const JSPropertySpec properties[];
+     static const JSFunctionSpec methods[];
+     static const JSFunctionSpec static_methods[];
+     static bool construct(JSContext*, unsigned, Value*);
+diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
+--- a/js/src/wasm/WasmTypes.cpp
++++ b/js/src/wasm/WasmTypes.cpp
+@@ -77,19 +77,20 @@ Val::writePayload(uint8_t* dst) const
+       case ValType::F32x4:
+       case ValType::B8x16:
+       case ValType::B16x8:
+       case ValType::B32x4:
+         memcpy(dst, &u, jit::Simd128DataSize);
+         return;
+       case ValType::Ref:
+       case ValType::AnyRef:
+-        // TODO
+-        MOZ_CRASH("writing imported value of Ref/AnyRef in global NYI");
++        memcpy(dst, &u.ptr_, sizeof(intptr_t));
++        return;
+     }
++    MOZ_CRASH("unexpected Val type");
+ }
+ 
+ bool
+ wasm::IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode)
+ {
+     switch (callee) {
+       case SymbolicAddress::FloorD:
+       case SymbolicAddress::FloorF:
+diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
+--- a/js/src/wasm/WasmTypes.h
++++ b/js/src/wasm/WasmTypes.h
+@@ -502,17 +502,17 @@ SizeOf(ValType vt)
+       case ValType::I32x4:
+       case ValType::F32x4:
+       case ValType::B8x16:
+       case ValType::B16x8:
+       case ValType::B32x4:
+         return 16;
+       case ValType::AnyRef:
+       case ValType::Ref:
+-        MOZ_CRASH("unexpected ref/anyref");
++        return sizeof(intptr_t);
+     }
+     MOZ_CRASH("Invalid ValType");
+ }
+ 
+ static inline bool
+ IsSimdType(ValType vt)
+ {
+     switch (vt.code()) {
+@@ -780,27 +780,33 @@ class Val
+         uint32_t i32_;
+         uint64_t i64_;
+         float f32_;
+         double f64_;
+         I8x16 i8x16_;
+         I16x8 i16x8_;
+         I32x4 i32x4_;
+         F32x4 f32x4_;
++        intptr_t ptr_;
+     } u;
+ 
+   public:
+     Val() = default;
+ 
+     explicit Val(uint32_t i32) : type_(ValType::I32) { u.i32_ = i32; }
+     explicit Val(uint64_t i64) : type_(ValType::I64) { u.i64_ = i64; }
+ 
+     explicit Val(float f32) : type_(ValType::F32) { u.f32_ = f32; }
+     explicit Val(double f64) : type_(ValType::F64) { u.f64_ = f64; }
+ 
++    explicit Val(ValType refType, void* ptr) : type_(refType) {
++        MOZ_ASSERT(refType.isRefOrAnyRef());
++        u.ptr_ = intptr_t(ptr);
++    }
++
+     explicit Val(const I8x16& i8x16, ValType type = ValType::I8x16) : type_(type) {
+         MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
+         memcpy(u.i8x16_, i8x16, sizeof(u.i8x16_));
+     }
+     explicit Val(const I16x8& i16x8, ValType type = ValType::I16x8) : type_(type) {
+         MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
+         memcpy(u.i16x8_, i16x8, sizeof(u.i16x8_));
+     }
+@@ -815,16 +821,17 @@ class Val
+     ValType type() const { return type_; }
+     bool isSimd() const { return IsSimdType(type()); }
+     static constexpr size_t sizeofLargestValue() { return sizeof(u); }
+ 
+     uint32_t i32() const { MOZ_ASSERT(type_ == ValType::I32); return u.i32_; }
+     uint64_t i64() const { MOZ_ASSERT(type_ == ValType::I64); return u.i64_; }
+     const float& f32() const { MOZ_ASSERT(type_ == ValType::F32); return u.f32_; }
+     const double& f64() const { MOZ_ASSERT(type_ == ValType::F64); return u.f64_; }
++    intptr_t ptr() const { MOZ_ASSERT(type_.isRefOrAnyRef()); return u.ptr_; }
+ 
+     const I8x16& i8x16() const {
+         MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
+         return u.i8x16_;
+     }
+     const I16x8& i16x8() const {
+         MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
+         return u.i16x8_;
+@@ -987,17 +994,17 @@ class InitExpr
+ 
+     bool isVal() const { return kind() == Kind::Constant; }
+     Val val() const { MOZ_ASSERT(isVal()); return u.val_; }
+ 
+     uint32_t globalIndex() const { MOZ_ASSERT(kind() == Kind::GetGlobal); return u.global.index_; }
+ 
+     ValType type() const {
+         switch (kind()) {
+-          case Kind::Constant: return u.val_.type();
++          case Kind::Constant:  return u.val_.type();
+           case Kind::GetGlobal: return u.global.type_;
+         }
+         MOZ_CRASH("unexpected initExpr type");
+     }
+ };
+ 
+ // CacheableChars is used to cacheably store UniqueChars.
+ 
+@@ -1120,17 +1127,18 @@ class GlobalDesc
+             u.var.isWasm_ = kind == Wasm;
+             u.var.isExport_ = false;
+             u.var.offset_ = UINT32_MAX;
+         } else {
+             u.cst_ = initial.val();
+         }
+     }
+ 
+-    explicit GlobalDesc(ValType type, bool isMutable, uint32_t importIndex, ModuleKind kind = ModuleKind::Wasm)
++    explicit GlobalDesc(ValType type, bool isMutable, uint32_t importIndex,
++                        ModuleKind kind = ModuleKind::Wasm)
+       : kind_(GlobalKind::Import)
+     {
+         u.var.val.import.type_ = type;
+         u.var.val.import.index_ = importIndex;
+         u.var.isMutable_ = isMutable;
+         u.var.isWasm_ = kind == Wasm;
+         u.var.isExport_ = false;
+         u.var.offset_ = UINT32_MAX;
+@@ -1937,16 +1945,17 @@ enum class SymbolicAddress
+     Int64ToDouble,
+     GrowMemory,
+     CurrentMemory,
+     WaitI32,
+     WaitI64,
+     Wake,
+     MemCopy,
+     MemFill,
++    PostBarrier,
+ #if defined(JS_CODEGEN_MIPS32)
+     js_jit_gAtomic64Lock,
+ #endif
+     Limit
+ };
+ 
+ bool
+ IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode);
+@@ -2055,16 +2064,20 @@ struct TlsData
+     // Usually equal to cx->stackLimitForJitCode(JS::StackForUntrustedScript),
+     // but can be racily set to trigger immediate trap as an opportunity to
+     // CheckForInterrupt without an additional branch.
+     Atomic<uintptr_t, mozilla::Relaxed> stackLimit;
+ 
+     // Set to 1 when wasm should call CheckForInterrupt.
+     Atomic<uint32_t, mozilla::Relaxed> interrupt;
+ 
++#ifdef ENABLE_WASM_GC
++    uint8_t* addressOfNeedsIncrementalBarrier;
++#endif
++
+     // Methods to set, test and clear the above two fields. Both interrupt
+     // fields are Relaxed and so no consistency/ordering can be assumed.
+     void setInterrupt();
+     bool isInterrupted() const;
+     void resetInterrupt(JSContext* cx);
+ 
+     // Pointer that should be freed (due to padding before the TlsData).
+     void* allocatedBase;
+@@ -2508,12 +2521,54 @@ class DebugFrame
+ 
+     // DebugFrames are 8-byte aligned, allowing them to be placed in
+     // an AbstractFramePtr.
+ 
+     static const unsigned Alignment = 8;
+     static void alignmentStaticAsserts();
+ };
+ 
++# ifdef ENABLE_WASM_GC
++// A packed format for an argument to the Instance::postBarrier function.
++class PostBarrierArg
++{
++  public:
++    enum class Type {
++        Global = 0x0,
++        Last = Global
++    };
++
++  private:
++    uint32_t type_: 1;
++    uint32_t payload_: 31;
++
++    PostBarrierArg(uint32_t payload, Type type)
++      : type_(uint32_t(type)),
++        payload_(payload)
++    {
++        MOZ_ASSERT(payload < (UINT32_MAX >> 1));
++        MOZ_ASSERT(uint32_t(type) <= uint32_t(Type::Last));
++    }
++
++  public:
++    static PostBarrierArg Global(uint32_t globalIndex) {
++        return PostBarrierArg(globalIndex, Type::Global);
++    }
++
++    Type type() const {
++        MOZ_ASSERT(type_ <= uint32_t(Type::Last));
++        return Type(type_);
++    }
++    uint32_t globalIndex() const {
++        MOZ_ASSERT(type() == Type::Global);
++        return payload_;
++    }
++
++    uint32_t rawPayload() const {
++        return (payload_ << 1) | type_;
++    }
++};
++# endif
++
+ } // namespace wasm
+ } // namespace js
+ 
+ #endif // wasm_types_h
+diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
+--- a/js/src/wasm/WasmValidate.cpp
++++ b/js/src/wasm/WasmValidate.cpp
+@@ -1393,29 +1393,30 @@ DecodeTableLimits(Decoder& d, TableDescV
+ static bool
+ GlobalIsJSCompatible(Decoder& d, ValType type, bool isMutable)
+ {
+     switch (type.code()) {
+       case ValType::I32:
+       case ValType::F32:
+       case ValType::F64:
+       case ValType::I64:
++      case ValType::AnyRef:
+         break;
+       default:
+         return d.fail("unexpected variable type in global import/export");
+     }
+ 
+     return true;
+ }
+ 
+ static bool
+-DecodeGlobalType(Decoder& d, const TypeDefVector& types, ValType* type, bool* isMutable)
++DecodeGlobalType(Decoder& d, const TypeDefVector& types, HasGcTypes gcTypesEnabled, ValType* type,
++                 bool* isMutable)
+ {
+-    // No gc types in globals at the moment.
+-    if (!DecodeValType(d, ModuleKind::Wasm, types.length(), HasGcTypes::False, type))
++    if (!DecodeValType(d, ModuleKind::Wasm, types.length(), gcTypesEnabled, type))
+         return false;
+     if (!ValidateRefType(d, types, *type))
+         return false;
+ 
+     uint8_t flags;
+     if (!d.readFixedU8(&flags))
+         return d.fail("expected global flags");
+ 
+@@ -1504,17 +1505,17 @@ DecodeImport(Decoder& d, ModuleEnvironme
+       case DefinitionKind::Memory: {
+         if (!DecodeMemoryLimits(d, env))
+             return false;
+         break;
+       }
+       case DefinitionKind::Global: {
+         ValType type;
+         bool isMutable;
+-        if (!DecodeGlobalType(d, env->types, &type, &isMutable))
++        if (!DecodeGlobalType(d, env->types, env->gcTypesEnabled, &type, &isMutable))
+             return false;
+         if (!GlobalIsJSCompatible(d, type, isMutable))
+             return false;
+         if (!env->globals.append(GlobalDesc(type, isMutable, env->globals.length())))
+             return false;
+         if (env->globals.length() > MaxGlobals)
+             return d.fail("too many globals");
+         break;
+@@ -1632,18 +1633,18 @@ DecodeMemorySection(Decoder& d, ModuleEn
+         if (!DecodeMemoryLimits(d, env))
+             return false;
+     }
+ 
+     return d.finishSection(*range, "memory");
+ }
+ 
+ static bool
+-DecodeInitializerExpression(Decoder& d, const GlobalDescVector& globals, ValType expected,
+-                            InitExpr* init)
++DecodeInitializerExpression(Decoder& d, HasGcTypes gcTypesEnabled, const GlobalDescVector& globals,
++                            ValType expected, InitExpr* init)
+ {
+     OpBytes op;
+     if (!d.readOp(&op))
+         return d.fail("failed to read initializer type");
+ 
+     switch (op.b0) {
+       case uint16_t(Op::I32Const): {
+         int32_t i32;
+@@ -1668,16 +1669,28 @@ DecodeInitializerExpression(Decoder& d, 
+       }
+       case uint16_t(Op::F64Const): {
+         double f64;
+         if (!d.readFixedF64(&f64))
+             return d.fail("failed to read initializer f64 expression");
+         *init = InitExpr(Val(f64));
+         break;
+       }
++      case uint16_t(Op::RefNull): {
++        if (gcTypesEnabled == HasGcTypes::False)
++            return d.fail("unexpected initializer expression");
++        uint8_t valType;
++        uint32_t unusedRefTypeIndex;
++        if (!d.readValType(&valType, &unusedRefTypeIndex))
++            return false;
++        if (valType != uint8_t(ValType::AnyRef))
++            return d.fail("expected anyref as type for ref.null");
++        *init = InitExpr(Val(ValType::AnyRef, nullptr));
++        break;
++      }
+       case uint16_t(Op::GetGlobal): {
+         uint32_t i;
+         if (!d.readVarU32(&i))
+             return d.fail("failed to read get_global index in initializer expression");
+         if (i >= globals.length())
+             return d.fail("global index out of range in initializer expression");
+         if (!globals[i].isImport() || globals[i].isMutable())
+             return d.fail("initializer expression must reference a global immutable import");
+@@ -1718,21 +1731,21 @@ DecodeGlobalSection(Decoder& d, ModuleEn
+         return d.fail("too many globals");
+ 
+     if (!env->globals.reserve(numGlobals.value()))
+         return false;
+ 
+     for (uint32_t i = 0; i < numDefs; i++) {
+         ValType type;
+         bool isMutable;
+-        if (!DecodeGlobalType(d, env->types, &type, &isMutable))
++        if (!DecodeGlobalType(d, env->types, env->gcTypesEnabled, &type, &isMutable))
+             return false;
+ 
+         InitExpr initializer;
+-        if (!DecodeInitializerExpression(d, env->globals, type, &initializer))
++        if (!DecodeInitializerExpression(d, env->gcTypesEnabled, env->globals, type, &initializer))
+             return false;
+ 
+         env->globals.infallibleAppend(GlobalDesc(initializer, isMutable));
+     }
+ 
+     return d.finishSection(*range, "global");
+ }
+ 
+@@ -1903,17 +1916,18 @@ DecodeElemSection(Decoder& d, ModuleEnvi
+         if (!d.readVarU32(&tableIndex))
+             return d.fail("expected table index");
+ 
+         MOZ_ASSERT(env->tables.length() <= 1);
+         if (tableIndex >= env->tables.length())
+             return d.fail("table index out of range");
+ 
+         InitExpr offset;
+-        if (!DecodeInitializerExpression(d, env->globals, ValType::I32, &offset))
++        if (!DecodeInitializerExpression(d, env->gcTypesEnabled, env->globals, ValType::I32,
++                                         &offset))
+             return false;
+ 
+         uint32_t numElems;
+         if (!d.readVarU32(&numElems))
+             return d.fail("expected segment size");
+ 
+         if (numElems > MaxTableInitialLength)
+             return d.fail("too many table elements");
+@@ -2073,17 +2087,18 @@ DecodeDataSection(Decoder& d, ModuleEnvi
+ 
+         if (linearMemoryIndex != 0)
+             return d.fail("linear memory index must currently be 0");
+ 
+         if (!env->usesMemory())
+             return d.fail("data segment requires a memory section");
+ 
+         DataSegment seg;
+-        if (!DecodeInitializerExpression(d, env->globals, ValType::I32, &seg.offset))
++        if (!DecodeInitializerExpression(d, env->gcTypesEnabled, env->globals, ValType::I32,
++                                         &seg.offset))
+             return false;
+ 
+         if (!d.readVarU32(&seg.length))
+             return d.fail("expected segment size");
+ 
+         if (seg.length > MaxMemoryInitialPages * PageSize)
+             return d.fail("segment size too big");
+ 

+ 1384 - 0
frg/work-js/mozilla-release/patches/1450261-3-63a1.patch

@@ -0,0 +1,1384 @@
+# HG changeset patch
+# User Benjamin Bouvier <benj@benj.me>
+# Date 1529583928 -7200
+# Node ID 631a97e399a03c3c74d1cadf8e26550373393fa9
+# Parent  2bf734fe27156251987947fb2becba3c582bda36
+Bug 1450261: Rename Val to LitVal; r=lth
+
+diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
+--- a/js/src/wasm/AsmJS.cpp
++++ b/js/src/wasm/AsmJS.cpp
+@@ -132,18 +132,18 @@ class AsmJSGlobal
+   private:
+     struct CacheablePod {
+         Which which_;
+         union V {
+             struct {
+                 VarInitKind initKind_;
+                 union U {
+                     ValType importType_;
+-                    Val val_;
+-                    U() : val_(Val()) {}
++                    LitVal val_;
++                    U() : val_(LitVal()) {}
+                 } u;
+             } var;
+             uint32_t ffiIndex_;
+             Scalar::Type viewType_;
+             AsmJSMathBuiltinFunction mathBuiltinFunc_;
+             AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
+             SimdType simdCtorType_;
+             struct {
+@@ -173,17 +173,17 @@ class AsmJSGlobal
+     }
+     Which which() const {
+         return pod.which_;
+     }
+     VarInitKind varInitKind() const {
+         MOZ_ASSERT(pod.which_ == Variable);
+         return pod.u.var.initKind_;
+     }
+-    Val varInitVal() const {
++    LitVal varInitVal() const {
+         MOZ_ASSERT(pod.which_ == Variable);
+         MOZ_ASSERT(pod.u.var.initKind_ == InitConstant);
+         return pod.u.var.u.val_;
+     }
+     ValType varInitImportType() const {
+         MOZ_ASSERT(pod.which_ == Variable);
+         MOZ_ASSERT(pod.u.var.initKind_ == InitImport);
+         return pod.u.var.u.importType_;
+@@ -818,17 +818,17 @@ ParseVarOrConstStatement(AsmJSParser& pa
+ //  negative int: [-2^31, 0)
+ //  big unsigned: [2^31, 2^32)
+ //  out of range: otherwise
+ // Lastly, a literal may be a float literal which is any double or integer
+ // literal coerced with Math.fround.
+ //
+ // This class distinguishes between signed and unsigned integer SIMD types like
+ // Int32x4 and Uint32x4, and so does Type below. The wasm ValType and ExprType
+-// enums, and the wasm::Val class do not.
++// enums, and the wasm::LitVal class do not.
+ class NumLit
+ {
+   public:
+     enum Which {
+         Fixnum,
+         NegativeInt,
+         BigUnsigned,
+         Double,
+@@ -943,43 +943,43 @@ class NumLit
+           case NumLit::Float32x4:
+             return simdValue() == SimdConstant::SplatX4(0.f);
+           case NumLit::OutOfRangeInt:
+             MOZ_CRASH("can't be here because of valid() check above");
+         }
+         return false;
+     }
+ 
+-    Val value() const {
++    LitVal value() const {
+         switch (which_) {
+           case NumLit::Fixnum:
+           case NumLit::NegativeInt:
+           case NumLit::BigUnsigned:
+-            return Val(toUint32());
++            return LitVal(toUint32());
+           case NumLit::Float:
+-            return Val(toFloat());
++            return LitVal(toFloat());
+           case NumLit::Double:
+-            return Val(toDouble());
++            return LitVal(toDouble());
+           case NumLit::Int8x16:
+           case NumLit::Uint8x16:
+-            return Val(simdValue().asInt8x16());
++            return LitVal(simdValue().asInt8x16());
+           case NumLit::Int16x8:
+           case NumLit::Uint16x8:
+-            return Val(simdValue().asInt16x8());
++            return LitVal(simdValue().asInt16x8());
+           case NumLit::Int32x4:
+           case NumLit::Uint32x4:
+-            return Val(simdValue().asInt32x4());
++            return LitVal(simdValue().asInt32x4());
+           case NumLit::Float32x4:
+-            return Val(simdValue().asFloat32x4());
++            return LitVal(simdValue().asFloat32x4());
+           case NumLit::Bool8x16:
+-            return Val(simdValue().asInt8x16(), ValType::B8x16);
++            return LitVal(simdValue().asInt8x16(), ValType::B8x16);
+           case NumLit::Bool16x8:
+-            return Val(simdValue().asInt16x8(), ValType::B16x8);
++            return LitVal(simdValue().asInt16x8(), ValType::B16x8);
+           case NumLit::Bool32x4:
+-            return Val(simdValue().asInt32x4(), ValType::B32x4);
++            return LitVal(simdValue().asInt32x4(), ValType::B32x4);
+           case NumLit::OutOfRangeInt:;
+         }
+         MOZ_CRASH("bad literal");
+     }
+ };
+ 
+ // Represents the type of a general asm.js expression.
+ //
+@@ -2282,17 +2282,17 @@ class MOZ_STACK_CLASS ModuleValidator
+         if (table.defined())
+             return false;
+ 
+         table.define();
+ 
+         for (uint32_t& index : elems)
+             index += funcImportMap_.count();
+ 
+-        return env_.elemSegments.emplaceBack(tableIndex, InitExpr(Val(uint32_t(0))), std::move(elems));
++        return env_.elemSegments.emplaceBack(tableIndex, InitExpr(LitVal(uint32_t(0))), std::move(elems));
+     }
+     bool declareImport(PropertyName* name, FuncType&& sig, unsigned ffiIndex, uint32_t* importIndex) {
+         FuncImportMap::AddPtr p = funcImportMap_.lookupForAdd(NamedSig::Lookup(name, sig));
+         if (p) {
+             *importIndex = p->value();
+             return true;
+         }
+ 
+@@ -7666,17 +7666,17 @@ HasPureCoercion(JSContext* cx, HandleVal
+     {
+         return true;
+     }
+ 
+     return false;
+ }
+ 
+ static bool
+-ValidateGlobalVariable(JSContext* cx, const AsmJSGlobal& global, HandleValue importVal, Val* val)
++ValidateGlobalVariable(JSContext* cx, const AsmJSGlobal& global, HandleValue importVal, LitVal* val)
+ {
+     switch (global.varInitKind()) {
+       case AsmJSGlobal::InitConstant:
+         *val = global.varInitVal();
+         return true;
+ 
+       case AsmJSGlobal::InitImport: {
+         RootedValue v(cx);
+@@ -7686,85 +7686,85 @@ ValidateGlobalVariable(JSContext* cx, co
+         if (!v.isPrimitive() && !HasPureCoercion(cx, v))
+             return LinkFail(cx, "Imported values must be primitives");
+ 
+         switch (global.varInitImportType().code()) {
+           case ValType::I32: {
+             int32_t i32;
+             if (!ToInt32(cx, v, &i32))
+                 return false;
+-            *val = Val(uint32_t(i32));
++            *val = LitVal(uint32_t(i32));
+             return true;
+           }
+           case ValType::I64:
+             MOZ_CRASH("int64");
+           case ValType::F32: {
+             float f;
+             if (!RoundFloat32(cx, v, &f))
+                 return false;
+-            *val = Val(f);
++            *val = LitVal(f);
+             return true;
+           }
+           case ValType::F64: {
+             double d;
+             if (!ToNumber(cx, v, &d))
+                 return false;
+-            *val = Val(d);
++            *val = LitVal(d);
+             return true;
+           }
+           case ValType::I8x16: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Int8x16>(cx, v, &simdConstant))
+                 return false;
+-            *val = Val(simdConstant.asInt8x16());
++            *val = LitVal(simdConstant.asInt8x16());
+             return true;
+           }
+           case ValType::I16x8: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Int16x8>(cx, v, &simdConstant))
+                 return false;
+-            *val = Val(simdConstant.asInt16x8());
++            *val = LitVal(simdConstant.asInt16x8());
+             return true;
+           }
+           case ValType::I32x4: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Int32x4>(cx, v, &simdConstant))
+                 return false;
+-            *val = Val(simdConstant.asInt32x4());
++            *val = LitVal(simdConstant.asInt32x4());
+             return true;
+           }
+           case ValType::F32x4: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Float32x4>(cx, v, &simdConstant))
+                 return false;
+-            *val = Val(simdConstant.asFloat32x4());
++            *val = LitVal(simdConstant.asFloat32x4());
+             return true;
+           }
+           case ValType::B8x16: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Bool8x16>(cx, v, &simdConstant))
+                 return false;
+             // Bool8x16 uses the same data layout as Int8x16.
+-            *val = Val(simdConstant.asInt8x16());
++            *val = LitVal(simdConstant.asInt8x16());
+             return true;
+           }
+           case ValType::B16x8: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Bool16x8>(cx, v, &simdConstant))
+                 return false;
+             // Bool16x8 uses the same data layout as Int16x8.
+-            *val = Val(simdConstant.asInt16x8());
++            *val = LitVal(simdConstant.asInt16x8());
+             return true;
+           }
+           case ValType::B32x4: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Bool32x4>(cx, v, &simdConstant))
+                 return false;
+             // Bool32x4 uses the same data layout as Int32x4.
+-            *val = Val(simdConstant.asInt32x4());
++            *val = LitVal(simdConstant.asInt32x4());
+             return true;
+           }
+           case ValType::Ref:
+           case ValType::AnyRef: {
+             MOZ_CRASH("not available in asm.js");
+           }
+         }
+       }
+@@ -8128,26 +8128,27 @@ CheckBuffer(JSContext* cx, const AsmJSMe
+     }
+ 
+     MOZ_ASSERT(buffer->isPreparedForAsmJS());
+     return true;
+ }
+ 
+ static bool
+ GetImports(JSContext* cx, const AsmJSMetadata& metadata, HandleValue globalVal,
+-           HandleValue importVal, MutableHandle<FunctionVector> funcImports, ValVector* valImports)
++           HandleValue importVal, MutableHandle<FunctionVector> funcImports,
++           LitValVector* valImports)
+ {
+     Rooted<FunctionVector> ffis(cx, FunctionVector(cx));
+     if (!ffis.resize(metadata.numFFIs))
+         return false;
+ 
+     for (const AsmJSGlobal& global : metadata.asmJSGlobals) {
+         switch (global.which()) {
+           case AsmJSGlobal::Variable: {
+-            Val val;
++            LitVal val;
+             if (!ValidateGlobalVariable(cx, global, importVal, &val))
+                 return false;
+             if (!valImports->append(val))
+                 return false;
+             break;
+           }
+           case AsmJSGlobal::FFI:
+             if (!ValidateFFI(cx, global, importVal, &ffis))
+@@ -8203,17 +8204,17 @@ TryInstantiate(JSContext* cx, CallArgs a
+         if (!CheckBuffer(cx, metadata, bufferVal, &buffer))
+             return false;
+ 
+         memory = WasmMemoryObject::create(cx, buffer, nullptr);
+         if (!memory)
+             return false;
+     }
+ 
+-    ValVector valImports;
++    LitValVector valImports;
+     Rooted<FunctionVector> funcs(cx, FunctionVector(cx));
+     if (!GetImports(cx, metadata, globalVal, importVal, &funcs, &valImports))
+         return false;
+ 
+     Rooted<WasmGlobalObjectVector> globalObjs(cx);
+ 
+     RootedWasmTableObject table(cx);
+     if (!module.instantiate(cx, funcs, table, memory, valImports, globalObjs.get(), nullptr, instanceObj))
+diff --git a/js/src/wasm/WasmAST.h b/js/src/wasm/WasmAST.h
+--- a/js/src/wasm/WasmAST.h
++++ b/js/src/wasm/WasmAST.h
+@@ -552,25 +552,25 @@ class AstDrop : public AstExpr
+     {}
+     AstExpr& value() const {
+         return value_;
+     }
+ };
+ 
+ class AstConst : public AstExpr
+ {
+-    const Val val_;
++    const LitVal val_;
+ 
+   public:
+     static const AstExprKind Kind = AstExprKind::Const;
+-    explicit AstConst(Val val)
++    explicit AstConst(LitVal val)
+       : AstExpr(Kind, ExprType::Limit),
+         val_(val)
+     {}
+-    Val val() const { return val_; }
++    LitVal val() const { return val_; }
+ };
+ 
+ class AstGetLocal : public AstExpr
+ {
+     AstRef local_;
+ 
+   public:
+     static const AstExprKind Kind = AstExprKind::GetLocal;
+diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
+--- a/js/src/wasm/WasmBaselineCompile.cpp
++++ b/js/src/wasm/WasmBaselineCompile.cpp
+@@ -8319,17 +8319,17 @@ BaseCompiler::emitGetGlobal()
+         return false;
+ 
+     if (deadCode_)
+         return true;
+ 
+     const GlobalDesc& global = env_.globals[id];
+ 
+     if (global.isConstant()) {
+-        Val value = global.constantValue();
++        LitVal value = global.constantValue();
+         switch (value.type().code()) {
+           case ValType::I32:
+             pushI32(value.i32());
+             break;
+           case ValType::I64:
+             pushI64(value.i64());
+             break;
+           case ValType::F32:
+diff --git a/js/src/wasm/WasmDebug.cpp b/js/src/wasm/WasmDebug.cpp
+--- a/js/src/wasm/WasmDebug.cpp
++++ b/js/src/wasm/WasmDebug.cpp
+@@ -393,17 +393,17 @@ DebugState::debugGetResultType(uint32_t 
+ }
+ 
+ bool
+ DebugState::getGlobal(Instance& instance, uint32_t globalIndex, MutableHandleValue vp)
+ {
+     const GlobalDesc& global = metadata().globals[globalIndex];
+ 
+     if (global.isConstant()) {
+-        Val value = global.constantValue();
++        LitVal value = global.constantValue();
+         switch (value.type().code()) {
+           case ValType::I32:
+             vp.set(Int32Value(value.i32()));
+             break;
+           case ValType::I64:
+           // Just display as a Number; it's ok if we lose some precision
+             vp.set(NumberValue((double)value.i64()));
+             break;
+diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
+--- a/js/src/wasm/WasmInstance.cpp
++++ b/js/src/wasm/WasmInstance.cpp
+@@ -502,17 +502,17 @@ Instance::postBarrier(Instance* instance
+ Instance::Instance(JSContext* cx,
+                    Handle<WasmInstanceObject*> object,
+                    SharedCode code,
+                    UniqueDebugState debug,
+                    UniqueTlsData tlsDataIn,
+                    HandleWasmMemoryObject memory,
+                    SharedTableVector&& tables,
+                    Handle<FunctionVector> funcImports,
+-                   const ValVector& globalImportValues,
++                   const LitValVector& globalImportValues,
+                    const WasmGlobalObjectVector& globalObjs)
+   : realm_(cx->realm()),
+     object_(object),
+     code_(code),
+     debug_(std::move(debug)),
+     tlsData_(std::move(tlsDataIn)),
+     memory_(memory),
+     tables_(std::move(tables)),
+diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
+--- a/js/src/wasm/WasmInstance.h
++++ b/js/src/wasm/WasmInstance.h
+@@ -73,17 +73,17 @@ class Instance
+     Instance(JSContext* cx,
+              HandleWasmInstanceObject object,
+              SharedCode code,
+              UniqueDebugState debug,
+              UniqueTlsData tlsData,
+              HandleWasmMemoryObject memory,
+              SharedTableVector&& tables,
+              Handle<FunctionVector> funcImports,
+-             const ValVector& globalImportValues,
++             const LitValVector& globalImportValues,
+              const WasmGlobalObjectVector& globalObjs);
+     ~Instance();
+     bool init(JSContext* cx);
+     void trace(JSTracer* trc);
+ 
+     JS::Realm* realm() const { return realm_; }
+     const Code& code() const { return *code_; }
+     const CodeTier& code(Tier t) const { return code_->codeTier(t); }
+diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp
+--- a/js/src/wasm/WasmIonCompile.cpp
++++ b/js/src/wasm/WasmIonCompile.cpp
+@@ -2298,17 +2298,17 @@ EmitGetGlobal(FunctionCompiler& f)
+ 
+     const GlobalDesc& global = f.env().globals[id];
+     if (!global.isConstant()) {
+         f.iter().setResult(f.loadGlobalVar(global.offset(), !global.isMutable(),
+                                            global.isIndirect(), ToMIRType(global.type())));
+         return true;
+     }
+ 
+-    Val value = global.constantValue();
++    LitVal value = global.constantValue();
+     MIRType mirType = ToMIRType(value.type());
+ 
+     MDefinition* result;
+     switch (value.type().code()) {
+       case ValType::I32:
+         result = f.constant(Int32Value(value.i32()), mirType);
+         break;
+       case ValType::I64:
+diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
+--- a/js/src/wasm/WasmJS.cpp
++++ b/js/src/wasm/WasmJS.cpp
+@@ -105,59 +105,59 @@ bool
+ wasm::HasSupport(JSContext* cx)
+ {
+     return cx->options().wasm() &&
+            HasCompilerSupport(cx) &&
+            HasAvailableCompilerTier(cx);
+ }
+ 
+ static bool
+-ToWebAssemblyValue(JSContext* cx, ValType targetType, HandleValue v, Val* val)
++ToWebAssemblyValue(JSContext* cx, ValType targetType, HandleValue v, LitVal* val)
+ {
+     switch (targetType.code()) {
+       case ValType::I32: {
+         int32_t i32;
+         if (!ToInt32(cx, v, &i32))
+             return false;
+-        *val = Val(uint32_t(i32));
++        *val = LitVal(uint32_t(i32));
+         return true;
+       }
+       case ValType::F32: {
+         double d;
+         if (!ToNumber(cx, v, &d))
+             return false;
+-        *val = Val(float(d));
++        *val = LitVal(float(d));
+         return true;
+       }
+       case ValType::F64: {
+         double d;
+         if (!ToNumber(cx, v, &d))
+             return false;
+-        *val = Val(d);
++        *val = LitVal(d);
+         return true;
+       }
+       case ValType::AnyRef: {
+         if (v.isNull()) {
+-            *val = Val(ValType::AnyRef, nullptr);
++            *val = LitVal(ValType::AnyRef, nullptr);
+         } else {
+             JSObject* obj = ToObject(cx, v);
+             if (!obj)
+                 return false;
+-            *val = Val(ValType::AnyRef, obj);
++            *val = LitVal(ValType::AnyRef, obj);
+         }
+         return true;
+       }
+       default: {
+         MOZ_CRASH("unexpected import value type, caller must guard");
+       }
+     }
+ }
+ 
+ static Value
+-ToJSValue(const Val& val)
++ToJSValue(const LitVal& val)
+ {
+     switch (val.type().code()) {
+       case ValType::I32:
+         return Int32Value(val.i32());
+       case ValType::F32:
+         return DoubleValue(JS::CanonicalizeNaN(double(val.f32())));
+       case ValType::F64:
+         return DoubleValue(JS::CanonicalizeNaN(val.f64()));
+@@ -201,17 +201,17 @@ GetProperty(JSContext* cx, HandleObject 
+ static bool
+ GetImports(JSContext* cx,
+            const Module& module,
+            HandleObject importObj,
+            MutableHandle<FunctionVector> funcImports,
+            MutableHandleWasmTableObject tableImport,
+            MutableHandleWasmMemoryObject memoryImport,
+            WasmGlobalObjectVector& globalObjs,
+-           ValVector* globalImportValues)
++           LitValVector* globalImportValues)
+ {
+     const ImportVector& imports = module.imports();
+     if (!imports.empty() && !importObj)
+         return ThrowBadImportArg(cx);
+ 
+     const Metadata& metadata = module.metadata();
+ 
+     uint32_t globalIndex = 0;
+@@ -253,17 +253,17 @@ GetImports(JSContext* cx,
+             if (!v.isObject() || !v.toObject().is<WasmMemoryObject>())
+                 return ThrowBadImportType(cx, import.field.get(), "Memory");
+ 
+             MOZ_ASSERT(!memoryImport);
+             memoryImport.set(&v.toObject().as<WasmMemoryObject>());
+             break;
+           }
+           case DefinitionKind::Global: {
+-            Val val;
++            LitVal val;
+             const uint32_t index = globalIndex++;
+             const GlobalDesc& global = globals[index];
+             MOZ_ASSERT(global.importIndex() == index);
+ 
+             if (v.isObject() && v.toObject().is<WasmGlobalObject>()) {
+                 RootedWasmGlobalObject obj(cx, &v.toObject().as<WasmGlobalObject>());
+ 
+                 if (obj->isMutable() != global.isMutable()) {
+@@ -375,17 +375,17 @@ wasm::Eval(JSContext* cx, Handle<TypedAr
+         return false;
+     }
+ 
+     Rooted<FunctionVector> funcs(cx, FunctionVector(cx));
+     RootedWasmTableObject table(cx);
+     RootedWasmMemoryObject memory(cx);
+     Rooted<WasmGlobalObjectVector> globalObjs(cx);
+ 
+-    ValVector globals;
++    LitValVector globals;
+     if (!GetImports(cx, *module, importObj, &funcs, &table, &memory, globalObjs.get(), &globals))
+         return false;
+ 
+     return module->instantiate(cx, funcs, table, memory, globals, globalObjs.get(), nullptr, instanceObj);
+ }
+ 
+ // ============================================================================
+ // Common functions
+@@ -1065,17 +1065,17 @@ WasmInstanceObject::trace(JSTracer* trc,
+ WasmInstanceObject::create(JSContext* cx,
+                            SharedCode code,
+                            UniqueDebugState debug,
+                            UniqueTlsData tlsData,
+                            HandleWasmMemoryObject memory,
+                            SharedTableVector&& tables,
+                            Handle<FunctionVector> funcImports,
+                            const GlobalDescVector& globals,
+-                           const ValVector& globalImportValues,
++                           const LitValVector& globalImportValues,
+                            const WasmGlobalObjectVector& globalObjs,
+                            HandleObject proto)
+ {
+     UniquePtr<ExportMap> exports = js::MakeUnique<ExportMap>();
+     if (!exports || !exports->init()) {
+         ReportOutOfMemory(cx);
+         return nullptr;
+     }
+@@ -1172,17 +1172,17 @@ Instantiate(JSContext* cx, const Module&
+ {
+     RootedObject instanceProto(cx, &cx->global()->getPrototype(JSProto_WasmInstance).toObject());
+ 
+     Rooted<FunctionVector> funcs(cx, FunctionVector(cx));
+     RootedWasmTableObject table(cx);
+     RootedWasmMemoryObject memory(cx);
+     Rooted<WasmGlobalObjectVector> globalObjs(cx);
+ 
+-    ValVector globals;
++    LitValVector globals;
+     if (!GetImports(cx, module, importObj, &funcs, &table, &memory, globalObjs.get(), &globals))
+         return false;
+ 
+     return module.instantiate(cx, funcs, table, memory, globals, globalObjs.get(), instanceProto, instanceObj);
+ }
+ 
+ /* static */ bool
+ WasmInstanceObject::construct(JSContext* cx, unsigned argc, Value* vp)
+@@ -2150,17 +2150,17 @@ WasmGlobalObject::trace(JSTracer* trc, J
+ /* static */ void
+ WasmGlobalObject::finalize(FreeOp*, JSObject* obj)
+ {
+     WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
+     js_delete(global->cell());
+ }
+ 
+ /* static */ WasmGlobalObject*
+-WasmGlobalObject::create(JSContext* cx, const Val& val, bool isMutable)
++WasmGlobalObject::create(JSContext* cx, const LitVal& val, bool isMutable)
+ {
+     UniquePtr<Cell> cell = js::MakeUnique<Cell>();
+     if (!cell)
+         return nullptr;
+ 
+     switch (val.type().code()) {
+       case ValType::I32:    cell->i32 = val.i32(); break;
+       case ValType::I64:    cell->i64 = val.i64(); break;
+@@ -2239,29 +2239,29 @@ WasmGlobalObject::construct(JSContext* c
+     RootedValue mutableVal(cx);
+     if (!JS_GetProperty(cx, obj, "mutable", &mutableVal))
+         return false;
+ 
+     bool isMutable = ToBoolean(mutableVal);
+ 
+     // Extract the initial value, or provide a suitable default.
+     // Guard against control flow mistakes below failing to set |globalVal|.
+-    Val globalVal = Val(uint32_t(0));
++    LitVal globalVal = LitVal(uint32_t(0));
+     if (args.length() >= 2) {
+         RootedValue valueVal(cx, args.get(1));
+ 
+         if (!ToWebAssemblyValue(cx, globalType, valueVal, &globalVal))
+             return false;
+     } else {
+         switch (globalType.code()) {
+           case ValType::I32:    /* set above */ break;
+-          case ValType::I64: globalVal = Val(uint64_t(0)); break;
+-          case ValType::F32:    globalVal = Val(float(0.0)); break;
+-          case ValType::F64:    globalVal = Val(double(0.0)); break;
+-          case ValType::AnyRef: globalVal = Val(ValType::AnyRef, nullptr); break;
++          case ValType::I64:    globalVal = LitVal(uint64_t(0)); break;
++          case ValType::F32:    globalVal = LitVal(float(0.0)); break;
++          case ValType::F64:    globalVal = LitVal(double(0.0)); break;
++          case ValType::AnyRef: globalVal = LitVal(ValType::AnyRef, nullptr); break;
+           default: MOZ_CRASH();
+         }
+     }
+ 
+     WasmGlobalObject* global = WasmGlobalObject::create(cx, globalVal, isMutable);
+     if (!global)
+         return false;
+ 
+@@ -2309,17 +2309,17 @@ WasmGlobalObject::valueSetterImpl(JSCont
+         return false;
+     }
+ 
+     if (global->type() == ValType::I64) {
+         JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64_TYPE);
+         return false;
+     }
+ 
+-    Val val;
++    LitVal val;
+     if (!ToWebAssemblyValue(cx, global->type(), args.get(0), &val))
+         return false;
+ 
+     Cell* cell = global->cell();
+     switch (global->type().code()) {
+       case ValType::I32:    cell->i32 = val.i32(); break;
+       case ValType::F32:    cell->f32 = val.f32(); break;
+       case ValType::F64:    cell->f64 = val.f64(); break;
+@@ -2361,27 +2361,27 @@ WasmGlobalObject::type() const
+ }
+ 
+ bool
+ WasmGlobalObject::isMutable() const
+ {
+     return getReservedSlot(MUTABLE_SLOT).toBoolean();
+ }
+ 
+-Val
++LitVal
+ WasmGlobalObject::val() const
+ {
+     Cell* cell = this->cell();
+-    Val val;
++    LitVal val;
+     switch (type().code()) {
+-      case ValType::I32:    val = Val(uint32_t(cell->i32)); break;
+-      case ValType::I64:    val = Val(uint64_t(cell->i64)); break;
+-      case ValType::F32:    val = Val(cell->f32); break;
+-      case ValType::F64:    val = Val(cell->f64); break;
+-      case ValType::AnyRef: val = Val(ValType::AnyRef, (void*)cell->ptr); break;
++      case ValType::I32:    val = LitVal(uint32_t(cell->i32)); break;
++      case ValType::I64:    val = LitVal(uint64_t(cell->i64)); break;
++      case ValType::F32:    val = LitVal(cell->f32); break;
++      case ValType::F64:    val = LitVal(cell->f64); break;
++      case ValType::AnyRef: val = LitVal(ValType::AnyRef, (void*)cell->ptr); break;
+       default:              MOZ_CRASH();
+     }
+     return val;
+ }
+ 
+ Value
+ WasmGlobalObject::value() const
+ {
+diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h
+--- a/js/src/wasm/WasmJS.h
++++ b/js/src/wasm/WasmJS.h
+@@ -149,20 +149,20 @@ class WasmGlobalObject : public NativeOb
+ 
+     static const unsigned RESERVED_SLOTS = 3;
+     static const Class class_;
+     static const JSPropertySpec properties[];
+     static const JSFunctionSpec methods[];
+     static const JSFunctionSpec static_methods[];
+     static bool construct(JSContext*, unsigned, Value*);
+ 
+-    static WasmGlobalObject* create(JSContext* cx, const wasm::Val& value, bool isMutable);
++    static WasmGlobalObject* create(JSContext* cx, const wasm::LitVal& value, bool isMutable);
+ 
+     wasm::ValType type() const;
+-    wasm::Val val() const;
++    wasm::LitVal val() const;
+     bool isMutable() const;
+     // value() will MOZ_CRASH if the type is int64
+     Value value() const;
+     Cell* cell() const;
+ };
+ 
+ // The class of WebAssembly.Instance. Each WasmInstanceObject owns a
+ // wasm::Instance. These objects are used both as content-facing JS objects and
+@@ -214,17 +214,17 @@ class WasmInstanceObject : public Native
+     static WasmInstanceObject* create(JSContext* cx,
+                                       RefPtr<const wasm::Code> code,
+                                       UniquePtr<wasm::DebugState> debug,
+                                       wasm::UniqueTlsData tlsData,
+                                       HandleWasmMemoryObject memory,
+                                       Vector<RefPtr<wasm::Table>, 0, SystemAllocPolicy>&& tables,
+                                       Handle<FunctionVector> funcImports,
+                                       const wasm::GlobalDescVector& globals,
+-                                      const wasm::ValVector& globalImportValues,
++                                      const wasm::LitValVector& globalImportValues,
+                                       const WasmGlobalObjectVector& globalObjs,
+                                       HandleObject proto);
+     void initExportsObj(JSObject& exportsObj);
+ 
+     wasm::Instance& instance() const;
+     JSObject& exportsObj() const;
+ 
+     static bool getExportedFunction(JSContext* cx,
+diff --git a/js/src/wasm/WasmModule.cpp b/js/src/wasm/WasmModule.cpp
+--- a/js/src/wasm/WasmModule.cpp
++++ b/js/src/wasm/WasmModule.cpp
+@@ -716,34 +716,34 @@ Module::extractCode(JSContext* cx, Tier 
+     if (!JS_DefineProperty(cx, result, "segments", value, JSPROP_ENUMERATE))
+         return false;
+ 
+     vp.setObject(*result);
+     return true;
+ }
+ 
+ static uint32_t
+-EvaluateInitExpr(const ValVector& globalImportValues, InitExpr initExpr)
++EvaluateInitExpr(const LitValVector& globalImportValues, InitExpr initExpr)
+ {
+     switch (initExpr.kind()) {
+       case InitExpr::Kind::Constant:
+         return initExpr.val().i32();
+       case InitExpr::Kind::GetGlobal:
+         return globalImportValues[initExpr.globalIndex()].i32();
+     }
+ 
+     MOZ_CRASH("bad initializer expression");
+ }
+ 
+ bool
+ Module::initSegments(JSContext* cx,
+                      HandleWasmInstanceObject instanceObj,
+                      Handle<FunctionVector> funcImports,
+                      HandleWasmMemoryObject memoryObj,
+-                     const ValVector& globalImportValues) const
++                     const LitValVector& globalImportValues) const
+ {
+     Instance& instance = instanceObj->instance();
+     const SharedTableVector& tables = instance.tables();
+ 
+     Tier tier = code().bestTier();
+ 
+     // Perform all error checks up front so that this function does not perform
+     // partial initialization if an error is reported.
+@@ -1005,18 +1005,19 @@ Module::instantiateTable(JSContext* cx, 
+                 return false;
+             }
+         }
+     }
+ 
+     return true;
+ }
+ 
+-static Val
+-ExtractGlobalValue(const ValVector& globalImportValues, uint32_t globalIndex, const GlobalDesc& global)
++static LitVal
++ExtractGlobalValue(const LitValVector& globalImportValues, uint32_t globalIndex,
++                   const GlobalDesc& global)
+ {
+     switch (global.kind()) {
+       case GlobalKind::Import: {
+         return globalImportValues[globalIndex];
+       }
+       case GlobalKind::Variable: {
+         const InitExpr& init = global.initExpr();
+         switch (init.kind()) {
+@@ -1030,38 +1031,38 @@ ExtractGlobalValue(const ValVector& glob
+       case GlobalKind::Constant: {
+         return global.constantValue();
+       }
+     }
+     MOZ_CRASH("Not a global value");
+ }
+ 
+ static bool
+-EnsureGlobalObject(JSContext* cx, const ValVector& globalImportValues, size_t globalIndex,
++EnsureGlobalObject(JSContext* cx, const LitValVector& globalImportValues, size_t globalIndex,
+                    const GlobalDesc& global, WasmGlobalObjectVector& globalObjs)
+ {
+     if (globalIndex < globalObjs.length() && globalObjs[globalIndex])
+         return true;
+ 
+-    Val val = ExtractGlobalValue(globalImportValues, globalIndex, global);
++    LitVal val = ExtractGlobalValue(globalImportValues, globalIndex, global);
+     RootedWasmGlobalObject go(cx, WasmGlobalObject::create(cx, val, global.isMutable()));
+     if (!go)
+         return false;
+ 
+     if (globalObjs.length() <= globalIndex && !globalObjs.resize(globalIndex + 1)) {
+         ReportOutOfMemory(cx);
+         return false;
+     }
+ 
+     globalObjs[globalIndex] = go;
+     return true;
+ }
+ 
+ bool
+-Module::instantiateGlobals(JSContext* cx, const ValVector& globalImportValues,
++Module::instantiateGlobals(JSContext* cx, const LitValVector& globalImportValues,
+                            WasmGlobalObjectVector& globalObjs) const
+ {
+     // If there are exported globals that aren't in globalObjs because they
+     // originate in this module or because they were immutable imports that came
+     // in as primitive values then we must create cells in the globalObjs for
+     // them here, as WasmInstanceObject::create() and CreateExportObject() will
+     // need the cells to exist.
+ 
+@@ -1183,17 +1184,17 @@ CreateExportObject(JSContext* cx,
+     return true;
+ }
+ 
+ bool
+ Module::instantiate(JSContext* cx,
+                     Handle<FunctionVector> funcImports,
+                     HandleWasmTableObject tableImport,
+                     HandleWasmMemoryObject memoryImport,
+-                    const ValVector& globalImportValues,
++                    const LitValVector& globalImportValues,
+                     WasmGlobalObjectVector& globalObjs,
+                     HandleObject instanceProto,
+                     MutableHandleWasmInstanceObject instance) const
+ {
+     if (!instantiateFunctions(cx, funcImports))
+         return false;
+ 
+     RootedWasmMemoryObject memory(cx, memoryImport);
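
For context on EvaluateInitExpr above: a segment offset is an initializer
expression that is either an i32 literal or a get_global reading the
already-validated imported-global values. A standalone sketch with stand-in
types (MiniInit is hypothetical, not the real InitExpr):

    #include <cstdint>
    #include <vector>

    enum class InitKind { Constant, GetGlobal };
    struct MiniInit { InitKind kind; uint32_t constant; uint32_t globalIndex; };

    // Mirrors EvaluateInitExpr: a constant yields its literal; get_global
    // indexes the imported-global values gathered during instantiation.
    static uint32_t EvalOffset(const MiniInit& init,
                               const std::vector<uint32_t>& importedGlobals) {
        return init.kind == InitKind::Constant ? init.constant
                                               : importedGlobals[init.globalIndex];
    }
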
+diff --git a/js/src/wasm/WasmModule.h b/js/src/wasm/WasmModule.h
+--- a/js/src/wasm/WasmModule.h
++++ b/js/src/wasm/WasmModule.h
+@@ -146,23 +146,23 @@ class Module : public JS::WasmModule
+ 
+     mutable Atomic<bool>    codeIsBusy_;
+ 
+     bool instantiateFunctions(JSContext* cx, Handle<FunctionVector> funcImports) const;
+     bool instantiateMemory(JSContext* cx, MutableHandleWasmMemoryObject memory) const;
+     bool instantiateTable(JSContext* cx,
+                           MutableHandleWasmTableObject table,
+                           SharedTableVector* tables) const;
+-    bool instantiateGlobals(JSContext* cx, const ValVector& globalImportValues,
++    bool instantiateGlobals(JSContext* cx, const LitValVector& globalImportValues,
+                             WasmGlobalObjectVector& globalObjs) const;
+     bool initSegments(JSContext* cx,
+                       HandleWasmInstanceObject instance,
+                       Handle<FunctionVector> funcImports,
+                       HandleWasmMemoryObject memory,
+-                      const ValVector& globalImportValues) const;
++                      const LitValVector& globalImportValues) const;
+ 
+     class Tier2GeneratorTaskImpl;
+     void notifyCompilationListeners();
+ 
+   public:
+     Module(Assumptions&& assumptions,
+            const Code& code,
+            UniqueConstBytes unlinkedCodeForDebugging,
+@@ -202,17 +202,17 @@ class Module : public JS::WasmModule
+     uint32_t codeLength(Tier t) const { return code_->segment(t).length(); }
+ 
+     // Instantiate this module with the given imports:
+ 
+     bool instantiate(JSContext* cx,
+                      Handle<FunctionVector> funcImports,
+                      HandleWasmTableObject tableImport,
+                      HandleWasmMemoryObject memoryImport,
+-                     const ValVector& globalImportValues,
++                     const LitValVector& globalImportValues,
+                      WasmGlobalObjectVector& globalObjs,
+                      HandleObject instanceProto,
+                      MutableHandleWasmInstanceObject instanceObj) const;
+ 
+     // Tier-2 compilation may be initiated after the Module is constructed at
+     // most once, ideally before any client can attempt to serialize the Module.
+     // When tier-2 compilation completes, ModuleGenerator calls finishTier2()
+     // from a helper thread, passing tier-variant data which will be installed
+diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp
+--- a/js/src/wasm/WasmTextToBinary.cpp
++++ b/js/src/wasm/WasmTextToBinary.cpp
+@@ -2106,17 +2106,17 @@ ParseNaNLiteral(WasmParseContext& c, Was
+         // Produce the spec's default NaN.
+         value = (Traits::kSignificandBits + 1) >> 1;
+     }
+ 
+     value = (isNegated ? Traits::kSignBit : 0) | Traits::kExponentBits | value;
+ 
+     Float flt;
+     BitwiseCast(value, &flt);
+-    return new (c.lifo) AstConst(Val(flt));
++    return new (c.lifo) AstConst(LitVal(flt));
+ 
+   error:
+     c.ts.generateError(token, c.error);
+     return nullptr;
+ }
+ 
+ template <typename Float>
+ static bool
+@@ -2258,17 +2258,17 @@ ParseFloatLiteral(WasmParseContext& c, W
+       case WasmToken::UnsignedInteger: result = token.uint(); break;
+       case WasmToken::SignedInteger:   result = token.sint(); break;
+       case WasmToken::NegativeZero:    result = -0.; break;
+       case WasmToken::Float:           break;
+       default:                         c.ts.generateError(token, c.error); return nullptr;
+     }
+ 
+     if (token.kind() != WasmToken::Float)
+-        return new (c.lifo) AstConst(Val(Float(result)));
++        return new (c.lifo) AstConst(LitVal(Float(result)));
+ 
+     const char16_t* begin = token.begin();
+     const char16_t* end = token.end();
+     const char16_t* cur = begin;
+ 
+     bool isNegated = false;
+     if (*cur == '-' || *cur == '+')
+         isNegated = *cur++ == '-';
+@@ -2308,51 +2308,51 @@ ParseFloatLiteral(WasmParseContext& c, W
+         c.lifo.release(mark);
+         break;
+       }
+     }
+ 
+     if (isNegated)
+         result = -result;
+ 
+-    return new (c.lifo) AstConst(Val(Float(result)));
++    return new (c.lifo) AstConst(LitVal(Float(result)));
+ }
+ 
+ static AstConst*
+ ParseConst(WasmParseContext& c, WasmToken constToken)
+ {
+     WasmToken val = c.ts.get();
+     switch (constToken.valueType().code()) {
+       case ValType::I32: {
+         switch (val.kind()) {
+           case WasmToken::Index:
+-            return new(c.lifo) AstConst(Val(val.index()));
++            return new(c.lifo) AstConst(LitVal(val.index()));
+           case WasmToken::SignedInteger: {
+             CheckedInt<int32_t> sint = val.sint();
+             if (!sint.isValid())
+                 break;
+-            return new(c.lifo) AstConst(Val(uint32_t(sint.value())));
++            return new(c.lifo) AstConst(LitVal(uint32_t(sint.value())));
+           }
+           case WasmToken::NegativeZero:
+-            return new(c.lifo) AstConst(Val(uint32_t(0)));
++            return new(c.lifo) AstConst(LitVal(uint32_t(0)));
+           default:
+             break;
+         }
+         break;
+       }
+       case ValType::I64: {
+         switch (val.kind()) {
+           case WasmToken::Index:
+-            return new(c.lifo) AstConst(Val(uint64_t(val.index())));
++            return new(c.lifo) AstConst(LitVal(uint64_t(val.index())));
+           case WasmToken::UnsignedInteger:
+-            return new(c.lifo) AstConst(Val(val.uint()));
++            return new(c.lifo) AstConst(LitVal(val.uint()));
+           case WasmToken::SignedInteger:
+-            return new(c.lifo) AstConst(Val(uint64_t(val.sint())));
++            return new(c.lifo) AstConst(LitVal(uint64_t(val.sint())));
+           case WasmToken::NegativeZero:
+-            return new(c.lifo) AstConst(Val(uint64_t(0)));
++            return new(c.lifo) AstConst(LitVal(uint64_t(0)));
+           default:
+             break;
+         }
+         break;
+       }
+       case ValType::F32: {
+         return ParseFloatLiteral<float>(c, val);
+       }
+@@ -3585,17 +3585,17 @@ ParseMemory(WasmParseContext& c, AstModu
+         size_t totalLength = 0;
+         while (c.ts.getIf(WasmToken::Text, &data)) {
+             if (!fragments.append(data.text()))
+                 return false;
+             totalLength += data.text().length();
+         }
+ 
+         if (fragments.length()) {
+-            AstExpr* offset = new(c.lifo) AstConst(Val(uint32_t(0)));
++            AstExpr* offset = new(c.lifo) AstConst(LitVal(uint32_t(0)));
+             if (!offset)
+                 return false;
+ 
+             AstDataSegment* segment = new(c.lifo) AstDataSegment(offset, std::move(fragments));
+             if (!segment || !module->append(segment))
+                 return false;
+ 
+             pages = AlignBytes<size_t>(totalLength, PageSize) / PageSize;
+@@ -3905,17 +3905,17 @@ ParseTable(WasmParseContext& c, WasmToke
+ 
+     uint32_t numElements = uint32_t(elems.length());
+     if (numElements != elems.length())
+         return false;
+ 
+     if (!module->addTable(name, Limits(numElements, Some(numElements), Shareable::False)))
+         return false;
+ 
+-    auto* zero = new(c.lifo) AstConst(Val(uint32_t(0)));
++    auto* zero = new(c.lifo) AstConst(LitVal(uint32_t(0)));
+     if (!zero)
+         return false;
+ 
+     AstElemSegment* segment = new(c.lifo) AstElemSegment(zero, std::move(elems));
+     return segment && module->append(segment);
+ }
+ 
+ static AstElemSegment*
+diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
+--- a/js/src/wasm/WasmTypes.cpp
++++ b/js/src/wasm/WasmTypes.cpp
+@@ -55,17 +55,17 @@ static_assert(MaxMemoryInitialPages <= A
+ // loads/stores, hence the lower limit of 8.  Some Intel processors support
+ // AVX-512 loads/stores, hence the upper limit of 64.
+ static_assert(MaxMemoryAccessSize >= 8,  "MaxMemoryAccessSize too low");
+ static_assert(MaxMemoryAccessSize <= 64, "MaxMemoryAccessSize too high");
+ static_assert((MaxMemoryAccessSize & (MaxMemoryAccessSize-1)) == 0,
+               "MaxMemoryAccessSize is not a power of two");
+ 
+ void
+-Val::writePayload(uint8_t* dst) const
++LitVal::writePayload(uint8_t* dst) const
+ {
+     switch (type_.code()) {
+       case ValType::I32:
+       case ValType::F32:
+         memcpy(dst, &u.i32_, sizeof(u.i32_));
+         return;
+       case ValType::I64:
+       case ValType::F64:
+@@ -80,17 +80,17 @@ Val::writePayload(uint8_t* dst) const
+       case ValType::B32x4:
+         memcpy(dst, &u, jit::Simd128DataSize);
+         return;
+       case ValType::Ref:
+       case ValType::AnyRef:
+         memcpy(dst, &u.ptr_, sizeof(intptr_t));
+         return;
+     }
+-    MOZ_CRASH("unexpected Val type");
++    MOZ_CRASH("unexpected LitVal type");
+ }
+ 
+ bool
+ wasm::IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode)
+ {
+     switch (callee) {
+       case SymbolicAddress::FloorD:
+       case SymbolicAddress::FloorF:
+diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
+--- a/js/src/wasm/WasmTypes.h
++++ b/js/src/wasm/WasmTypes.h
+@@ -761,65 +761,65 @@ enum class Shareable
+ };
+ 
+ enum class HasGcTypes
+ {
+     False,
+     True
+ };
+ 
+-// The Val class represents a single WebAssembly value of a given value type,
+-// mostly for the purpose of numeric literals and initializers. A Val does not
+-// directly map to a JS value since there is not (currently) a precise
+-// representation of i64 values. A Val may contain non-canonical NaNs since,
++// The LitVal class represents a single WebAssembly value of a given value
++// type, mostly for the purpose of numeric literals and initializers. A LitVal
++// does not directly map to a JS value since there is not (currently) a precise
++// representation of i64 values. A LitVal may contain non-canonical NaNs since,
+ // within WebAssembly, floats are not canonicalized. Canonicalization must
+ // happen at the JS boundary.
+ 
+-class Val
++class LitVal
+ {
+     ValType type_;
+     union U {
+         uint32_t i32_;
+         uint64_t i64_;
+         float f32_;
+         double f64_;
+         I8x16 i8x16_;
+         I16x8 i16x8_;
+         I32x4 i32x4_;
+         F32x4 f32x4_;
+         intptr_t ptr_;
+     } u;
+ 
+   public:
+-    Val() = default;
+-
+-    explicit Val(uint32_t i32) : type_(ValType::I32) { u.i32_ = i32; }
+-    explicit Val(uint64_t i64) : type_(ValType::I64) { u.i64_ = i64; }
+-
+-    explicit Val(float f32) : type_(ValType::F32) { u.f32_ = f32; }
+-    explicit Val(double f64) : type_(ValType::F64) { u.f64_ = f64; }
+-
+-    explicit Val(ValType refType, void* ptr) : type_(refType) {
++    LitVal() = default;
++
++    explicit LitVal(uint32_t i32) : type_(ValType::I32) { u.i32_ = i32; }
++    explicit LitVal(uint64_t i64) : type_(ValType::I64) { u.i64_ = i64; }
++
++    explicit LitVal(float f32) : type_(ValType::F32) { u.f32_ = f32; }
++    explicit LitVal(double f64) : type_(ValType::F64) { u.f64_ = f64; }
++
++    explicit LitVal(ValType refType, void* ptr) : type_(refType) {
+         MOZ_ASSERT(refType.isRefOrAnyRef());
+         u.ptr_ = intptr_t(ptr);
+     }
+ 
+-    explicit Val(const I8x16& i8x16, ValType type = ValType::I8x16) : type_(type) {
++    explicit LitVal(const I8x16& i8x16, ValType type = ValType::I8x16) : type_(type) {
+         MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
+         memcpy(u.i8x16_, i8x16, sizeof(u.i8x16_));
+     }
+-    explicit Val(const I16x8& i16x8, ValType type = ValType::I16x8) : type_(type) {
++    explicit LitVal(const I16x8& i16x8, ValType type = ValType::I16x8) : type_(type) {
+         MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
+         memcpy(u.i16x8_, i16x8, sizeof(u.i16x8_));
+     }
+-    explicit Val(const I32x4& i32x4, ValType type = ValType::I32x4) : type_(type) {
++    explicit LitVal(const I32x4& i32x4, ValType type = ValType::I32x4) : type_(type) {
+         MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
+         memcpy(u.i32x4_, i32x4, sizeof(u.i32x4_));
+     }
+-    explicit Val(const F32x4& f32x4) : type_(ValType::F32x4) {
++    explicit LitVal(const F32x4& f32x4) : type_(ValType::F32x4) {
+         memcpy(u.f32x4_, f32x4, sizeof(u.f32x4_));
+     }
+ 
+     ValType type() const { return type_; }
+     bool isSimd() const { return IsSimdType(type()); }
+     static constexpr size_t sizeofLargestValue() { return sizeof(u); }
+ 
+     uint32_t i32() const { MOZ_ASSERT(type_ == ValType::I32); return u.i32_; }
+@@ -843,17 +843,17 @@ class Val
+     const F32x4& f32x4() const {
+         MOZ_ASSERT(type_ == ValType::F32x4);
+         return u.f32x4_;
+     }
+ 
+     void writePayload(uint8_t* dst) const;
+ };
+ 
+-typedef Vector<Val, 0, SystemAllocPolicy> ValVector;
++typedef Vector<LitVal, 0, SystemAllocPolicy> LitValVector;
+ 
+ // The FuncType class represents a WebAssembly function signature which takes a
+ // list of value types and returns an expression type. The engine uses two
+ // in-memory representations of the argument Vector's memory (when elements do
+ // not fit inline): normal malloc allocation (via SystemAllocPolicy) and
+ // allocation in a LifoAlloc (via LifoAllocPolicy). The former FuncType objects
+ // can have any lifetime since they own the memory. The latter FuncType objects
+ // must not outlive the associated LifoAlloc mark/release interval (which is
+@@ -965,40 +965,40 @@ class InitExpr
+     enum class Kind {
+         Constant,
+         GetGlobal
+     };
+ 
+   private:
+     Kind kind_;
+     union U {
+-        Val val_;
++        LitVal val_;
+         struct {
+             uint32_t index_;
+             ValType type_;
+         } global;
+         U() : global{} {}
+     } u;
+ 
+   public:
+     InitExpr() = default;
+ 
+-    explicit InitExpr(Val val) : kind_(Kind::Constant) {
++    explicit InitExpr(LitVal val) : kind_(Kind::Constant) {
+         u.val_ = val;
+     }
+ 
+     explicit InitExpr(uint32_t globalIndex, ValType type) : kind_(Kind::GetGlobal) {
+         u.global.index_ = globalIndex;
+         u.global.type_ = type;
+     }
+ 
+     Kind kind() const { return kind_; }
+ 
+     bool isVal() const { return kind() == Kind::Constant; }
+-    Val val() const { MOZ_ASSERT(isVal()); return u.val_; }
++    LitVal val() const { MOZ_ASSERT(isVal()); return u.val_; }
+ 
+     uint32_t globalIndex() const { MOZ_ASSERT(kind() == Kind::GetGlobal); return u.global.index_; }
+ 
+     ValType type() const {
+         switch (kind()) {
+           case Kind::Constant:  return u.val_.type();
+           case Kind::GetGlobal: return u.global.type_;
+         }
+@@ -1100,17 +1100,17 @@ class GlobalDesc
+                 } import;
+                 U() : import{} {}
+             } val;
+             unsigned offset_;
+             bool isMutable_;
+             bool isWasm_;
+             bool isExport_;
+         } var;
+-        Val cst_;
++        LitVal cst_;
+         V() {}
+     } u;
+     GlobalKind kind_;
+ 
+     // Private, as they have unusual semantics.
+ 
+     bool isExport() const { return !isConstant() && u.var.isExport_; }
+     bool isWasm() const { return !isConstant() && u.var.isWasm_; }
+@@ -1161,17 +1161,17 @@ class GlobalDesc
+     }
+ 
+     GlobalKind kind() const { return kind_; }
+     bool isVariable() const { return kind_ == GlobalKind::Variable; }
+     bool isConstant() const { return kind_ == GlobalKind::Constant; }
+     bool isImport() const { return kind_ == GlobalKind::Import; }
+ 
+     bool isMutable() const { return !isConstant() && u.var.isMutable_; }
+-    Val constantValue() const { MOZ_ASSERT(isConstant()); return u.cst_; }
++    LitVal constantValue() const { MOZ_ASSERT(isConstant()); return u.cst_; }
+     const InitExpr& initExpr() const { MOZ_ASSERT(isVariable()); return u.var.val.initial_; }
+     uint32_t importIndex() const { MOZ_ASSERT(isImport()); return u.var.val.import.index_; }
+ 
+     // If isIndirect() is true then storage for the value is not in the
+     // instance's global area, but in a WasmGlobalObject::Cell hanging off a
+     // WasmGlobalObject; the global area contains a pointer to the Cell.
+     //
+     // We don't want to indirect unless we must, so only mutable, exposed
+@@ -2317,17 +2317,17 @@ static const unsigned PageSize = 64 * 10
+ 
+ // Bounds checks always compare the base of the memory access with the bounds
+ // check limit. If the memory access is unaligned, this means that, even if the
+ // bounds check succeeds, a few bytes of the access can extend past the end of
+ // memory. To guard against this, extra space is included in the guard region to
+ // catch the overflow. MaxMemoryAccessSize is a conservative approximation of
+ // the maximum guard space needed to catch all unaligned overflows.
+ 
+-static const unsigned MaxMemoryAccessSize = Val::sizeofLargestValue();
++static const unsigned MaxMemoryAccessSize = LitVal::sizeofLargestValue();
+ 
+ #ifdef WASM_HUGE_MEMORY
+ 
+ // On WASM_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
+ // unconditionally allocates a huge region of virtual memory of size
+ // wasm::HugeMappedSize. This allows all memory resizing to work without
+ // reallocation and provides enough guard space for all offsets to be folded
+ // into memory accesses.
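
The comment above sizes the guard region from LitVal's union. A worked
example, assuming the 16-byte SIMD payload is the largest union member:

    // A bounds check compares only the access *base* against the limit, so a
    // 16-byte access based at (limit - 1) can touch up to (limit - 1) + 15 =
    // limit + 14. MaxMemoryAccessSize bytes of guard space past the limit
    // therefore absorb the worst spill of any single unaligned access.
    constexpr unsigned kLargestValue = 16;              // LitVal::sizeofLargestValue() here
    constexpr unsigned kWorstSpill = kLargestValue - 1; // bytes reachable past the limit
    static_assert(kWorstSpill < kLargestValue, "a guard of kLargestValue bytes suffices");
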
+diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
+--- a/js/src/wasm/WasmValidate.cpp
++++ b/js/src/wasm/WasmValidate.cpp
+@@ -1645,50 +1645,50 @@ DecodeInitializerExpression(Decoder& d, 
+     if (!d.readOp(&op))
+         return d.fail("failed to read initializer type");
+ 
+     switch (op.b0) {
+       case uint16_t(Op::I32Const): {
+         int32_t i32;
+         if (!d.readVarS32(&i32))
+             return d.fail("failed to read initializer i32 expression");
+-        *init = InitExpr(Val(uint32_t(i32)));
++        *init = InitExpr(LitVal(uint32_t(i32)));
+         break;
+       }
+       case uint16_t(Op::I64Const): {
+         int64_t i64;
+         if (!d.readVarS64(&i64))
+             return d.fail("failed to read initializer i64 expression");
+-        *init = InitExpr(Val(uint64_t(i64)));
++        *init = InitExpr(LitVal(uint64_t(i64)));
+         break;
+       }
+       case uint16_t(Op::F32Const): {
+         float f32;
+         if (!d.readFixedF32(&f32))
+             return d.fail("failed to read initializer f32 expression");
+-        *init = InitExpr(Val(f32));
++        *init = InitExpr(LitVal(f32));
+         break;
+       }
+       case uint16_t(Op::F64Const): {
+         double f64;
+         if (!d.readFixedF64(&f64))
+             return d.fail("failed to read initializer f64 expression");
+-        *init = InitExpr(Val(f64));
++        *init = InitExpr(LitVal(f64));
+         break;
+       }
+       case uint16_t(Op::RefNull): {
+         if (gcTypesEnabled == HasGcTypes::False)
+             return d.fail("unexpected initializer expression");
+         uint8_t valType;
+         uint32_t unusedRefTypeIndex;
+         if (!d.readValType(&valType, &unusedRefTypeIndex))
+             return false;
+         if (valType != uint8_t(ValType::AnyRef))
+             return d.fail("expected anyref as type for ref.null");
+-        *init = InitExpr(Val(ValType::AnyRef, nullptr));
++        *init = InitExpr(LitVal(ValType::AnyRef, nullptr));
+         break;
+       }
+       case uint16_t(Op::GetGlobal): {
+         uint32_t i;
+         if (!d.readVarU32(&i))
+             return d.fail("failed to read get_global index in initializer expression");
+         if (i >= globals.length())
+             return d.fail("global index out of range in initializer expression");

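The patch above is a mechanical rename: the literal-value class Val becomes
LitVal throughout (WasmJS, WasmModule, WasmTextToBinary, WasmTypes,
WasmValidate), clearing the name for the rooted Val introduced by the next
patch. The constructor shapes after the rename, as declared in the
WasmTypes.h hunk (a sketch that only compiles inside js::wasm):

    static void LitValExamples() {
        LitVal i32(uint32_t(7));                  // explicit LitVal(uint32_t)
        LitVal i64(uint64_t(7));                  // explicit LitVal(uint64_t)
        LitVal f32(1.0f);                         // explicit LitVal(float)
        LitVal f64(1.0);                          // explicit LitVal(double)
        LitVal nullRef(ValType::AnyRef, nullptr); // null anyref payload
    }
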
+ 1430 - 0
frg/work-js/mozilla-release/patches/1450261-4-63a1.patch

@@ -0,0 +1,1430 @@
+# HG changeset patch
+# User Benjamin Bouvier <benj@benj.me>
+# Date 1530283400 -7200
+# Node ID 50d6babb2ec730b1d460e74b0957db7c55b0b0dc
+# Parent  2e575c38eb49d02914e673df2a3b36206215cc42
+Bug 1450261: Implement Val, a rooted LitVal; r=luke, r=jonco
+
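
The commit message names the technique: Val is a rooted LitVal. The class
declarations themselves are not in the hunks below, so the following is an
assumed sketch of the shape the rest of this patch relies on (promotion from
LitVal, a trace hook for the anyref payload, RootedVal/HandleVal aliases):

    // Assumed shape, not the verbatim patch: Val extends LitVal with trace(),
    // so Rooted<Val> keeps an anyref payload alive and updated by a moving GC.
    class Val : public LitVal {
      public:
        Val() = default;
        explicit Val(const LitVal& val) : LitVal(val) {}  // promote a literal
        explicit Val(uint32_t i32) : LitVal(i32) {}
        explicit Val(JSObject* obj) : LitVal(ValType::AnyRef, obj) {}
        void trace(JSTracer* trc);  // traces the pointer when type() is AnyRef
    };
    typedef JS::Rooted<Val> RootedVal;  // as in `RootedVal val(cx);` below
    typedef JS::Handle<Val> HandleVal;  // as in ToWebAssemblyValue below
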
+diff --git a/js/src/devtools/automation/variants/msan b/js/src/devtools/automation/variants/msan
+--- a/js/src/devtools/automation/variants/msan
++++ b/js/src/devtools/automation/variants/msan
+@@ -4,11 +4,11 @@
+     "debug": false,
+     "compiler": "clang",
+     "env": {
+         "JITTEST_EXTRA_ARGS": "--jitflags=interp --ignore-timeouts={DIR}/cgc-jittest-timeouts.txt",
+         "JSTESTS_EXTRA_ARGS": "--jitflags=interp --exclude-file={DIR}/cgc-jstests-slow.txt",
+         "MSAN_OPTIONS": "external_symbolizer_path={TOOLTOOL_CHECKOUT}/clang/bin/llvm-symbolizer:log_path={OUTDIR}/sanitize_log"
+     },
+     "ignore-test-failures": "true",
+-    "max-errors": 6,
++    "max-errors": 7,
+     "use_minidump": false
+ }
+diff --git a/js/src/jit-test/tests/wasm/gc/anyref-val-tracing.js b/js/src/jit-test/tests/wasm/gc/anyref-val-tracing.js
+new file mode 100644
+--- /dev/null
++++ b/js/src/jit-test/tests/wasm/gc/anyref-val-tracing.js
+@@ -0,0 +1,14 @@
++if (!wasmGcEnabled()) {
++    quit(0);
++}
++
++gczeal(14, 1);
++let { exports } = wasmEvalText(`(module
++    (global $anyref (import "glob" "anyref") anyref)
++    (func (export "get") (result anyref) get_global $anyref)
++)`, {
++    glob: {
++        anyref: { sentinel: "lol" },
++    }
++});
++assertEq(exports.get().sentinel, "lol");
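
This new test reads an anyref global back through wasm while gczeal forces
frequent collections, so it fails unless the global's JSObject cell is
traced. A reduced sketch of the hook it exercises (the real one is the
TraceManuallyBarrieredEdge call added to WasmGlobalObject::trace further
down in this patch):

    // Reduced sketch: a manually barriered edge keeps the anyref object alive
    // and lets a moving GC update the stored pointer in place.
    static void TraceAnyrefCell(JSTracer* trc, JSObject** ptr) {
        if (*ptr)
            TraceManuallyBarrieredEdge(trc, ptr, "wasm anyref global");
    }
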
+diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
+--- a/js/src/wasm/AsmJS.cpp
++++ b/js/src/wasm/AsmJS.cpp
+@@ -7666,105 +7666,106 @@ HasPureCoercion(JSContext* cx, HandleVal
+     {
+         return true;
+     }
+ 
+     return false;
+ }
+ 
+ static bool
+-ValidateGlobalVariable(JSContext* cx, const AsmJSGlobal& global, HandleValue importVal, LitVal* val)
++ValidateGlobalVariable(JSContext* cx, const AsmJSGlobal& global, HandleValue importVal,
++                       Maybe<LitVal>* val)
+ {
+     switch (global.varInitKind()) {
+       case AsmJSGlobal::InitConstant:
+-        *val = global.varInitVal();
++        val->emplace(global.varInitVal());
+         return true;
+ 
+       case AsmJSGlobal::InitImport: {
+         RootedValue v(cx);
+         if (!GetDataProperty(cx, importVal, global.field(), &v))
+             return false;
+ 
+         if (!v.isPrimitive() && !HasPureCoercion(cx, v))
+             return LinkFail(cx, "Imported values must be primitives");
+ 
+         switch (global.varInitImportType().code()) {
+           case ValType::I32: {
+             int32_t i32;
+             if (!ToInt32(cx, v, &i32))
+                 return false;
+-            *val = LitVal(uint32_t(i32));
++            val->emplace(uint32_t(i32));
+             return true;
+           }
+           case ValType::I64:
+             MOZ_CRASH("int64");
+           case ValType::F32: {
+             float f;
+             if (!RoundFloat32(cx, v, &f))
+                 return false;
+-            *val = LitVal(f);
++            val->emplace(f);
+             return true;
+           }
+           case ValType::F64: {
+             double d;
+             if (!ToNumber(cx, v, &d))
+                 return false;
+-            *val = LitVal(d);
++            val->emplace(d);
+             return true;
+           }
+           case ValType::I8x16: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Int8x16>(cx, v, &simdConstant))
+                 return false;
+-            *val = LitVal(simdConstant.asInt8x16());
++            val->emplace(simdConstant.asInt8x16());
+             return true;
+           }
+           case ValType::I16x8: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Int16x8>(cx, v, &simdConstant))
+                 return false;
+-            *val = LitVal(simdConstant.asInt16x8());
++            val->emplace(simdConstant.asInt16x8());
+             return true;
+           }
+           case ValType::I32x4: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Int32x4>(cx, v, &simdConstant))
+                 return false;
+-            *val = LitVal(simdConstant.asInt32x4());
++            val->emplace(simdConstant.asInt32x4());
+             return true;
+           }
+           case ValType::F32x4: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Float32x4>(cx, v, &simdConstant))
+                 return false;
+-            *val = LitVal(simdConstant.asFloat32x4());
++            val->emplace(simdConstant.asFloat32x4());
+             return true;
+           }
+           case ValType::B8x16: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Bool8x16>(cx, v, &simdConstant))
+                 return false;
+             // Bool8x16 uses the same data layout as Int8x16.
+-            *val = LitVal(simdConstant.asInt8x16());
++            val->emplace(simdConstant.asInt8x16());
+             return true;
+           }
+           case ValType::B16x8: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Bool16x8>(cx, v, &simdConstant))
+                 return false;
+             // Bool16x8 uses the same data layout as Int16x8.
+-            *val = LitVal(simdConstant.asInt16x8());
++            val->emplace(simdConstant.asInt16x8());
+             return true;
+           }
+           case ValType::B32x4: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Bool32x4>(cx, v, &simdConstant))
+                 return false;
+             // Bool32x4 uses the same data layout as Int32x4.
+-            *val = LitVal(simdConstant.asInt32x4());
++            val->emplace(simdConstant.asInt32x4());
+             return true;
+           }
+           case ValType::Ref:
+           case ValType::AnyRef: {
+             MOZ_CRASH("not available in asm.js");
+           }
+         }
+       }
+@@ -8129,29 +8130,29 @@ CheckBuffer(JSContext* cx, const AsmJSMe
+ 
+     MOZ_ASSERT(buffer->isPreparedForAsmJS());
+     return true;
+ }
+ 
+ static bool
+ GetImports(JSContext* cx, const AsmJSMetadata& metadata, HandleValue globalVal,
+            HandleValue importVal, MutableHandle<FunctionVector> funcImports,
+-           LitValVector* valImports)
++           MutableHandleValVector valImports)
+ {
+     Rooted<FunctionVector> ffis(cx, FunctionVector(cx));
+     if (!ffis.resize(metadata.numFFIs))
+         return false;
+ 
+     for (const AsmJSGlobal& global : metadata.asmJSGlobals) {
+         switch (global.which()) {
+           case AsmJSGlobal::Variable: {
+-            LitVal val;
+-            if (!ValidateGlobalVariable(cx, global, importVal, &val))
++            Maybe<LitVal> litVal;
++            if (!ValidateGlobalVariable(cx, global, importVal, &litVal))
+                 return false;
+-            if (!valImports->append(val))
++            if (!valImports.append(Val(*litVal)))
+                 return false;
+             break;
+           }
+           case AsmJSGlobal::FFI:
+             if (!ValidateFFI(cx, global, importVal, &ffis))
+                 return false;
+             break;
+           case AsmJSGlobal::ArrayView:
+@@ -8204,25 +8205,26 @@ TryInstantiate(JSContext* cx, CallArgs a
+         if (!CheckBuffer(cx, metadata, bufferVal, &buffer))
+             return false;
+ 
+         memory = WasmMemoryObject::create(cx, buffer, nullptr);
+         if (!memory)
+             return false;
+     }
+ 
+-    LitValVector valImports;
++    RootedValVector valImports(cx);
+     Rooted<FunctionVector> funcs(cx, FunctionVector(cx));
+     if (!GetImports(cx, metadata, globalVal, importVal, &funcs, &valImports))
+         return false;
+ 
+     Rooted<WasmGlobalObjectVector> globalObjs(cx);
+ 
+     RootedWasmTableObject table(cx);
+-    if (!module.instantiate(cx, funcs, table, memory, valImports, globalObjs.get(), nullptr, instanceObj))
++    if (!module.instantiate(cx, funcs, table, memory, valImports, globalObjs.get(), nullptr,
++                            instanceObj))
+         return false;
+ 
+     exportObj.set(&instanceObj->exportsObj());
+     return true;
+ }
+ 
+ static bool
+ HandleInstantiationFailure(JSContext* cx, CallArgs args, const AsmJSMetadata& metadata)
+diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
+--- a/js/src/wasm/WasmBaselineCompile.cpp
++++ b/js/src/wasm/WasmBaselineCompile.cpp
+@@ -8334,17 +8334,17 @@ BaseCompiler::emitGetGlobal()
+             break;
+           case ValType::F32:
+             pushF32(value.f32());
+             break;
+           case ValType::F64:
+             pushF64(value.f64());
+             break;
+           case ValType::AnyRef:
+-            pushRef(value.ptr());
++            pushRef(intptr_t(value.ptr()));
+             break;
+           default:
+             MOZ_CRASH("Global constant type");
+         }
+         return true;
+     }
+ 
+     switch (global.type().code()) {
+diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
+--- a/js/src/wasm/WasmInstance.cpp
++++ b/js/src/wasm/WasmInstance.cpp
+@@ -400,25 +400,22 @@ Instance::wake(Instance* instance, uint3
+ /* static */ int32_t
+ Instance::memCopy(Instance* instance, uint32_t destByteOffset, uint32_t srcByteOffset, uint32_t len)
+ {
+     WasmMemoryObject* mem = instance->memory();
+     uint32_t memLen = mem->volatileMemoryLength();
+ 
+     // Knowing that len > 0 below simplifies the wraparound checks.
+     if (len == 0) {
+-
+         // Even though the length is zero, we must check for a valid offset.
+         if (destByteOffset < memLen && srcByteOffset < memLen)
+             return 0;
+ 
+         // else fall through to failure case
+-
+     } else {
+-
+         ArrayBufferObjectMaybeShared& arrBuf = mem->buffer();
+         uint8_t* rawBuf = arrBuf.dataPointerEither().unwrap();
+ 
+         // Here, we know that |len - 1| cannot underflow.
+         typedef CheckedInt<uint32_t> CheckedU32;
+         CheckedU32 highest_destOffset = CheckedU32(destByteOffset) + CheckedU32(len - 1);
+         CheckedU32 highest_srcOffset = CheckedU32(srcByteOffset) + CheckedU32(len - 1);
+ 
+@@ -441,40 +438,36 @@ Instance::memCopy(Instance* instance, ui
+ /* static */ int32_t
+ Instance::memFill(Instance* instance, uint32_t byteOffset, uint32_t value, uint32_t len)
+ {
+     WasmMemoryObject* mem = instance->memory();
+     uint32_t memLen = mem->volatileMemoryLength();
+ 
+     // Knowing that len > 0 below simplifies the wraparound check.
+     if (len == 0) {
+-
+         // Even though the length is zero, we must check for a valid offset.
+         if (byteOffset < memLen)
+             return 0;
+ 
+         // else fall through to failure case
+-
+     } else {
+-
+         ArrayBufferObjectMaybeShared& arrBuf = mem->buffer();
+         uint8_t* rawBuf = arrBuf.dataPointerEither().unwrap();
+ 
+         // Here, we know that |len - 1| cannot underflow.
+         typedef CheckedInt<uint32_t> CheckedU32;
+         CheckedU32 highest_offset = CheckedU32(byteOffset) + CheckedU32(len - 1);
+ 
+         if (highest_offset.isValid() &&     // wraparound check
+             highest_offset.value() < memLen)     // range check
+         {
+             memset(rawBuf + byteOffset, int(value), size_t(len));
+             return 0;
+         }
+         // else fall through to failure case
+-
+     }
+ 
+     JSContext* cx = TlsContext.get();
+     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_OUT_OF_BOUNDS);
+     return -1;
+ }
+ 
+ /* static */ void
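
The len == 0 early-outs above exist so the main path can assume len > 0; a
standalone restatement of the wraparound-plus-range check that both memCopy
and memFill perform (helper name is a stand-in):

    #include <stdint.h>
    #include "mozilla/Assertions.h"
    #include "mozilla/CheckedInt.h"

    // With len > 0 guaranteed, len - 1 cannot underflow, and CheckedInt
    // flags offset + (len - 1) wrapping past UINT32_MAX.
    static bool AccessInBounds(uint32_t offset, uint32_t len, uint32_t memLen) {
        MOZ_ASSERT(len > 0);
        mozilla::CheckedInt<uint32_t> highest =
            mozilla::CheckedInt<uint32_t>(offset) + (len - 1);
        return highest.isValid() &&       // wraparound check
               highest.value() < memLen;  // range check
    }
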
+@@ -502,17 +495,17 @@ Instance::postBarrier(Instance* instance
+ Instance::Instance(JSContext* cx,
+                    Handle<WasmInstanceObject*> object,
+                    SharedCode code,
+                    UniqueDebugState debug,
+                    UniqueTlsData tlsDataIn,
+                    HandleWasmMemoryObject memory,
+                    SharedTableVector&& tables,
+                    Handle<FunctionVector> funcImports,
+-                   const LitValVector& globalImportValues,
++                   HandleValVector globalImportValues,
+                    const WasmGlobalObjectVector& globalObjs)
+   : realm_(cx->realm()),
+     object_(object),
+     code_(code),
+     debug_(std::move(debug)),
+     tlsData_(std::move(tlsDataIn)),
+     memory_(memory),
+     tables_(std::move(tables)),
+@@ -581,42 +574,43 @@ Instance::Instance(JSContext* cx,
+ 
+         uint8_t* globalAddr = globalData() + global.offset();
+         switch (global.kind()) {
+           case GlobalKind::Import: {
+             size_t imported = global.importIndex();
+             if (global.isIndirect())
+                 *(void**)globalAddr = globalObjs[imported]->cell();
+             else
+-                globalImportValues[imported].writePayload(globalAddr);
++                globalImportValues[imported].get().writePayload(globalAddr);
+             break;
+           }
+           case GlobalKind::Variable: {
+             const InitExpr& init = global.initExpr();
+             switch (init.kind()) {
+               case InitExpr::Kind::Constant: {
+                 if (global.isIndirect())
+                     *(void**)globalAddr = globalObjs[i]->cell();
+                 else
+-                    init.val().writePayload(globalAddr);
++                    Val(init.val()).writePayload(globalAddr);
+                 break;
+               }
+               case InitExpr::Kind::GetGlobal: {
+                 const GlobalDesc& imported = metadata().globals[init.globalIndex()];
+ 
+                 // Global-ref initializers cannot reference mutable globals, so
+                 // the source global should never be indirect.
+                 MOZ_ASSERT(!imported.isIndirect());
+ 
++                RootedVal dest(cx, globalImportValues[imported.importIndex()].get());
+                 if (global.isIndirect()) {
+                     void* address = globalObjs[i]->cell();
+                     *(void**)globalAddr = address;
+-                    globalImportValues[imported.importIndex()].writePayload((uint8_t*)address);
++                    dest.get().writePayload((uint8_t*)address);
+                 } else {
+-                    globalImportValues[imported.importIndex()].writePayload(globalAddr);
++                    dest.get().writePayload(globalAddr);
+                 }
+                 break;
+               }
+             }
+             break;
+           }
+           case GlobalKind::Constant: {
+             MOZ_CRASH("skipped at the top");
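
Every branch of the constructor above funnels through writePayload (shown in
WasmTypes.cpp of the previous patch), writing raw value bits either into the
instance's global data area or into a WasmGlobalObject::Cell for indirect
globals. The copy itself, in miniature (a sketch using the in-tree types;
the values are hypothetical):

    static void WriteI32Global(JSContext* cx, uint8_t* globalAddr) {
        RootedVal v(cx, Val(uint32_t(42)));  // rooted while we hold it
        v.get().writePayload(globalAddr);    // memcpy of the 4-byte i32 payload
    }
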
+diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
+--- a/js/src/wasm/WasmInstance.h
++++ b/js/src/wasm/WasmInstance.h
+@@ -73,17 +73,17 @@ class Instance
+     Instance(JSContext* cx,
+              HandleWasmInstanceObject object,
+              SharedCode code,
+              UniqueDebugState debug,
+              UniqueTlsData tlsData,
+              HandleWasmMemoryObject memory,
+              SharedTableVector&& tables,
+              Handle<FunctionVector> funcImports,
+-             const LitValVector& globalImportValues,
++             HandleValVector globalImportValues,
+              const WasmGlobalObjectVector& globalObjs);
+     ~Instance();
+     bool init(JSContext* cx);
+     void trace(JSTracer* trc);
+ 
+     JS::Realm* realm() const { return realm_; }
+     const Code& code() const { return *code_; }
+     const CodeTier& code(Tier t) const { return code_->codeTier(t); }
+diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
+--- a/js/src/wasm/WasmJS.cpp
++++ b/js/src/wasm/WasmJS.cpp
+@@ -40,16 +40,26 @@
+ #include "wasm/WasmSignalHandlers.h"
+ #include "wasm/WasmStubs.h"
+ #include "wasm/WasmValidate.h"
+ 
+ #include "vm/ArrayBufferObject-inl.h"
+ #include "vm/JSObject-inl.h"
+ #include "vm/NativeObject-inl.h"
+ 
++#define WASM_CRASH_IF_SIMD_TYPES \
++    case ValType::I8x16: \
++    case ValType::B8x16: \
++    case ValType::I16x8: \
++    case ValType::B16x8: \
++    case ValType::I32x4: \
++    case ValType::B32x4: \
++    case ValType::F32x4: \
++      MOZ_CRASH("unexpected SIMD type")
++
+ using namespace js;
+ using namespace js::jit;
+ using namespace js::wasm;
+ 
+ using mozilla::CheckedInt;
+ using mozilla::Nothing;
+ using mozilla::RangedPtr;
+ 
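
WASM_CRASH_IF_SIMD_TYPES, defined just above, expands to the seven SIMD case
labels followed by MOZ_CRASH, which lets the switches in this file enumerate
every ValType instead of hiding behind a default: clause, so the compiler can
flag newly added types. The usage shape (an illustrative fragment, matching
ToJSValue below):

    switch (type.code()) {
      case ValType::I32:
      case ValType::F32:
      case ValType::F64:
      case ValType::AnyRef:
        /* handled */
        break;
      WASM_CRASH_IF_SIMD_TYPES;  // I8x16 ... F32x4 all crash here
      case ValType::Ref:
      case ValType::I64:
        break;                   // rejected with MOZ_CRASH after the switch
    }
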
+@@ -105,74 +115,81 @@ bool
+ wasm::HasSupport(JSContext* cx)
+ {
+     return cx->options().wasm() &&
+            HasCompilerSupport(cx) &&
+            HasAvailableCompilerTier(cx);
+ }
+ 
+ static bool
+-ToWebAssemblyValue(JSContext* cx, ValType targetType, HandleValue v, LitVal* val)
++ToWebAssemblyValue(JSContext* cx, ValType targetType, HandleValue v, MutableHandleVal val)
+ {
+     switch (targetType.code()) {
+       case ValType::I32: {
+         int32_t i32;
+         if (!ToInt32(cx, v, &i32))
+             return false;
+-        *val = LitVal(uint32_t(i32));
++        val.set(Val(uint32_t(i32)));
+         return true;
+       }
+       case ValType::F32: {
+         double d;
+         if (!ToNumber(cx, v, &d))
+             return false;
+-        *val = LitVal(float(d));
++        val.set(Val(float(d)));
+         return true;
+       }
+       case ValType::F64: {
+         double d;
+         if (!ToNumber(cx, v, &d))
+             return false;
+-        *val = LitVal(d);
++        val.set(Val(d));
+         return true;
+       }
+       case ValType::AnyRef: {
+         if (v.isNull()) {
+-            *val = LitVal(ValType::AnyRef, nullptr);
++            val.set(Val(nullptr));
+         } else {
+             JSObject* obj = ToObject(cx, v);
+             if (!obj)
+                 return false;
+-            *val = LitVal(ValType::AnyRef, obj);
++            MOZ_ASSERT(obj->compartment() == cx->compartment());
++            val.set(Val(obj));
+         }
+         return true;
+       }
+-      default: {
+-        MOZ_CRASH("unexpected import value type, caller must guard");
++      WASM_CRASH_IF_SIMD_TYPES;
++      case ValType::Ref:
++      case ValType::I64: {
++        break;
+       }
+     }
++    MOZ_CRASH("unexpected import value type, caller must guard");
+ }
+ 
+ static Value
+-ToJSValue(const LitVal& val)
++ToJSValue(const Val& val)
+ {
+     switch (val.type().code()) {
+       case ValType::I32:
+         return Int32Value(val.i32());
+       case ValType::F32:
+         return DoubleValue(JS::CanonicalizeNaN(double(val.f32())));
+       case ValType::F64:
+         return DoubleValue(JS::CanonicalizeNaN(val.f64()));
+       case ValType::AnyRef:
+         if (!val.ptr())
+             return NullValue();
+         return ObjectValue(*(JSObject*)val.ptr());
+-      default:
+-        MOZ_CRASH("unexpected type when translating to a JS value");
++      WASM_CRASH_IF_SIMD_TYPES;
++      case ValType::Ref:
++      case ValType::I64:
++        break;
+     }
++    MOZ_CRASH("unexpected type when translating to a JS value");
+ }
+ 
+ // ============================================================================
+ // Imports
+ 
+ static bool
+ ThrowBadImportArg(JSContext* cx)
+ {
+@@ -201,17 +218,17 @@ GetProperty(JSContext* cx, HandleObject 
+ static bool
+ GetImports(JSContext* cx,
+            const Module& module,
+            HandleObject importObj,
+            MutableHandle<FunctionVector> funcImports,
+            MutableHandleWasmTableObject tableImport,
+            MutableHandleWasmMemoryObject memoryImport,
+            WasmGlobalObjectVector& globalObjs,
+-           LitValVector* globalImportValues)
++           MutableHandleValVector globalImportValues)
+ {
+     const ImportVector& imports = module.imports();
+     if (!imports.empty() && !importObj)
+         return ThrowBadImportArg(cx);
+ 
+     const Metadata& metadata = module.metadata();
+ 
+     uint32_t globalIndex = 0;
+@@ -253,21 +270,21 @@ GetImports(JSContext* cx,
+             if (!v.isObject() || !v.toObject().is<WasmMemoryObject>())
+                 return ThrowBadImportType(cx, import.field.get(), "Memory");
+ 
+             MOZ_ASSERT(!memoryImport);
+             memoryImport.set(&v.toObject().as<WasmMemoryObject>());
+             break;
+           }
+           case DefinitionKind::Global: {
+-            LitVal val;
+             const uint32_t index = globalIndex++;
+             const GlobalDesc& global = globals[index];
+             MOZ_ASSERT(global.importIndex() == index);
+ 
++            RootedVal val(cx);
+             if (v.isObject() && v.toObject().is<WasmGlobalObject>()) {
+                 RootedWasmGlobalObject obj(cx, &v.toObject().as<WasmGlobalObject>());
+ 
+                 if (obj->isMutable() != global.isMutable()) {
+                     JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_MUT_LINK);
+                     return false;
+                 }
+                 if (obj->type() != global.type()) {
+@@ -275,17 +292,17 @@ GetImports(JSContext* cx,
+                     return false;
+                 }
+ 
+                 if (globalObjs.length() <= index && !globalObjs.resize(index + 1)) {
+                     ReportOutOfMemory(cx);
+                     return false;
+                 }
+                 globalObjs[index] = obj;
+-                val = obj->val();
++                obj->val(&val);
+             } else {
+                 if (IsNumberType(global.type())) {
+                     if (!v.isNumber())
+                         return ThrowBadImportType(cx, import.field.get(), "Number");
+                 } else {
+                     MOZ_ASSERT(global.type().isRefOrAnyRef());
+                     if (!v.isNull() && !v.isObject())
+                         return ThrowBadImportType(cx, import.field.get(), "Object-or-null");
+@@ -300,17 +317,17 @@ GetImports(JSContext* cx,
+                     JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_MUT_LINK);
+                     return false;
+                 }
+ 
+                 if (!ToWebAssemblyValue(cx, global.type(), v, &val))
+                     return false;
+             }
+ 
+-            if (!globalImportValues->append(val))
++            if (!globalImportValues.append(val))
+                 return false;
+ 
+             break;
+           }
+         }
+     }
+ 
+     MOZ_ASSERT(globalIndex == globals.length() || !globals[globalIndex].isImport());
+@@ -375,21 +392,22 @@ wasm::Eval(JSContext* cx, Handle<TypedAr
+         return false;
+     }
+ 
+     Rooted<FunctionVector> funcs(cx, FunctionVector(cx));
+     RootedWasmTableObject table(cx);
+     RootedWasmMemoryObject memory(cx);
+     Rooted<WasmGlobalObjectVector> globalObjs(cx);
+ 
+-    LitValVector globals;
++    RootedValVector globals(cx);
+     if (!GetImports(cx, *module, importObj, &funcs, &table, &memory, globalObjs.get(), &globals))
+         return false;
+ 
+-    return module->instantiate(cx, funcs, table, memory, globals, globalObjs.get(), nullptr, instanceObj);
++    return module->instantiate(cx, funcs, table, memory, globals, globalObjs.get(), nullptr,
++                               instanceObj);
+ }
+ 
+ // ============================================================================
+ // Common functions
+ 
+ // '[EnforceRange] unsigned long' types are coerced with
+ //    ConvertToInt(v, 32, 'unsigned')
+ // defined in Web IDL Section 3.2.4.9.
+@@ -1065,17 +1083,17 @@ WasmInstanceObject::trace(JSTracer* trc,
+ WasmInstanceObject::create(JSContext* cx,
+                            SharedCode code,
+                            UniqueDebugState debug,
+                            UniqueTlsData tlsData,
+                            HandleWasmMemoryObject memory,
+                            SharedTableVector&& tables,
+                            Handle<FunctionVector> funcImports,
+                            const GlobalDescVector& globals,
+-                           const LitValVector& globalImportValues,
++                           HandleValVector globalImportValues,
+                            const WasmGlobalObjectVector& globalObjs,
+                            HandleObject proto)
+ {
+     UniquePtr<ExportMap> exports = js::MakeUnique<ExportMap>();
+     if (!exports || !exports->init()) {
+         ReportOutOfMemory(cx);
+         return nullptr;
+     }
+@@ -1172,21 +1190,22 @@ Instantiate(JSContext* cx, const Module&
+ {
+     RootedObject instanceProto(cx, &cx->global()->getPrototype(JSProto_WasmInstance).toObject());
+ 
+     Rooted<FunctionVector> funcs(cx, FunctionVector(cx));
+     RootedWasmTableObject table(cx);
+     RootedWasmMemoryObject memory(cx);
+     Rooted<WasmGlobalObjectVector> globalObjs(cx);
+ 
+-    LitValVector globals;
++    RootedValVector globals(cx);
+     if (!GetImports(cx, module, importObj, &funcs, &table, &memory, globalObjs.get(), &globals))
+         return false;
+ 
+-    return module.instantiate(cx, funcs, table, memory, globals, globalObjs.get(), instanceProto, instanceObj);
++    return module.instantiate(cx, funcs, table, memory, globals, globalObjs.get(), instanceProto,
++                              instanceObj);
+ }
+ 
+ /* static */ bool
+ WasmInstanceObject::construct(JSContext* cx, unsigned argc, Value* vp)
+ {
+     CallArgs args = CallArgsFromVp(argc, vp);
+ 
+     if (!ThrowIfNotConstructing(cx, args, "Instance"))
+@@ -2135,58 +2154,84 @@ const Class WasmGlobalObject::class_ =
+ };
+ 
+ /* static */ void
+ WasmGlobalObject::trace(JSTracer* trc, JSObject* obj)
+ {
+     WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
+     switch (global->type().code()) {
+       case ValType::AnyRef:
+-        TraceNullableEdge(trc, &global->cell()->ptr, "wasm anyref global");
++        if (global->cell()->ptr)
++            TraceManuallyBarrieredEdge(trc, &global->cell()->ptr, "wasm anyref global");
+         break;
+-      default:
++      case ValType::I32:
++      case ValType::F32:
++      case ValType::I64:
++      case ValType::F64:
+         break;
++      WASM_CRASH_IF_SIMD_TYPES;
++      case ValType::Ref:
++        MOZ_CRASH("Ref NYI");
+     }
+ }
+ 
+ /* static */ void
+ WasmGlobalObject::finalize(FreeOp*, JSObject* obj)
+ {
+     WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
+     js_delete(global->cell());
+ }
+ 
+ /* static */ WasmGlobalObject*
+-WasmGlobalObject::create(JSContext* cx, const LitVal& val, bool isMutable)
++WasmGlobalObject::create(JSContext* cx, HandleVal hval, bool isMutable)
+ {
+-    UniquePtr<Cell> cell = js::MakeUnique<Cell>();
+-    if (!cell)
+-        return nullptr;
+-
+-    switch (val.type().code()) {
+-      case ValType::I32:    cell->i32 = val.i32(); break;
+-      case ValType::I64:    cell->i64 = val.i64(); break;
+-      case ValType::F32:    cell->f32 = val.f32(); break;
+-      case ValType::F64:    cell->f64 = val.f64(); break;
+-      case ValType::AnyRef: cell->ptr = (JSObject*)val.ptr(); break;
+-      default:              MOZ_CRASH();
+-    }
+-
+     RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmGlobal).toObject());
+ 
+     AutoSetNewObjectMetadata metadata(cx);
+     RootedWasmGlobalObject obj(cx, NewObjectWithGivenProto<WasmGlobalObject>(cx, proto));
+     if (!obj)
+         return nullptr;
+ 
+     MOZ_ASSERT(obj->isTenured(), "assumed by set_global post barriers");
+ 
++    // It's simpler to initialize the cell after the object has been created,
++    // to avoid needing to root the cell before the object creation.
++
++    Cell* cell = js_new<Cell>();
++    if (!cell)
++        return nullptr;
++
++    const Val& val = hval.get();
++    switch (val.type().code()) {
++      case ValType::I32:
++        cell->i32 = val.i32();
++        break;
++      case ValType::I64:
++        cell->i64 = val.i64();
++        break;
++      case ValType::F32:
++        cell->f32 = val.f32();
++        break;
++      case ValType::F64:
++        cell->f64 = val.f64();
++        break;
++      case ValType::AnyRef:
++        MOZ_ASSERT(!cell->ptr, "no prebarriers needed");
++        cell->ptr = val.ptr();
++        if (cell->ptr)
++            JSObject::writeBarrierPost(&cell->ptr, nullptr, cell->ptr);
++        break;
++      WASM_CRASH_IF_SIMD_TYPES;
++      case ValType::Ref:
++        MOZ_CRASH("Ref NYI");
++    }
++
+     obj->initReservedSlot(TYPE_SLOT, Int32Value(int32_t(val.type().bitsUnsafe())));
+     obj->initReservedSlot(MUTABLE_SLOT, JS::BooleanValue(isMutable));
+-    obj->initReservedSlot(CELL_SLOT, PrivateValue(cell.release()));
++    obj->initReservedSlot(CELL_SLOT, PrivateValue(cell));
+ 
+     return obj;
+ }
+ 
+ /* static */ bool
+ WasmGlobalObject::construct(JSContext* cx, unsigned argc, Value* vp)
+ {
+     CallArgs args = CallArgsFromVp(argc, vp);
+@@ -2238,31 +2283,31 @@ WasmGlobalObject::construct(JSContext* c
+ 
+     RootedValue mutableVal(cx);
+     if (!JS_GetProperty(cx, obj, "mutable", &mutableVal))
+         return false;
+ 
+     bool isMutable = ToBoolean(mutableVal);
+ 
+     // Extract the initial value, or provide a suitable default.
+-    // Guard against control flow mistakes below failing to set |globalVal|.
+-    LitVal globalVal = LitVal(uint32_t(0));
++    RootedVal globalVal(cx);
+     if (args.length() >= 2) {
+         RootedValue valueVal(cx, args.get(1));
+ 
+         if (!ToWebAssemblyValue(cx, globalType, valueVal, &globalVal))
+             return false;
+     } else {
+         switch (globalType.code()) {
+-          case ValType::I32:    /* set above */ break;
+-          case ValType::I64:    globalVal = LitVal(uint64_t(0)); break;
+-          case ValType::F32:    globalVal = LitVal(float(0.0)); break;
+-          case ValType::F64:    globalVal = LitVal(double(0.0)); break;
+-          case ValType::AnyRef: globalVal = LitVal(ValType::AnyRef, nullptr); break;
+-          default: MOZ_CRASH();
++          case ValType::I32:    globalVal = Val(uint32_t(0)); break;
++          case ValType::I64:    globalVal = Val(uint64_t(0)); break;
++          case ValType::F32:    globalVal = Val(float(0.0));  break;
++          case ValType::F64:    globalVal = Val(double(0.0)); break;
++          case ValType::AnyRef: globalVal = Val(nullptr);     break;
++          WASM_CRASH_IF_SIMD_TYPES;
++          case ValType::Ref:    MOZ_CRASH("Ref NYI");
+         }
+     }
+ 
+     WasmGlobalObject* global = WasmGlobalObject::create(cx, globalVal, isMutable);
+     if (!global)
+         return false;
+ 
+     args.rval().setObject(*global);
+@@ -2278,24 +2323,26 @@ IsGlobal(HandleValue v)
+ /* static */ bool
+ WasmGlobalObject::valueGetterImpl(JSContext* cx, const CallArgs& args)
+ {
+     switch (args.thisv().toObject().as<WasmGlobalObject>().type().code()) {
+       case ValType::I32:
+       case ValType::F32:
+       case ValType::F64:
+       case ValType::AnyRef:
+-        args.rval().set(args.thisv().toObject().as<WasmGlobalObject>().value());
++        args.rval().set(args.thisv().toObject().as<WasmGlobalObject>().value(cx));
+         return true;
+       case ValType::I64:
+         JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64_TYPE);
+         return false;
+-      default:
+-        MOZ_CRASH();
++      WASM_CRASH_IF_SIMD_TYPES;
++      case ValType::Ref:
++        MOZ_CRASH("Ref NYI");
+     }
++    MOZ_CRASH();
+ }
+ 
+ /* static */ bool
+ WasmGlobalObject::valueGetter(JSContext* cx, unsigned argc, Value* vp)
+ {
+     CallArgs args = CallArgsFromVp(argc, vp);
+     return CallNonGenericMethod<IsGlobal, valueGetterImpl>(cx, args);
+ }
+@@ -2309,27 +2356,44 @@ WasmGlobalObject::valueSetterImpl(JSCont
+         return false;
+     }
+ 
+     if (global->type() == ValType::I64) {
+         JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64_TYPE);
+         return false;
+     }
+ 
+-    LitVal val;
++    RootedVal val(cx);
+     if (!ToWebAssemblyValue(cx, global->type(), args.get(0), &val))
+         return false;
+ 
+     Cell* cell = global->cell();
+     switch (global->type().code()) {
+-      case ValType::I32:    cell->i32 = val.i32(); break;
+-      case ValType::F32:    cell->f32 = val.f32(); break;
+-      case ValType::F64:    cell->f64 = val.f64(); break;
+-      case ValType::AnyRef: cell->ptr = (JSObject*)val.ptr(); break;
+-      default:              MOZ_CRASH();
++      case ValType::I32:
++        cell->i32 = val.get().i32();
++        break;
++      case ValType::F32:
++        cell->f32 = val.get().f32();
++        break;
++      case ValType::F64:
++        cell->f64 = val.get().f64();
++        break;
++      case ValType::AnyRef: {
++        JSObject* prevPtr = cell->ptr;
++        JSObject::writeBarrierPre(prevPtr);
++        cell->ptr = val.get().ptr();
++        if (cell->ptr)
++            JSObject::writeBarrierPost(&cell->ptr, prevPtr, cell->ptr);
++        break;
++      }
++      WASM_CRASH_IF_SIMD_TYPES;
++      case ValType::I64:
++        MOZ_CRASH("unexpected i64 when setting global's value");
++      case ValType::Ref:
++        MOZ_CRASH("Ref NYI");
+     }
+ 
+     args.rval().setUndefined();
+     return true;
+ }
+ 
+ /* static */ bool
+ WasmGlobalObject::valueSetter(JSContext* cx, unsigned argc, Value* vp)
+@@ -2361,37 +2425,39 @@ WasmGlobalObject::type() const
+ }
+ 
+ bool
+ WasmGlobalObject::isMutable() const
+ {
+     return getReservedSlot(MUTABLE_SLOT).toBoolean();
+ }
+ 
+-LitVal
+-WasmGlobalObject::val() const
++void
++WasmGlobalObject::val(MutableHandleVal outval) const
+ {
+     Cell* cell = this->cell();
+-    LitVal val;
+     switch (type().code()) {
+-      case ValType::I32:    val = LitVal(uint32_t(cell->i32)); break;
+-      case ValType::I64:    val = LitVal(uint64_t(cell->i64)); break;
+-      case ValType::F32:    val = LitVal(cell->f32); break;
+-      case ValType::F64:    val = LitVal(cell->f64); break;
+-      case ValType::AnyRef: val = LitVal(ValType::AnyRef, (void*)cell->ptr); break;
+-      default:              MOZ_CRASH();
++      case ValType::I32:    outval.set(Val(uint32_t(cell->i32))); return;
++      case ValType::I64:    outval.set(Val(uint64_t(cell->i64))); return;
++      case ValType::F32:    outval.set(Val(cell->f32));           return;
++      case ValType::F64:    outval.set(Val(cell->f64));           return;
++      case ValType::AnyRef: outval.set(Val(cell->ptr));           return;
++      WASM_CRASH_IF_SIMD_TYPES;
++      case ValType::Ref:    MOZ_CRASH("Ref NYI");
+     }
+-    return val;
++    MOZ_CRASH("unexpected Global type");
+ }
+ 
+ Value
+-WasmGlobalObject::value() const
++WasmGlobalObject::value(JSContext* cx) const
+ {
+     // ToJSValue crashes on I64; this is desirable.
+-    return ToJSValue(val());
++    RootedVal result(cx);
++    val(&result);
++    return ToJSValue(result.get());
+ }
+ 
+ WasmGlobalObject::Cell*
+ WasmGlobalObject::cell() const
+ {
+     return reinterpret_cast<Cell*>(getReservedSlot(CELL_SLOT).toPrivate());
+ }
+ 
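The valueSetterImpl hunk above spells out the full barrier discipline for overwriting a traced JSObject* field in tenured wasm storage. As a minimal sketch of that pattern in isolation (the free-standing helper is illustrative only; the patch inlines this logic rather than using a helper):

    // Sketch: replacing the JSObject* held in a tenured cell.
    // writeBarrierPre keeps incremental marking sound for the old value;
    // writeBarrierPost records a possible tenured->nursery edge for the new one.
    static void SetAnyRefSlot(JSObject** slot, JSObject* next)
    {
        JSObject* prev = *slot;
        JSObject::writeBarrierPre(prev);
        *slot = next;
        if (next)
            JSObject::writeBarrierPost(slot, prev, next);
    }

WasmGlobalObject::create above and Val::writePayload below use the same post-barrier with the pre-barrier skipped, since the old value there is known to be null.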
+diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h
+--- a/js/src/wasm/WasmJS.h
++++ b/js/src/wasm/WasmJS.h
+@@ -132,40 +132,39 @@ class WasmGlobalObject : public NativeOb
+     static bool valueGetter(JSContext* cx, unsigned argc, Value* vp);
+     static bool valueSetterImpl(JSContext* cx, const CallArgs& args);
+     static bool valueSetter(JSContext* cx, unsigned argc, Value* vp);
+ 
+   public:
+     // For exposed globals the Cell holds the value of the global; the
+     // instance's global area holds a pointer to the Cell.
+     union Cell {
+-        int32_t     i32;
+-        int64_t     i64;
+-        float       f32;
+-        double      f64;
+-        GCPtrObject ptr;
+-
++        int32_t   i32;
++        int64_t   i64;
++        float     f32;
++        double    f64;
++        JSObject* ptr;
+         Cell() : i64(0) {}
+         ~Cell() {}
+     };
+ 
+     static const unsigned RESERVED_SLOTS = 3;
+     static const Class class_;
+     static const JSPropertySpec properties[];
+     static const JSFunctionSpec methods[];
+     static const JSFunctionSpec static_methods[];
+     static bool construct(JSContext*, unsigned, Value*);
+ 
+-    static WasmGlobalObject* create(JSContext* cx, const wasm::LitVal& value, bool isMutable);
++    static WasmGlobalObject* create(JSContext* cx, wasm::HandleVal value, bool isMutable);
+ 
+     wasm::ValType type() const;
+-    wasm::LitVal val() const;
++    void val(wasm::MutableHandleVal outval) const;
+     bool isMutable() const;
+     // value() will MOZ_CRASH if the type is int64
+-    Value value() const;
++    Value value(JSContext* cx) const;
+     Cell* cell() const;
+ };
+ 
+ // The class of WebAssembly.Instance. Each WasmInstanceObject owns a
+ // wasm::Instance. These objects are used both as content-facing JS objects and
+ // as internal implementation details of asm.js.
+ 
+ class WasmInstanceObject : public NativeObject
+@@ -214,17 +213,17 @@ class WasmInstanceObject : public Native
+     static WasmInstanceObject* create(JSContext* cx,
+                                       RefPtr<const wasm::Code> code,
+                                       UniquePtr<wasm::DebugState> debug,
+                                       wasm::UniqueTlsData tlsData,
+                                       HandleWasmMemoryObject memory,
+                                       Vector<RefPtr<wasm::Table>, 0, SystemAllocPolicy>&& tables,
+                                       Handle<FunctionVector> funcImports,
+                                       const wasm::GlobalDescVector& globals,
+-                                      const wasm::LitValVector& globalImportValues,
++                                      wasm::HandleValVector globalImportValues,
+                                       const WasmGlobalObjectVector& globalObjs,
+                                       HandleObject proto);
+     void initExportsObj(JSObject& exportsObj);
+ 
+     wasm::Instance& instance() const;
+     JSObject& exportsObj() const;
+ 
+     static bool getExportedFunction(JSContext* cx,
+diff --git a/js/src/wasm/WasmModule.cpp b/js/src/wasm/WasmModule.cpp
+--- a/js/src/wasm/WasmModule.cpp
++++ b/js/src/wasm/WasmModule.cpp
+@@ -716,34 +716,34 @@ Module::extractCode(JSContext* cx, Tier 
+     if (!JS_DefineProperty(cx, result, "segments", value, JSPROP_ENUMERATE))
+         return false;
+ 
+     vp.setObject(*result);
+     return true;
+ }
+ 
+ static uint32_t
+-EvaluateInitExpr(const LitValVector& globalImportValues, InitExpr initExpr)
++EvaluateInitExpr(HandleValVector globalImportValues, InitExpr initExpr)
+ {
+     switch (initExpr.kind()) {
+       case InitExpr::Kind::Constant:
+         return initExpr.val().i32();
+       case InitExpr::Kind::GetGlobal:
+-        return globalImportValues[initExpr.globalIndex()].i32();
++        return globalImportValues[initExpr.globalIndex()].get().i32();
+     }
+ 
+     MOZ_CRASH("bad initializer expression");
+ }
+ 
+ bool
+ Module::initSegments(JSContext* cx,
+                      HandleWasmInstanceObject instanceObj,
+                      Handle<FunctionVector> funcImports,
+                      HandleWasmMemoryObject memoryObj,
+-                     const LitValVector& globalImportValues) const
++                     HandleValVector globalImportValues) const
+ {
+     Instance& instance = instanceObj->instance();
+     const SharedTableVector& tables = instance.tables();
+ 
+     Tier tier = code().bestTier();
+ 
+     // Perform all error checks up front so that this function does not perform
+     // partial initialization if an error is reported.
+@@ -1005,64 +1005,69 @@ Module::instantiateTable(JSContext* cx, 
+                 return false;
+             }
+         }
+     }
+ 
+     return true;
+ }
+ 
+-static LitVal
+-ExtractGlobalValue(const LitValVector& globalImportValues, uint32_t globalIndex,
+-                   const GlobalDesc& global)
++static void
++ExtractGlobalValue(HandleValVector globalImportValues, uint32_t globalIndex,
++                   const GlobalDesc& global, MutableHandleVal result)
+ {
+     switch (global.kind()) {
+       case GlobalKind::Import: {
+-        return globalImportValues[globalIndex];
++        result.set(Val(globalImportValues[globalIndex]));
++        return;
+       }
+       case GlobalKind::Variable: {
+         const InitExpr& init = global.initExpr();
+         switch (init.kind()) {
+           case InitExpr::Kind::Constant:
+-            return init.val();
++            result.set(Val(init.val()));
++            return;
+           case InitExpr::Kind::GetGlobal:
+-            return globalImportValues[init.globalIndex()];
++            result.set(Val(globalImportValues[init.globalIndex()]));
++            return;
+         }
+         break;
+       }
+       case GlobalKind::Constant: {
+-        return global.constantValue();
++        result.set(Val(global.constantValue()));
++        return;
+       }
+     }
+     MOZ_CRASH("Not a global value");
+ }
+ 
+ static bool
+-EnsureGlobalObject(JSContext* cx, const LitValVector& globalImportValues, size_t globalIndex,
++EnsureGlobalObject(JSContext* cx, HandleValVector globalImportValues, size_t globalIndex,
+                    const GlobalDesc& global, WasmGlobalObjectVector& globalObjs)
+ {
+     if (globalIndex < globalObjs.length() && globalObjs[globalIndex])
+         return true;
+ 
+-    LitVal val = ExtractGlobalValue(globalImportValues, globalIndex, global);
++    RootedVal val(cx);
++    ExtractGlobalValue(globalImportValues, globalIndex, global, &val);
+     RootedWasmGlobalObject go(cx, WasmGlobalObject::create(cx, val, global.isMutable()));
+     if (!go)
+         return false;
+ 
+     if (globalObjs.length() <= globalIndex && !globalObjs.resize(globalIndex + 1)) {
+         ReportOutOfMemory(cx);
+         return false;
+     }
+ 
+     globalObjs[globalIndex] = go;
+     return true;
+ }
+ 
+ bool
+-Module::instantiateGlobals(JSContext* cx, const LitValVector& globalImportValues,
++Module::instantiateGlobals(JSContext* cx, HandleValVector globalImportValues,
+                            WasmGlobalObjectVector& globalObjs) const
+ {
+     // If there are exported globals that aren't in globalObjs because they
+     // originate in this module or because they were immutable imports that came
+     // in as primitive values then we must create cells in the globalObjs for
+     // them here, as WasmInstanceObject::create() and CreateExportObject() will
+     // need the cells to exist.
+ 
+@@ -1184,17 +1189,17 @@ CreateExportObject(JSContext* cx,
+     return true;
+ }
+ 
+ bool
+ Module::instantiate(JSContext* cx,
+                     Handle<FunctionVector> funcImports,
+                     HandleWasmTableObject tableImport,
+                     HandleWasmMemoryObject memoryImport,
+-                    const LitValVector& globalImportValues,
++                    HandleValVector globalImportValues,
+                     WasmGlobalObjectVector& globalObjs,
+                     HandleObject instanceProto,
+                     MutableHandleWasmInstanceObject instance) const
+ {
+     if (!instantiateFunctions(cx, funcImports))
+         return false;
+ 
+     RootedWasmMemoryObject memory(cx, memoryImport);
+diff --git a/js/src/wasm/WasmModule.h b/js/src/wasm/WasmModule.h
+--- a/js/src/wasm/WasmModule.h
++++ b/js/src/wasm/WasmModule.h
+@@ -146,23 +146,23 @@ class Module : public JS::WasmModule
+ 
+     mutable Atomic<bool>    codeIsBusy_;
+ 
+     bool instantiateFunctions(JSContext* cx, Handle<FunctionVector> funcImports) const;
+     bool instantiateMemory(JSContext* cx, MutableHandleWasmMemoryObject memory) const;
+     bool instantiateTable(JSContext* cx,
+                           MutableHandleWasmTableObject table,
+                           SharedTableVector* tables) const;
+-    bool instantiateGlobals(JSContext* cx, const LitValVector& globalImportValues,
++    bool instantiateGlobals(JSContext* cx, HandleValVector globalImportValues,
+                             WasmGlobalObjectVector& globalObjs) const;
+     bool initSegments(JSContext* cx,
+                       HandleWasmInstanceObject instance,
+                       Handle<FunctionVector> funcImports,
+                       HandleWasmMemoryObject memory,
+-                      const LitValVector& globalImportValues) const;
++                      HandleValVector globalImportValues) const;
+ 
+     class Tier2GeneratorTaskImpl;
+     void notifyCompilationListeners();
+ 
+   public:
+     Module(Assumptions&& assumptions,
+            const Code& code,
+            UniqueConstBytes unlinkedCodeForDebugging,
+@@ -202,17 +202,17 @@ class Module : public JS::WasmModule
+     uint32_t codeLength(Tier t) const { return code_->segment(t).length(); }
+ 
+     // Instantiate this module with the given imports:
+ 
+     bool instantiate(JSContext* cx,
+                      Handle<FunctionVector> funcImports,
+                      HandleWasmTableObject tableImport,
+                      HandleWasmMemoryObject memoryImport,
+-                     const LitValVector& globalImportValues,
++                     HandleValVector globalImportValues,
+                      WasmGlobalObjectVector& globalObjs,
+                      HandleObject instanceProto,
+                      MutableHandleWasmInstanceObject instanceObj) const;
+ 
+     // Tier-2 compilation may be initiated after the Module is constructed at
+     // most once, ideally before any client can attempt to serialize the Module.
+     // When tier-2 compilation completes, ModuleGenerator calls finishTier2()
+     // from a helper thread, passing tier-variant data which will be installed
+diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
+--- a/js/src/wasm/WasmTypes.cpp
++++ b/js/src/wasm/WasmTypes.cpp
+@@ -54,18 +54,39 @@ static_assert(MaxMemoryInitialPages <= A
+ // All plausible targets must be able to do at least IEEE754 double
+ // loads/stores, hence the lower limit of 8.  Some Intel processors support
+ // AVX-512 loads/stores, hence the upper limit of 64.
+ static_assert(MaxMemoryAccessSize >= 8,  "MaxMemoryAccessSize too low");
+ static_assert(MaxMemoryAccessSize <= 64, "MaxMemoryAccessSize too high");
+ static_assert((MaxMemoryAccessSize & (MaxMemoryAccessSize-1)) == 0,
+               "MaxMemoryAccessSize is not a power of two");
+ 
++Val::Val(const LitVal& val)
++{
++    type_ = val.type();
++    switch (type_.code()) {
++      case ValType::I32: u.i32_ = val.i32(); return;
++      case ValType::F32: u.f32_ = val.f32(); return;
++      case ValType::I64: u.i64_ = val.i64(); return;
++      case ValType::F64: u.f64_ = val.f64(); return;
++      case ValType::I8x16:
++      case ValType::B8x16:
++      case ValType::I16x8:
++      case ValType::B16x8:
++      case ValType::I32x4:
++      case ValType::F32x4:
++      case ValType::B32x4: memcpy(&u, val.rawSimd(), jit::Simd128DataSize); return;
++      case ValType::AnyRef: u.ptr_ = val.ptr(); return;
++      case ValType::Ref: break;
++    }
++    MOZ_CRASH();
++}
++
+ void
+-LitVal::writePayload(uint8_t* dst) const
++Val::writePayload(uint8_t* dst) const
+ {
+     switch (type_.code()) {
+       case ValType::I32:
+       case ValType::F32:
+         memcpy(dst, &u.i32_, sizeof(u.i32_));
+         return;
+       case ValType::I64:
+       case ValType::F64:
+@@ -77,20 +98,37 @@ LitVal::writePayload(uint8_t* dst) const
+       case ValType::F32x4:
+       case ValType::B8x16:
+       case ValType::B16x8:
+       case ValType::B32x4:
+         memcpy(dst, &u, jit::Simd128DataSize);
+         return;
+       case ValType::Ref:
+       case ValType::AnyRef:
+-        memcpy(dst, &u.ptr_, sizeof(intptr_t));
++        MOZ_ASSERT(*(JSObject**)dst == nullptr, "should be null so no need for a pre-barrier");
++        memcpy(dst, &u.ptr_, sizeof(JSObject*));
++        // Either the written location is in the global data section in the
++        // WasmInstanceObject, or the Cell of a WasmGlobalObject:
++        // - WasmInstanceObjects are always tenured and u.ptr_ may point to a
++        // nursery object, so we need a post-barrier since the global data of
++        // an instance is effectively a field of the WasmInstanceObject.
++        // - WasmGlobalObjects are always tenured, and they have a Cell field,
++        // so a post-barrier may be needed for the same reason as above.
++        if (u.ptr_)
++            JSObject::writeBarrierPost((JSObject**)dst, nullptr, u.ptr_);
+         return;
+     }
+-    MOZ_CRASH("unexpected LitVal type");
++    MOZ_CRASH("unexpected Val type");
++}
++
++void
++Val::trace(JSTracer* trc)
++{
++    if (type_.isValid() && type_ == ValType::AnyRef && u.ptr_)
++        TraceManuallyBarrieredEdge(trc, &u.ptr_, "wasm anyref global");
+ }
+ 
+ bool
+ wasm::IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode)
+ {
+     switch (callee) {
+       case SymbolicAddress::FloorD:
+       case SymbolicAddress::FloorF:
+diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
+--- a/js/src/wasm/WasmTypes.h
++++ b/js/src/wasm/WasmTypes.h
+@@ -770,41 +770,43 @@ enum class HasGcTypes
+ // type, mostly for the purpose of numeric literals and initializers. A LitVal
+ // does not directly map to a JS value since there is not (currently) a precise
+ // representation of i64 values. A LitVal may contain non-canonical NaNs since,
+ // within WebAssembly, floats are not canonicalized. Canonicalization must
+ // happen at the JS boundary.
+ 
+ class LitVal
+ {
++  protected:
+     ValType type_;
+     union U {
+-        uint32_t i32_;
+-        uint64_t i64_;
+-        float f32_;
+-        double f64_;
+-        I8x16 i8x16_;
+-        I16x8 i16x8_;
+-        I32x4 i32x4_;
+-        F32x4 f32x4_;
+-        intptr_t ptr_;
++        uint32_t  i32_;
++        uint64_t  i64_;
++        float     f32_;
++        double    f64_;
++        I8x16     i8x16_;
++        I16x8     i16x8_;
++        I32x4     i32x4_;
++        F32x4     f32x4_;
++        JSObject* ptr_;
+     } u;
+ 
+   public:
+-    LitVal() = default;
++    LitVal() : type_(), u{} {}
+ 
+     explicit LitVal(uint32_t i32) : type_(ValType::I32) { u.i32_ = i32; }
+     explicit LitVal(uint64_t i64) : type_(ValType::I64) { u.i64_ = i64; }
+ 
+     explicit LitVal(float f32) : type_(ValType::F32) { u.f32_ = f32; }
+     explicit LitVal(double f64) : type_(ValType::F64) { u.f64_ = f64; }
+ 
+-    explicit LitVal(ValType refType, void* ptr) : type_(refType) {
++    explicit LitVal(ValType refType, JSObject* ptr) : type_(refType) {
+         MOZ_ASSERT(refType.isRefOrAnyRef());
+-        u.ptr_ = intptr_t(ptr);
++        MOZ_ASSERT(ptr == nullptr, "use Val for non-nullptr ref types to get tracing");
++        u.ptr_ = ptr;
+     }
+ 
+     explicit LitVal(const I8x16& i8x16, ValType type = ValType::I8x16) : type_(type) {
+         MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
+         memcpy(u.i8x16_, i8x16, sizeof(u.i8x16_));
+     }
+     explicit LitVal(const I16x8& i16x8, ValType type = ValType::I16x8) : type_(type) {
+         MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
+@@ -821,17 +823,17 @@ class LitVal
+     ValType type() const { return type_; }
+     bool isSimd() const { return IsSimdType(type()); }
+     static constexpr size_t sizeofLargestValue() { return sizeof(u); }
+ 
+     uint32_t i32() const { MOZ_ASSERT(type_ == ValType::I32); return u.i32_; }
+     uint64_t i64() const { MOZ_ASSERT(type_ == ValType::I64); return u.i64_; }
+     const float& f32() const { MOZ_ASSERT(type_ == ValType::F32); return u.f32_; }
+     const double& f64() const { MOZ_ASSERT(type_ == ValType::F64); return u.f64_; }
+-    intptr_t ptr() const { MOZ_ASSERT(type_.isRefOrAnyRef()); return u.ptr_; }
++    JSObject* ptr() const { MOZ_ASSERT(type_.isRefOrAnyRef()); return u.ptr_; }
+ 
+     const I8x16& i8x16() const {
+         MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
+         return u.i8x16_;
+     }
+     const I16x8& i16x8() const {
+         MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
+         return u.i16x8_;
+@@ -839,22 +841,51 @@ class LitVal
+     const I32x4& i32x4() const {
+         MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
+         return u.i32x4_;
+     }
+     const F32x4& f32x4() const {
+         MOZ_ASSERT(type_ == ValType::F32x4);
+         return u.f32x4_;
+     }
+-
+-    void writePayload(uint8_t* dst) const;
++    // To be used only by Val.
++    const void* rawSimd() const { return &u.i32x4_; }
+ };
+ 
+ typedef Vector<LitVal, 0, SystemAllocPolicy> LitValVector;
+ 
++// A Val is a LitVal that can contain pointers to JSObjects, thanks to its
++// trace implementation. Since a Val is able to store a pointer to a JSObject,
++// it needs to be traced during compilation in case the pointee is moved.
++// The classic shorthands for Rooted things are defined after this class, for
++// easier usage.
++
++class MOZ_NON_PARAM Val : public LitVal
++{
++  public:
++    Val() : LitVal() {}
++    explicit Val(const LitVal& val);
++    explicit Val(uint32_t i32)  : LitVal(i32) {}
++    explicit Val(uint64_t i64)  : LitVal(i64) {}
++    explicit Val(float f32)     : LitVal(f32) {}
++    explicit Val(double f64)    : LitVal(f64) {}
++    explicit Val(JSObject* obj) : LitVal(ValType::AnyRef, nullptr) { u.ptr_ = obj; }
++    void writePayload(uint8_t* dst) const;
++    void trace(JSTracer* trc);
++};
++
++typedef Rooted<Val> RootedVal;
++typedef Handle<Val> HandleVal;
++typedef MutableHandle<Val> MutableHandleVal;
++
++typedef GCVector<Val, 0, SystemAllocPolicy> GCVectorVal;
++typedef Rooted<GCVectorVal> RootedValVector;
++typedef Handle<GCVectorVal> HandleValVector;
++typedef MutableHandle<GCVectorVal> MutableHandleValVector;
++
+ // The FuncType class represents a WebAssembly function signature which takes a
+ // list of value types and returns an expression type. The engine uses two
+ // in-memory representations of the argument Vector's memory (when elements do
+ // not fit inline): normal malloc allocation (via SystemAllocPolicy) and
+ // allocation in a LifoAlloc (via LifoAllocPolicy). The former FuncType objects
+ // can have any lifetime since they own the memory. The latter FuncType objects
+ // must not outlive the associated LifoAlloc mark/release interval (which is
+ // currently the duration of module validation+compilation). Thus, long-lived

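The RootedVal and RootedValVector typedefs introduced at the end of this hunk follow the standard JS::Rooted pattern, and the patch uses them exactly as GetImports and WasmGlobalObject::construct above demonstrate. A hedged usage sketch (the function itself is hypothetical):

    // Sketch: rooting a wasm::Val so an AnyRef payload stays traced across GC.
    static bool MakeDefaultGlobal(JSContext* cx, wasm::MutableHandleValVector out)
    {
        wasm::RootedVal v(cx);
        v = wasm::Val(uint32_t(0));  // assignment through Rooted, as in construct()
        return out.append(v.get());  // GCVector append can fail on OOM
    }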
+ 4292 - 0
frg/work-js/mozilla-release/patches/1459900-4-63a1.patch

@@ -0,0 +1,4292 @@
+# HG changeset patch
+# User Lars T Hansen <lhansen@mozilla.com>
+# Date 1527526769 -7200
+#      Mon May 28 18:59:29 2018 +0200
+# Node ID 9e3fb74a11eefbed803b9e8705c18ad754a1fe23
+# Parent  6f06798229551bb9b0b3b772dd10bbad253678b8
+Bug 1459900 - Ref types with a simple type calculus.  r=luke
+
+We generalize ExprType and StackType in the same way as ValType, in
+order to reduce the scope for error.  (Intermediate solutions that
+avoided this turned out to be too error-prone.)  ExprType, StackType,
+and ValType now share a representation of a TypeCode + reference type
+index, called PackedTypeCode, but don't otherwise share much code -
+ExprType is going away soon and StackType will change.
+
+We then generalize many of the Ast nodes so that we can represent the
+new types available in the binary and textual formats.  This is
+wrenching in the same way that generalizing ValType and ExprType was.
+
+Finally we provide parsing, resolution, printing, and validation of
+(ref T) where T is some structure type.  This code should be general
+enough to be extended later to array types.
+
+The type calculus here is that (ref n) == (ref m) if n == m, i.e., these
+are nominally equivalent; the only new subtype relationships are that
+(ref n) <: (ref n) and (ref n) <: anyref.  If the subtyping of anyref
+were to go away there's a small amount of work that would have to be
+done to handle the polymorphism of ref.is_null.
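
Spelled out as code, the relation above is small: equality of the packed type, including the reference index, plus one upcast. A hedged illustration (this helper is not in the patch, which folds the checks into validation; ValType::operator== comparing the full PackedTypeCode is an assumption):

    // Illustration: the subtype relation described in the commit message.
    static bool IsSubtypeOf(wasm::ValType a, wasm::ValType b)
    {
        if (a == b)
            return true;  // nominal: (ref n) matches only (ref n)
        if (a.code() == wasm::ValType::Ref && b.code() == wasm::ValType::AnyRef)
            return true;  // (ref n) <: anyref
        return false;
    }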
+
+diff --git a/js/src/jit-test/tests/wasm/gc/ref.js b/js/src/jit-test/tests/wasm/gc/ref.js
+new file mode 100644
+--- /dev/null
++++ b/js/src/jit-test/tests/wasm/gc/ref.js
+@@ -0,0 +1,150 @@
++if (!wasmGcEnabled()) {
++    assertErrorMessage(() => wasmEvalText(`(module (func (param (ref 0)) (unreachable)))`),
++                       WebAssembly.CompileError, /bad type/);
++    quit(0);
++}
++
++// Parsing and resolving
++
++var bin = wasmTextToBinary(
++    `(module
++      (type $cons (struct
++                   (field $car i32)
++                   (field $cdr (ref $cons))))
++
++      (type $odd (struct
++                  (field $x i32)
++                  (field $to_even (ref $even))))
++
++      (type $even (struct
++                   (field $x i32)
++                   (field $to_odd (ref $odd))))
++
++      ;; No globals of reference type yet.
++      ;;
++      ;;(import "m" "g" (global (ref $cons)))
++      ;;
++      ;;(global $glob (ref $cons) (ref.null $cons))
++
++      (import "m" "f" (func $imp (param (ref $cons)) (result (ref $odd))))
++
++      ;; The bodies do nothing since we have no operations on structs yet.
++
++      (func (export "car") (param (ref $cons)) (result i32)
++       (i32.const 0))
++
++      (func $cdr (export "cdr") (param $p (ref $cons)) (result (ref $cons))
++       (local $l (ref $cons))
++       ;; store null value of correct type
++       (set_local $l (ref.null (ref $cons)))
++       ;; store local of correct type
++       (set_local $l (get_local $p))
++       ;; store call result of correct type
++       (set_local $l (call $cdr (get_local $p)))
++       ;; TODO: eventually also a test with get_global
++       ;; blocks and if with result type
++       (block (ref $cons)
++        (if (ref $cons) (i32.eqz (i32.const 0))
++            (unreachable)
++            (ref.null (ref $cons)))))
++
++      (func (export "odder") (param (ref $even)) (result (ref $odd))
++       (ref.null (ref $odd)))
++
++      (func (export "evener") (param (ref $odd)) (result (ref $even))
++       (ref.null (ref $even)))
++
++      (func (export "passer") (param (ref $cons))
++       (call $cdr (get_local 0))
++       drop
++       (call $imp (get_local 0))
++       drop)
++
++     )`);
++
++// Validation
++
++assertEq(WebAssembly.validate(bin), true);
++
++// ref.is_null should work on any reference type
++
++new WebAssembly.Module(wasmTextToBinary(`
++(module
++ (type $s (struct))
++ (func $null (param (ref $s)) (result i32)
++   (ref.is_null (get_local 0))))
++`))
++
++// Automatic upcast to anyref
++
++new WebAssembly.Module(wasmTextToBinary(`
++(module
++ (type $s (struct (field i32)))
++ (func $f (param (ref $s)) (call $g (get_local 0)))
++ (func $g (param anyref) (unreachable)))
++`));
++
++// Misc failure modes
++
++assertErrorMessage(() => wasmEvalText(`
++(module
++  (func (param (ref $odd)) (unreachable)))
++`),
++SyntaxError, /Type label.*not found/);
++
++// Ref type mismatch in parameter
++
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type $s (struct (field i32)))
++ (type $t (struct (field i32)))
++ (func $f (param (ref $s)) (unreachable))
++ (func $g (param (ref $t)) (call $f (get_local 0)) drop))
++`),
++WebAssembly.CompileError, /expression has type ref.*but expected ref/);
++
++// Ref type mismatch in assignment to local
++
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type $s (struct (field i32)))
++ (type $t (struct (field i32)))
++ (func $f (param (ref $s)) (local (ref $t)) (set_local 1 (get_local 0))))
++`),
++WebAssembly.CompileError, /expression has type ref.*but expected ref/);
++
++// Ref type mismatch in return
++
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type $s (struct (field i32)))
++ (type $t (struct (field i32)))
++ (func $f (param (ref $s)) (result (ref $t)) (get_local 0)))
++`),
++WebAssembly.CompileError, /expression has type ref.*but expected ref/);
++
++// Ref type can't reference a function type
++
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type $x (func (param i32)))
++ (func $f (param (ref $x)) (unreachable)))
++`),
++SyntaxError, /Type label.*not found/);
++
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type (func (param i32)))
++ (func $f (param (ref 0)) (unreachable)))
++`),
++WebAssembly.CompileError, /does not reference a struct type/);
++
++// No automatic downcast from anyref
++
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type $s (struct (field i32)))
++ (func $f (param anyref) (call $g (get_local 0)))
++ (func $g (param (ref $s)) (unreachable)))
++`),
++WebAssembly.CompileError, /expression has type anyref but expected ref/);
+diff --git a/js/src/jit-test/tests/wasm/gc/structs.js b/js/src/jit-test/tests/wasm/gc/structs.js
+--- a/js/src/jit-test/tests/wasm/gc/structs.js
++++ b/js/src/jit-test/tests/wasm/gc/structs.js
+@@ -87,16 +87,25 @@ wasmEvalText(`
+ 
+ // Empty structs are OK.
+ 
+ wasmEvalText(`
+ (module
+  (type $s (struct)))
+ `)
+ 
++// Multiply defined structures.
++
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type $s (struct (field $x i32)))
++ (type $s (struct (field $y i32))))
++`),
++SyntaxError, /duplicate type name/);
++
+ // Bogus type definition syntax.
+ 
+ assertErrorMessage(() => wasmEvalText(`
+ (module
+  (type $s))
+ `),
+ SyntaxError, /parsing wasm text/);
+ 
+diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
+--- a/js/src/wasm/AsmJS.cpp
++++ b/js/src/wasm/AsmJS.cpp
+@@ -3192,17 +3192,17 @@ class MOZ_STACK_CLASS FunctionValidator
+         return encoder().writeOp(Op::If) &&
+                encoder().writePatchableFixedU7(typeAt);
+     }
+     bool switchToElse() {
+         MOZ_ASSERT(blockDepth_ > 0);
+         return encoder().writeOp(Op::Else);
+     }
+     void setIfType(size_t typeAt, ExprType type) {
+-        encoder().patchFixedU7(typeAt, uint8_t(type));
++        encoder().patchFixedU7(typeAt, uint8_t(type.code()));
+     }
+     bool popIf() {
+         MOZ_ASSERT(blockDepth_ > 0);
+         --blockDepth_;
+         return encoder().writeOp(Op::End);
+     }
+     bool popIf(size_t typeAt, ExprType type) {
+         MOZ_ASSERT(blockDepth_ > 0);
+@@ -6129,17 +6129,17 @@ CheckComma(FunctionValidator& f, ParseNo
+     for (; NextNode(pn); pn = NextNode(pn)) {
+         if (!CheckAsExprStatement(f, pn))
+             return false;
+     }
+ 
+     if (!CheckExpr(f, pn, type))
+         return false;
+ 
+-    f.encoder().patchFixedU7(typeAt, uint8_t(type->toWasmBlockSignatureType()));
++    f.encoder().patchFixedU7(typeAt, uint8_t(type->toWasmBlockSignatureType().code()));
+ 
+     return f.encoder().writeOp(Op::End);
+ }
+ 
+ static bool
+ CheckConditional(FunctionValidator& f, ParseNode* ternary, Type* type)
+ {
+     MOZ_ASSERT(ternary->isKind(ParseNodeKind::Conditional));
+@@ -7757,16 +7757,17 @@ ValidateGlobalVariable(JSContext* cx, co
+           case ValType::B32x4: {
+             SimdConstant simdConstant;
+             if (!ToSimdConstant<Bool32x4>(cx, v, &simdConstant))
+                 return false;
+             // Bool32x4 uses the same data layout as Int32x4.
+             *val = Val(simdConstant.asInt32x4());
+             return true;
+           }
++          case ValType::Ref:
+           case ValType::AnyRef: {
+             MOZ_CRASH("not available in asm.js");
+           }
+         }
+       }
+     }
+ 
+     MOZ_CRASH("unreachable");
+diff --git a/js/src/wasm/WasmAST.h b/js/src/wasm/WasmAST.h
+--- a/js/src/wasm/WasmAST.h
++++ b/js/src/wasm/WasmAST.h
+@@ -94,32 +94,193 @@ class AstRef
+     size_t index() const {
+         MOZ_ASSERT(index_ != AstNoIndex);
+         return index_;
+     }
+     void setIndex(uint32_t index) {
+         MOZ_ASSERT(index_ == AstNoIndex);
+         index_ = index;
+     }
++    bool operator==(AstRef rhs) const {
++        return name_ == rhs.name_ && index_ == rhs.index_;
++    }
++    bool operator!=(AstRef rhs) const {
++        return !(*this == rhs);
++    }
++};
++
++class AstValType
++{
++    // When this type is resolved, which_ becomes IsValType.
++
++    enum { IsValType, IsAstRef } which_;
++    ValType type_;
++    AstRef  ref_;
++
++  public:
++    AstValType() : which_(IsValType) {} // type_ is then !isValid()
++
++    explicit AstValType(ValType type)
++      : which_(IsValType),
++        type_(type)
++    { }
++
++    explicit AstValType(AstRef ref) {
++        if (ref.name().empty()) {
++            which_ = IsValType;
++            type_ = ValType(ValType::Ref, ref.index());
++        } else {
++            which_ = IsAstRef;
++            ref_ = ref;
++        }
++    }
++
++    bool isRefType() const {
++        return code() == ValType::AnyRef || code() == ValType::Ref;
++    }
++
++    bool isValid() const {
++        return !(which_ == IsValType && !type_.isValid());
++    }
++
++    bool isResolved() const {
++        return which_ == IsValType;
++    }
++
++    AstRef& asRef() {
++        return ref_;
++    }
++
++    void resolve() {
++        MOZ_ASSERT(which_ == IsAstRef);
++        which_ = IsValType;
++        type_ = ValType(ValType::Ref, ref_.index());
++    }
++
++    ValType::Code code() const {
++        if (which_ == IsValType)
++            return type_.code();
++        return ValType::Ref;
++    }
++
++    ValType type() const {
++        MOZ_ASSERT(which_ == IsValType);
++        return type_;
++    }
++
++    bool operator==(const AstValType& that) const {
++        if (which_ != that.which_)
++            return false;
++        if (which_ == IsValType)
++            return type_ == that.type_;
++        return ref_ == that.ref_;
++    }
++
++    bool operator!=(const AstValType& that) const {
++        return !(*this == that);
++    }
++};
++
++class AstExprType
++{
++    // When this type is resolved, which_ becomes IsExprType.
++
++    enum { IsExprType, IsAstValType } which_;
++    union {
++        ExprType   type_;
++        AstValType vt_;
++    };
++
++  public:
++    MOZ_IMPLICIT AstExprType(ExprType::Code type)
++      : which_(IsExprType),
++        type_(type)
++    {}
++
++    MOZ_IMPLICIT AstExprType(ExprType type)
++      : which_(IsExprType),
++        type_(type)
++    {}
++
++    MOZ_IMPLICIT AstExprType(const AstExprType& type)
++      : which_(type.which_)
++    {
++        switch (which_) {
++          case IsExprType:
++            type_ = type.type_;
++            break;
++          case IsAstValType:
++            vt_ = type.vt_;
++            break;
++        }
++    }
++
++    explicit AstExprType(AstValType vt)
++      : which_(IsAstValType),
++        vt_(vt)
++    {}
++
++    bool isVoid() const {
++        return which_ == IsExprType && type_ == ExprType::Void;
++    }
++
++    bool isResolved() const {
++        return which_ == IsExprType;
++    }
++
++    AstValType& asAstValType() {
++        MOZ_ASSERT(which_ == IsAstValType);
++        return vt_;
++    }
++
++    void resolve() {
++        MOZ_ASSERT(which_ == IsAstValType);
++        which_ = IsExprType;
++        type_ = ExprType(vt_.type());
++    }
++
++    ExprType::Code code() const {
++        if (which_ == IsExprType)
++            return type_.code();
++        return ExprType::Ref;
++    }
++
++    ExprType type() const {
++        if (which_ == IsExprType)
++            return type_;
++        return ExprType(vt_.type());
++    }
++
++    bool operator==(const AstExprType& that) const {
++        if (which_ != that.which_)
++            return false;
++        if (which_ == IsExprType)
++            return type_ == that.type_;
++        return vt_ == that.vt_;
++    }
++
++    bool operator!=(const AstExprType& that) const {
++        return !(*this == that);
++    }
+ };
+ 
+ struct AstNameHasher
+ {
+     typedef const AstName Lookup;
+     static js::HashNumber hash(Lookup l) {
+         return mozilla::HashString(l.begin(), l.length());
+     }
+     static bool match(const AstName key, Lookup lookup) {
+         return key == lookup;
+     }
+ };
+ 
+ using AstNameMap = AstHashMap<AstName, uint32_t, AstNameHasher>;
+ 
+-typedef AstVector<ValType> AstValTypeVector;
++typedef AstVector<AstValType> AstValTypeVector;
+ typedef AstVector<AstExpr*> AstExprVector;
+ typedef AstVector<AstName> AstNameVector;
+ typedef AstVector<AstRef> AstRefVector;
+ 
+ struct AstBase
+ {
+     void* operator new(size_t numBytes, LifoAlloc& astLifo) throw() {
+         return astLifo.alloc(numBytes);
+@@ -147,53 +308,68 @@ class AstTypeDef : public AstBase
+     inline const AstFuncType& asFuncType() const;
+     inline const AstStructType& asStructType() const;
+ };
+ 
+ class AstFuncType : public AstTypeDef
+ {
+     AstName name_;
+     AstValTypeVector args_;
+-    ExprType ret_;
++    AstExprType ret_;
+ 
+   public:
+     explicit AstFuncType(LifoAlloc& lifo)
+       : AstTypeDef(Which::IsFuncType),
+         args_(lifo),
+         ret_(ExprType::Void)
+     {}
+-    AstFuncType(AstValTypeVector&& args, ExprType ret)
++    AstFuncType(AstValTypeVector&& args, AstExprType ret)
+       : AstTypeDef(Which::IsFuncType),
+         args_(std::move(args)),
+         ret_(ret)
+     {}
+     AstFuncType(AstName name, AstFuncType&& rhs)
+       : AstTypeDef(Which::IsFuncType),
+         name_(name),
+         args_(std::move(rhs.args_)),
+         ret_(rhs.ret_)
+     {}
+     const AstValTypeVector& args() const {
+         return args_;
+     }
+-    ExprType ret() const {
++    AstValTypeVector& args() {
++        return args_;
++    }
++    AstExprType ret() const {
++        return ret_;
++    }
++    AstExprType& ret() {
+         return ret_;
+     }
+     AstName name() const {
+         return name_;
+     }
+     bool operator==(const AstFuncType& rhs) const {
+-        return ret() == rhs.ret() && EqualContainers(args(), rhs.args());
++        if (ret() != rhs.ret())
++            return false;
++        size_t len = args().length();
++        if (rhs.args().length() != len)
++            return false;
++        for (size_t i = 0; i < len; i++) {
++            if (args()[i] != rhs.args()[i])
++                return false;
++        }
++        return true;
+     }
+ 
+     typedef const AstFuncType& Lookup;
+     static HashNumber hash(Lookup ft) {
+-        HashNumber hn = HashNumber(ft.ret());
+-        for (ValType vt : ft.args())
+-            hn = mozilla::AddToHash(hn, vt.code());
++        HashNumber hn = HashNumber(ft.ret().code());
++        for (const AstValType& vt : ft.args())
++            hn = mozilla::AddToHash(hn, uint32_t(vt.code()));
+         return hn;
+     }
+     static bool match(const AstFuncType* lhs, Lookup rhs) {
+         return *lhs == rhs;
+     }
+ };
+ 
+ class AstStructType : public AstTypeDef
+@@ -223,16 +399,19 @@ class AstStructType : public AstTypeDef
+         return name_;
+     }
+     const AstNameVector& fieldNames() const {
+         return fieldNames_;
+     }
+     const AstValTypeVector& fieldTypes() const {
+         return fieldTypes_;
+     }
++    AstValTypeVector& fieldTypes() {
++        return fieldTypes_;
++    }
+ };
+ 
+ inline AstFuncType&
+ AstTypeDef::asFuncType()
+ {
+     MOZ_ASSERT(isFuncType());
+     return *static_cast<AstFuncType*>(this);
+ }
+@@ -314,31 +493,31 @@ enum class AstExprKind
+     Unreachable,
+     Wait,
+     Wake
+ };
+ 
+ class AstExpr : public AstNode
+ {
+     const AstExprKind kind_;
+-    ExprType type_;
++    AstExprType type_;
+ 
+   protected:
+-    AstExpr(AstExprKind kind, ExprType type)
++    AstExpr(AstExprKind kind, AstExprType type)
+       : kind_(kind), type_(type)
+     {}
+ 
+   public:
+     AstExprKind kind() const { return kind_; }
+ 
+-    bool isVoid() const { return IsVoid(type_); }
++    bool isVoid() const { return type_.isVoid(); }
+ 
+-    // Note that for nodes other than blocks and block-like things, this
+-    // may return ExprType::Limit for nodes with non-void types.
+-    ExprType type() const { return type_; }
++    // Note that for nodes other than blocks and block-like things, the
++    // underlying type may be ExprType::Limit for nodes with non-void types.
++    AstExprType& type() { return type_; }
+ 
+     template <class T>
+     T& as() {
+         MOZ_ASSERT(kind() == T::Kind);
+         return static_cast<T&>(*this);
+     }
+ };
+ 
+@@ -479,17 +658,17 @@ class AstTeeLocal : public AstExpr
+ class AstBlock : public AstExpr
+ {
+     Op op_;
+     AstName name_;
+     AstExprVector exprs_;
+ 
+   public:
+     static const AstExprKind Kind = AstExprKind::Block;
+-    explicit AstBlock(Op op, ExprType type, AstName name, AstExprVector&& exprs)
++    explicit AstBlock(Op op, AstExprType type, AstName name, AstExprVector&& exprs)
+       : AstExpr(Kind, type),
+         op_(op),
+         name_(name),
+         exprs_(std::move(exprs))
+     {}
+ 
+     Op op() const { return op_; }
+     AstName name() const { return name_; }
+@@ -500,17 +679,17 @@ class AstBranch : public AstExpr
+ {
+     Op op_;
+     AstExpr* cond_;
+     AstRef target_;
+     AstExpr* value_;
+ 
+   public:
+     static const AstExprKind Kind = AstExprKind::Branch;
+-    explicit AstBranch(Op op, ExprType type,
++    explicit AstBranch(Op op, AstExprType type,
+                        AstExpr* cond, AstRef target, AstExpr* value)
+       : AstExpr(Kind, type),
+         op_(op),
+         cond_(cond),
+         target_(target),
+         value_(value)
+     {}
+ 
+@@ -523,34 +702,34 @@ class AstBranch : public AstExpr
+ class AstCall : public AstExpr
+ {
+     Op op_;
+     AstRef func_;
+     AstExprVector args_;
+ 
+   public:
+     static const AstExprKind Kind = AstExprKind::Call;
+-    AstCall(Op op, ExprType type, AstRef func, AstExprVector&& args)
++    AstCall(Op op, AstExprType type, AstRef func, AstExprVector&& args)
+       : AstExpr(Kind, type), op_(op), func_(func), args_(std::move(args))
+     {}
+ 
+     Op op() const { return op_; }
+     AstRef& func() { return func_; }
+     const AstExprVector& args() const { return args_; }
+ };
+ 
+ class AstCallIndirect : public AstExpr
+ {
+     AstRef funcType_;
+     AstExprVector args_;
+     AstExpr* index_;
+ 
+   public:
+     static const AstExprKind Kind = AstExprKind::CallIndirect;
+-    AstCallIndirect(AstRef funcType, ExprType type, AstExprVector&& args, AstExpr* index)
++    AstCallIndirect(AstRef funcType, AstExprType type, AstExprVector&& args, AstExpr* index)
+       : AstExpr(Kind, type), funcType_(funcType), args_(std::move(args)), index_(index)
+     {}
+     AstRef& funcType() { return funcType_; }
+     const AstExprVector& args() const { return args_; }
+     AstExpr* index() const { return index_; }
+ };
+ 
+ class AstReturn : public AstExpr
+@@ -570,17 +749,17 @@ class AstIf : public AstExpr
+ {
+     AstExpr* cond_;
+     AstName name_;
+     AstExprVector thenExprs_;
+     AstExprVector elseExprs_;
+ 
+   public:
+     static const AstExprKind Kind = AstExprKind::If;
+-    AstIf(ExprType type, AstExpr* cond, AstName name,
++    AstIf(AstExprType type, AstExpr* cond, AstName name,
+           AstExprVector&& thenExprs, AstExprVector&& elseExprs)
+       : AstExpr(Kind, type),
+         cond_(cond),
+         name_(name),
+         thenExprs_(std::move(thenExprs)),
+         elseExprs_(std::move(elseExprs))
+     {}
+ 
+@@ -871,42 +1050,44 @@ class AstFunc : public AstNode
+         funcType_(ft),
+         vars_(std::move(vars)),
+         localNames_(std::move(locals)),
+         body_(std::move(body)),
+         endOffset_(AstNodeUnknownOffset)
+     {}
+     AstRef& funcType() { return funcType_; }
+     const AstValTypeVector& vars() const { return vars_; }
++    AstValTypeVector& vars() { return vars_; }
+     const AstNameVector& locals() const { return localNames_; }
+     const AstExprVector& body() const { return body_; }
+     AstName name() const { return name_; }
+     uint32_t endOffset() const { return endOffset_; }
+     void setEndOffset(uint32_t offset) { endOffset_ = offset; }
+ };
+ 
+ class AstGlobal : public AstNode
+ {
+     AstName name_;
+     bool isMutable_;
+-    ValType type_;
++    AstValType type_;
+     Maybe<AstExpr*> init_;
+ 
+   public:
+     AstGlobal() : isMutable_(false), type_(ValType())
+     {}
+ 
+-    explicit AstGlobal(AstName name, ValType type, bool isMutable,
++    explicit AstGlobal(AstName name, AstValType type, bool isMutable,
+                        const Maybe<AstExpr*>& init = Maybe<AstExpr*>())
+       : name_(name), isMutable_(isMutable), type_(type), init_(init)
+     {}
+ 
+     AstName name() const { return name_; }
+     bool isMutable() const { return isMutable_; }
+-    ValType type() const { return type_; }
++    ValType type() const { return type_.type(); }
++    AstValType& type() { return type_; }
+ 
+     bool hasInit() const { return !!init_; }
+     AstExpr& init() const { MOZ_ASSERT(hasInit()); return **init_; }
+ };
+ 
+ typedef AstVector<AstGlobal*> AstGlobalVector;
+ 
+ class AstImport : public AstNode
+@@ -944,16 +1125,20 @@ class AstImport : public AstNode
+     Limits limits() const {
+         MOZ_ASSERT(kind_ == DefinitionKind::Memory || kind_ == DefinitionKind::Table);
+         return limits_;
+     }
+     const AstGlobal& global() const {
+         MOZ_ASSERT(kind_ == DefinitionKind::Global);
+         return global_;
+     }
++    AstGlobal& global() {
++        MOZ_ASSERT(kind_ == DefinitionKind::Global);
++        return global_;
++    }
+ };
+ 
+ class AstExport : public AstNode
+ {
+     AstName name_;
+     DefinitionKind kind_;
+     AstRef ref_;
+ 
+@@ -1300,23 +1485,23 @@ class AstExtraConversionOperator final :
+ 
+     MiscOp op() const { return op_; }
+     AstExpr* operand() const { return operand_; }
+ };
+ #endif
+ 
+ class AstRefNull final : public AstExpr
+ {
+-    ValType refType_;
++    AstValType refType_;
+   public:
+     static const AstExprKind Kind = AstExprKind::RefNull;
+-    explicit AstRefNull(ValType refType)
++    explicit AstRefNull(AstValType refType)
+       : AstExpr(Kind, ExprType::Limit), refType_(refType)
+     {}
+-    ValType refType() const {
++    AstValType& baseType() {
+         return refType_;
+     }
+ };
+ 
+ // This is an artificial AST node which can fill operand slots in an AST
+ // constructed from parsing or decoding stack-machine code that doesn't have
+ // an inherent AST structure.
+ class AstPop final : public AstExpr
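
AstValType's two-state design above (a symbolic AstRef until names are resolved, then a concrete ValType) implies one resolution step per type use. A minimal sketch of that flow (the driver function is hypothetical; asRef(), setIndex(), and resolve() are the accessors defined above):

    // Sketch: binding a named (ref $t) once its type section index is known.
    static void ResolveAstType(wasm::AstValType& t, uint32_t typeIndex)
    {
        if (!t.isResolved()) {
            t.asRef().setIndex(typeIndex);  // bind the $name to a type index
            t.resolve();                    // t.type() is now ValType(Ref, typeIndex)
        }
    }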
+diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
+--- a/js/src/wasm/WasmBaselineCompile.cpp
++++ b/js/src/wasm/WasmBaselineCompile.cpp
+@@ -1051,16 +1051,17 @@ BaseLocalIter::settle()
+ 
+     MOZ_ASSERT(argsIter_.done());
+     if (index_ < locals_.length()) {
+         switch (locals_[index_].code()) {
+           case ValType::I32:
+           case ValType::I64:
+           case ValType::F32:
+           case ValType::F64:
++          case ValType::Ref:
+           case ValType::AnyRef:
+             mirType_ = ToMIRType(locals_[index_]);
+             frameOffset_ = pushLocal(MIRTypeToSize(mirType_));
+             break;
+           default:
+             MOZ_CRASH("Compiler bug: Unexpected local type");
+         }
+         return;
+@@ -2125,69 +2126,85 @@ class BaseCompiler final : public BaseCo
+     }
+ 
+     void moveF32(RegF32 src, RegF32 dest) {
+         if (src != dest)
+             masm.moveFloat32(src, dest);
+     }
+ 
+     void maybeReserveJoinRegI(ExprType type) {
+-        if (type == ExprType::I32)
++        switch (type.code()) {
++          case ExprType::I32:
+             needI32(joinRegI32_);
+-        else if (type == ExprType::I64)
++            break;
++          case ExprType::I64:
+             needI64(joinRegI64_);
+-        else if (type == ExprType::AnyRef)
++            break;
++          case ExprType::AnyRef:
++          case ExprType::Ref:
+             needRef(joinRegPtr_);
++            break;
++          default:;
++        }
+     }
+ 
+     void maybeUnreserveJoinRegI(ExprType type) {
+-        if (type == ExprType::I32)
++        switch (type.code()) {
++          case ExprType::I32:
+             freeI32(joinRegI32_);
+-        else if (type == ExprType::I64)
++            break;
++          case ExprType::I64:
+             freeI64(joinRegI64_);
+-        else if (type == ExprType::AnyRef)
++            break;
++          case ExprType::AnyRef:
++          case ExprType::Ref:
+             freeRef(joinRegPtr_);
++            break;
++          default:;
++        }
+     }
+ 
+     void maybeReserveJoinReg(ExprType type) {
+-        switch (type) {
++        switch (type.code()) {
+           case ExprType::I32:
+             needI32(joinRegI32_);
+             break;
+           case ExprType::I64:
+             needI64(joinRegI64_);
+             break;
+           case ExprType::F32:
+             needF32(joinRegF32_);
+             break;
+           case ExprType::F64:
+             needF64(joinRegF64_);
+             break;
++          case ExprType::Ref:
+           case ExprType::AnyRef:
+             needRef(joinRegPtr_);
+             break;
+           default:
+             break;
+         }
+     }
+ 
+     void maybeUnreserveJoinReg(ExprType type) {
+-        switch (type) {
++        switch (type.code()) {
+           case ExprType::I32:
+             freeI32(joinRegI32_);
+             break;
+           case ExprType::I64:
+             freeI64(joinRegI64_);
+             break;
+           case ExprType::F32:
+             freeF32(joinRegF32_);
+             break;
+           case ExprType::F64:
+             freeF64(joinRegF64_);
+             break;
++          case ExprType::Ref:
+           case ExprType::AnyRef:
+             freeRef(joinRegPtr_);
+             break;
+           default:
+             break;
+         }
+     }
+ 
+@@ -3094,17 +3111,17 @@ class BaseCompiler final : public BaseCo
+     // JoinReg is live out of the block.  But on the way out, we
+     // currently pop the JoinReg before freeing regs to be discarded,
+     // so there is a real risk of some pointless shuffling there.  If
+     // we instead integrate the popping of the join reg into the
+     // popping of the stack we can just use the JoinReg as it will
+     // become available in that process.
+ 
+     MOZ_MUST_USE Maybe<AnyReg> popJoinRegUnlessVoid(ExprType type) {
+-        switch (type) {
++        switch (type.code()) {
+           case ExprType::Void: {
+             return Nothing();
+           }
+           case ExprType::I32: {
+             DebugOnly<Stk::Kind> k(stk_.back().kind());
+             MOZ_ASSERT(k == Stk::RegisterI32 || k == Stk::ConstI32 || k == Stk::MemI32 ||
+                        k == Stk::LocalI32);
+             return Some(AnyReg(popI32(joinRegI32_)));
+@@ -3122,16 +3139,17 @@ class BaseCompiler final : public BaseCo
+             return Some(AnyReg(popF64(joinRegF64_)));
+           }
+           case ExprType::F32: {
+             DebugOnly<Stk::Kind> k(stk_.back().kind());
+             MOZ_ASSERT(k == Stk::RegisterF32 || k == Stk::ConstF32 || k == Stk::MemF32 ||
+                        k == Stk::LocalF32);
+             return Some(AnyReg(popF32(joinRegF32_)));
+           }
++          case ExprType::Ref:
+           case ExprType::AnyRef: {
+             DebugOnly<Stk::Kind> k(stk_.back().kind());
+             MOZ_ASSERT(k == Stk::RegisterRef || k == Stk::ConstRef || k == Stk::MemRef ||
+                        k == Stk::LocalRef);
+             return Some(AnyReg(popRef(joinRegPtr_)));
+           }
+           default: {
+             MOZ_CRASH("Compiler bug: unexpected expression type");
+@@ -3141,33 +3159,34 @@ class BaseCompiler final : public BaseCo
+ 
+     // If we ever start not sync-ing on entry to Block (but instead try to sync
+     // lazily) then this may start asserting because it does not spill the
+     // joinreg if the joinreg is already allocated.  Note, it *can't* spill the
+     // joinreg in the contexts it's being used, so some other solution will need
+     // to be found.
+ 
+     MOZ_MUST_USE Maybe<AnyReg> captureJoinRegUnlessVoid(ExprType type) {
+-        switch (type) {
++        switch (type.code()) {
+           case ExprType::I32:
+             MOZ_ASSERT(isAvailableI32(joinRegI32_));
+             needI32(joinRegI32_);
+             return Some(AnyReg(joinRegI32_));
+           case ExprType::I64:
+             MOZ_ASSERT(isAvailableI64(joinRegI64_));
+             needI64(joinRegI64_);
+             return Some(AnyReg(joinRegI64_));
+           case ExprType::F32:
+             MOZ_ASSERT(isAvailableF32(joinRegF32_));
+             needF32(joinRegF32_);
+             return Some(AnyReg(joinRegF32_));
+           case ExprType::F64:
+             MOZ_ASSERT(isAvailableF64(joinRegF64_));
+             needF64(joinRegF64_);
+             return Some(AnyReg(joinRegF64_));
++          case ExprType::Ref:
+           case ExprType::AnyRef:
+             MOZ_ASSERT(isAvailableRef(joinRegPtr_));
+             needRef(joinRegPtr_);
+             return Some(AnyReg(joinRegPtr_));
+           case ExprType::Void:
+             return Nothing();
+           default:
+             MOZ_CRASH("Compiler bug: unexpected type");
+@@ -3414,58 +3433,60 @@ class BaseCompiler final : public BaseCo
+         if (env_.debugEnabled())
+             insertBreakablePoint(CallSiteDesc::EnterFrame);
+     }
+ 
+     void saveResult() {
+         MOZ_ASSERT(env_.debugEnabled());
+         size_t debugFrameOffset = masm.framePushed() - DebugFrame::offsetOfFrame();
+         Address resultsAddress(masm.getStackPointer(), debugFrameOffset + DebugFrame::offsetOfResults());
+-        switch (funcType().ret()) {
++        switch (funcType().ret().code()) {
+           case ExprType::Void:
+             break;
+           case ExprType::I32:
+             masm.store32(RegI32(ReturnReg), resultsAddress);
+             break;
+           case ExprType::I64:
+             masm.store64(RegI64(ReturnReg64), resultsAddress);
+             break;
+           case ExprType::F64:
+             masm.storeDouble(RegF64(ReturnDoubleReg), resultsAddress);
+             break;
+           case ExprType::F32:
+             masm.storeFloat32(RegF32(ReturnFloat32Reg), resultsAddress);
+             break;
++          case ExprType::Ref:
+           case ExprType::AnyRef:
+             masm.storePtr(RegPtr(ReturnReg), resultsAddress);
+             break;
+           default:
+             MOZ_CRASH("Function return type");
+         }
+     }
+ 
+     void restoreResult() {
+         MOZ_ASSERT(env_.debugEnabled());
+         size_t debugFrameOffset = masm.framePushed() - DebugFrame::offsetOfFrame();
+         Address resultsAddress(masm.getStackPointer(), debugFrameOffset + DebugFrame::offsetOfResults());
+-        switch (funcType().ret()) {
++        switch (funcType().ret().code()) {
+           case ExprType::Void:
+             break;
+           case ExprType::I32:
+             masm.load32(resultsAddress, RegI32(ReturnReg));
+             break;
+           case ExprType::I64:
+             masm.load64(resultsAddress, RegI64(ReturnReg64));
+             break;
+           case ExprType::F64:
+             masm.loadDouble(resultsAddress, RegF64(ReturnDoubleReg));
+             break;
+           case ExprType::F32:
+             masm.loadFloat32(resultsAddress, RegF32(ReturnFloat32Reg));
+             break;
++          case ExprType::Ref:
+           case ExprType::AnyRef:
+             masm.loadPtr(resultsAddress, RegPtr(ReturnReg));
+             break;
+           default:
+             MOZ_CRASH("Function return type");
+         }
+     }
+ 
+@@ -3728,16 +3749,17 @@ class BaseCompiler final : public BaseCo
+                 MOZ_CRASH("Unexpected parameter passing discipline");
+               }
+ #endif
+               case ABIArg::Uninitialized:
+                 MOZ_CRASH("Uninitialized ABIArg kind");
+             }
+             break;
+           }
++          case ValType::Ref:
+           case ValType::AnyRef: {
+             ABIArg argLoc = call->abi.next(MIRType::Pointer);
+             if (argLoc.kind() == ABIArg::Stack) {
+                 ScratchPtr scratch(*this);
+                 loadRef(arg, scratch);
+                 masm.storePtr(scratch, Address(masm.getStackPointer(), argLoc.offsetFromArgBase()));
+             } else {
+                 loadRef(arg, RegPtr(argLoc.gpr()));
+@@ -7679,17 +7701,17 @@ BaseCompiler::emitDrop()
+ 
+     dropValue();
+     return true;
+ }
+ 
+ void
+ BaseCompiler::doReturn(ExprType type, bool popStack)
+ {
+-    switch (type) {
++    switch (type.code()) {
+       case ExprType::Void: {
+         returnCleanup(popStack);
+         break;
+       }
+       case ExprType::I32: {
+         RegI32 rv = popI32(RegI32(ReturnReg));
+         returnCleanup(popStack);
+         freeI32(rv);
+@@ -7708,16 +7730,17 @@ BaseCompiler::doReturn(ExprType type, bo
+         break;
+       }
+       case ExprType::F32: {
+         RegF32 rv = popF32(RegF32(ReturnFloat32Reg));
+         returnCleanup(popStack);
+         freeF32(rv);
+         break;
+       }
++      case ExprType::Ref:
+       case ExprType::AnyRef: {
+         RegPtr rv = popRef(RegPtr(ReturnReg));
+         returnCleanup(popStack);
+         freeRef(rv);
+         break;
+       }
+       default: {
+         MOZ_CRASH("Function return type");
+@@ -7754,17 +7777,17 @@ BaseCompiler::emitCallArgs(const ValType
+ 
+     masm.loadWasmTlsRegFromFrame();
+     return true;
+ }
+ 
+ void
+ BaseCompiler::pushReturnedIfNonVoid(const FunctionCall& call, ExprType type)
+ {
+-    switch (type) {
++    switch (type.code()) {
+       case ExprType::Void:
+         // There's no return value.  Do nothing.
+         break;
+       case ExprType::I32: {
+         RegI32 rv = captureReturnedI32();
+         pushI32(rv);
+         break;
+       }
+@@ -7778,16 +7801,17 @@ BaseCompiler::pushReturnedIfNonVoid(cons
+         pushF32(rv);
+         break;
+       }
+       case ExprType::F64: {
+         RegF64 rv = captureReturnedF64(call);
+         pushF64(rv);
+         break;
+       }
++      case ExprType::Ref:
+       case ExprType::AnyRef: {
+         RegPtr rv = captureReturnedRef();
+         pushRef(rv);
+         break;
+       }
+       default:
+         MOZ_CRASH("Function return type");
+     }
+@@ -8108,16 +8132,17 @@ BaseCompiler::emitGetLocal()
+         pushLocalI64(slot);
+         break;
+       case ValType::F64:
+         pushLocalF64(slot);
+         break;
+       case ValType::F32:
+         pushLocalF32(slot);
+         break;
++      case ValType::Ref:
+       case ValType::AnyRef:
+         pushLocalRef(slot);
+         break;
+       default:
+         MOZ_CRASH("Local variable type");
+     }
+ 
+     return true;
+@@ -8167,16 +8192,17 @@ BaseCompiler::emitSetOrTeeLocal(uint32_t
+         syncLocal(slot);
+         fr.storeLocalF32(rv, localFromSlot(slot, MIRType::Float32));
+         if (isSetLocal)
+             freeF32(rv);
+         else
+             pushF32(rv);
+         break;
+       }
++      case ValType::Ref:
+       case ValType::AnyRef: {
+         RegPtr rv = popRef();
+         syncLocal(slot);
+         fr.storeLocalPtr(rv, localFromSlot(slot, MIRType::Pointer));
+         if (isSetLocal)
+             freeRef(rv);
+         else
+             pushRef(rv);
+@@ -8697,16 +8723,17 @@ BaseCompiler::emitSelect()
+         pop2xF64(&r, &rs);
+         emitBranchPerform(&b);
+         moveF64(rs, r);
+         masm.bind(&done);
+         freeF64(rs);
+         pushF64(r);
+         break;
+       }
++      case ValType::Ref:
+       case ValType::AnyRef: {
+         RegPtr r, rs;
+         pop2xRef(&r, &rs);
+         emitBranchPerform(&b);
+         moveRef(rs, r);
+         masm.bind(&done);
+         freeRef(rs);
+         pushRef(r);
+@@ -8867,17 +8894,18 @@ BaseCompiler::emitCurrentMemory()
+ 
+     emitInstanceCall(lineOrBytecode, SigP_, ExprType::I32, SymbolicAddress::CurrentMemory);
+     return true;
+ }
+ 
+ bool
+ BaseCompiler::emitRefNull()
+ {
+-    if (!iter_.readRefNull())
++    ValType type;
++    if (!iter_.readRefNull(&type))
+         return false;
+ 
+     if (deadCode_)
+         return true;
+ 
+     pushRef(NULLREF_VALUE);
+     return true;
+ }
+@@ -10206,17 +10234,17 @@ js::wasm::BaselineCompileFunctions(const
+     for (const FuncCompileInput& func : inputs) {
+         Decoder d(func.begin, func.end, func.lineOrBytecode, error);
+ 
+         // Build the local types vector.
+ 
+         ValTypeVector locals;
+         if (!locals.appendAll(env.funcTypes[func.index]->args()))
+             return false;
+-        if (!DecodeLocalEntries(d, env.kind, env.gcTypesEnabled, &locals))
++        if (!DecodeLocalEntries(d, env.kind, env.types, env.gcTypesEnabled, &locals))
+             return false;
+ 
+         // One-pass baseline compilation.
+ 
+         BaseCompiler f(env, func, locals, d, &alloc, &masm);
+         if (!f.init())
+             return false;
+         if (!f.emitFunction())
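
The change that repeats throughout WasmBaselineCompile.cpp is mechanical: ExprType and ValType are no longer plain enums, so C++ cannot switch on them directly, and every switch on the type value becomes a switch on type.code(), with Ref added beside AnyRef. A minimal sketch of the shape this implies; the class layout and enumerator values here are assumptions, not the patch's real definitions:

    #include <cstdint>

    enum class TypeCode : uint8_t { I32 = 0x7f, AnyRef = 0x6f, Ref = 0x6e, Limit = 0x80 };

    class ExprType {
        uint32_t tc_;   // low byte: TypeCode; upper bits: ref type index, if any
      public:
        enum Code : uint8_t { I32 = 0x7f, AnyRef = 0x6f, Ref = 0x6e };
        Code code() const { return Code(tc_ & 0xff); }
    };

    void maybeReserveJoinReg(ExprType type) {
        // Dispatch moves to the packed code; Ref and AnyRef share an arm
        // because both occupy a pointer register at run time.
        switch (type.code()) {
          case ExprType::I32:    /* needI32(joinRegI32_); */ break;
          case ExprType::Ref:
          case ExprType::AnyRef: /* needRef(joinRegPtr_); */ break;
          default: break;
        }
    }
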
+diff --git a/js/src/wasm/WasmBinaryConstants.h b/js/src/wasm/WasmBinaryConstants.h
+--- a/js/src/wasm/WasmBinaryConstants.h
++++ b/js/src/wasm/WasmBinaryConstants.h
+@@ -60,16 +60,19 @@ enum class TypeCode
+     B32x4                                = 0x75,
+ 
+     // A function pointer with any signature
+     AnyFunc                              = 0x70,  // SLEB128(-0x10)
+ 
+     // A reference to any type.
+     AnyRef                               = 0x6f,
+ 
++    // Type constructor for reference types.
++    Ref                                  = 0x6e,
++
+     // Type constructor for function types
+     Func                                 = 0x60,  // SLEB128(-0x20)
+ 
+     // Type constructor for structure types - unofficial
+     Struct                               = 0x50,  // SLEB128(-0x30)
+ 
+     // Special code representing the block signature ()->()
+     BlockVoid                            = 0x40,  // SLEB128(-0x40)
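
Unlike AnyFunc and AnyRef, which are complete one-byte types, 0x6e (the SLEB128 encoding of -0x12) is a type constructor: in the binary format the byte must be followed by an index into the type section identifying the referenced type. A self-contained sketch of the decoding step, matching the two-result readValType shape used later in this patch; the Decoder here is a stand-in, not the real class:

    #include <cstdint>

    enum class TypeCode : uint8_t { Ref = 0x6e, AnyRef = 0x6f };
    constexpr uint32_t NoRefTypeIndex = 0xffffffu;   // assumed sentinel

    struct Decoder {                                 // stand-in byte reader
        const uint8_t* p;
        const uint8_t* end;
        bool readFixedU8(uint8_t* out) {
            if (p == end) return false;
            *out = *p++;
            return true;
        }
        bool readVarU32(uint32_t* out) {             // LEB128, as in the wasm spec
            uint32_t result = 0;
            for (int shift = 0; shift < 35; shift += 7) {
                uint8_t byte;
                if (!readFixedU8(&byte)) return false;
                result |= uint32_t(byte & 0x7f) << shift;
                if (!(byte & 0x80)) { *out = result; return true; }
            }
            return false;
        }
    };

    // A value type is one byte unless that byte is TypeCode::Ref, in which
    // case a varuint32 index into the type section follows.
    bool readValType(Decoder& d, uint8_t* code, uint32_t* refTypeIndex) {
        if (!d.readFixedU8(code))
            return false;
        if (*code == uint8_t(TypeCode::Ref))
            return d.readVarU32(refTypeIndex);
        *refTypeIndex = NoRefTypeIndex;
        return true;
    }
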
+diff --git a/js/src/wasm/WasmBuiltins.cpp b/js/src/wasm/WasmBuiltins.cpp
+--- a/js/src/wasm/WasmBuiltins.cpp
++++ b/js/src/wasm/WasmBuiltins.cpp
+@@ -1039,17 +1039,17 @@ wasm::SymbolicAddressTarget(SymbolicAddr
+ 
+ static Maybe<ABIFunctionType>
+ ToBuiltinABIFunctionType(const FuncType& funcType)
+ {
+     const ValTypeVector& args = funcType.args();
+     ExprType ret = funcType.ret();
+ 
+     uint32_t abiType;
+-    switch (ret) {
++    switch (ret.code()) {
+       case ExprType::F32: abiType = ArgType_Float32 << RetType_Shift; break;
+       case ExprType::F64: abiType = ArgType_Double << RetType_Shift; break;
+       default: return Nothing();
+     }
+ 
+     if ((args.length() + 1) > (sizeof(uint32_t) * 8 / ArgType_Shift))
+         return Nothing();
+ 
+diff --git a/js/src/wasm/WasmDebug.cpp b/js/src/wasm/WasmDebug.cpp
+--- a/js/src/wasm/WasmDebug.cpp
++++ b/js/src/wasm/WasmDebug.cpp
+@@ -537,17 +537,17 @@ DebugState::debugGetLocalTypes(uint32_t 
+ 
+     // Decode local var types from wasm binary function body.
+     const CodeRange& range = codeRanges(Tier::Debug)[debugFuncToCodeRangeIndex(funcIndex)];
+     // In wasm, the Code points to the function start via funcLineOrBytecode.
+     MOZ_ASSERT(!metadata().isAsmJS() && maybeBytecode_);
+     size_t offsetInModule = range.funcLineOrBytecode();
+     Decoder d(maybeBytecode_->begin() + offsetInModule,  maybeBytecode_->end(),
+               offsetInModule, /* error = */ nullptr);
+-    return DecodeLocalEntries(d, metadata().kind, metadata().temporaryHasGcTypes, locals);
++    return DecodeValidatedLocalEntries(d, locals);
+ }
+ 
+ ExprType
+ DebugState::debugGetResultType(uint32_t funcIndex)
+ {
+     MOZ_ASSERT(debugEnabled());
+     return metadata().debugFuncReturnTypes[funcIndex];
+ }
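
debugGetLocalTypes re-reads bytes that already passed full validation at compile time, so the patch splits out DecodeValidatedLocalEntries, which can skip the gcTypesEnabled and type-section checks that DecodeLocalEntries performs. A sketch of the assumed shape, reusing the Decoder and readValType stand-ins above; the real function builds ValType values rather than raw packed words:

    #include <vector>

    // Local entries are (count, type) pairs; on validated input the only
    // remaining failure mode is running out of memory while appending.
    bool DecodeValidatedLocalEntries(Decoder& d, std::vector<uint32_t>* locals) {
        uint32_t numEntries;
        if (!d.readVarU32(&numEntries))
            return false;
        for (uint32_t i = 0; i < numEntries; i++) {
            uint32_t count;
            uint8_t code;
            uint32_t refTypeIndex;
            if (!d.readVarU32(&count) || !readValType(d, &code, &refTypeIndex))
                return false;
            for (uint32_t j = 0; j < count; j++)
                locals->push_back((refTypeIndex << 8) | code);   // packed ValType
        }
        return true;   // no range or feature checks: validation already ran
    }
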
+diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
+--- a/js/src/wasm/WasmInstance.cpp
++++ b/js/src/wasm/WasmInstance.cpp
+@@ -128,16 +128,17 @@ Instance::callImport(JSContext* cx, uint
+             args[i].set(Int32Value(*(int32_t*)&argv[i]));
+             break;
+           case ValType::F32:
+             args[i].set(JS::CanonicalizedDoubleValue(*(float*)&argv[i]));
+             break;
+           case ValType::F64:
+             args[i].set(JS::CanonicalizedDoubleValue(*(double*)&argv[i]));
+             break;
++          case ValType::Ref:
+           case ValType::AnyRef: {
+             args[i].set(ObjectOrNullValue(*(JSObject**)&argv[i]));
+             break;
+           }
+           case ValType::I64:
+           case ValType::I8x16:
+           case ValType::I16x8:
+           case ValType::I32x4:
+@@ -200,16 +201,17 @@ Instance::callImport(JSContext* cx, uint
+ 
+     size_t numKnownArgs = Min(importArgs.length(), importFun->nargs());
+     for (uint32_t i = 0; i < numKnownArgs; i++) {
+         TypeSet::Type type = TypeSet::UnknownType();
+         switch (importArgs[i].code()) {
+           case ValType::I32:    type = TypeSet::Int32Type(); break;
+           case ValType::F32:    type = TypeSet::DoubleType(); break;
+           case ValType::F64:    type = TypeSet::DoubleType(); break;
++          case ValType::Ref:    MOZ_CRASH("case guarded above");
+           case ValType::AnyRef: MOZ_CRASH("case guarded above");
+           case ValType::I64:    MOZ_CRASH("NYI");
+           case ValType::I8x16:  MOZ_CRASH("NYI");
+           case ValType::I16x8:  MOZ_CRASH("NYI");
+           case ValType::I32x4:  MOZ_CRASH("NYI");
+           case ValType::F32x4:  MOZ_CRASH("NYI");
+           case ValType::B8x16:  MOZ_CRASH("NYI");
+           case ValType::B16x8:  MOZ_CRASH("NYI");
+@@ -781,16 +783,17 @@ Instance::callExport(JSContext* cx, uint
+           case ValType::F32:
+             if (!RoundFloat32(cx, v, (float*)&exportArgs[i]))
+                 return false;
+             break;
+           case ValType::F64:
+             if (!ToNumber(cx, v, (double*)&exportArgs[i]))
+                 return false;
+             break;
++          case ValType::Ref:
+           case ValType::AnyRef: {
+             if (!ToRef(cx, v, &exportArgs[i]))
+                 return false;
+             break;
+           }
+           case ValType::I8x16: {
+             SimdConstant simd;
+             if (!ToSimdConstant<Int8x16>(cx, v, &simd))
+@@ -872,31 +875,32 @@ Instance::callExport(JSContext* cx, uint
+         args.rval().set(ObjectValue(*obj));
+         return true;
+     }
+ 
+     void* retAddr = &exportArgs[0];
+ 
+     bool expectsObject = false;
+     JSObject* retObj = nullptr;
+-    switch (func.funcType().ret()) {
++    switch (func.funcType().ret().code()) {
+       case ExprType::Void:
+         args.rval().set(UndefinedValue());
+         break;
+       case ExprType::I32:
+         args.rval().set(Int32Value(*(int32_t*)retAddr));
+         break;
+       case ExprType::I64:
+         MOZ_CRASH("unexpected i64 flowing from callExport");
+       case ExprType::F32:
+         args.rval().set(NumberValue(*(float*)retAddr));
+         break;
+       case ExprType::F64:
+         args.rval().set(NumberValue(*(double*)retAddr));
+         break;
++      case ExprType::Ref:
+       case ExprType::AnyRef:
+         retObj = *(JSObject**)retAddr;
+         expectsObject = true;
+         break;
+       case ExprType::I8x16:
+         retObj = CreateSimd<Int8x16>(cx, (int8_t*)retAddr);
+         if (!retObj)
+             return false;
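
The reason Ref can join every AnyRef arm in callImport and callExport: at the JS boundary both reference types are represented as exactly one machine pointer (a JSObject* or null) in the flat argv array, so only the validator distinguishes a ref T from anyref. A stand-in sketch of the two directions visible above:

    #include <cstdint>

    struct JSObject;

    // Result direction (callExport): reinterpret the return slot.
    JSObject* unboxRefResult(void* retAddr) {
        return *reinterpret_cast<JSObject**>(retAddr);
    }

    // Argument direction (callImport/callExport): store the pointer back
    // into the 64-bit argv slot the wasm ABI uses.
    void storeRefArg(JSObject* obj, uint64_t* argvSlot) {
        *reinterpret_cast<JSObject**>(argvSlot) = obj;
    }
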
+diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp
+--- a/js/src/wasm/WasmIonCompile.cpp
++++ b/js/src/wasm/WasmIonCompile.cpp
+@@ -214,18 +214,19 @@ class FunctionCompiler
+                 ins = MConstant::NewInt64(alloc(), 0);
+                 break;
+               case ValType::F32:
+                 ins = MConstant::New(alloc(), Float32Value(0.f), MIRType::Float32);
+                 break;
+               case ValType::F64:
+                 ins = MConstant::New(alloc(), DoubleValue(0.0), MIRType::Double);
+                 break;
++              case ValType::Ref:
+               case ValType::AnyRef:
+-                MOZ_CRASH("ion support for anyref locale default value NYI");
++                MOZ_CRASH("ion support for ref/anyref value NYI");
+                 break;
+               case ValType::I8x16:
+                 ins = MSimdConstant::New(alloc(), SimdConstant::SplatX16(0), MIRType::Int8x16);
+                 break;
+               case ValType::I16x8:
+                 ins = MSimdConstant::New(alloc(), SimdConstant::SplatX8(0), MIRType::Int16x8);
+                 break;
+               case ValType::I32x4:
+@@ -2977,16 +2978,17 @@ SimdToLaneType(ValType type)
+       case ValType::F32x4:  return ValType::F32;
+       case ValType::B8x16:
+       case ValType::B16x8:
+       case ValType::B32x4:  return ValType::I32; // Boolean lanes are Int32 in asm.
+       case ValType::I32:
+       case ValType::I64:
+       case ValType::F32:
+       case ValType::F64:
++      case ValType::Ref:
+       case ValType::AnyRef:
+         break;
+     }
+     MOZ_CRASH("bad simd type");
+ }
+ 
+ static bool
+ EmitExtractLane(FunctionCompiler& f, ValType operandType, SimdSign sign)
+@@ -3258,16 +3260,17 @@ EmitSimdCtor(FunctionCompiler& f, ValTyp
+         f.iter().setResult(f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3],
+                            MIRType::Bool32x4));
+         return true;
+       }
+       case ValType::I32:
+       case ValType::I64:
+       case ValType::F32:
+       case ValType::F64:
++      case ValType::Ref:
+       case ValType::AnyRef:
+         break;
+     }
+     MOZ_CRASH("unexpected SIMD type");
+ }
+ 
+ static bool
+ EmitSimdOp(FunctionCompiler& f, ValType type, SimdOperation op, SimdSign sign)
+@@ -4472,17 +4475,17 @@ wasm::IonCompileFunctions(const ModuleEn
+     for (const FuncCompileInput& func : inputs) {
+         Decoder d(func.begin, func.end, func.lineOrBytecode, error);
+ 
+         // Build the local types vector.
+ 
+         ValTypeVector locals;
+         if (!locals.appendAll(env.funcTypes[func.index]->args()))
+             return false;
+-        if (!DecodeLocalEntries(d, env.kind, env.gcTypesEnabled, &locals))
++        if (!DecodeLocalEntries(d, env.kind, env.types, env.gcTypesEnabled, &locals))
+             return false;
+ 
+         // Set up for Ion compilation.
+ 
+         const JitCompileOptions options;
+         MIRGraph graph(&alloc);
+         CompileInfo compileInfo(locals.length());
+         MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
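
Both compilers lean on the same ValType-to-MIR-type mapping: Ref reuses AnyRef's representation, a GC pointer, so neither baseline nor Ion needs a new register class for typed references. An assumed sketch of that mapping (stand-in enums; the real ToMIRType lives in the shared wasm headers):

    #include <cstdint>

    enum class MIRType { Int32, Int64, Float32, Double, Pointer };
    enum class VT : uint8_t { I32, I64, F32, F64, Ref, AnyRef };   // stand-in codes

    MIRType ToMIRType(VT code) {
        switch (code) {
          case VT::I32:    return MIRType::Int32;
          case VT::I64:    return MIRType::Int64;
          case VT::F32:    return MIRType::Float32;
          case VT::F64:    return MIRType::Double;
          case VT::Ref:                     // same machine representation
          case VT::AnyRef: return MIRType::Pointer;
        }
        return MIRType::Pointer;            // unreachable; silences warnings
    }
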
+diff --git a/js/src/wasm/WasmOpIter.h b/js/src/wasm/WasmOpIter.h
+--- a/js/src/wasm/WasmOpIter.h
++++ b/js/src/wasm/WasmOpIter.h
+@@ -34,60 +34,129 @@ enum class LabelKind : uint8_t
+     Block,
+     Loop,
+     Then,
+     Else
+ };
+ 
+ // The type of values on the operand stack during validation. The Any type
+ // represents the type of a value produced by an unconditional branch.
+-enum class StackType
++
++class StackType
+ {
+-    I32    = uint8_t(ValType::I32),
+-    I64    = uint8_t(ValType::I64),
+-    F32    = uint8_t(ValType::F32),
+-    F64    = uint8_t(ValType::F64),
+-
+-    I8x16  = uint8_t(ValType::I8x16),
+-    I16x8  = uint8_t(ValType::I16x8),
+-    I32x4  = uint8_t(ValType::I32x4),
+-    F32x4  = uint8_t(ValType::F32x4),
+-    B8x16  = uint8_t(ValType::B8x16),
+-    B16x8  = uint8_t(ValType::B16x8),
+-    B32x4  = uint8_t(ValType::B32x4),
+-
+-    AnyRef = uint8_t(ValType::AnyRef),
+-
+-    Any    = uint8_t(TypeCode::Limit),
++    PackedTypeCode tc_;
++
++#ifdef DEBUG
++    bool isValidCode() {
++        switch (UnpackTypeCodeType(tc_)) {
++          case TypeCode::I32:
++          case TypeCode::I64:
++          case TypeCode::F32:
++          case TypeCode::F64:
++          case TypeCode::I8x16:
++          case TypeCode::I16x8:
++          case TypeCode::I32x4:
++          case TypeCode::F32x4:
++          case TypeCode::B8x16:
++          case TypeCode::B16x8:
++          case TypeCode::B32x4:
++          case TypeCode::AnyRef:
++          case TypeCode::Ref:
++          case TypeCode::Limit:
++            return true;
++          default:
++            return false;
++        }
++    }
++#endif
++
++  public:
++    enum Code {
++        I32    = uint8_t(ValType::I32),
++        I64    = uint8_t(ValType::I64),
++        F32    = uint8_t(ValType::F32),
++        F64    = uint8_t(ValType::F64),
++
++        I8x16  = uint8_t(ValType::I8x16),
++        I16x8  = uint8_t(ValType::I16x8),
++        I32x4  = uint8_t(ValType::I32x4),
++        F32x4  = uint8_t(ValType::F32x4),
++        B8x16  = uint8_t(ValType::B8x16),
++        B16x8  = uint8_t(ValType::B16x8),
++        B32x4  = uint8_t(ValType::B32x4),
++
++        AnyRef = uint8_t(ValType::AnyRef),
++        Ref    = uint8_t(ValType::Ref),
++
++        Any    = uint8_t(TypeCode::Limit),
++    };
++
++    StackType() : tc_(InvalidPackedTypeCode()) {}
++
++    MOZ_IMPLICIT StackType(Code c)
++      : tc_(PackTypeCode(TypeCode(c)))
++    {
++        MOZ_ASSERT(isValidCode());
++    }
++
++    explicit StackType(const ValType& t)
++      : tc_(t.packed())
++    {}
++
++    PackedTypeCode packed() const {
++        return tc_;
++    }
++
++    Code code() const {
++        return Code(UnpackTypeCodeType(tc_));
++    }
++
++    uint32_t refTypeIndex() const {
++        return UnpackTypeCodeIndex(tc_);
++    }
++
++    bool isRef() const {
++        return UnpackTypeCodeType(tc_) == TypeCode::Ref;
++    }
++
++    bool isRefOrAnyRef() const {
++        TypeCode tc = UnpackTypeCodeType(tc_);
++        return tc == TypeCode::Ref || tc == TypeCode::AnyRef;
++    }
++
++    bool operator ==(const StackType& that) const {
++        return tc_ == that.tc_;
++    }
++
++    bool operator !=(const StackType& that) const {
++        return tc_ != that.tc_;
++    }
++
++    bool operator ==(Code that) const {
++        MOZ_ASSERT(that != Code::Ref);
++        return code() == that;
++    }
++
++    bool operator !=(Code that) const {
++        return !(*this == that);
++    }
+ };
+ 
+-static inline StackType
+-ToStackType(ValType type)
+-{
+-    return StackType(type.bitsUnsafe());
+-}
+-
+ static inline ValType
+ NonAnyToValType(StackType type)
+ {
+     MOZ_ASSERT(type != StackType::Any);
+-    return ValType::fromTypeCode(uint32_t(type));
+-}
+-
+-static inline bool
+-IsRefType(StackType st)
+-{
+-    return IsRefType(NonAnyToValType(st));
++    return ValType(type.packed());
+ }
+ 
+ static inline bool
+ IsSubtypeOf(StackType one, StackType two)
+ {
+-    MOZ_ASSERT(IsRefType(one));
+-    MOZ_ASSERT(IsRefType(two));
++    MOZ_ASSERT(one.isRefOrAnyRef());
++    MOZ_ASSERT(two.isRefOrAnyRef());
+     return one == two || two == StackType::AnyRef;
+ }
+ 
+ static inline bool
+ Unify(HasGcTypes gcTypesEnabled, StackType observed, StackType expected, StackType* result)
+ {
+     if (MOZ_LIKELY(observed == expected)) {
+         *result = observed;
+@@ -99,18 +168,18 @@ Unify(HasGcTypes gcTypesEnabled, StackTy
+         return true;
+     }
+ 
+     if (expected == StackType::Any) {
+         *result = observed;
+         return true;
+     }
+ 
+-    if (gcTypesEnabled == HasGcTypes::True && IsRefType(observed) && IsRefType(expected) &&
+-        IsSubtypeOf(observed, expected))
++    if (gcTypesEnabled == HasGcTypes::True && observed.isRefOrAnyRef() &&
++        expected.isRefOrAnyRef() && IsSubtypeOf(observed, expected))
+     {
+         *result = expected;
+         return true;
+     }
+ 
+     return false;
+ }
+ 
+@@ -127,17 +196,17 @@ Join(HasGcTypes gcTypesEnabled, StackTyp
+         return true;
+     }
+ 
+     if (two == StackType::Any) {
+         *result = one;
+         return true;
+     }
+ 
+-    if (gcTypesEnabled == HasGcTypes::True && IsRefType(one) && IsRefType(two)) {
++    if (gcTypesEnabled == HasGcTypes::True && one.isRefOrAnyRef() && two.isRefOrAnyRef()) {
+         if (IsSubtypeOf(two, one)) {
+             *result = one;
+             return true;
+         }
+ 
+         if (IsSubtypeOf(one, two)) {
+             *result = two;
+             return true;
+@@ -311,28 +380,28 @@ class ControlStackEntry<Nothing>
+ 
+ template <typename Value>
+ class TypeAndValue
+ {
+     StackType type_;
+     Value value_;
+ 
+   public:
+-    TypeAndValue() : type_(StackType(TypeCode::Limit)), value_() {}
++    TypeAndValue() : type_(StackType::Any), value_() {}
+     explicit TypeAndValue(StackType type)
+       : type_(type), value_()
+     {}
+     explicit TypeAndValue(ValType type)
+-      : type_(ToStackType(type)), value_()
++      : type_(StackType(type)), value_()
+     {}
+     TypeAndValue(StackType type, Value value)
+       : type_(type), value_(value)
+     {}
+     TypeAndValue(ValType type, Value value)
+-      : type_(ToStackType(type)), value_(value)
++      : type_(StackType(type)), value_(value)
+     {}
+     StackType type() const {
+         return type_;
+     }
+     StackType& typeRef() {
+         return type_;
+     }
+     Value value() const {
+@@ -345,21 +414,21 @@ class TypeAndValue
+ 
+ // Specialization for when there is no additional data needed.
+ template <>
+ class TypeAndValue<Nothing>
+ {
+     StackType type_;
+ 
+   public:
+-    TypeAndValue() : type_(StackType(TypeCode::Limit)) {}
++    TypeAndValue() : type_(StackType::Any) {}
+     explicit TypeAndValue(StackType type) : type_(type) {}
+-    explicit TypeAndValue(ValType type) : type_(ToStackType(type)) {}
++    explicit TypeAndValue(ValType type) : type_(StackType(type)) {}
+     TypeAndValue(StackType type, Nothing value) : type_(type) {}
+-    TypeAndValue(ValType type, Nothing value) : type_(ToStackType(type)) {}
++    TypeAndValue(ValType type, Nothing value) : type_(StackType(type)) {}
+ 
+     StackType type() const { return type_; }
+     StackType& typeRef() { return type_; }
+     Nothing value() const { return Nothing(); }
+     void setValue(Nothing value) {}
+ };
+ 
+ // An iterator over the bytes of a function body. It performs validation
+@@ -453,17 +522,17 @@ class MOZ_STACK_CLASS OpIter : private P
+     MOZ_MUST_USE bool readLinearMemoryAddress(uint32_t byteSize, LinearMemoryAddress<Value>* addr);
+     MOZ_MUST_USE bool readLinearMemoryAddressAligned(uint32_t byteSize, LinearMemoryAddress<Value>* addr);
+     MOZ_MUST_USE bool readBlockType(ExprType* expr);
+     MOZ_MUST_USE bool popCallArgs(const ValTypeVector& expectedTypes, Vector<Value, 8, SystemAllocPolicy>* values);
+ 
+     MOZ_MUST_USE bool popAnyType(StackType* type, Value* value);
+     MOZ_MUST_USE bool typeMismatch(StackType actual, StackType expected);
+     MOZ_MUST_USE bool popWithType(StackType expectedType, Value* value);
+-    MOZ_MUST_USE bool popWithType(ValType valType, Value* value) { return popWithType(ToStackType(valType), value); }
++    MOZ_MUST_USE bool popWithType(ValType valType, Value* value) { return popWithType(StackType(valType), value); }
+     MOZ_MUST_USE bool popWithType(ExprType expectedType, Value* value);
+     MOZ_MUST_USE bool topWithType(ExprType expectedType, Value* value);
+     MOZ_MUST_USE bool topWithType(ValType valType, Value* value);
+ 
+     MOZ_MUST_USE bool pushControl(LabelKind kind, ExprType type);
+     MOZ_MUST_USE bool checkStackAtEndOfBlock(ExprType* type, Value* value);
+     MOZ_MUST_USE bool getControl(uint32_t relativeDepth, ControlStackEntry<ControlItem>** controlEntry);
+     MOZ_MUST_USE bool checkBranchValue(uint32_t relativeDepth, ExprType* type, Value* value);
+@@ -480,17 +549,17 @@ class MOZ_STACK_CLASS OpIter : private P
+     }
+     MOZ_MUST_USE bool push(TypeAndValue<Value> tv) {
+         return valueStack_.append(tv);
+     }
+     void infalliblePush(StackType t) {
+         valueStack_.infallibleEmplaceBack(t);
+     }
+     void infalliblePush(ValType t) {
+-        valueStack_.infallibleEmplaceBack(ToStackType(t));
++        valueStack_.infallibleEmplaceBack(StackType(t));
+     }
+     void infalliblePush(TypeAndValue<Value> tv) {
+         valueStack_.infallibleAppend(tv);
+     }
+ 
+     void afterUnconditionalBranch() {
+         valueStack_.shrinkTo(controlStack_.back().valueStackStart());
+         controlStack_.back().setPolymorphicBase();
+@@ -593,17 +662,17 @@ class MOZ_STACK_CLASS OpIter : private P
+     MOZ_MUST_USE bool readF64Const(double* f64);
+     MOZ_MUST_USE bool readI8x16Const(I8x16* i8x16);
+     MOZ_MUST_USE bool readI16x8Const(I16x8* i16x8);
+     MOZ_MUST_USE bool readI32x4Const(I32x4* i32x4);
+     MOZ_MUST_USE bool readF32x4Const(F32x4* f32x4);
+     MOZ_MUST_USE bool readB8x16Const(I8x16* i8x16);
+     MOZ_MUST_USE bool readB16x8Const(I16x8* i16x8);
+     MOZ_MUST_USE bool readB32x4Const(I32x4* i32x4);
+-    MOZ_MUST_USE bool readRefNull();
++    MOZ_MUST_USE bool readRefNull(ValType* type);
+     MOZ_MUST_USE bool readCall(uint32_t* calleeIndex, ValueVector* argValues);
+     MOZ_MUST_USE bool readCallIndirect(uint32_t* funcTypeIndex, Value* callee, ValueVector* argValues);
+     MOZ_MUST_USE bool readOldCallDirect(uint32_t numFuncImports, uint32_t* funcIndex,
+                                         ValueVector* argValues);
+     MOZ_MUST_USE bool readOldCallIndirect(uint32_t* funcTypeIndex, Value* callee, ValueVector* argValues);
+     MOZ_MUST_USE bool readWake(LinearMemoryAddress<Value>* addr, Value* count);
+     MOZ_MUST_USE bool readWait(LinearMemoryAddress<Value>* addr,
+                                ValType resultType,
+@@ -845,20 +914,20 @@ OpIter<Policy>::topWithType(ValType expe
+ 
+         if (valueStack_.empty())
+             return fail("reading value from empty stack");
+         return fail("reading value from outside block");
+     }
+ 
+     TypeAndValue<Value>& tv = valueStack_.back();
+ 
+-    if (MOZ_UNLIKELY(!Unify(env_.gcTypesEnabled, tv.type(), ToStackType(expectedType),
++    if (MOZ_UNLIKELY(!Unify(env_.gcTypesEnabled, tv.type(), StackType(expectedType),
+                             &tv.typeRef())))
+     {
+-        return typeMismatch(tv.type(), ToStackType(expectedType));
++        return typeMismatch(tv.type(), StackType(expectedType));
+     }
+ 
+     *value = tv.value();
+     return true;
+ }
+ 
+ template <typename Policy>
+ inline bool
+@@ -907,47 +976,51 @@ OpIter<Policy>::getControl(uint32_t rela
+     *controlEntry = &controlStack_[controlStack_.length() - 1 - relativeDepth];
+     return true;
+ }
+ 
+ template <typename Policy>
+ inline bool
+ OpIter<Policy>::readBlockType(ExprType* type)
+ {
+-    uint8_t unchecked;
+-    if (!d_.readBlockType(&unchecked))
++    uint8_t uncheckedCode;
++    uint32_t uncheckedRefTypeIndex;
++    if (!d_.readBlockType(&uncheckedCode, &uncheckedRefTypeIndex))
+         return fail("unable to read block signature");
+ 
+     bool known = false;
+-    switch (unchecked) {
++    switch (uncheckedCode) {
+       case uint8_t(ExprType::Void):
+       case uint8_t(ExprType::I32):
+       case uint8_t(ExprType::I64):
+       case uint8_t(ExprType::F32):
+       case uint8_t(ExprType::F64):
+       case uint8_t(ExprType::I8x16):
+       case uint8_t(ExprType::I16x8):
+       case uint8_t(ExprType::I32x4):
+       case uint8_t(ExprType::F32x4):
+       case uint8_t(ExprType::B8x16):
+       case uint8_t(ExprType::B16x8):
+       case uint8_t(ExprType::B32x4):
+         known = true;
+         break;
++      case uint8_t(ExprType::Ref):
++        known = env_.gcTypesEnabled == HasGcTypes::True;
++        break;
+       case uint8_t(ExprType::AnyRef):
+         known = env_.gcTypesEnabled == HasGcTypes::True;
+         break;
+       case uint8_t(ExprType::Limit):
+         break;
+     }
+ 
+     if (!known)
+         return fail("invalid inline block type");
+ 
+-    *type = ExprType(unchecked);
++    *type = ExprType(ExprType::Code(uncheckedCode), uncheckedRefTypeIndex);
+     return true;
+ }
+ 
+ template <typename Policy>
+ inline bool
+ OpIter<Policy>::readOp(OpBytes* op)
+ {
+     MOZ_ASSERT(!controlStack_.empty());
+@@ -1670,23 +1743,31 @@ OpIter<Policy>::readB32x4Const(I32x4* i3
+     MOZ_ASSERT(Classify(op_) == OpKind::B32x4);
+ 
+     return readFixedI32x4(i32x4) &&
+            push(ValType::B32x4);
+ }
+ 
+ template <typename Policy>
+ inline bool
+-OpIter<Policy>::readRefNull()
++OpIter<Policy>::readRefNull(ValType* type)
+ {
+     MOZ_ASSERT(Classify(op_) == OpKind::RefNull);
+-    uint8_t valType;
+-    if (!d_.readValType(&valType) || valType != uint8_t(ValType::AnyRef))
++    uint8_t code;
++    uint32_t refTypeIndex;
++    if (!d_.readValType(&code, &refTypeIndex))
+         return fail("unknown nullref type");
+-    return push(StackType::AnyRef);
++    if (code == uint8_t(TypeCode::Ref)) {
++        if (refTypeIndex > MaxTypes)
++            return fail("invalid nullref type");
++    } else if (code != uint8_t(TypeCode::AnyRef)) {
++        return fail("unknown nullref type");
++    }
++    *type = ValType(ValType::Code(code), refTypeIndex);
++    return push(StackType(*type));
+ }
+ 
+ template <typename Policy>
+ inline bool
+ OpIter<Policy>::popCallArgs(const ValTypeVector& expectedTypes, ValueVector* values)
+ {
+     // Iterate through the argument types backward so that pops occur in the
+     // right order.
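
The crux of the WasmOpIter.h change: StackType stops being a bare enum and, like ValType and ExprType, wraps a PackedTypeCode, so a ref T on the operand stack carries its type index while the one-byte types keep their old enumerator values. The accessors above imply a packing like the following sketch; the exact width and sentinel are assumptions:

    #include <cstdint>

    typedef uint32_t PackedTypeCode;

    enum class TypeCode : uint8_t { Ref = 0x6e, AnyRef = 0x6f, Limit = 0x80 };
    constexpr uint32_t NoRefTypeIndex = 0xffffffu;   // assumed sentinel

    inline PackedTypeCode PackTypeCode(TypeCode tc, uint32_t refTypeIndex = NoRefTypeIndex) {
        return (refTypeIndex << 8) | uint32_t(tc);   // index above, code in low byte
    }
    inline TypeCode UnpackTypeCodeType(PackedTypeCode ptc) {
        return TypeCode(ptc & 0xff);
    }
    inline uint32_t UnpackTypeCodeIndex(PackedTypeCode ptc) {
        return ptc >> 8;                             // meaningful only for Ref
    }
    inline PackedTypeCode InvalidPackedTypeCode() {
        return PackTypeCode(TypeCode::Limit);
    }

Putting both fields in one word is what lets StackType's operator== remain a single integer compare, so Unify and Join above get structural equality of ref types (same code and same index) for free.
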
+diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp
+--- a/js/src/wasm/WasmStubs.cpp
++++ b/js/src/wasm/WasmStubs.cpp
+@@ -190,33 +190,34 @@ SetupABIArguments(MacroAssembler& masm, 
+         }
+     }
+ }
+ 
+ static void
+ StoreABIReturn(MacroAssembler& masm, const FuncExport& fe, Register argv)
+ {
+     // Store the return value in argv[0].
+-    switch (fe.funcType().ret()) {
++    switch (fe.funcType().ret().code()) {
+       case ExprType::Void:
+         break;
+       case ExprType::I32:
+         masm.store32(ReturnReg, Address(argv, 0));
+         break;
+       case ExprType::I64:
+         masm.store64(ReturnReg64, Address(argv, 0));
+         break;
+       case ExprType::F32:
+         masm.canonicalizeFloat(ReturnFloat32Reg);
+         masm.storeFloat32(ReturnFloat32Reg, Address(argv, 0));
+         break;
+       case ExprType::F64:
+         masm.canonicalizeDouble(ReturnDoubleReg);
+         masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
+         break;
++      case ExprType::Ref:
+       case ExprType::AnyRef:
+         masm.storePtr(ReturnReg, Address(argv, 0));
+         break;
+       case ExprType::I8x16:
+       case ExprType::I16x8:
+       case ExprType::I32x4:
+       case ExprType::B8x16:
+       case ExprType::B16x8:
+@@ -804,32 +805,35 @@ GenerateJitEntry(MacroAssembler& masm, s
+     // stub; otherwise the FP value is still set to the parent ion frame value.
+     Label exception;
+     masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &exception);
+ 
+     // Pop arguments.
+     masm.freeStack(frameSize);
+ 
+     // Store the return value in the JSReturnOperand.
+-    switch (fe.funcType().ret()) {
++    switch (fe.funcType().ret().code()) {
+       case ExprType::Void:
+         masm.moveValue(UndefinedValue(), JSReturnOperand);
+         break;
+       case ExprType::I32:
+         masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
+         break;
+       case ExprType::F32:
+         masm.canonicalizeFloat(ReturnFloat32Reg);
+         masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
+         masm.boxDouble(ReturnDoubleReg, JSReturnOperand, ScratchDoubleReg);
+         break;
+       case ExprType::F64:
+         masm.canonicalizeDouble(ReturnDoubleReg);
+         masm.boxDouble(ReturnDoubleReg, JSReturnOperand, ScratchDoubleReg);
+         break;
++      case ExprType::Ref:
++        MOZ_CRASH("return ref in jitentry NYI");
++        break;
+       case ExprType::AnyRef:
+         MOZ_CRASH("return anyref in jitentry NYI");
+         break;
+       case ExprType::I64:
+       case ExprType::I8x16:
+       case ExprType::I16x8:
+       case ExprType::I32x4:
+       case ExprType::B8x16:
+@@ -1169,17 +1173,17 @@ GenerateImportInterpExit(MacroAssembler&
+         masm.computeEffectiveAddress(argv, scratch);
+         masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
+     }
+     i++;
+     MOZ_ASSERT(i.done());
+ 
+     // Make the call, test whether it succeeded, and extract the return value.
+     AssertStackAlignment(masm, ABIStackAlignment);
+-    switch (fi.funcType().ret()) {
++    switch (fi.funcType().ret().code()) {
+       case ExprType::Void:
+         masm.call(SymbolicAddress::CallImport_Void);
+         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+         break;
+       case ExprType::I32:
+         masm.call(SymbolicAddress::CallImport_I32);
+         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+         masm.load32(argv, ReturnReg);
+@@ -1194,16 +1198,17 @@ GenerateImportInterpExit(MacroAssembler&
+         masm.loadDouble(argv, ReturnDoubleReg);
+         masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
+         break;
+       case ExprType::F64:
+         masm.call(SymbolicAddress::CallImport_F64);
+         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+         masm.loadDouble(argv, ReturnDoubleReg);
+         break;
++      case ExprType::Ref:
+       case ExprType::AnyRef:
+         masm.call(SymbolicAddress::CallImport_Ref);
+         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+         masm.loadPtr(argv, ReturnReg);
+         break;
+       case ExprType::I8x16:
+       case ExprType::I16x8:
+       case ExprType::I32x4:
+@@ -1359,31 +1364,34 @@ GenerateImportJitExit(MacroAssembler& ma
+         Label ok;
+         masm.branchTestMagic(Assembler::NotEqual, JSReturnOperand, &ok);
+         masm.breakpoint();
+         masm.bind(&ok);
+     }
+ #endif
+ 
+     Label oolConvert;
+-    switch (fi.funcType().ret()) {
++    switch (fi.funcType().ret().code()) {
+       case ExprType::Void:
+         break;
+       case ExprType::I32:
+         masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg, &oolConvert);
+         break;
+       case ExprType::I64:
+         masm.breakpoint();
+         break;
+       case ExprType::F32:
+         masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg, &oolConvert);
+         break;
+       case ExprType::F64:
+         masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
+         break;
++      case ExprType::Ref:
++        MOZ_CRASH("ref returned by import (jit exit) NYI");
++        break;
+       case ExprType::AnyRef:
+         MOZ_CRASH("anyref returned by import (jit exit) NYI");
+         break;
+       case ExprType::I8x16:
+       case ExprType::I16x8:
+       case ExprType::I32x4:
+       case ExprType::F32x4:
+       case ExprType::B8x16:
+@@ -1440,17 +1448,17 @@ GenerateImportJitExit(MacroAssembler& ma
+             masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
+         }
+         i++;
+         MOZ_ASSERT(i.done());
+ 
+         // Call coercion function. Note that right after the call, the value of
+         // FP is correct because FP is non-volatile in the native ABI.
+         AssertStackAlignment(masm, ABIStackAlignment);
+-        switch (fi.funcType().ret()) {
++        switch (fi.funcType().ret().code()) {
+           case ExprType::I32:
+             masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
+             masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+             masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnReg);
+             break;
+           case ExprType::F64:
+           case ExprType::F32:
+             masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
+diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp
+--- a/js/src/wasm/WasmTextToBinary.cpp
++++ b/js/src/wasm/WasmTextToBinary.cpp
+@@ -115,16 +115,17 @@ class WasmToken
+ #endif
+         Module,
+         Mutable,
+         Name,
+         Nop,
+         Offset,
+         OpenParen,
+         Param,
++        Ref,
+         RefNull,
+         Result,
+         Return,
+         SetGlobal,
+         SetLocal,
+         Shared,
+         SignedInteger,
+         Start,
+@@ -378,16 +379,17 @@ class WasmToken
+           case Memory:
+           case NegativeZero:
+           case Local:
+           case Module:
+           case Name:
+           case Offset:
+           case OpenParen:
+           case Param:
++          case Ref:
+           case Result:
+           case Shared:
+           case SignedInteger:
+           case Start:
+           case Struct:
+           case Table:
+           case Text:
+           case Then:
+@@ -1690,22 +1692,22 @@ WasmTokenStream::next()
+             return WasmToken(WasmToken::Param, begin, cur_);
+         break;
+ 
+       case 'r':
+         if (consume(u"result"))
+             return WasmToken(WasmToken::Result, begin, cur_);
+         if (consume(u"return"))
+             return WasmToken(WasmToken::Return, begin, cur_);
+-        if (consume(u"ref.")) {
+-            if (consume(u"null"))
++        if (consume(u"ref")) {
++            if (consume(u".null"))
+                 return WasmToken(WasmToken::RefNull, begin, cur_);
+-            if (consume(u"is_null"))
++            if (consume(u".is_null"))
+                 return WasmToken(WasmToken::UnaryOpcode, Op::RefIsNull, begin, cur_);
+-            break;
++            return WasmToken(WasmToken::Ref, begin, cur_);
+         }
+         break;
+ 
+       case 's':
+         if (consume(u"select"))
+             return WasmToken(WasmToken::TernaryOpcode, Op::Select, begin, cur_);
+         if (consume(u"set_global"))
+             return WasmToken(WasmToken::SetGlobal, begin, cur_);
+@@ -1827,23 +1829,65 @@ ParseExprList(WasmParseContext& c, AstEx
+ 
+         break;
+     }
+ 
+     return true;
+ }
+ 
+ static bool
+-ParseBlockSignature(WasmParseContext& c, ExprType* type)
++MaybeParseValType(WasmParseContext& c, AstValType* type)
+ {
+     WasmToken token;
+-    if (c.ts.getIf(WasmToken::ValueType, &token))
+-        *type = ToExprType(token.valueType());
++
++    if (c.ts.getIf(WasmToken::ValueType, &token)) {
++        *type = AstValType(token.valueType());
++    } else if (c.ts.getIf(WasmToken::OpenParen, &token)) {
++        if (c.ts.getIf(WasmToken::Ref)) {
++            AstRef target;
++            if (!c.ts.matchRef(&target, c.error) ||
++                !c.ts.match(WasmToken::CloseParen, c.error))
++            {
++                return false;
++            }
++            *type = AstValType(target);
++        } else {
++            c.ts.unget(token);
++        }
++    }
++    return true;
++}
++
++static bool
++ParseValType(WasmParseContext& c, AstValType* type)
++{
++    if (!MaybeParseValType(c, type))
++        return false;
++
++    if (!type->isValid()) {
++        c.ts.generateError(c.ts.peek(), "expected value type", c.error);
++        return false;
++    }
++
++    return true;
++}
++
++static bool
++ParseBlockSignature(WasmParseContext& c, AstExprType* type)
++{
++    WasmToken token;
++    AstValType vt;
++
++    if (!MaybeParseValType(c, &vt))
++        return false;
++
++    if (vt.isValid())
++        *type = AstExprType(vt);
+     else
+-        *type = ExprType::Void;
++        *type = AstExprType(ExprType::Void);
+ 
+     return true;
+ }
+ 
+ static bool
+ MaybeMatchName(WasmParseContext& c, const AstName& name)
+ {
+     WasmToken tok;
+@@ -1878,17 +1922,17 @@ ParseBlock(WasmParseContext& c, Op op, b
+     if (op == Op::Loop) {
+         AstName maybeName = c.ts.getIfName();
+         if (!maybeName.empty()) {
+             otherName = name;
+             name = maybeName;
+         }
+     }
+ 
+-    ExprType type;
++    AstExprType type(ExprType::Limit);
+     if (!ParseBlockSignature(c, &type))
+         return nullptr;
+ 
+     if (!ParseExprList(c, &exprs))
+         return nullptr;
+ 
+     if (!inParens) {
+         if (!c.ts.match(WasmToken::End, c.error))
+@@ -2485,17 +2529,17 @@ ParseDrop(WasmParseContext& c, bool inPa
+     return new(c.lifo) AstDrop(*value);
+ }
+ 
+ static AstIf*
+ ParseIf(WasmParseContext& c, bool inParens)
+ {
+     AstName name = c.ts.getIfName();
+ 
+-    ExprType type;
++    AstExprType type(ExprType::Limit);
+     if (!ParseBlockSignature(c, &type))
+         return nullptr;
+ 
+     AstExpr* cond = ParseExpr(c, inParens);
+     if (!cond)
+         return nullptr;
+ 
+     if (inParens) {
+@@ -3011,23 +3055,27 @@ ParseMemFill(WasmParseContext& c, bool i
+     return new(c.lifo) AstMemFill(start, val, len);
+ }
+ #endif
+ 
+ static AstExpr*
+ ParseRefNull(WasmParseContext& c)
+ {
+     WasmToken token;
+-    if (!c.ts.match(WasmToken::ValueType, &token, c.error))
++    AstValType vt;
++
++    if (!ParseValType(c, &vt))
+         return nullptr;
+-    if (token.valueType() != ValType::AnyRef) {
+-        c.ts.generateError(token, "only anyref is supported for nullref", c.error);
++
++    if (!vt.isRefType()) {
++        c.ts.generateError(c.ts.peek(), "ref.null requires ref type", c.error);
+         return nullptr;
+     }
+-    return new(c.lifo) AstRefNull(ValType::AnyRef);
++
++    return new(c.lifo) AstRefNull(vt);
+ }
+ 
+ static AstExpr*
+ ParseExprBody(WasmParseContext& c, WasmToken token, bool inParens)
+ {
+     if (!CheckRecursionLimitDontReport(c.stackLimit))
+         return nullptr;
+     switch (token.kind()) {
+@@ -3121,51 +3169,54 @@ ParseExprInsideParens(WasmParseContext& 
+     WasmToken token = c.ts.get();
+ 
+     return ParseExprBody(c, token, true);
+ }
+ 
+ static bool
+ ParseValueTypeList(WasmParseContext& c, AstValTypeVector* vec)
+ {
+-    WasmToken token;
+-    while (c.ts.getIf(WasmToken::ValueType, &token)) {
+-        if (!vec->append(token.valueType()))
++    for (;;) {
++        AstValType vt;
++        if (!MaybeParseValType(c, &vt))
+             return false;
+-    }
+-
++        if (!vt.isValid())
++            break;
++        if (!vec->append(vt))
++            return false;
++    }
+     return true;
+ }
+ 
+ static bool
+-ParseResult(WasmParseContext& c, ExprType* result)
+-{
+-    if (*result != ExprType::Void) {
++ParseResult(WasmParseContext& c, AstExprType* result)
++{
++    if (!result->isVoid()) {
+         c.ts.generateError(c.ts.peek(), c.error);
+         return false;
+     }
+ 
+-    WasmToken token;
+-    if (!c.ts.match(WasmToken::ValueType, &token, c.error))
++    AstValType type;
++    if (!ParseValType(c, &type))
+         return false;
+ 
+-    *result = ToExprType(token.valueType());
++    *result = AstExprType(type);
+     return true;
+ }
+ 
+ static bool
+ ParseLocalOrParam(WasmParseContext& c, AstNameVector* locals, AstValTypeVector* localTypes)
+ {
+     if (c.ts.peek().kind() != WasmToken::Name)
+         return locals->append(AstName()) && ParseValueTypeList(c, localTypes);
+ 
+-    WasmToken token;
++    AstValType type;
+     return locals->append(c.ts.get().name()) &&
+-           c.ts.match(WasmToken::ValueType, &token, c.error) &&
+-           localTypes->append(token.valueType());
++           ParseValType(c, &type) &&
++           localTypes->append(type);
+ }
+ 
+ static bool
+ ParseInlineImport(WasmParseContext& c, InlineImport* import)
+ {
+     return c.ts.match(WasmToken::Text, &import->module, c.error) &&
+            c.ts.match(WasmToken::Text, &import->field, c.error);
+ }
+@@ -3197,17 +3248,17 @@ MaybeParseTypeUse(WasmParseContext& c, A
+     }
+     return true;
+ }
+ 
+ static bool
+ ParseFuncSig(WasmParseContext& c, AstFuncType* funcType)
+ {
+     AstValTypeVector args(c.lifo);
+-    ExprType result = ExprType::Void;
++    AstExprType result = AstExprType(ExprType::Void);
+ 
+     while (c.ts.getIf(WasmToken::OpenParen)) {
+         WasmToken token = c.ts.get();
+         switch (token.kind()) {
+           case WasmToken::Param:
+             if (!ParseValueTypeList(c, &args))
+                 return false;
+             break;
+@@ -3292,17 +3343,17 @@ ParseFunc(WasmParseContext& c, AstModule
+     }
+ 
+     AstRef funcTypeRef;
+     if (!MaybeParseTypeUse(c, &funcTypeRef))
+         return false;
+ 
+     AstExprVector body(c.lifo);
+ 
+-    ExprType result = ExprType::Void;
++    AstExprType result = AstExprType(ExprType::Void);
+     while (c.ts.getIf(WasmToken::OpenParen)) {
+         WasmToken token = c.ts.get();
+         switch (token.kind()) {
+           case WasmToken::Local:
+             if (!ParseLocalOrParam(c, &locals, &vars))
+                 return false;
+             break;
+           case WasmToken::Param:
+@@ -3338,43 +3389,43 @@ ParseFunc(WasmParseContext& c, AstModule
+         funcTypeRef.setIndex(funcTypeIndex);
+     }
+ 
+     auto* func = new(c.lifo) AstFunc(funcName, funcTypeRef, std::move(vars), std::move(locals), std::move(body));
+     return func && module->append(func);
+ }
+ 
+ static bool
+-ParseGlobalType(WasmParseContext& c, WasmToken* typeToken, bool* isMutable);
++ParseGlobalType(WasmParseContext& c, AstValType* type, bool* isMutable);
+ 
+ static bool
+ ParseStructFields(WasmParseContext& c, AstStructType* st)
+ {
+     AstNameVector    names(c.lifo);
+     AstValTypeVector types(c.lifo);
+ 
+     while (true) {
+         if (!c.ts.getIf(WasmToken::OpenParen))
+             break;
+ 
+         if (!c.ts.match(WasmToken::Field, c.error))
+             return false;
+ 
+         AstName name = c.ts.getIfName();
+ 
+-        WasmToken typeToken;
++        AstValType type;
+         bool isMutable;
+-        if (!ParseGlobalType(c, &typeToken, &isMutable))
++        if (!ParseGlobalType(c, &type, &isMutable))
+             return false;
+         if (!c.ts.match(WasmToken::CloseParen, c.error))
+             return false;
+ 
+         if (!names.append(name))
+             return false;
+-        if (!types.append(typeToken.valueType()))
++        if (!types.append(type))
+             return false;
+     }
+ 
+     *st = AstStructType(std::move(names), std::move(types));
+     return true;
+ }
+ 
+ static AstTypeDef*
+@@ -3576,32 +3627,38 @@ ParseStartFunc(WasmParseContext& c, Wasm
+         c.ts.generateError(token, c.error);
+         return false;
+     }
+ 
+     return true;
+ }
+ 
+ static bool
+-ParseGlobalType(WasmParseContext& c, WasmToken* typeToken, bool* isMutable)
+-{
++ParseGlobalType(WasmParseContext& c, AstValType* type, bool* isMutable)
++{
++    WasmToken openParen;
+     *isMutable = false;
+ 
+-    // Either (mut i32) or i32.
+-    if (c.ts.getIf(WasmToken::OpenParen)) {
+-        // Immutable by default.
+-        *isMutable = c.ts.getIf(WasmToken::Mutable);
+-        if (!c.ts.match(WasmToken::ValueType, typeToken, c.error))
+-            return false;
+-        if (!c.ts.match(WasmToken::CloseParen, c.error))
+-            return false;
+-        return true;
+-    }
+-
+-    return c.ts.match(WasmToken::ValueType, typeToken, c.error);
++    // Either (mut T) or T, where T can be (ref U).
++    if (c.ts.getIf(WasmToken::OpenParen, &openParen)) {
++        if (c.ts.getIf(WasmToken::Mutable)) {
++            *isMutable = true;
++            if (!ParseValType(c, type))
++                return false;
++            if (!c.ts.match(WasmToken::CloseParen, c.error))
++                return false;
++            return true;
++        }
++        c.ts.unget(openParen);
++    }
++
++    if (!ParseValType(c, type))
++        return false;
++
++    return true;
+ }
+ 
+ static bool
+ ParseElemType(WasmParseContext& c)
+ {
+     // Only AnyFunc is allowed at the moment.
+     return c.ts.match(WasmToken::AnyFunc, c.error);
+ }
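
Taken together, the parser changes above mean every position that previously demanded a bare value-type token now also accepts the parenthesized constructor form. Illustrative text-format fragments these productions are meant to accept ($s is a hypothetical struct type name; this patch's own test coverage is not shown here):

    (param (ref $s))        ;; ParseValueTypeList / ParseLocalOrParam
    (local $x (ref $s))     ;; named local with a typed reference
    (mut (ref $s))          ;; ParseGlobalType: (mut T) or T, where T can be (ref U)
    (ref.null (ref $s))     ;; ParseRefNull: any ref type, no longer just anyref
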
+@@ -3652,25 +3709,25 @@ ParseImport(WasmParseContext& c, AstModu
+                 return nullptr;
+             return new(c.lifo) AstImport(name, moduleName.text(), fieldName.text(),
+                                          DefinitionKind::Table, table);
+         }
+         if (c.ts.getIf(WasmToken::Global)) {
+             if (name.empty())
+                 name = c.ts.getIfName();
+ 
+-            WasmToken typeToken;
++            AstValType type;
+             bool isMutable;
+-            if (!ParseGlobalType(c, &typeToken, &isMutable))
++            if (!ParseGlobalType(c, &type, &isMutable))
+                 return nullptr;
+             if (!c.ts.match(WasmToken::CloseParen, c.error))
+                 return nullptr;
+ 
+             return new(c.lifo) AstImport(name, moduleName.text(), fieldName.text(),
+-                                         AstGlobal(AstName(), typeToken.valueType(), isMutable));
++                                         AstGlobal(AstName(), type, isMutable));
+         }
+         if (c.ts.getIf(WasmToken::Func)) {
+             if (name.empty())
+                 name = c.ts.getIfName();
+ 
+             AstRef funcTypeRef;
+             if (!ParseFuncType(c, &funcTypeRef, module))
+                 return nullptr;
+@@ -3879,62 +3936,61 @@ ParseElemSegment(WasmParseContext& c)
+     return new(c.lifo) AstElemSegment(offset, std::move(elems));
+ }
+ 
+ static bool
+ ParseGlobal(WasmParseContext& c, AstModule* module)
+ {
+     AstName name = c.ts.getIfName();
+ 
+-    WasmToken typeToken;
++    AstValType type;
+     bool isMutable;
+ 
+     WasmToken openParen;
+     if (c.ts.getIf(WasmToken::OpenParen, &openParen)) {
+         if (c.ts.getIf(WasmToken::Import)) {
+             if (module->globals().length()) {
+                 c.ts.generateError(openParen, "import after global definition", c.error);
+                 return false;
+             }
+ 
+             InlineImport names;
+             if (!ParseInlineImport(c, &names))
+                 return false;
+             if (!c.ts.match(WasmToken::CloseParen, c.error))
+                 return false;
+ 
+-            if (!ParseGlobalType(c, &typeToken, &isMutable))
++            if (!ParseGlobalType(c, &type, &isMutable))
+                 return false;
+ 
+             auto* imp = new(c.lifo) AstImport(name, names.module.text(), names.field.text(),
+-                                              AstGlobal(AstName(), typeToken.valueType(),
+-                                                        isMutable));
++                                              AstGlobal(AstName(), type, isMutable));
+             return imp && module->append(imp);
+         }
+ 
+         if (c.ts.getIf(WasmToken::Export)) {
+             size_t refIndex = module->numGlobalImports() + module->globals().length();
+             AstRef ref = name.empty() ? AstRef(refIndex) : AstRef(name);
+             if (!ParseInlineExport(c, DefinitionKind::Global, module, ref))
+                 return false;
+             if (!c.ts.match(WasmToken::CloseParen, c.error))
+                 return false;
+         } else {
+             c.ts.unget(openParen);
+         }
+     }
+ 
+-    if (!ParseGlobalType(c, &typeToken, &isMutable))
++    if (!ParseGlobalType(c, &type, &isMutable))
+         return false;
+ 
+     AstExpr* init = ParseInitializerExpression(c);
+     if (!init)
+         return false;
+ 
+-    auto* glob = new(c.lifo) AstGlobal(name, typeToken.valueType(), isMutable, Some(init));
++    auto* glob = new(c.lifo) AstGlobal(name, type, isMutable, Some(init));
+     return glob && module->append(glob);
+ }
+ 
+ static AstModule*
+ ParseBinaryModule(WasmParseContext& c, AstModule* module)
+ {
+     // By convention with EncodeBinaryModule, a binary module only contains a
+     // data section containing the raw bytes contained in the module.
+@@ -4181,16 +4237,38 @@ class Resolver
+         *error_ = JS_smprintf("%s", message);
+         return false;
+     }
+ };
+ 
+ } // end anonymous namespace
+ 
+ static bool
++ResolveType(Resolver& r, AstValType& vt)
++{
++    if (vt.isResolved())
++        return true;
++    if (!r.resolveType(vt.asRef()))
++        return false;
++    vt.resolve();
++    return true;
++}
++
++static bool
++ResolveType(Resolver& r, AstExprType& et)
++{
++    if (et.isResolved())
++        return true;
++    if (!ResolveType(r, et.asAstValType()))
++        return false;
++    et.resolve();
++    return true;
++}
++
++static bool
+ ResolveExpr(Resolver& r, AstExpr& expr);
+ 
+ static bool
+ ResolveExprList(Resolver& r, const AstExprVector& v)
+ {
+     for (size_t i = 0; i < v.length(); i++) {
+         if (!ResolveExpr(r, *v[i]))
+             return false;
+@@ -4199,16 +4277,19 @@ ResolveExprList(Resolver& r, const AstEx
+ }
+ 
+ static bool
+ ResolveBlock(Resolver& r, AstBlock& b)
+ {
+     if (!r.pushTarget(b.name()))
+         return false;
+ 
++    if (!ResolveType(r, b.type()))
++        return false;
++
+     if (!ResolveExprList(r, b.exprs()))
+         return false;
+ 
+     r.popTarget(b.name());
+     return true;
+ }
+ 
+ static bool
+@@ -4374,16 +4455,18 @@ ResolveExtraConversionOperator(Resolver&
+ {
+     return ResolveExpr(r, *b.operand());
+ }
+ #endif
+ 
+ static bool
+ ResolveIfElse(Resolver& r, AstIf& i)
+ {
++    if (!ResolveType(r, i.type()))
++        return false;
+     if (!ResolveExpr(r, i.cond()))
+         return false;
+     if (!r.pushTarget(i.name()))
+         return false;
+     if (!ResolveExprList(r, i.thenExprs()))
+         return false;
+     if (i.hasElse()) {
+         if (!ResolveExprList(r, i.elseExprs()))
+@@ -4492,25 +4575,32 @@ ResolveMemFill(Resolver& r, AstMemFill& 
+ {
+     return ResolveExpr(r, s.start()) &&
+            ResolveExpr(r, s.val()) &&
+            ResolveExpr(r, s.len());
+ }
+ #endif
+ 
+ static bool
++ResolveRefNull(Resolver& r, AstRefNull& s)
++{
++    return ResolveType(r, s.baseType());
++}
++
++static bool
+ ResolveExpr(Resolver& r, AstExpr& expr)
+ {
+     switch (expr.kind()) {
+       case AstExprKind::Nop:
+       case AstExprKind::Pop:
+       case AstExprKind::Unreachable:
+       case AstExprKind::CurrentMemory:
++        return true;
+       case AstExprKind::RefNull:
+-        return true;
++        return ResolveRefNull(r, expr.as<AstRefNull>());
+       case AstExprKind::Drop:
+         return ResolveDropOperator(r, expr.as<AstDrop>());
+       case AstExprKind::BinaryOperator:
+         return ResolveBinaryOperator(r, expr.as<AstBinaryOperator>());
+       case AstExprKind::Block:
+         return ResolveBlock(r, expr.as<AstBlock>());
+       case AstExprKind::Branch:
+         return ResolveBranch(r, expr.as<AstBranch>());
+@@ -4578,29 +4668,54 @@ ResolveExpr(Resolver& r, AstExpr& expr)
+     MOZ_CRASH("Bad expr kind");
+ }
+ 
+ static bool
+ ResolveFunc(Resolver& r, AstFunc& func)
+ {
+     r.beginFunc();
+ 
++    for (AstValType& vt : func.vars()) {
++        if (!ResolveType(r, vt))
++            return false;
++    }
++
+     for (size_t i = 0; i < func.locals().length(); i++) {
+         if (!r.registerVarName(func.locals()[i], i))
+             return r.fail("duplicate var");
+     }
+ 
+     for (AstExpr* expr : func.body()) {
+         if (!ResolveExpr(r, *expr))
+             return false;
+     }
+     return true;
+ }
+ 
+ static bool
++ResolveSignature(Resolver& r, AstFuncType& ft)
++{
++    for (AstValType& vt : ft.args()) {
++        if (!ResolveType(r, vt))
++            return false;
++    }
++    return ResolveType(r, ft.ret());
++}
++
++static bool
++ResolveStruct(Resolver& r, AstStructType& s)
++{
++    for (AstValType& vt : s.fieldTypes()) {
++        if (!ResolveType(r, vt))
++            return false;
++    }
++    return true;
++}
++
++static bool
+ ResolveModule(LifoAlloc& lifo, AstModule* module, UniqueChars* error)
+ {
+     Resolver r(lifo, error);
+ 
+     if (!r.init())
+         return false;
+ 
+     size_t numTypes = module->types().length();
+@@ -4608,17 +4723,30 @@ ResolveModule(LifoAlloc& lifo, AstModule
+         AstTypeDef* td = module->types()[i];
+         if (td->isFuncType()) {
+             AstFuncType* funcType = static_cast<AstFuncType*>(td);
+             if (!r.registerFuncTypeName(funcType->name(), i))
+                 return r.fail("duplicate signature");
+         } else if (td->isStructType()) {
+             AstStructType* structType = static_cast<AstStructType*>(td);
+             if (!r.registerTypeName(structType->name(), i))
+-                return r.fail("duplicate struct");
++                return r.fail("duplicate type name");
++        }
++    }
++
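++    // Second pass: resolve the types themselves.  Name registration must
++    // finish first so that a signature or a struct field may reference a
++    // type declared later in the type section.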
++    for (size_t i = 0; i < numTypes; i++) {
++        AstTypeDef* td = module->types()[i];
++        if (td->isFuncType()) {
++            AstFuncType* funcType = static_cast<AstFuncType*>(td);
++            if (!ResolveSignature(r, *funcType))
++                return false;
++        } else if (td->isStructType()) {
++            AstStructType* structType = static_cast<AstStructType*>(td);
++            if (!ResolveStruct(r, *structType))
++                return false;
+         }
+     }
+ 
+     size_t lastFuncIndex = 0;
+     size_t lastGlobalIndex = 0;
+     size_t lastMemoryIndex = 0;
+     size_t lastTableIndex = 0;
+     for (AstImport* imp : module->imports()) {
+@@ -4627,16 +4755,18 @@ ResolveModule(LifoAlloc& lifo, AstModule
+             if (!r.registerFuncName(imp->name(), lastFuncIndex++))
+                 return r.fail("duplicate import");
+             if (!r.resolveSignature(imp->funcType()))
+                 return false;
+             break;
+           case DefinitionKind::Global:
+             if (!r.registerGlobalName(imp->name(), lastGlobalIndex++))
+                 return r.fail("duplicate import");
++            if (!ResolveType(r, imp->global().type()))
++                return false;
+             break;
+           case DefinitionKind::Memory:
+             if (!r.registerMemoryName(imp->name(), lastMemoryIndex++))
+                 return r.fail("duplicate import");
+             break;
+           case DefinitionKind::Table:
+             if (!r.registerTableName(imp->name(), lastTableIndex++))
+                 return r.fail("duplicate import");
+@@ -4646,19 +4776,21 @@ ResolveModule(LifoAlloc& lifo, AstModule
+ 
+     for (AstFunc* func : module->funcs()) {
+         if (!r.resolveSignature(func->funcType()))
+             return false;
+         if (!r.registerFuncName(func->name(), lastFuncIndex++))
+             return r.fail("duplicate function");
+     }
+ 
+-    for (const AstGlobal* global : module->globals()) {
++    for (AstGlobal* global : module->globals()) {
+         if (!r.registerGlobalName(global->name(), lastGlobalIndex++))
+             return r.fail("duplicate import");
++        if (!ResolveType(r, global->type()))
++            return false;
+         if (global->hasInit() && !ResolveExpr(r, global->init()))
+             return false;
+     }
+ 
+     for (const AstResizable& table : module->tables()) {
+         if (table.imported)
+             continue;
+         if (!r.registerTableName(table.name, lastTableIndex++))
+@@ -4737,17 +4869,17 @@ EncodeExprList(Encoder& e, const AstExpr
+ }
+ 
+ static bool
+ EncodeBlock(Encoder& e, AstBlock& b)
+ {
+     if (!e.writeOp(b.op()))
+         return false;
+ 
+-    if (!e.writeBlockType(b.type()))
++    if (!e.writeBlockType(b.type().type()))
+         return false;
+ 
+     if (!EncodeExprList(e, b.exprs()))
+         return false;
+ 
+     if (!e.writeOp(Op::End))
+         return false;
+ 
+@@ -4947,17 +5079,17 @@ EncodeExtraConversionOperator(Encoder& e
+ #endif
+ 
+ static bool
+ EncodeIf(Encoder& e, AstIf& i)
+ {
+     if (!EncodeExpr(e, i.cond()) || !e.writeOp(Op::If))
+         return false;
+ 
+-    if (!e.writeBlockType(i.type()))
++    if (!e.writeBlockType(i.type().type()))
+         return false;
+ 
+     if (!EncodeExprList(e, i.thenExprs()))
+         return false;
+ 
+     if (i.hasElse()) {
+         if (!e.writeOp(Op::Else))
+             return false;
+@@ -5141,17 +5273,17 @@ EncodeMemFill(Encoder& e, AstMemFill& s)
+            e.writeOp(MiscOp::MemFill);
+ }
+ #endif
+ 
+ static bool
+ EncodeRefNull(Encoder& e, AstRefNull& s)
+ {
+     return e.writeOp(Op::RefNull) &&
+-           e.writeValType(s.refType());
++           e.writeValType(s.baseType().type());
+ }
+ 
+ static bool
+ EncodeExpr(Encoder& e, AstExpr& expr)
+ {
+     switch (expr.kind()) {
+       case AstExprKind::Pop:
+         return true;
+@@ -5255,38 +5387,38 @@ EncodeTypeSection(Encoder& e, AstModule&
+         if (td->isFuncType()) {
+             AstFuncType* funcType = static_cast<AstFuncType*>(td);
+             if (!e.writeVarU32(uint32_t(TypeCode::Func)))
+                 return false;
+ 
+             if (!e.writeVarU32(funcType->args().length()))
+                 return false;
+ 
+-            for (ValType t : funcType->args()) {
+-                if (!e.writeValType(t))
++            for (AstValType vt : funcType->args()) {
++                if (!e.writeValType(vt.type()))
+                     return false;
+             }
+ 
+-            if (!e.writeVarU32(!IsVoid(funcType->ret())))
++            if (!e.writeVarU32(!IsVoid(funcType->ret().type())))
+                 return false;
+ 
+-            if (!IsVoid(funcType->ret())) {
+-                if (!e.writeValType(NonVoidToValType(funcType->ret())))
++            if (!IsVoid(funcType->ret().type())) {
++                if (!e.writeValType(NonVoidToValType(funcType->ret().type())))
+                     return false;
+             }
+         } else if (td->isStructType()) {
+             AstStructType* st = static_cast<AstStructType*>(td);
+             if (!e.writeVarU32(uint32_t(TypeCode::Struct)))
+                 return false;
+ 
+             if (!e.writeVarU32(st->fieldTypes().length()))
+                 return false;
+ 
+-            for (ValType t : st->fieldTypes()) {
+-                if (!e.writeValType(t))
++            for (AstValType vt : st->fieldTypes()) {
++                if (!e.writeValType(vt.type()))
+                     return false;
+             }
+         } else {
+             MOZ_CRASH();
+         }
+     }
+ 
+     e.finishSection(offset);
+@@ -5548,18 +5680,20 @@ EncodeFunctionBody(Encoder& e, AstFunc& 
+ {
+     size_t bodySizeAt;
+     if (!e.writePatchableVarU32(&bodySizeAt))
+         return false;
+ 
+     size_t beforeBody = e.currentOffset();
+ 
+     ValTypeVector varTypes;
+-    if (!varTypes.appendAll(func.vars()))
+-        return false;
++    for (const AstValType& vt : func.vars()) {
++        if (!varTypes.append(vt.type()))
++            return false;
++    }
+     if (!EncodeLocalEntries(e, varTypes))
+         return false;
+ 
+     for (AstExpr* expr : func.body()) {
+         if (!EncodeExpr(e, *expr))
+             return false;
+     }
+ 
+diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
+--- a/js/src/wasm/WasmTypes.cpp
++++ b/js/src/wasm/WasmTypes.cpp
+@@ -75,19 +75,20 @@ Val::writePayload(uint8_t* dst) const
+       case ValType::I16x8:
+       case ValType::I32x4:
+       case ValType::F32x4:
+       case ValType::B8x16:
+       case ValType::B16x8:
+       case ValType::B32x4:
+         memcpy(dst, &u, jit::Simd128DataSize);
+         return;
++      case ValType::Ref:
+       case ValType::AnyRef:
+         // TODO
+-        MOZ_CRASH("writing imported value of AnyRef in global NYI");
++        MOZ_CRASH("writing imported value of Ref/AnyRef in global NYI");
+     }
+ }
+ 
+ bool
+ wasm::IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode)
+ {
+     switch (callee) {
+       case SymbolicAddress::FloorD:
+@@ -198,16 +199,17 @@ IsImmediateType(ValType vt)
+         return true;
+       case ValType::I8x16:
+       case ValType::I16x8:
+       case ValType::I32x4:
+       case ValType::F32x4:
+       case ValType::B8x16:
+       case ValType::B16x8:
+       case ValType::B32x4:
++      case ValType::Ref:
+         return false;
+     }
+     MOZ_CRASH("bad ValType");
+ }
+ 
+ static unsigned
+ EncodeImmediateType(ValType vt)
+ {
+@@ -225,16 +227,17 @@ EncodeImmediateType(ValType vt)
+         return 4;
+       case ValType::I8x16:
+       case ValType::I16x8:
+       case ValType::I32x4:
+       case ValType::F32x4:
+       case ValType::B8x16:
+       case ValType::B16x8:
+       case ValType::B32x4:
++      case ValType::Ref:
+         break;
+     }
+     MOZ_CRASH("bad ValType");
+ }
+ 
+ /* static */ bool
+ FuncTypeIdDesc::isGlobal(const FuncType& funcType)
+ {
+@@ -694,33 +697,34 @@ DebugFrame::getLocal(uint32_t localIndex
+     return true;
+ }
+ 
+ void
+ DebugFrame::updateReturnJSValue()
+ {
+     hasCachedReturnJSValue_ = true;
+     ExprType returnType = instance()->debug().debugGetResultType(funcIndex());
+-    switch (returnType) {
++    switch (returnType.code()) {
+       case ExprType::Void:
+           cachedReturnJSValue_.setUndefined();
+           break;
+       case ExprType::I32:
+           cachedReturnJSValue_.setInt32(resultI32_);
+           break;
+       case ExprType::I64:
+           // Just display as a Number; it's ok if we lose some precision
+           cachedReturnJSValue_.setDouble((double)resultI64_);
+           break;
+       case ExprType::F32:
+           cachedReturnJSValue_.setDouble(JS::CanonicalizeNaN(resultF32_));
+           break;
+       case ExprType::F64:
+           cachedReturnJSValue_.setDouble(JS::CanonicalizeNaN(resultF64_));
+           break;
++      case ExprType::Ref:
+       case ExprType::AnyRef:
+           cachedReturnJSValue_ = ObjectOrNullValue(*(JSObject**)&resultRef_);
+           break;
+       default:
+           MOZ_CRASH("result type");
+     }
+ }
+ 
+diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
+--- a/js/src/wasm/WasmTypes.h
++++ b/js/src/wasm/WasmTypes.h
+@@ -157,138 +157,334 @@ struct ShareableBase : AtomicRefCounted<
+         if (p)
+             return 0;
+         bool ok = seen->add(p, self);
+         (void)ok;  // oh well
+         return mallocSizeOf(self) + self->sizeOfExcludingThis(mallocSizeOf);
+     }
+ };
+ 
+-enum class ExprType;
++// A PackedTypeCode represents a TypeCode paired with a refTypeIndex (valid only
++// for TypeCode::Ref).  PackedTypeCode is guaranteed to be POD.
++//
++// PackedTypeCode is an enum class, as opposed to the more natural
++// struct-with-bitfields, because bitfields would make it non-POD.
++//
++// DO NOT construct a PackedTypeCode by casting.  ALWAYS go via PackTypeCode().
++
++enum class PackedTypeCode : uint32_t {};
++
++const uint32_t NoTypeCode     = 0xFF;      // Only use these
++const uint32_t NoRefTypeIndex = 0xFFFFFF;  //   with PackedTypeCode
++
++static inline PackedTypeCode
++InvalidPackedTypeCode()
++{
++    return PackedTypeCode((NoRefTypeIndex << 8) | NoTypeCode);
++}
++
++static inline PackedTypeCode
++PackTypeCode(TypeCode tc)
++{
++    MOZ_ASSERT(uint32_t(tc) <= 0xFF);
++    MOZ_ASSERT(tc != TypeCode::Ref);
++    return PackedTypeCode((NoRefTypeIndex << 8) | uint32_t(tc));
++}
++
++static inline PackedTypeCode
++PackTypeCode(TypeCode tc, uint32_t refTypeIndex)
++{
++    MOZ_ASSERT(uint32_t(tc) <= 0xFF);
++    MOZ_ASSERT_IF(tc != TypeCode::Ref, refTypeIndex == NoRefTypeIndex);
++    MOZ_ASSERT_IF(tc == TypeCode::Ref, refTypeIndex <= MaxTypes);
++    return PackedTypeCode((refTypeIndex << 8) | uint32_t(tc));
++}
++
++static inline PackedTypeCode
++PackedTypeCodeFromBits(uint32_t bits)
++{
++    return PackTypeCode(TypeCode(bits & 255), bits >> 8);
++}
++
++static inline bool
++IsValid(PackedTypeCode ptc)
++{
++    return (uint32_t(ptc) & 255) != NoTypeCode;
++}
++
++static inline uint32_t
++PackedTypeCodeToBits(PackedTypeCode ptc)
++{
++    return uint32_t(ptc);
++}
++
++static inline TypeCode
++UnpackTypeCodeType(PackedTypeCode ptc)
++{
++    MOZ_ASSERT(IsValid(ptc));
++    return TypeCode(uint32_t(ptc) & 255);
++}
++
++static inline uint32_t
++UnpackTypeCodeIndex(PackedTypeCode ptc)
++{
++    MOZ_ASSERT(UnpackTypeCodeType(ptc) == TypeCode::Ref);
++    return uint32_t(ptc) >> 8;
++}
++
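++// Example (illustration only): packing a "(ref 5)" type and recovering its
++// parts with the helpers above.
++//
++//   PackedTypeCode p = PackTypeCode(TypeCode::Ref, 5);
++//   MOZ_ASSERT(UnpackTypeCodeType(p) == TypeCode::Ref);
++//   MOZ_ASSERT(UnpackTypeCodeIndex(p) == 5);
++//   MOZ_ASSERT(PackedTypeCodeFromBits(PackedTypeCodeToBits(p)) == p);
++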
++// The ExprType represents the type of a WebAssembly expression or return value
++// and may either be a ValType or void.
++//
++// (Soon, expression types will be generalized to a list of ValType and this
++// class will go away, replaced, wherever it is used, by a varU32 + list of
++// ValType.)
++
++class ValType;
++
++class ExprType
++{
++    PackedTypeCode tc_;
++
++#ifdef DEBUG
++    bool isValidCode() {
++        switch (UnpackTypeCodeType(tc_)) {
++          case TypeCode::I32:
++          case TypeCode::I64:
++          case TypeCode::F32:
++          case TypeCode::F64:
++          case TypeCode::I8x16:
++          case TypeCode::I16x8:
++          case TypeCode::I32x4:
++          case TypeCode::F32x4:
++          case TypeCode::B8x16:
++          case TypeCode::B16x8:
++          case TypeCode::B32x4:
++          case TypeCode::AnyRef:
++          case TypeCode::Ref:
++          case TypeCode::BlockVoid:
++          case TypeCode::Limit:
++            return true;
++          default:
++            return false;
++        }
++    }
++#endif
++
++  public:
++    enum Code {
++        Void   = uint8_t(TypeCode::BlockVoid),
++
++        I32    = uint8_t(TypeCode::I32),
++        I64    = uint8_t(TypeCode::I64),
++        F32    = uint8_t(TypeCode::F32),
++        F64    = uint8_t(TypeCode::F64),
++        AnyRef = uint8_t(TypeCode::AnyRef),
++        Ref    = uint8_t(TypeCode::Ref),
++
++        I8x16  = uint8_t(TypeCode::I8x16),
++        I16x8  = uint8_t(TypeCode::I16x8),
++        I32x4  = uint8_t(TypeCode::I32x4),
++        F32x4  = uint8_t(TypeCode::F32x4),
++        B8x16  = uint8_t(TypeCode::B8x16),
++        B16x8  = uint8_t(TypeCode::B16x8),
++        B32x4  = uint8_t(TypeCode::B32x4),
++
++        Limit  = uint8_t(TypeCode::Limit)
++    };
++
++    ExprType() : tc_() {}
++
++    ExprType(const ExprType& that) : tc_(that.tc_) {}
++
++    MOZ_IMPLICIT ExprType(Code c)
++      : tc_(PackTypeCode(TypeCode(c)))
++    {
++        MOZ_ASSERT(isValidCode());
++    }
++
++    ExprType(Code c, uint32_t refTypeIndex)
++      : tc_(PackTypeCode(TypeCode(c), refTypeIndex))
++    {
++        MOZ_ASSERT(isValidCode());
++    }
++
++    explicit ExprType(PackedTypeCode ptc)
++      : tc_(ptc)
++    {
++        MOZ_ASSERT(isValidCode());
++    }
++
++    explicit inline ExprType(const ValType& t);
++
++    PackedTypeCode packed() const {
++        return tc_;
++    }
++
++    Code code() const {
++        return Code(UnpackTypeCodeType(tc_));
++    }
++
++    uint32_t refTypeIndex() const {
++        return UnpackTypeCodeIndex(tc_);
++    }
++
++    bool isValid() const {
++        return IsValid(tc_);
++    }
++
++    bool isRef() const {
++        return UnpackTypeCodeType(tc_) == TypeCode::Ref;
++    }
++
++    bool isRefOrAnyRef() const {
++        TypeCode tc = UnpackTypeCodeType(tc_);
++        return tc == TypeCode::Ref || tc == TypeCode::AnyRef;
++    }
++
++    bool operator ==(const ExprType& that) const {
++        return tc_ == that.tc_;
++    }
++
++    bool operator !=(const ExprType& that) const {
++        return tc_ != that.tc_;
++    }
++
++    bool operator ==(Code that) const {
++        MOZ_ASSERT(that != Code::Ref);
++        return code() == that;
++    }
++
++    bool operator !=(Code that) const {
++        return !(*this == that);
++    }
++};
++
++// The ValType represents the storage type of a WebAssembly location, whether
++// parameter, local, or global.
+ 
+ class ValType
+ {
+-    struct {
+-        uint32_t code_ : 8;           // If code_ is InvalidCode then the ValType is invalid
+-        uint32_t refTypeIndex_ : 24;  // If code_ is not Ref then this must be NoIndex
+-    };
+-
+-    static const uint32_t InvalidCode  = uint32_t(TypeCode::Limit);
+-    static const uint32_t NoIndex = 0xFFFFFF;
++    PackedTypeCode tc_;
++
++#ifdef DEBUG
++    bool isValidCode() {
++        switch (UnpackTypeCodeType(tc_)) {
++          case TypeCode::I32:
++          case TypeCode::I64:
++          case TypeCode::F32:
++          case TypeCode::F64:
++          case TypeCode::I8x16:
++          case TypeCode::I16x8:
++          case TypeCode::I32x4:
++          case TypeCode::F32x4:
++          case TypeCode::B8x16:
++          case TypeCode::B16x8:
++          case TypeCode::B32x4:
++          case TypeCode::AnyRef:
++          case TypeCode::Ref:
++            return true;
++          default:
++            return false;
++        }
++    }
++#endif
+ 
+   public:
+     enum Code {
+         I32    = uint8_t(TypeCode::I32),
+         I64    = uint8_t(TypeCode::I64),
+         F32    = uint8_t(TypeCode::F32),
+         F64    = uint8_t(TypeCode::F64),
+ 
+         AnyRef = uint8_t(TypeCode::AnyRef),
+-
+-        // ------------------------------------------------------------------------
+-        // The rest of these types are currently only emitted internally when
+-        // compiling asm.js and are rejected by wasm validation.
++        Ref    = uint8_t(TypeCode::Ref),
+ 
+         I8x16  = uint8_t(TypeCode::I8x16),
+         I16x8  = uint8_t(TypeCode::I16x8),
+         I32x4  = uint8_t(TypeCode::I32x4),
+         F32x4  = uint8_t(TypeCode::F32x4),
+         B8x16  = uint8_t(TypeCode::B8x16),
+         B16x8  = uint8_t(TypeCode::B16x8),
+         B32x4  = uint8_t(TypeCode::B32x4)
+     };
+ 
+-    ValType()
+-      : code_(InvalidCode), refTypeIndex_(NoIndex)
+-    {}
+-
+-    MOZ_IMPLICIT ValType(ValType::Code c)
+-      : code_(uint32_t(c)), refTypeIndex_(NoIndex)
++    ValType() : tc_(InvalidPackedTypeCode()) {}
++
++    MOZ_IMPLICIT ValType(Code c)
++      : tc_(PackTypeCode(TypeCode(c)))
++    {
++        MOZ_ASSERT(isValidCode());
++    }
++
++    ValType(Code c, uint32_t refTypeIndex)
++      : tc_(PackTypeCode(TypeCode(c), refTypeIndex))
+     {
+-        assertValid();
++        MOZ_ASSERT(isValidCode());
+     }
+ 
+-    explicit inline ValType(ExprType t);
+-
+-    static ValType fromTypeCode(uint32_t code) {
+-        return ValType(code, NoIndex);
++    explicit ValType(const ExprType& t)
++      : tc_(t.packed())
++    {
++        MOZ_ASSERT(isValidCode());
++    }
++
++    explicit ValType(PackedTypeCode ptc)
++      : tc_(ptc)
++    {
++        MOZ_ASSERT(isValidCode());
+     }
+ 
+     static ValType fromBitsUnsafe(uint32_t bits) {
+-        // This will change once we have Ref types.
+-        return ValType(bits & 255, NoIndex);
++        return ValType(PackedTypeCodeFromBits(bits));
+     }
+ 
+-    bool isValid() const {
+-        return code_ != InvalidCode;
++    PackedTypeCode packed() const {
++        return tc_;
++    }
++
++    uint32_t bitsUnsafe() const {
++        return PackedTypeCodeToBits(tc_);
+     }
+ 
+     Code code() const {
+-        return Code(code_);
++        return Code(UnpackTypeCodeType(tc_));
+     }
++
+     uint32_t refTypeIndex() const {
+-        return refTypeIndex_;
++        return UnpackTypeCodeIndex(tc_);
+     }
+ 
+-    uint32_t bitsUnsafe() const {
+-        // This will change once we have Ref types.
+-        return code_;
++    bool isValid() const {
++        return IsValid(tc_);
++    }
++
++    bool isRef() const {
++        return UnpackTypeCodeType(tc_) == TypeCode::Ref;
++    }
++
++    bool isRefOrAnyRef() const {
++        TypeCode tc = UnpackTypeCodeType(tc_);
++        return tc == TypeCode::Ref || tc == TypeCode::AnyRef;
+     }
+ 
+     bool operator ==(const ValType& that) const {
+-        return code_ == that.code_ && refTypeIndex_ == that.refTypeIndex_;
+-    }
+-    bool operator !=(const ValType& that) const {
+-        return !(*this == that);
++        return tc_ == that.tc_;
+     }
+-    bool operator ==(ValType::Code that) const {
+-        // This will change once we have Ref types.
+-        return code_ == uint32_t(that) && refTypeIndex_ == NoIndex;
+-    }
+-    bool operator !=(ValType::Code that) const {
+-        return !(*this == that);
++
++    bool operator !=(const ValType& that) const {
++        return tc_ != that.tc_;
+     }
+ 
+-  private:
+-    ValType(uint32_t code, uint32_t refTypeIndex)
+-      : code_(code),
+-        refTypeIndex_(refTypeIndex)
+-    {
+-        // 8-bit field.  Invalid values have their own constructor and should
+-        // not appear here.
+-        MOZ_ASSERT(code <= 0xFF && code != InvalidCode);
+-        // 24-bit field.
+-        MOZ_ASSERT(refTypeIndex <= 0xFFFFFF);
+-
+-        assertValid();
++    bool operator ==(Code that) const {
++        MOZ_ASSERT(that != Code::Ref);
++        return code() == that;
+     }
+ 
+-    void assertValid() const {
+-#ifdef DEBUG
+-        // This will change once we have Ref types.
+-        MOZ_ASSERT(refTypeIndex_ == NoIndex);
+-        switch (code_) {
+-          case uint8_t(Code::I32):
+-          case uint8_t(Code::I64):
+-          case uint8_t(Code::F32):
+-          case uint8_t(Code::F64):
+-          case uint8_t(Code::AnyRef):
+-          case uint8_t(Code::I8x16):
+-          case uint8_t(Code::I16x8):
+-          case uint8_t(Code::I32x4):
+-          case uint8_t(Code::F32x4):
+-          case uint8_t(Code::B8x16):
+-          case uint8_t(Code::B16x8):
+-          case uint8_t(Code::B32x4):
+-          case InvalidCode:
+-            break;
+-          default:
+-            MOZ_CRASH("Invalid code");
+-        }
+-#endif
++    bool operator !=(Code that) const {
++        return !(*this == that);
+     }
+ };
+ 
+ typedef Vector<ValType, 8, SystemAllocPolicy> ValTypeVector;
+ 
+ // ValType utilities
+ 
+ static inline unsigned
+@@ -305,17 +501,18 @@ SizeOf(ValType vt)
+       case ValType::I16x8:
+       case ValType::I32x4:
+       case ValType::F32x4:
+       case ValType::B8x16:
+       case ValType::B16x8:
+       case ValType::B32x4:
+         return 16;
+       case ValType::AnyRef:
+-        MOZ_CRASH("unexpected anyref");
++      case ValType::Ref:
++        MOZ_CRASH("unexpected ref/anyref");
+     }
+     MOZ_CRASH("Invalid ValType");
+ }
+ 
+ static inline bool
+ IsSimdType(ValType vt)
+ {
+     switch (vt.code()) {
+@@ -401,129 +598,94 @@ IsSimdBoolType(ValType vt)
+ static inline jit::MIRType
+ ToMIRType(ValType vt)
+ {
+     switch (vt.code()) {
+       case ValType::I32:    return jit::MIRType::Int32;
+       case ValType::I64:    return jit::MIRType::Int64;
+       case ValType::F32:    return jit::MIRType::Float32;
+       case ValType::F64:    return jit::MIRType::Double;
++      case ValType::Ref:    return jit::MIRType::Pointer;
+       case ValType::AnyRef: return jit::MIRType::Pointer;
+       case ValType::I8x16:  return jit::MIRType::Int8x16;
+       case ValType::I16x8:  return jit::MIRType::Int16x8;
+       case ValType::I32x4:  return jit::MIRType::Int32x4;
+       case ValType::F32x4:  return jit::MIRType::Float32x4;
+       case ValType::B8x16:  return jit::MIRType::Bool8x16;
+       case ValType::B16x8:  return jit::MIRType::Bool16x8;
+       case ValType::B32x4:  return jit::MIRType::Bool32x4;
+     }
+     MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
+ }
+ 
+ static inline bool
+-IsRefType(ValType vt)
+-{
+-    return vt == ValType::AnyRef;
+-}
+-
+-static inline bool
+ IsNumberType(ValType vt)
+ {
+-    return !IsRefType(vt);
++    return !vt.isRefOrAnyRef();
+ }
+ 
+-// The ExprType enum represents the type of a WebAssembly expression or return
+-// value and may either be a value type or void. Soon, expression types will be
+-// generalized to a list of ValType and this enum will go away, replaced,
+-// wherever it is used, by a varU32 + list of ValType.
+-
+-enum class ExprType
+-{
+-    Void   = uint8_t(TypeCode::BlockVoid),
+-
+-    I32    = uint8_t(TypeCode::I32),
+-    I64    = uint8_t(TypeCode::I64),
+-    F32    = uint8_t(TypeCode::F32),
+-    F64    = uint8_t(TypeCode::F64),
+-    AnyRef = uint8_t(TypeCode::AnyRef),
+-
+-    I8x16  = uint8_t(TypeCode::I8x16),
+-    I16x8  = uint8_t(TypeCode::I16x8),
+-    I32x4  = uint8_t(TypeCode::I32x4),
+-    F32x4  = uint8_t(TypeCode::F32x4),
+-    B8x16  = uint8_t(TypeCode::B8x16),
+-    B16x8  = uint8_t(TypeCode::B16x8),
+-    B32x4  = uint8_t(TypeCode::B32x4),
+-
+-    Limit  = uint8_t(TypeCode::Limit)
+-};
+-
+-inline ValType::ValType(ExprType t)
+-  : code_(uint32_t(t)), refTypeIndex_(NoIndex)
+-{
+-    assertValid();
+-}
++// ExprType utilities
++
++inline
++ExprType::ExprType(const ValType& t)
++  : tc_(t.packed())
++{}
+ 
+ static inline bool
+ IsVoid(ExprType et)
+ {
+     return et == ExprType::Void;
+ }
+ 
+ static inline ValType
+ NonVoidToValType(ExprType et)
+ {
+     MOZ_ASSERT(!IsVoid(et));
+     return ValType(et);
+ }
+ 
+-static inline ExprType
+-ToExprType(ValType vt)
+-{
+-    return ExprType(vt.bitsUnsafe());
+-}
+-
+ static inline bool
+ IsSimdType(ExprType et)
+ {
+     return IsVoid(et) ? false : IsSimdType(ValType(et));
+ }
+ 
+ static inline jit::MIRType
+ ToMIRType(ExprType et)
+ {
+     return IsVoid(et) ? jit::MIRType::None : ToMIRType(ValType(et));
+ }
+ 
+ static inline const char*
+ ToCString(ExprType type)
+ {
+-    switch (type) {
++    switch (type.code()) {
+       case ExprType::Void:    return "void";
+       case ExprType::I32:     return "i32";
+       case ExprType::I64:     return "i64";
+       case ExprType::F32:     return "f32";
+       case ExprType::F64:     return "f64";
+       case ExprType::AnyRef:  return "anyref";
++      case ExprType::Ref:     return "ref";
+       case ExprType::I8x16:   return "i8x16";
+       case ExprType::I16x8:   return "i16x8";
+       case ExprType::I32x4:   return "i32x4";
+       case ExprType::F32x4:   return "f32x4";
+       case ExprType::B8x16:   return "b8x16";
+       case ExprType::B16x8:   return "b16x8";
+       case ExprType::B32x4:   return "b32x4";
+       case ExprType::Limit:;
+     }
+     MOZ_CRASH("bad expression type");
+ }
+ 
+ static inline const char*
+ ToCString(ValType type)
+ {
+-    return ToCString(ToExprType(type));
++    return ToCString(ExprType(type));
+ }
+ 
+ // Code can be compiled either with the Baseline compiler or the Ion compiler,
+ // and tier-variant data are tagged with the Tier value.
+ //
+ // A tier value is used to request tier-variant aspects of code, metadata, or
+ // linkdata.  The tiers are normally explicit (Baseline and Ion); implicit tiers
+ // can be obtained through accessors on Code objects (eg, stableTier).
+@@ -706,17 +868,17 @@ class FuncType
+         return args_.appendAll(rhs.args_);
+     }
+ 
+     ValType arg(unsigned i) const { return args_[i]; }
+     const ValTypeVector& args() const { return args_; }
+     const ExprType& ret() const { return ret_; }
+ 
+     HashNumber hash() const {
+-        HashNumber hn = HashNumber(ret_);
++        HashNumber hn = HashNumber(ret_.code());
+         for (const ValType& vt : args_)
+             hn = mozilla::AddToHash(hn, HashNumber(vt.code()));
+         return hn;
+     }
+     bool operator==(const FuncType& rhs) const {
+         return ret() == rhs.ret() && EqualContainers(args(), rhs.args());
+     }
+     bool operator!=(const FuncType& rhs) const {
+@@ -728,20 +890,20 @@ class FuncType
+             return true;
+         for (ValType arg : args()) {
+             if (arg == ValType::I64)
+                 return true;
+         }
+         return false;
+     }
+     bool temporarilyUnsupportedAnyRef() const {
+-        if (ret() == ExprType::AnyRef)
++        if (ret().isRefOrAnyRef())
+             return true;
+         for (ValType arg : args()) {
+-            if (arg == ValType::AnyRef)
++            if (arg.isRefOrAnyRef())
+                 return true;
+         }
+         return false;
+     }
+ 
+     WASM_DECLARE_SERIALIZABLE(FuncType)
+ };
+ 
+@@ -1144,58 +1306,78 @@ class TypeDef
+ {
+     enum { IsFuncType, IsStructType, IsNone } tag_;
+     union {
+         FuncTypeWithId funcType_;
+         StructType     structType_;
+     };
+ 
+   public:
+-    TypeDef() : tag_(IsNone), structType_(StructType()) {}
++    TypeDef() : tag_(IsNone) {}
+ 
+     explicit TypeDef(FuncType&& funcType)
+       : tag_(IsFuncType),
+         funcType_(FuncTypeWithId(std::move(funcType)))
+     {}
+ 
+     explicit TypeDef(StructType&& structType)
+       : tag_(IsStructType),
+         structType_(std::move(structType))
+     {}
+ 
+-    TypeDef(TypeDef&& td) : tag_(td.tag_), structType_(StructType()) {
++    TypeDef(TypeDef&& td) : tag_(td.tag_) {
+         switch (tag_) {
+-          case IsFuncType:   funcType_ = std::move(td.funcType_); break;
+-          case IsStructType: structType_ = std::move(td.structType_); break;
+-          case IsNone:       break;
++          case IsFuncType:
++            new (&funcType_) FuncTypeWithId(std::move(td.funcType_));
++            break;
++          case IsStructType:
++            new (&structType_) StructType(std::move(td.structType_));
++            break;
++          case IsNone:
++            break;
+         }
+     }
+ 
+     ~TypeDef() {
+         switch (tag_) {
+-          case IsFuncType:   funcType_.~FuncTypeWithId(); break;
+-          case IsStructType: structType_.~StructType(); break;
+-          case IsNone:       break;
++          case IsFuncType:
++            funcType_.~FuncTypeWithId();
++            break;
++          case IsStructType:
++            structType_.~StructType();
++            break;
++          case IsNone:
++            break;
+         }
+     }
+ 
+     TypeDef& operator=(TypeDef&& that) {
++        MOZ_ASSERT(isNone());
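++        // Placement-new into the currently inactive union member; plain
++        // assignment would invoke operator= on an object that has never
++        // been constructed.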
++        switch (that.tag_) {
++          case IsFuncType:
++            new (&funcType_) FuncTypeWithId(std::move(that.funcType_));
++            break;
++          case IsStructType:
++            new (&structType_) StructType(std::move(that.structType_));
++            break;
++          case IsNone:
++            break;
++        }
+         tag_ = that.tag_;
+-        switch (tag_) {
+-          case IsFuncType:   funcType_ = std::move(that.funcType_); break;
+-          case IsStructType: structType_ = std::move(that.structType_); break;
+-          case IsNone:       break;
+-        }
+         return *this;
+     }
+ 
+     bool isFuncType() const {
+         return tag_ == IsFuncType;
+     }
+ 
++    bool isNone() const {
++        return tag_ == IsNone;
++    }
++
+     bool isStructType() const {
+         return tag_ == IsStructType;
+     }
+ 
+     const FuncTypeWithId& funcType() const {
+         MOZ_ASSERT(isFuncType());
+         return funcType_;
+     }
+diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
+--- a/js/src/wasm/WasmValidate.cpp
++++ b/js/src/wasm/WasmValidate.cpp
+@@ -373,78 +373,122 @@ wasm::EncodeLocalEntries(Encoder& e, con
+         if (!e.writeValType(prev))
+             return false;
+     }
+ 
+     return true;
+ }
+ 
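++// Wire format note: a "(ref T)" value type is encoded as the TypeCode::Ref
++// byte followed by the referenced type index as a varU32; every other value
++// type is a single byte (see Decoder::readValType).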
+ static bool
+-DecodeValType(Decoder& d, ModuleKind kind, HasGcTypes gcTypesEnabled, ValType* type)
++DecodeValType(Decoder& d, ModuleKind kind, uint32_t numTypes, HasGcTypes gcTypesEnabled,
++              ValType* type)
+ {
+-    uint8_t unchecked;
+-    if (!d.readValType(&unchecked))
++    uint8_t uncheckedCode;
++    uint32_t uncheckedRefTypeIndex;
++    if (!d.readValType(&uncheckedCode, &uncheckedRefTypeIndex))
+         return false;
+ 
+-    switch (unchecked) {
++    switch (uncheckedCode) {
+       case uint8_t(ValType::I32):
+       case uint8_t(ValType::F32):
+       case uint8_t(ValType::F64):
+       case uint8_t(ValType::I64):
+-        *type = ValType::fromTypeCode(unchecked);
++        *type = ValType(ValType::Code(uncheckedCode));
+         return true;
+       case uint8_t(ValType::AnyRef):
+         if (gcTypesEnabled == HasGcTypes::False)
+             break;
+-        *type = ValType::fromTypeCode(unchecked);
++        *type = ValType(ValType::Code(uncheckedCode));
+         return true;
++      case uint8_t(ValType::Ref): {
++        if (gcTypesEnabled == HasGcTypes::False)
++            break;
++        if (uncheckedRefTypeIndex >= numTypes)
++            return d.fail("ref index out of range");
++        // We further validate ref types in the caller.
++        *type = ValType(ValType::Code(uncheckedCode), uncheckedRefTypeIndex);
++        return true;
++      }
+       case uint8_t(ValType::I8x16):
+       case uint8_t(ValType::I16x8):
+       case uint8_t(ValType::I32x4):
+       case uint8_t(ValType::F32x4):
+       case uint8_t(ValType::B8x16):
+       case uint8_t(ValType::B16x8):
+       case uint8_t(ValType::B32x4):
+         if (kind != ModuleKind::AsmJS)
+             return d.fail("bad type");
+-        *type = ValType::fromTypeCode(unchecked);
++        *type = ValType(ValType::Code(uncheckedCode));
+         return true;
+       default:
+         break;
+     }
+     return d.fail("bad type");
+ }
+ 
++static bool
++ValidateRefType(Decoder& d, const TypeDefVector& types, ValType type)
++{
++    if (type.isRef() && !types[type.refTypeIndex()].isStructType())
++        return d.fail("ref does not reference a struct type");
++    return true;
++}
++
+ bool
+-wasm::DecodeLocalEntries(Decoder& d, ModuleKind kind, HasGcTypes gcTypesEnabled,
+-                         ValTypeVector* locals)
++wasm::DecodeLocalEntries(Decoder& d, ModuleKind kind, const TypeDefVector& types,
++                         HasGcTypes gcTypesEnabled, ValTypeVector* locals)
+ {
+     uint32_t numLocalEntries;
+     if (!d.readVarU32(&numLocalEntries))
+         return d.fail("failed to read number of local entries");
+ 
+     for (uint32_t i = 0; i < numLocalEntries; i++) {
+         uint32_t count;
+         if (!d.readVarU32(&count))
+             return d.fail("failed to read local entry count");
+ 
+         if (MaxLocals - locals->length() < count)
+             return d.fail("too many locals");
+ 
+         ValType type;
+-        if (!DecodeValType(d, kind, gcTypesEnabled, &type))
++        if (!DecodeValType(d, kind, types.length(), gcTypesEnabled, &type))
++            return false;
++        if (!ValidateRefType(d, types, type))
+             return false;
+ 
+         if (!locals->appendN(type, count))
+             return false;
+     }
+ 
+     return true;
+ }
+ 
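++// Fast path for later passes: the entries were already checked by
++// DecodeLocalEntries during validation, so reads cannot fail here and any
++// ref type indices are known to be in range.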
++bool
++wasm::DecodeValidatedLocalEntries(Decoder& d, ValTypeVector* locals)
++{
++    uint32_t numLocalEntries;
++    MOZ_ALWAYS_TRUE(d.readVarU32(&numLocalEntries));
++
++    for (uint32_t i = 0; i < numLocalEntries; i++) {
++        uint32_t count;
++        MOZ_ALWAYS_TRUE(d.readVarU32(&count));
++        MOZ_ASSERT(MaxLocals - locals->length() >= count);
++
++        uint8_t uncheckedCode;
++        uint32_t uncheckedRefTypeIndex;
++        MOZ_ALWAYS_TRUE(d.readValType(&uncheckedCode, &uncheckedRefTypeIndex));
++
++        ValType type = ValType(ValType::Code(uncheckedCode), uncheckedRefTypeIndex);
++        if (!locals->appendN(type, count))
++            return false;
++    }
++
++    return true;
++}
++
+ // Function body validation.
+ 
+ struct ValidatingPolicy
+ {
+     typedef Nothing Value;
+     typedef Nothing ControlItem;
+ };
+ 
+@@ -825,17 +869,18 @@ DecodeFunctionBodyExprs(const ModuleEnvi
+                 return iter.unrecognizedOpcode(&op);
+             }
+             break;
+           }
+ #ifdef ENABLE_WASM_GC
+           case uint16_t(Op::RefNull): {
+             if (env.gcTypesEnabled == HasGcTypes::False)
+                 return iter.unrecognizedOpcode(&op);
+-            CHECK(iter.readRefNull());
++            ValType unusedType;
++            CHECK(iter.readRefNull(&unusedType));
+             break;
+           }
+           case uint16_t(Op::RefIsNull): {
+             if (env.gcTypesEnabled == HasGcTypes::False)
+                 return iter.unrecognizedOpcode(&op);
+             CHECK(iter.readConversion(ValType::AnyRef, ValType::I32, &nothing));
+             break;
+           }
+@@ -1029,17 +1074,17 @@ wasm::ValidateFunctionBody(const ModuleE
+     const FuncType& funcType = *env.funcTypes[funcIndex];
+ 
+     ValTypeVector locals;
+     if (!locals.appendAll(funcType.args()))
+         return false;
+ 
+     const uint8_t* bodyBegin = d.currentPosition();
+ 
+-    if (!DecodeLocalEntries(d, ModuleKind::Wasm, env.gcTypesEnabled, &locals))
++    if (!DecodeLocalEntries(d, ModuleKind::Wasm, env.types, env.gcTypesEnabled, &locals))
+         return false;
+ 
+     if (!DecodeFunctionBodyExprs(env, funcType, locals, bodyBegin + bodySize, &d))
+         return false;
+ 
+     return true;
+ }
+ 
+@@ -1058,58 +1103,97 @@ DecodePreamble(Decoder& d)
+     if (!d.readFixedU32(&u32) || u32 != EncodingVersion) {
+         return d.failf("binary version 0x%" PRIx32 " does not match expected version 0x%" PRIx32,
+                        u32, EncodingVersion);
+     }
+ 
+     return true;
+ }
+ 
++enum class TypeState
++{
++    None,
++    Struct,
++    ForwardStruct,
++    Func
++};
++
++typedef Vector<TypeState, 0, SystemAllocPolicy> TypeStateVector;
++
+ static bool
+-DecodeFuncType(Decoder& d, ModuleEnvironment* env, uint32_t typeIndex)
++ValidateRefType(Decoder& d, TypeStateVector* typeState, ValType type)
++{
++    if (!type.isRef())
++        return true;
++
++    uint32_t refTypeIndex = type.refTypeIndex();
++    switch ((*typeState)[refTypeIndex]) {
++      case TypeState::None:
++        (*typeState)[refTypeIndex] = TypeState::ForwardStruct;
++        break;
++      case TypeState::Struct:
++      case TypeState::ForwardStruct:
++        break;
++      case TypeState::Func:
++        return d.fail("ref does not reference a struct type");
++    }
++    return true;
++}
++
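++// Example of the state transitions: if the struct at type index 0 contains
++// a field of type "(ref 1)", slot 1 moves from None to ForwardStruct.
++// Decoding entry 1 as a struct then succeeds (ForwardStruct is accepted),
++// while decoding it as a function type fails in DecodeFuncType below
++// because the slot is no longer None.
++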
++static bool
++DecodeFuncType(Decoder& d, ModuleEnvironment* env, TypeStateVector* typeState, uint32_t typeIndex)
+ {
+     uint32_t numArgs;
+     if (!d.readVarU32(&numArgs))
+         return d.fail("bad number of function args");
+ 
+     if (numArgs > MaxParams)
+         return d.fail("too many arguments in signature");
+ 
+     ValTypeVector args;
+     if (!args.resize(numArgs))
+         return false;
+ 
+     for (uint32_t i = 0; i < numArgs; i++) {
+-        if (!DecodeValType(d, ModuleKind::Wasm, env->gcTypesEnabled, &args[i]))
++        if (!DecodeValType(d, ModuleKind::Wasm, env->types.length(), env->gcTypesEnabled, &args[i]))
++            return false;
++        if (!ValidateRefType(d, typeState, args[i]))
+             return false;
+     }
+ 
+     uint32_t numRets;
+     if (!d.readVarU32(&numRets))
+         return d.fail("bad number of function returns");
+ 
+     if (numRets > 1)
+         return d.fail("too many returns in signature");
+ 
+     ExprType result = ExprType::Void;
+ 
+     if (numRets == 1) {
+         ValType type;
+-        if (!DecodeValType(d, ModuleKind::Wasm, env->gcTypesEnabled, &type))
++        if (!DecodeValType(d, ModuleKind::Wasm, env->types.length(), env->gcTypesEnabled, &type))
++            return false;
++        if (!ValidateRefType(d, typeState, type))
+             return false;
+ 
+-        result = ToExprType(type);
++        result = ExprType(type);
+     }
+ 
++    if ((*typeState)[typeIndex] != TypeState::None)
++        return d.fail("function type entry referenced as struct");
++
+     env->types[typeIndex] = TypeDef(FuncType(std::move(args), result));
++    (*typeState)[typeIndex] = TypeState::Func;
++
+     return true;
+ }
+ 
+ static bool
+-DecodeStructType(Decoder& d, ModuleEnvironment* env, uint32_t typeIndex)
++DecodeStructType(Decoder& d, ModuleEnvironment* env, TypeStateVector* typeState, uint32_t typeIndex)
+ {
+     if (env->gcTypesEnabled == HasGcTypes::False)
+         return d.fail("Structure types not enabled");
+ 
+     uint32_t numFields;
+     if (!d.readVarU32(&numFields))
+         return d.fail("Bad number of fields");
+ 
+@@ -1122,21 +1206,28 @@ DecodeStructType(Decoder& d, ModuleEnvir
+ 
+     Uint32Vector fieldOffsets;
+     if (!fieldOffsets.resize(numFields))
+         return false;
+ 
+     // TODO (subsequent patch): lay out the fields.
+ 
+     for (uint32_t i = 0; i < numFields; i++) {
+-        if (!DecodeValType(d, ModuleKind::Wasm, env->gcTypesEnabled, &fields[i]))
++        if (!DecodeValType(d, ModuleKind::Wasm, env->types.length(), env->gcTypesEnabled, &fields[i]))
++            return false;
++        if (!ValidateRefType(d, typeState, fields[i]))
+             return false;
+     }
+ 
++    if ((*typeState)[typeIndex] != TypeState::None &&
++        (*typeState)[typeIndex] != TypeState::ForwardStruct)
++    {
++        return d.fail("struct type entry referenced as function");
++    }
++
+     env->types[typeIndex] = TypeDef(StructType(std::move(fields), std::move(fieldOffsets)));
++    (*typeState)[typeIndex] = TypeState::Struct;
++
+     return true;
+ }
+ 
+ static bool
+ DecodeTypeSection(Decoder& d, ModuleEnvironment* env)
+ {
+     MaybeSectionRange range;
+     if (!d.startSection(SectionId::Type, env, &range, "type"))
+@@ -1149,28 +1240,32 @@ DecodeTypeSection(Decoder& d, ModuleEnvi
+         return d.fail("expected number of types");
+ 
+     if (numTypes > MaxTypes)
+         return d.fail("too many types");
+ 
+     if (!env->types.resize(numTypes))
+         return false;
+ 
++    TypeStateVector typeState;
++    if (!typeState.appendN(TypeState::None, numTypes))
++        return false;
++
+     for (uint32_t typeIndex = 0; typeIndex < numTypes; typeIndex++) {
+         uint8_t form;
+         if (!d.readFixedU8(&form))
+             return d.fail("expected type form");
+ 
+         switch (form) {
+           case uint8_t(TypeCode::Func):
+-            if (!DecodeFuncType(d, env, typeIndex))
++            if (!DecodeFuncType(d, env, &typeState, typeIndex))
+                 return false;
+             break;
+           case uint8_t(TypeCode::Struct):
+-            if (!DecodeStructType(d, env, typeIndex))
++            if (!DecodeStructType(d, env, &typeState, typeIndex))
+                 return false;
+             break;
+           default:
+             return d.fail("expected type form");
+         }
+     }
+ 
+     return d.finishSection(*range, "type");
+@@ -1305,20 +1400,22 @@ GlobalIsJSCompatible(Decoder& d, ValType
+       default:
+         return d.fail("unexpected variable type in global import/export");
+     }
+ 
+     return true;
+ }
+ 
+ static bool
+-DecodeGlobalType(Decoder& d, ValType* type, bool* isMutable)
++DecodeGlobalType(Decoder& d, const TypeDefVector& types, ValType* type, bool* isMutable)
+ {
+     // No gc types in globals at the moment.
+-    if (!DecodeValType(d, ModuleKind::Wasm, HasGcTypes::False, type))
++    if (!DecodeValType(d, ModuleKind::Wasm, types.length(), HasGcTypes::False, type))
++        return false;
++    if (!ValidateRefType(d, types, *type))
+         return false;
+ 
+     uint8_t flags;
+     if (!d.readFixedU8(&flags))
+         return d.fail("expected global flags");
+ 
+     if (flags & ~uint8_t(GlobalTypeImmediate::AllowedMask))
+         return d.fail("unexpected bits set in global flags");
+@@ -1405,17 +1502,17 @@ DecodeImport(Decoder& d, ModuleEnvironme
+       case DefinitionKind::Memory: {
+         if (!DecodeMemoryLimits(d, env))
+             return false;
+         break;
+       }
+       case DefinitionKind::Global: {
+         ValType type;
+         bool isMutable;
+-        if (!DecodeGlobalType(d, &type, &isMutable))
++        if (!DecodeGlobalType(d, env->types, &type, &isMutable))
+             return false;
+         if (!GlobalIsJSCompatible(d, type, isMutable))
+             return false;
+         if (!env->globals.append(GlobalDesc(type, isMutable, env->globals.length())))
+             return false;
+         if (env->globals.length() > MaxGlobals)
+             return d.fail("too many globals");
+         break;
+@@ -1619,17 +1716,17 @@ DecodeGlobalSection(Decoder& d, ModuleEn
+         return d.fail("too many globals");
+ 
+     if (!env->globals.reserve(numGlobals.value()))
+         return false;
+ 
+     for (uint32_t i = 0; i < numDefs; i++) {
+         ValType type;
+         bool isMutable;
+-        if (!DecodeGlobalType(d, &type, &isMutable))
++        if (!DecodeGlobalType(d, env->types, &type, &isMutable))
+             return false;
+ 
+         InitExpr initializer;
+         if (!DecodeInitializerExpression(d, env->globals, type, &initializer))
+             return false;
+ 
+         env->globals.infallibleAppend(GlobalDesc(initializer, isMutable));
+     }
+diff --git a/js/src/wasm/WasmValidate.h b/js/src/wasm/WasmValidate.h
+--- a/js/src/wasm/WasmValidate.h
++++ b/js/src/wasm/WasmValidate.h
+@@ -264,23 +264,31 @@ class Encoder
+     MOZ_MUST_USE bool writeVarU64(uint64_t i) {
+         return writeVarU<uint64_t>(i);
+     }
+     MOZ_MUST_USE bool writeVarS64(int64_t i) {
+         return writeVarS<int64_t>(i);
+     }
+     MOZ_MUST_USE bool writeValType(ValType type) {
+         static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+-        MOZ_ASSERT(size_t(type.bitsUnsafe()) < size_t(TypeCode::Limit));
+-        return writeFixedU8(uint8_t(type.bitsUnsafe()));
++        MOZ_ASSERT(size_t(type.code()) < size_t(TypeCode::Limit));
++        if (type.isRef()) {
++            return writeFixedU8(uint8_t(TypeCode::Ref)) &&
++                   writeVarU32(type.refTypeIndex());
++        }
++        return writeFixedU8(uint8_t(type.code()));
+     }
+     MOZ_MUST_USE bool writeBlockType(ExprType type) {
+         static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+-        MOZ_ASSERT(size_t(type) < size_t(TypeCode::Limit));
+-        return writeFixedU8(uint8_t(type));
++        MOZ_ASSERT(size_t(type.code()) < size_t(TypeCode::Limit));
++        if (type.isRef()) {
++            return writeFixedU8(uint8_t(ExprType::Ref)) &&
++                   writeVarU32(type.refTypeIndex());
++        }
++        return writeFixedU8(uint8_t(type.code()));
+     }
+     MOZ_MUST_USE bool writeOp(Op op) {
+         static_assert(size_t(Op::Limit) == 256, "fits");
+         MOZ_ASSERT(size_t(op) < size_t(Op::Limit));
+         return writeFixedU8(uint8_t(op));
+     }
+     MOZ_MUST_USE bool writeOp(MiscOp op) {
+         static_assert(size_t(MiscOp::Limit) <= 256, "fits");
+@@ -542,23 +550,39 @@ class Decoder
+         return readVarS<int32_t>(out);
+     }
+     MOZ_MUST_USE bool readVarU64(uint64_t* out) {
+         return readVarU<uint64_t>(out);
+     }
+     MOZ_MUST_USE bool readVarS64(int64_t* out) {
+         return readVarS<int64_t>(out);
+     }
+-    MOZ_MUST_USE bool readValType(uint8_t* type) {
++    MOZ_MUST_USE bool readValType(uint8_t* code, uint32_t* refTypeIndex) {
+         static_assert(uint8_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+-        return readFixedU8(type);
++        if (!readFixedU8(code))
++            return false;
++        if (*code == uint8_t(TypeCode::Ref)) {
++            if (!readVarU32(refTypeIndex))
++                return false;
++        } else {
++            *refTypeIndex = NoRefTypeIndex;
++        }
++        return true;
+     }
+-    MOZ_MUST_USE bool readBlockType(uint8_t* type) {
++    MOZ_MUST_USE bool readBlockType(uint8_t* code, uint32_t* refTypeIndex) {
+         static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+-        return readFixedU8(type);
++        if (!readFixedU8(code))
++            return false;
++        if (*code == uint8_t(TypeCode::Ref)) {
++            if (!readVarU32(refTypeIndex))
++                return false;
++        } else {
++            *refTypeIndex = NoRefTypeIndex;
++        }
++        return true;
+     }
+     MOZ_MUST_USE bool readOp(OpBytes* op) {
+         static_assert(size_t(Op::Limit) == 256, "fits");
+         uint8_t u8;
+         if (!readFixedU8(&u8))
+             return false;
+         op->b0 = u8;
+         if (MOZ_LIKELY(!IsPrefixByte(u8)))
+@@ -666,19 +690,16 @@ class Decoder
+     uint64_t uncheckedReadVarU64() {
+         return uncheckedReadVarU<uint64_t>();
+     }
+     int64_t uncheckedReadVarS64() {
+         int64_t i64 = 0;
+         MOZ_ALWAYS_TRUE(readVarS64(&i64));
+         return i64;
+     }
+-    ValType uncheckedReadValType() {
+-        return ValType::fromTypeCode(uncheckedReadFixedU8());
+-    }
+     Op uncheckedReadOp() {
+         static_assert(size_t(Op::Limit) == 256, "fits");
+         uint8_t u8 = uncheckedReadFixedU8();
+         return u8 != UINT8_MAX
+                ? Op(u8)
+                : Op(uncheckedReadFixedU8() + UINT8_MAX);
+     }
+     void uncheckedReadFixedI8x16(I8x16* i8x16) {
+@@ -704,18 +725,27 @@ class Decoder
+ };
+ 
+ // The local entries are part of function bodies and thus serialized by both
+ // wasm and asm.js and decoded as part of both validation and compilation.
+ 
+ MOZ_MUST_USE bool
+ EncodeLocalEntries(Encoder& d, const ValTypeVector& locals);
+ 
++// This performs no validation; the local entries must already have been
++// validated by an earlier pass.
++
+ MOZ_MUST_USE bool
+-DecodeLocalEntries(Decoder& d, ModuleKind kind, HasGcTypes gcTypesEnabled, ValTypeVector* locals);
++DecodeValidatedLocalEntries(Decoder& d, ValTypeVector* locals);
++
++// This validates the entries.
++
++MOZ_MUST_USE bool
++DecodeLocalEntries(Decoder& d, ModuleKind kind, const TypeDefVector& types,
++                   HasGcTypes gcTypesEnabled, ValTypeVector* locals);
+ 
+ // Returns whether the given [begin, end) prefix of a module's bytecode starts a
+ // code section and, if so, returns the SectionRange of that code section.
+ // Note that, even if this function returns 'false', [begin, end) may actually
+ // be a valid module in the special case when there are no function defs and the
+ // code section is not present. Such modules can be valid so the caller must
+ // handle this special case.
+ 

+ 332 - 0
frg/work-js/mozilla-release/patches/1459900-5-63a1.patch

@@ -0,0 +1,332 @@
+# HG changeset patch
+# User Lars T Hansen <lhansen@mozilla.com>
+# Date 1530099656 -7200
+#      Wed Jun 27 13:40:56 2018 +0200
+# Node ID 3ed7ed0f031b842901bee0dbc27c7a3a39ed16ae
+# Parent  9e3fb74a11eefbed803b9e8705c18ad754a1fe23
+Bug 1459900 - Track structure field mutability. r=luke
+
+The StructType and AstStructType machinery must retain the information about
+field mutability (to be used by subsequent patches).
+
+For StructType, I opted to create a StructField structure that holds the
+information per field.  This works well except in the occasional case (not
+in this patch) where existing functionality needs a plain array of ValTypes.
+
+For AstStructType, I opted to keep the array of mutability flags separate from
+the arrays of names and field types, because this was simplest.
+
+In either case, we could do it the other way.
+
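+As an illustrative sketch (the AstStructType layout appears in the diff
+below; StructField's members are inferred from the description above, not
+quoted from the patch):
+
+  struct StructField { ValType type; bool isMutable; };  // per-field record
+
+  class AstStructType {
+      AstNameVector    fieldNames_;       // parallel arrays, indexed
+      AstBoolVector    fieldMutability_;  //   by field position
+      AstValTypeVector fieldTypes_;
+  };
+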
+diff --git a/js/src/wasm/WasmAST.h b/js/src/wasm/WasmAST.h
+--- a/js/src/wasm/WasmAST.h
++++ b/js/src/wasm/WasmAST.h
+@@ -36,16 +36,18 @@ const unsigned AST_LIFO_DEFAULT_CHUNK_SI
+ class AstExpr;
+ 
+ template <class T>
+ using AstVector = mozilla::Vector<T, 0, LifoAllocPolicy<Fallible>>;
+ 
+ template <class K, class V, class HP>
+ using AstHashMap = HashMap<K, V, HP, LifoAllocPolicy<Fallible>>;
+ 
++typedef AstVector<bool> AstBoolVector;
++
+ class AstName
+ {
+     const char16_t* begin_;
+     const char16_t* end_;
+   public:
+     template <size_t Length>
+     explicit AstName(const char16_t (&str)[Length]) : begin_(str), end_(str + Length - 1) {
+       MOZ_ASSERT(str[Length - 1] == u'\0');
+@@ -378,41 +380,48 @@ class AstFuncType : public AstTypeDef
+         return *lhs == rhs;
+     }
+ };
+ 
+ class AstStructType : public AstTypeDef
+ {
+     AstName          name_;
+     AstNameVector    fieldNames_;
++    AstBoolVector    fieldMutability_;
+     AstValTypeVector fieldTypes_;
+ 
+   public:
+     explicit AstStructType(LifoAlloc& lifo)
+       : AstTypeDef(Which::IsStructType),
+         fieldNames_(lifo),
++        fieldMutability_(lifo),
+         fieldTypes_(lifo)
+     {}
+-    AstStructType(AstNameVector&& names, AstValTypeVector&& types)
++    AstStructType(AstNameVector&& names, AstBoolVector&& mutability, AstValTypeVector&& types)
+       : AstTypeDef(Which::IsStructType),
+         fieldNames_(std::move(names)),
++        fieldMutability_(std::move(mutability)),
+         fieldTypes_(std::move(types))
+     {}
+     AstStructType(AstName name, AstStructType&& rhs)
+       : AstTypeDef(Which::IsStructType),
+         name_(name),
+         fieldNames_(std::move(rhs.fieldNames_)),
++        fieldMutability_(std::move(rhs.fieldMutability_)),
+         fieldTypes_(std::move(rhs.fieldTypes_))
+     {}
+     AstName name() const {
+         return name_;
+     }
+     const AstNameVector& fieldNames() const {
+         return fieldNames_;
+     }
++    const AstBoolVector& fieldMutability() const {
++        return fieldMutability_;
++    }
+     const AstValTypeVector& fieldTypes() const {
+         return fieldTypes_;
+     }
+     AstValTypeVector& fieldTypes() {
+         return fieldTypes_;
+     }
+ };
+ 
+diff --git a/js/src/wasm/WasmBinaryConstants.h b/js/src/wasm/WasmBinaryConstants.h
+--- a/js/src/wasm/WasmBinaryConstants.h
++++ b/js/src/wasm/WasmBinaryConstants.h
+@@ -592,16 +592,21 @@ static const char SourceMappingURLSectio
+ 
+ enum class NameType
+ {
+     Module   = 0,
+     Function = 1,
+     Local    = 2
+ };
+ 
++enum class FieldFlags {
++    Mutable     = 0x01,
++    AllowedMask = 0x01
++};
++
+ // These limits are agreed upon with other engines for consistency.
+ 
+ static const unsigned MaxTypes               =  1000000;
+ static const unsigned MaxFuncs               =  1000000;
+ static const unsigned MaxImports             =   100000;
+ static const unsigned MaxExports             =   100000;
+ static const unsigned MaxGlobals             =  1000000;
+ static const unsigned MaxDataSegments        =   100000;
+diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp
+--- a/js/src/wasm/WasmTextToBinary.cpp
++++ b/js/src/wasm/WasmTextToBinary.cpp
+@@ -3399,16 +3399,17 @@ ParseFunc(WasmParseContext& c, AstModule
+ 
+ static bool
+ ParseGlobalType(WasmParseContext& c, AstValType* type, bool* isMutable);
+ 
+ static bool
+ ParseStructFields(WasmParseContext& c, AstStructType* st)
+ {
+     AstNameVector    names(c.lifo);
++    AstBoolVector    mutability(c.lifo);
+     AstValTypeVector types(c.lifo);
+ 
+     while (true) {
+         if (!c.ts.getIf(WasmToken::OpenParen))
+             break;
+ 
+         if (!c.ts.match(WasmToken::Field, c.error))
+             return false;
+@@ -3419,21 +3420,23 @@ ParseStructFields(WasmParseContext& c, A
+         bool isMutable;
+         if (!ParseGlobalType(c, &type, &isMutable))
+             return false;
+         if (!c.ts.match(WasmToken::CloseParen, c.error))
+             return false;
+ 
+         if (!names.append(name))
+             return false;
++        if (!mutability.append(isMutable))
++            return false;
+         if (!types.append(type))
+             return false;
+     }
+ 
+-    *st = AstStructType(std::move(names), std::move(types));
++    *st = AstStructType(std::move(names), std::move(mutability), std::move(types));
+     return true;
+ }
+ 
+ static AstTypeDef*
+ ParseTypeDef(WasmParseContext& c)
+ {
+     AstName name = c.ts.getIfName();
+ 
+@@ -5411,18 +5414,22 @@ EncodeTypeSection(Encoder& e, AstModule&
+         } else if (td->isStructType()) {
+             AstStructType* st = static_cast<AstStructType*>(td);
+             if (!e.writeVarU32(uint32_t(TypeCode::Struct)))
+                 return false;
+ 
+             if (!e.writeVarU32(st->fieldTypes().length()))
+                 return false;
+ 
+-            for (AstValType vt : st->fieldTypes()) {
+-                if (!e.writeValType(vt.type()))
++            const AstValTypeVector& fieldTypes = st->fieldTypes();
++            const AstBoolVector& fieldMutables = st->fieldMutability();
++            for (uint32_t i = 0; i < fieldTypes.length(); i++) {
++                if (!e.writeFixedU8(fieldMutables[i] ? uint8_t(FieldFlags::Mutable) : 0))
++                    return false;
++                if (!e.writeValType(fieldTypes[i].type()))
+                     return false;
+             }
+         } else {
+             MOZ_CRASH();
+         }
+     }
+ 
+     e.finishSection(offset);
+diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
+--- a/js/src/wasm/WasmTypes.cpp
++++ b/js/src/wasm/WasmTypes.cpp
+@@ -328,41 +328,37 @@ size_t
+ FuncTypeWithId::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+ {
+     return FuncType::sizeOfExcludingThis(mallocSizeOf);
+ }
+ 
+ size_t
+ StructType::serializedSize() const
+ {
+-    return SerializedPodVectorSize(fields_) +
+-           SerializedPodVectorSize(fieldOffsets_);
++    return SerializedPodVectorSize(fields_);
+ }
+ 
+ uint8_t*
+ StructType::serialize(uint8_t* cursor) const
+ {
+     cursor = SerializePodVector(cursor, fields_);
+-    cursor = SerializePodVector(cursor, fieldOffsets_);
+     return cursor;
+ }
+ 
+ const uint8_t*
+ StructType::deserialize(const uint8_t* cursor)
+ {
+     (cursor = DeserializePodVector(cursor, &fields_));
+-    (cursor = DeserializePodVector(cursor, &fieldOffsets_));
+     return cursor;
+ }
+ 
+ size_t
+ StructType::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+ {
+-    return fields_.sizeOfExcludingThis(mallocSizeOf) +
+-           fieldOffsets_.sizeOfExcludingThis(mallocSizeOf);
++    return fields_.sizeOfExcludingThis(mallocSizeOf);
+ }
+ 
+ size_t
+ Import::serializedSize() const
+ {
+     return module.serializedSize() +
+            field.serializedSize() +
+            sizeof(kind);
+diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
+--- a/js/src/wasm/WasmTypes.h
++++ b/js/src/wasm/WasmTypes.h
+@@ -915,28 +915,35 @@ struct FuncTypeHashPolicy
+ };
+ 
+ // Structure type.
+ //
+ // The Module owns a dense array of Struct values that represent the structure
+ // types that the module knows about.  It is created from the sparse array of
+ // types in the ModuleEnvironment when the Module is created.
+ 
++struct StructField
++{
++    ValType  type;
++    uint32_t offset;
++    bool     isMutable;
++};
++
++typedef Vector<StructField, 0, SystemAllocPolicy> StructFieldVector;
++
+ class StructType
+ {
+   public:
+-    ValTypeVector fields_;       // Scalar types of fields
+-    Uint32Vector  fieldOffsets_; // Byte offsets into an object for corresponding field
++    StructFieldVector fields_;
+ 
+   public:
+-    StructType() : fields_(), fieldOffsets_() {}
+-
+-    StructType(ValTypeVector&& fields, Uint32Vector&& fieldOffsets)
+-      : fields_(std::move(fields)),
+-        fieldOffsets_(std::move(fieldOffsets))
++    StructType() : fields_() {}
++
++    explicit StructType(StructFieldVector&& fields)
++      : fields_(std::move(fields))
+     {}
+ 
+     WASM_DECLARE_SERIALIZABLE(StructType)
+ };
+ 
+ typedef Vector<StructType, 0, SystemAllocPolicy> StructTypeVector;
+ 
+ // An InitExpr describes a deferred initializer expression, used to initialize
+diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
+--- a/js/src/wasm/WasmValidate.cpp
++++ b/js/src/wasm/WasmValidate.cpp
+@@ -1195,37 +1195,39 @@ DecodeStructType(Decoder& d, ModuleEnvir
+ 
+     uint32_t numFields;
+     if (!d.readVarU32(&numFields))
+         return d.fail("Bad number of fields");
+ 
+     if (numFields > MaxStructFields)
+         return d.fail("too many fields in structure");
+ 
+-    ValTypeVector fields;
++    StructFieldVector fields;
+     if (!fields.resize(numFields))
+         return false;
+ 
+-    Uint32Vector fieldOffsets;
+-    if (!fieldOffsets.resize(numFields))
+-        return false;
+-
+     // TODO (subsequent patch): lay out the fields.
+ 
+     for (uint32_t i = 0; i < numFields; i++) {
+-        if (!DecodeValType(d, ModuleKind::Wasm, env->types.length(), env->gcTypesEnabled, &fields[i]))
++        uint8_t flags;
++        if (!d.readFixedU8(&flags))
++            return d.fail("expected flag");
++        if ((flags & ~uint8_t(FieldFlags::AllowedMask)) != 0)
++            return d.fail("garbage flag bits");
++        fields[i].isMutable = flags & uint8_t(FieldFlags::Mutable);
++        if (!DecodeValType(d, ModuleKind::Wasm, env->types.length(), env->gcTypesEnabled, &fields[i].type))
+             return false;
+-        if (!ValidateRefType(d, typeState, fields[i]))
++        if (!ValidateRefType(d, typeState, fields[i].type))
+             return false;
+     }
+ 
+     if ((*typeState)[typeIndex] != TypeState::None && (*typeState)[typeIndex] != TypeState::ForwardStruct)
+         return d.fail("struct type entry referenced as function");
+ 
+-    env->types[typeIndex] = TypeDef(StructType(std::move(fields), std::move(fieldOffsets)));
++    env->types[typeIndex] = TypeDef(StructType(std::move(fields)));
+     (*typeState)[typeIndex] = TypeState::Struct;
+ 
+     return true;
+ }
+ 
+ static bool
+ DecodeTypeSection(Decoder& d, ModuleEnvironment* env)
+ {

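The wire-format change in this patch is small: each struct field is now encoded as one flags byte (bit 0 is FieldFlags::Mutable; all other bits are rejected via AllowedMask) followed by the field's value type. A compact sketch of that encode/decode pair, where Field and the one-byte type are hypothetical simplifications of wasm::StructField and ValType:

#include <cstdint>
#include <vector>

constexpr uint8_t kMutable     = 0x01;  // FieldFlags::Mutable
constexpr uint8_t kAllowedMask = 0x01;  // FieldFlags::AllowedMask

struct Field {
    uint8_t type;       // value-type byte (stand-in for ValType)
    bool    isMutable;
};

// Mirrors EncodeTypeSection: flags byte first, then the value type.
void EncodeField(std::vector<uint8_t>& out, const Field& f) {
    out.push_back(f.isMutable ? kMutable : 0);
    out.push_back(f.type);
}

// Mirrors DecodeStructType: reject any flag bit outside AllowedMask.
bool DecodeField(const uint8_t*& p, const uint8_t* end, Field* f) {
    if (end - p < 2)
        return false;                   // "expected flag"
    uint8_t flags = *p++;
    if (flags & ~kAllowedMask)
        return false;                   // "garbage flag bits"
    f->isMutable = (flags & kMutable) != 0;
    f->type = *p++;
    return true;
}

DecodeField fails in the same two ways the real DecodeStructType does: a truncated field and unknown flag bits.
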
+ 456 - 0
frg/work-js/mozilla-release/patches/1459900-6-63a1.patch

@@ -0,0 +1,456 @@
+# HG changeset patch
+# User Lars T Hansen <lhansen@mozilla.com>
+# Date 1528908706 25200
+#      Wed Jun 13 09:51:46 2018 -0700
+# Node ID 732ee560105619a92566348426823488a1dbc91e
+# Parent  3ed7ed0f031b842901bee0dbc27c7a3a39ed16ae
+Bug 1459900 - Automatic upcasts through prefix supertyping. r=luke
+
+This augments the Ref type patch by making the subtype test perform automatic
+upcasts to prefixes.  The prefixes are still quite strict; corresponding Ref
+types must name the same underlying type and corresponding fields must have the
+same mutability.
+
+diff --git a/js/src/jit-test/tests/wasm/gc/ref.js b/js/src/jit-test/tests/wasm/gc/ref.js
+--- a/js/src/jit-test/tests/wasm/gc/ref.js
++++ b/js/src/jit-test/tests/wasm/gc/ref.js
+@@ -87,43 +87,94 @@ new WebAssembly.Module(wasmTextToBinary(
+ // Misc failure modes
+ 
+ assertErrorMessage(() => wasmEvalText(`
+ (module
+   (func (param (ref $odd)) (unreachable)))
+ `),
+ SyntaxError, /Type label.*not found/);
+ 
+-// Ref type mismatch in parameter
++// Ref type mismatch in parameter is allowed through the prefix rule
++// but not if the structs are incompatible.
+ 
+-assertErrorMessage(() => wasmEvalText(`
++wasmEvalText(`
+ (module
+  (type $s (struct (field i32)))
+  (type $t (struct (field i32)))
+  (func $f (param (ref $s)) (unreachable))
+- (func $g (param (ref $t)) (call $f (get_local 0)) drop))
+-`),
++ (func $g (param (ref $t)) (call $f (get_local 0)))
++)`);
++
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type $s (struct (field i32)))
++ (type $t (struct (field f32))) ;; Incompatible type
++ (func $f (param (ref $s)) (unreachable))
++ (func $g (param (ref $t)) (call $f (get_local 0)))
++)`),
+ WebAssembly.CompileError, /expression has type ref.*but expected ref/);
+ 
+-// Ref type mismatch in assignment to local
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type $s (struct (field i32)))
++ (type $t (struct (field (mut i32)))) ;; Incompatible mutability
++ (func $f (param (ref $s)) (unreachable))
++ (func $g (param (ref $t)) (call $f (get_local 0)))
++)`),
++WebAssembly.CompileError, /expression has type ref.*but expected ref/);
++
++// Ref type mismatch in assignment to local but the prefix rule allows
++// the assignment to succeed if the structs are the same.
++
++wasmEvalText(`
++(module
++ (type $s (struct (field i32)))
++ (type $t (struct (field i32)))
++ (func $f (param (ref $s)) (local (ref $t)) (set_local 1 (get_local 0))))
++`)
+ 
+ assertErrorMessage(() => wasmEvalText(`
+ (module
+  (type $s (struct (field i32)))
+- (type $t (struct (field i32)))
++ (type $t (struct (field f32)))
+  (func $f (param (ref $s)) (local (ref $t)) (set_local 1 (get_local 0))))
+ `),
+ WebAssembly.CompileError, /expression has type ref.*but expected ref/);
+ 
+-// Ref type mismatch in return
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type $s (struct (field i32)))
++ (type $t (struct (field (mut i32))))
++ (func $f (param (ref $s)) (unreachable))
++ (func $g (param (ref $t)) (call $f (get_local 0)))
++)`),
++WebAssembly.CompileError, /expression has type ref.*but expected ref/);
++
++// Ref type mismatch in return but the prefix rule allows the return
++// to succeed if the structs are the same.
++
++wasmEvalText(`
++(module
++ (type $s (struct (field i32)))
++ (type $t (struct (field i32)))
++ (func $f (param (ref $s)) (result (ref $t)) (get_local 0)))
++`);
+ 
+ assertErrorMessage(() => wasmEvalText(`
+ (module
+  (type $s (struct (field i32)))
+- (type $t (struct (field i32)))
++ (type $t (struct (field f32)))
++ (func $f (param (ref $s)) (result (ref $t)) (get_local 0)))
++`),
++WebAssembly.CompileError, /expression has type ref.*but expected ref/);
++
++assertErrorMessage(() => wasmEvalText(`
++(module
++ (type $s (struct (field i32)))
++ (type $t (struct (field (mut i32))))
+  (func $f (param (ref $s)) (result (ref $t)) (get_local 0)))
+ `),
+ WebAssembly.CompileError, /expression has type ref.*but expected ref/);
+ 
+ // Ref type can't reference a function type
+ 
+ assertErrorMessage(() => wasmEvalText(`
+ (module
+diff --git a/js/src/wasm/WasmOpIter.h b/js/src/wasm/WasmOpIter.h
+--- a/js/src/wasm/WasmOpIter.h
++++ b/js/src/wasm/WasmOpIter.h
+@@ -142,89 +142,16 @@ class StackType
+ 
+ static inline ValType
+ NonAnyToValType(StackType type)
+ {
+     MOZ_ASSERT(type != StackType::Any);
+     return ValType(type.packed());
+ }
+ 
+-static inline bool
+-IsSubtypeOf(StackType one, StackType two)
+-{
+-    MOZ_ASSERT(one.isRefOrAnyRef());
+-    MOZ_ASSERT(two.isRefOrAnyRef());
+-    return one == two || two == StackType::AnyRef;
+-}
+-
+-static inline bool
+-Unify(HasGcTypes gcTypesEnabled, StackType observed, StackType expected, StackType* result)
+-{
+-    if (MOZ_LIKELY(observed == expected)) {
+-        *result = observed;
+-        return true;
+-    }
+-
+-    if (observed == StackType::Any) {
+-        *result = expected;
+-        return true;
+-    }
+-
+-    if (expected == StackType::Any) {
+-        *result = observed;
+-        return true;
+-    }
+-
+-    if (gcTypesEnabled == HasGcTypes::True && observed.isRefOrAnyRef() &&
+-        expected.isRefOrAnyRef() && IsSubtypeOf(observed, expected))
+-    {
+-        *result = expected;
+-        return true;
+-    }
+-
+-    return false;
+-}
+-
+-static inline bool
+-Join(HasGcTypes gcTypesEnabled, StackType one, StackType two, StackType* result)
+-{
+-    if (MOZ_LIKELY(one == two)) {
+-        *result = one;
+-        return true;
+-    }
+-
+-    if (one == StackType::Any) {
+-        *result = two;
+-        return true;
+-    }
+-
+-    if (two == StackType::Any) {
+-        *result = one;
+-        return true;
+-    }
+-
+-    if (gcTypesEnabled == HasGcTypes::True && one.isRefOrAnyRef() && two.isRefOrAnyRef()) {
+-        if (IsSubtypeOf(two, one)) {
+-            *result = one;
+-            return true;
+-        }
+-
+-        if (IsSubtypeOf(one, two)) {
+-            *result = two;
+-            return true;
+-        }
+-
+-        // No subtyping relations between the two types.
+-        *result = StackType::AnyRef;
+-        return true;
+-    }
+-
+-    return false;
+-}
+-
+ #ifdef DEBUG
+ // Families of opcodes that share a signature and validation logic.
+ enum class OpKind {
+     Block,
+     Loop,
+     Unreachable,
+     Drop,
+     I32,
+@@ -560,16 +487,21 @@ class MOZ_STACK_CLASS OpIter : private P
+         valueStack_.infallibleAppend(tv);
+     }
+ 
+     void afterUnconditionalBranch() {
+         valueStack_.shrinkTo(controlStack_.back().valueStackStart());
+         controlStack_.back().setPolymorphicBase();
+     }
+ 
++    inline bool IsPrefixOf(StackType a, StackType b);
++    inline bool IsSubtypeOf(StackType one, StackType two);
++    inline bool Unify(StackType observed, StackType expected, StackType* result);
++    inline bool Join(StackType one, StackType two, StackType* result);
++
+   public:
+     typedef Vector<Value, 8, SystemAllocPolicy> ValueVector;
+ 
+ #ifdef DEBUG
+     explicit OpIter(const ModuleEnvironment& env, Decoder& decoder)
+       : d_(decoder), env_(env), op_(OpBytes(Op::Limit)), offsetOfLastReadOp_(0)
+     {}
+ #else
+@@ -769,16 +701,100 @@ class MOZ_STACK_CLASS OpIter : private P
+     // end of the function body.
+     bool controlStackEmpty() const {
+         return controlStack_.empty();
+     }
+ };
+ 
+ template <typename Policy>
+ inline bool
++OpIter<Policy>::IsPrefixOf(StackType a, StackType b)
++{
++    const StructType& other = env_.types[a.refTypeIndex()].structType();
++    return env_.types[b.refTypeIndex()].structType().hasPrefix(other);
++}
++
++template <typename Policy>
++inline bool
++OpIter<Policy>::IsSubtypeOf(StackType one, StackType two)
++{
++    MOZ_ASSERT(one.isRefOrAnyRef());
++    MOZ_ASSERT(two.isRefOrAnyRef());
++    return one == two || two == StackType::AnyRef || (one.isRef() && IsPrefixOf(two, one));
++}
++
++template <typename Policy>
++inline bool
++OpIter<Policy>::Unify(StackType observed, StackType expected, StackType* result)
++{
++    if (MOZ_LIKELY(observed == expected)) {
++        *result = observed;
++        return true;
++    }
++
++    if (observed == StackType::Any) {
++        *result = expected;
++        return true;
++    }
++
++    if (expected == StackType::Any) {
++        *result = observed;
++        return true;
++    }
++
++    if (env_.gcTypesEnabled == HasGcTypes::True && observed.isRefOrAnyRef() &&
++        expected.isRefOrAnyRef() && IsSubtypeOf(observed, expected))
++    {
++        *result = expected;
++        return true;
++    }
++
++    return false;
++}
++
++template <typename Policy>
++inline bool
++OpIter<Policy>::Join(StackType one, StackType two, StackType* result)
++{
++    if (MOZ_LIKELY(one == two)) {
++        *result = one;
++        return true;
++    }
++
++    if (one == StackType::Any) {
++        *result = two;
++        return true;
++    }
++
++    if (two == StackType::Any) {
++        *result = one;
++        return true;
++    }
++
++    if (env_.gcTypesEnabled == HasGcTypes::True && one.isRefOrAnyRef() && two.isRefOrAnyRef()) {
++        if (IsSubtypeOf(two, one)) {
++            *result = one;
++            return true;
++        }
++
++        if (IsSubtypeOf(one, two)) {
++            *result = two;
++            return true;
++        }
++
++        // No subtyping relations between the two types.
++        *result = StackType::AnyRef;
++        return true;
++    }
++
++    return false;
++}
++
++template <typename Policy>
++inline bool
+ OpIter<Policy>::unrecognizedOpcode(const OpBytes* expr)
+ {
+     UniqueChars error(JS_smprintf("unrecognized opcode: %x %x", expr->b0,
+                                   IsPrefixByte(expr->b0) ? expr->b1 : 0));
+     if (!error)
+         return false;
+ 
+     return fail(error.get());
+@@ -862,17 +878,17 @@ OpIter<Policy>::popWithType(StackType ex
+         if (valueStack_.empty())
+             return fail("popping value from empty stack");
+         return fail("popping value from outside block");
+     }
+ 
+     TypeAndValue<Value> tv = valueStack_.popCopy();
+ 
+     StackType _;
+-    if (MOZ_UNLIKELY(!Unify(env_.gcTypesEnabled, tv.type(), expectedType, &_)))
++    if (MOZ_UNLIKELY(!Unify(tv.type(), expectedType, &_)))
+         return typeMismatch(tv.type(), expectedType);
+ 
+     *value = tv.value();
+     return true;
+ }
+ 
+ // This function pops as many types from the stack as determined by the given
+ // signature. Currently, all signatures are limited to 0 or 1 types, with
+@@ -914,21 +930,18 @@ OpIter<Policy>::topWithType(ValType expe
+ 
+         if (valueStack_.empty())
+             return fail("reading value from empty stack");
+         return fail("reading value from outside block");
+     }
+ 
+     TypeAndValue<Value>& tv = valueStack_.back();
+ 
+-    if (MOZ_UNLIKELY(!Unify(env_.gcTypesEnabled, tv.type(), StackType(expectedType),
+-                            &tv.typeRef())))
+-    {
++    if (MOZ_UNLIKELY(!Unify(tv.type(), StackType(expectedType), &tv.typeRef())))
+         return typeMismatch(tv.type(), StackType(expectedType));
+-    }
+ 
+     *value = tv.value();
+     return true;
+ }
+ 
+ template <typename Policy>
+ inline bool
+ OpIter<Policy>::topWithType(ExprType expectedType, Value* value)
+@@ -1528,17 +1541,17 @@ OpIter<Policy>::readSelect(StackType* ty
+     StackType falseType;
+     if (!popAnyType(&falseType, falseValue))
+         return false;
+ 
+     StackType trueType;
+     if (!popAnyType(&trueType, trueValue))
+         return false;
+ 
+-    if (!Join(env_.gcTypesEnabled, falseType, trueType, type))
++    if (!Join(falseType, trueType, type))
+         return fail("select operand types must match");
+ 
+     infalliblePush(*type);
+     return true;
+ }
+ 
+ template <typename Policy>
+ inline bool
+diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
+--- a/js/src/wasm/WasmTypes.cpp
++++ b/js/src/wasm/WasmTypes.cpp
+@@ -325,16 +325,34 @@ FuncTypeWithId::deserialize(const uint8_
+ }
+ 
+ size_t
+ FuncTypeWithId::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+ {
+     return FuncType::sizeOfExcludingThis(mallocSizeOf);
+ }
+ 
++// A simple notion of prefix: types and mutability must match exactly.
++
++bool
++StructType::hasPrefix(const StructType& other) const
++{
++    if (fields_.length() < other.fields_.length())
++        return false;
++    uint32_t limit = other.fields_.length();
++    for (uint32_t i = 0; i < limit; i++) {
++        if (fields_[i].type != other.fields_[i].type ||
++            fields_[i].isMutable != other.fields_[i].isMutable)
++        {
++            return false;
++        }
++    }
++    return true;
++}
++
+ size_t
+ StructType::serializedSize() const
+ {
+     return SerializedPodVectorSize(fields_);
+ }
+ 
+ uint8_t*
+ StructType::serialize(uint8_t* cursor) const
+diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
+--- a/js/src/wasm/WasmTypes.h
++++ b/js/src/wasm/WasmTypes.h
+@@ -936,16 +936,18 @@ class StructType
+ 
+   public:
+     StructType() : fields_() {}
+ 
+     explicit StructType(StructFieldVector&& fields)
+       : fields_(std::move(fields))
+     {}
+ 
++    bool hasPrefix(const StructType& other) const;
++
+     WASM_DECLARE_SERIALIZABLE(StructType)
+ };
+ 
+ typedef Vector<StructType, 0, SystemAllocPolicy> StructTypeVector;
+ 
+ // An InitExpr describes a deferred initializer expression, used to initialize
+ // a global or a table element offset. Such expressions are created during
+ // decoding and actually executed on module instantiation.

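For two concrete ref types, the subtype test above bottoms out in StructType::hasPrefix: (ref $t) upcasts to (ref $s) exactly when $s's field list is a leading prefix of $t's, matching field-for-field in both type and mutability. A standalone sketch of that check, with Field as a hypothetical stand-in for wasm::StructField:

#include <cstddef>
#include <vector>

struct Field {
    int  type;          // stand-in for ValType
    bool isMutable;
};

// Mirrors StructType::hasPrefix: does `self` start with all of `other`'s
// fields, matching both type and mutability exactly?
bool HasPrefix(const std::vector<Field>& self, const std::vector<Field>& other) {
    if (self.size() < other.size())
        return false;
    for (std::size_t i = 0; i < other.size(); i++) {
        if (self[i].type != other[i].type ||
            self[i].isMutable != other[i].isMutable)
            return false;
    }
    return true;
}

// Illustrative cases, in the spirit of the ref.js tests above:
//   $s = (struct (field i32))
//   $t = (struct (field i32) (field f64))  -> (ref $t) upcasts to (ref $s)
//   $u = (struct (field (mut i32)))        -> no upcast: mutability differs
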
+ 0 - 2938
frg/work-js/mozilla-release/patches/1465060-1-std-62a1.patch.bak

@@ -1,2938 +0,0 @@
-# HG changeset patch
-# User Miko Mynttinen <mikokm@gmail.com>
-# Date 1527868747 -7200
-# Node ID a0d11b55d5957a488b41420c4f6cc178df7cd2e7
-# Parent  cb1d6bfc93659e34080db0abdfd9e29d016e3182
-Bug 1465060 - Part 1: Fix warnings for std::move() use r=froydnj
-
-MozReview-Commit-ID: HpdFXqQdIOO
-
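The removed .bak below captured a tree-wide cleanup of two std::move() anti-patterns: std::move() wrapped around a returned local or a temporary (clang's -Wpessimizing-move, since the wrapper suppresses copy elision or moves an rvalue that would move anyway), and unqualified move() calls in the cdm test files that needed std:: qualification. A minimal illustration of the first pattern, not taken from the tree:

#include <memory>
#include <utility>

std::unique_ptr<int> MakeValue() {
    auto result = std::make_unique<int>(42);
    // Before: return std::move(result);  // -Wpessimizing-move, blocks NRVO
    return result;                        // implicit move, elision-eligible
}

void TakeValue(std::unique_ptr<int>) {}

void Caller() {
    // Before: TakeValue(std::move(std::make_unique<int>(7)));
    // make_unique already yields an rvalue, so the move is redundant.
    TakeValue(std::make_unique<int>(7));
}
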
-diff --git a/accessible/ipc/other/ProxyAccessible.cpp b/accessible/ipc/other/ProxyAccessible.cpp
---- a/accessible/ipc/other/ProxyAccessible.cpp
-+++ b/accessible/ipc/other/ProxyAccessible.cpp
-@@ -73,17 +73,17 @@ ProxyAccessible::RelationByType(Relation
-                                      &targetIDs);
- 
-   size_t targetCount = targetIDs.Length();
-   nsTArray<ProxyAccessible*> targets(targetCount);
-   for (size_t i = 0; i < targetCount; i++)
-     if (ProxyAccessible* proxy = mDoc->GetAccessible(targetIDs[i]))
-       targets.AppendElement(proxy);
- 
--  return std::move(targets);
-+  return targets;
- }
- 
- void
- ProxyAccessible::Relations(nsTArray<RelationType>* aTypes,
-                            nsTArray<nsTArray<ProxyAccessible*>>* aTargetSets)
-   const
- {
-   nsTArray<RelationTargets> ipcRelations;
-diff --git a/devtools/shared/heapsnapshot/DeserializedNode.cpp b/devtools/shared/heapsnapshot/DeserializedNode.cpp
---- a/devtools/shared/heapsnapshot/DeserializedNode.cpp
-+++ b/devtools/shared/heapsnapshot/DeserializedNode.cpp
-@@ -82,18 +82,18 @@ class DeserializedEdgeRange : public Edg
-   void settle() {
-     if (i >= node->edges.length()) {
-       front_ = nullptr;
-       return;
-     }
- 
-     auto& edge = node->edges[i];
-     auto referent = node->getEdgeReferent(edge);
--    currentEdge = std::move(Edge(edge.name ? NS_strdup(edge.name) : nullptr,
--                                     referent));
-+    currentEdge = Edge(edge.name
-+                ? NS_strdup(edge.name) : nullptr, referent);
-     front_ = &currentEdge;
-   }
- 
- public:
-   explicit DeserializedEdgeRange(DeserializedNode& node)
-     : node(&node)
-     , i(0)
-   {
-diff --git a/docshell/base/nsDocShell.cpp b/docshell/base/nsDocShell.cpp
---- a/docshell/base/nsDocShell.cpp
-+++ b/docshell/base/nsDocShell.cpp
-@@ -14878,20 +14878,20 @@ nsDocShell::NotifyJSRunToCompletionStart
-                                          const uint32_t aLineNumber,
-                                          JS::Handle<JS::Value> aAsyncStack,
-                                          const char* aAsyncCause)
- {
-   // If first start, mark interval start.
-   if (mJSRunToCompletionDepth == 0) {
-     RefPtr<TimelineConsumers> timelines = TimelineConsumers::Get();
-     if (timelines && timelines->HasConsumer(this)) {
--      timelines->AddMarkerForDocShell(this, std::move(
-+      timelines->AddMarkerForDocShell(this,
-         mozilla::MakeUnique<JavascriptTimelineMarker>(
-           aReason, aFunctionName, aFilename, aLineNumber, MarkerTracingType::START,
--          aAsyncStack, aAsyncCause)));
-+          aAsyncStack, aAsyncCause));
-     }
-   }
- 
-   mJSRunToCompletionDepth++;
- }
- 
- void
- nsDocShell::NotifyJSRunToCompletionStop()
-diff --git a/docshell/base/timeline/AutoRestyleTimelineMarker.cpp b/docshell/base/timeline/AutoRestyleTimelineMarker.cpp
---- a/docshell/base/timeline/AutoRestyleTimelineMarker.cpp
-+++ b/docshell/base/timeline/AutoRestyleTimelineMarker.cpp
-@@ -27,34 +27,34 @@ AutoRestyleTimelineMarker::AutoRestyleTi
-   }
- 
-   RefPtr<TimelineConsumers> timelines = TimelineConsumers::Get();
-   if (!timelines || !timelines->HasConsumer(aDocShell)) {
-     return;
-   }
- 
-   mDocShell = aDocShell;
--  timelines->AddMarkerForDocShell(mDocShell, std::move(
-+  timelines->AddMarkerForDocShell(mDocShell,
-     MakeUnique<RestyleTimelineMarker>(
-       mIsAnimationOnly,
--      MarkerTracingType::START)));
-+      MarkerTracingType::START));
- }
- 
- AutoRestyleTimelineMarker::~AutoRestyleTimelineMarker()
- {
-   MOZ_ASSERT(NS_IsMainThread());
- 
-   if (!mDocShell) {
-     return;
-   }
- 
-   RefPtr<TimelineConsumers> timelines = TimelineConsumers::Get();
-   if (!timelines || !timelines->HasConsumer(mDocShell)) {
-     return;
-   }
- 
--  timelines->AddMarkerForDocShell(mDocShell, std::move(
-+  timelines->AddMarkerForDocShell(mDocShell,
-     MakeUnique<RestyleTimelineMarker>(
-       mIsAnimationOnly,
--      MarkerTracingType::END)));
-+      MarkerTracingType::END));
- }
- 
- } // namespace mozilla
-diff --git a/docshell/base/timeline/TimelineConsumers.cpp b/docshell/base/timeline/TimelineConsumers.cpp
---- a/docshell/base/timeline/TimelineConsumers.cpp
-+++ b/docshell/base/timeline/TimelineConsumers.cpp
-@@ -180,30 +180,30 @@ TimelineConsumers::IsEmpty()
- void
- TimelineConsumers::AddMarkerForDocShell(nsDocShell* aDocShell,
-                                         const char* aName,
-                                         MarkerTracingType aTracingType,
-                                         MarkerStackRequest aStackRequest)
- {
-   MOZ_ASSERT(NS_IsMainThread());
-   if (HasConsumer(aDocShell)) {
--    aDocShell->mObserved->AddMarker(std::move(MakeUnique<TimelineMarker>(aName, aTracingType, aStackRequest)));
-+    aDocShell->mObserved->AddMarker(MakeUnique<TimelineMarker>(aName, aTracingType, aStackRequest));
-   }
- }
- 
- void
- TimelineConsumers::AddMarkerForDocShell(nsDocShell* aDocShell,
-                                         const char* aName,
-                                         const TimeStamp& aTime,
-                                         MarkerTracingType aTracingType,
-                                         MarkerStackRequest aStackRequest)
- {
-   MOZ_ASSERT(NS_IsMainThread());
-   if (HasConsumer(aDocShell)) {
--    aDocShell->mObserved->AddMarker(std::move(MakeUnique<TimelineMarker>(aName, aTime, aTracingType, aStackRequest)));
-+    aDocShell->mObserved->AddMarker(MakeUnique<TimelineMarker>(aName, aTime, aTracingType, aStackRequest));
-   }
- }
- 
- void
- TimelineConsumers::AddMarkerForDocShell(nsDocShell* aDocShell,
-                                         UniquePtr<AbstractTimelineMarker>&& aMarker)
- {
-   MOZ_ASSERT(NS_IsMainThread());
-diff --git a/dom/animation/EffectSet.h b/dom/animation/EffectSet.h
---- a/dom/animation/EffectSet.h
-+++ b/dom/animation/EffectSet.h
-@@ -88,17 +88,17 @@ public:
-   // This allows us to avoid exposing mEffects directly and saves the
-   // caller from having to dereference hashtable iterators using
-   // the rather complicated: iter.Get()->GetKey().
-   class Iterator
-   {
-   public:
-     explicit Iterator(EffectSet& aEffectSet)
-       : mEffectSet(aEffectSet)
--      , mHashIterator(std::move(aEffectSet.mEffects.Iter()))
-+      , mHashIterator(aEffectSet.mEffects.Iter())
-       , mIsEndIterator(false)
-     {
- #ifdef DEBUG
-       mEffectSet.mActiveIterators++;
- #endif
-     }
- 
-     Iterator(Iterator&& aOther)
-diff --git a/dom/base/CustomElementRegistry.cpp b/dom/base/CustomElementRegistry.cpp
---- a/dom/base/CustomElementRegistry.cpp
-+++ b/dom/base/CustomElementRegistry.cpp
-@@ -402,17 +402,17 @@ CustomElementRegistry::CreateCustomEleme
- 
-   if (aArgs) {
-     callback->SetArgs(*aArgs);
-   }
- 
-   if (aAdoptedCallbackArgs) {
-     callback->SetAdoptedCallbackArgs(*aAdoptedCallbackArgs);
-   }
--  return std::move(callback);
-+  return callback;
- }
- 
- /* static */ void
- CustomElementRegistry::EnqueueLifecycleCallback(nsIDocument::ElementCallbackType aType,
-                                                 Element* aCustomElement,
-                                                 LifecycleCallbackArgs* aArgs,
-                                                 LifecycleAdoptedCallbackArgs* aAdoptedCallbackArgs,
-                                                 CustomElementDefinition* aDefinition)
-diff --git a/dom/base/nsContentPermissionHelper.cpp b/dom/base/nsContentPermissionHelper.cpp
---- a/dom/base/nsContentPermissionHelper.cpp
-+++ b/dom/base/nsContentPermissionHelper.cpp
-@@ -406,17 +406,17 @@ nsContentPermissionUtils::GetContentPerm
- {
-   nsTArray<PContentPermissionRequestParent*> parentArray;
-   for (auto& it : ContentPermissionRequestParentMap()) {
-     if (it.second == aTabId) {
-       parentArray.AppendElement(it.first);
-     }
-   }
- 
--  return std::move(parentArray);
-+  return parentArray;
- }
- 
- /* static */ void
- nsContentPermissionUtils::NotifyRemoveContentPermissionRequestParent(
-   PContentPermissionRequestParent* aParent)
- {
-   auto it = ContentPermissionRequestParentMap().find(aParent);
-   MOZ_ASSERT(it != ContentPermissionRequestParentMap().end());
-@@ -429,17 +429,17 @@ nsContentPermissionUtils::GetContentPerm
- {
-   nsTArray<PContentPermissionRequestChild*> childArray;
-   for (auto& it : ContentPermissionRequestChildMap()) {
-     if (it.second == aTabId) {
-       childArray.AppendElement(it.first);
-     }
-   }
- 
--  return std::move(childArray);
-+  return childArray;
- }
- 
- /* static */ void
- nsContentPermissionUtils::NotifyRemoveContentPermissionRequestChild(
-   PContentPermissionRequestChild* aChild)
- {
-   auto it = ContentPermissionRequestChildMap().find(aChild);
-   MOZ_ASSERT(it != ContentPermissionRequestChildMap().end());
-diff --git a/dom/base/nsGlobalWindow.cpp b/dom/base/nsGlobalWindow.cpp
---- a/dom/base/nsGlobalWindow.cpp
-+++ b/dom/base/nsGlobalWindow.cpp
-@@ -11616,17 +11616,17 @@ nsGlobalWindow::ShowSlowScriptDialog(con
-   auto getString = [&] (const char* name,
-                         nsContentUtils::PropertiesFile propFile = nsContentUtils::eDOM_PROPERTIES) {
-     nsAutoString result;
-     nsresult rv = nsContentUtils::GetLocalizedString(
-       propFile, name, result);
- 
-     // GetStringFromName can return NS_OK and still give nullptr string
-     failed = failed || NS_FAILED(rv) || result.IsEmpty();
--    return std::move(result);
-+    return result;
-   };
- 
-   bool isAddonScript = !aAddonId.IsEmpty();
-   bool showDebugButton = debugCallback && !isAddonScript;
- 
-   // Get localizable strings
- 
-   nsAutoString title, checkboxMsg, debugButton, msg;
-diff --git a/dom/canvas/ImageBitmap.cpp b/dom/canvas/ImageBitmap.cpp
---- a/dom/canvas/ImageBitmap.cpp
-+++ b/dom/canvas/ImageBitmap.cpp
-@@ -791,17 +791,17 @@ ImageBitmap::ToCloneData() const
-   UniquePtr<ImageBitmapCloneData> result(new ImageBitmapCloneData());
-   result->mPictureRect = mPictureRect;
-   result->mAlphaType = mAlphaType;
-   result->mIsCroppingAreaOutSideOfSourceImage = mIsCroppingAreaOutSideOfSourceImage;
-   RefPtr<SourceSurface> surface = mData->GetAsSourceSurface();
-   result->mSurface = surface->GetDataSurface();
-   MOZ_ASSERT(result->mSurface);
- 
--  return std::move(result);
-+  return result;
- }
- 
- /* static */ already_AddRefed<ImageBitmap>
- ImageBitmap::CreateFromCloneData(nsIGlobalObject* aGlobal,
-                                  ImageBitmapCloneData* aData)
- {
-   RefPtr<layers::Image> data = CreateImageFromSurface(aData->mSurface);
- 
-diff --git a/dom/canvas/WebGLFormats.cpp b/dom/canvas/WebGLFormats.cpp
---- a/dom/canvas/WebGLFormats.cpp
-+++ b/dom/canvas/WebGLFormats.cpp
-@@ -822,17 +822,17 @@ FormatUsageAuthority::CreateForWebGL1(gl
-     ptr->AllowRBFormat(LOCAL_GL_DEPTH_STENCIL,
-                        ptr->GetUsage(EffectiveFormat::DEPTH24_STENCIL8));
- 
-     ////////////////////////////////////////////////////////////////////////////
- 
-     if (!AddUnsizedFormats(ptr, gl))
-         return nullptr;
- 
--    return std::move(ret);
-+    return ret;
- }
- 
- UniquePtr<FormatUsageAuthority>
- FormatUsageAuthority::CreateForWebGL2(gl::GLContext* gl)
- {
-     UniquePtr<FormatUsageAuthority> ret(new FormatUsageAuthority);
-     const auto ptr = ret.get();
- 
-@@ -1057,17 +1057,17 @@ FormatUsageAuthority::CreateForWebGL2(gl
-         AddSimpleUnsized(ptr, LOCAL_GL_RGB , LOCAL_GL_FLOAT, EffectiveFormat::RGB32F );
- 
-         AddSimpleUnsized(ptr, LOCAL_GL_RGBA, LOCAL_GL_HALF_FLOAT_OES, EffectiveFormat::RGBA16F);
-         AddSimpleUnsized(ptr, LOCAL_GL_RGB , LOCAL_GL_HALF_FLOAT_OES, EffectiveFormat::RGB16F );
-     }
- 
-     ////////////////////////////////////
- 
--    return std::move(ret);
-+    return ret;
- }
- 
- //////////////////////////////////////////////////////////////////////////////////////////
- 
- void
- FormatUsageAuthority::AddTexUnpack(FormatUsageInfo* usage, const PackingInfo& pi,
-                                    const DriverUnpackInfo& dui)
- {
-diff --git a/dom/canvas/WebGLTextureUpload.cpp b/dom/canvas/WebGLTextureUpload.cpp
---- a/dom/canvas/WebGLTextureUpload.cpp
-+++ b/dom/canvas/WebGLTextureUpload.cpp
-@@ -222,17 +222,17 @@ FromPboOffset(WebGLContext* webgl, const
-                                              isClientData, ptr, availBufferBytes);
- }
- 
- static UniquePtr<webgl::TexUnpackBlob>
- FromImageBitmap(WebGLContext* webgl, const char* funcName, TexImageTarget target,
-               uint32_t width, uint32_t height, uint32_t depth,
-               const dom::ImageBitmap& imageBitmap)
- {
--    UniquePtr<dom::ImageBitmapCloneData> cloneData = std::move(imageBitmap.ToCloneData());
-+    UniquePtr<dom::ImageBitmapCloneData> cloneData = imageBitmap.ToCloneData();
-     const RefPtr<gfx::DataSourceSurface> surf = cloneData->mSurface;
- 
-     if (!width) {
-         width = surf->GetSize().width;
-     }
- 
-     if (!height) {
-         height = surf->GetSize().height;
-@@ -451,17 +451,17 @@ ValidateTexOrSubImage(WebGLContext* webg
-     if (!ValidateViewType(webgl, funcName, pi.type, src))
-         return nullptr;
- 
-     auto blob = webgl->From(funcName, target, rawWidth, rawHeight, rawDepth, border, src,
-                             scopedArr);
-     if (!blob || !blob->Validate(webgl, funcName, pi))
-         return nullptr;
- 
--    return std::move(blob);
-+    return blob;
- }
- 
- void
- WebGLTexture::TexImage(const char* funcName, TexImageTarget target, GLint level,
-                        GLenum internalFormat, GLsizei width, GLsizei height,
-                        GLsizei depth, GLint border, const webgl::PackingInfo& pi,
-                        const TexImageSource& src)
- {
-diff --git a/dom/clients/manager/ClientManager.cpp b/dom/clients/manager/ClientManager.cpp
---- a/dom/clients/manager/ClientManager.cpp
-+++ b/dom/clients/manager/ClientManager.cpp
-@@ -112,17 +112,17 @@ ClientManager::CreateSourceInternal(Clie
-   if (NS_WARN_IF(NS_FAILED(rv))) {
-     return nullptr;
-   }
- 
-   ClientSourceConstructorArgs args(id, aType, aPrincipal, TimeStamp::Now());
-   UniquePtr<ClientSource> source(new ClientSource(this, aEventTarget, args));
-   source->Activate(GetActor());
- 
--  return std::move(source);
-+  return source;
- }
- 
- already_AddRefed<ClientHandle>
- ClientManager::CreateHandleInternal(const ClientInfo& aClientInfo,
-                                     nsISerialEventTarget* aSerialEventTarget)
- {
-   NS_ASSERT_OWNINGTHREAD(ClientManager);
-   MOZ_DIAGNOSTIC_ASSERT(aSerialEventTarget);
-diff --git a/dom/console/Console.cpp b/dom/console/Console.cpp
---- a/dom/console/Console.cpp
-+++ b/dom/console/Console.cpp
-@@ -2459,36 +2459,36 @@ Console::MonotonicTimer(JSContext* aCx, 
-         return false;
-       }
- 
-       nsAutoJSString key;
-       if (!key.init(aCx, jsString)) {
-         return false;
-       }
- 
--      timelines->AddMarkerForDocShell(docShell, std::move(
--        MakeUnique<TimestampTimelineMarker>(key)));
-+      timelines->AddMarkerForDocShell(docShell,
-+        MakeUnique<TimestampTimelineMarker>(key));
-     }
-     // For `console.time(foo)` and `console.timeEnd(foo)`.
-     else if (isTimelineRecording && aData.Length() == 1) {
-       JS::Rooted<JS::Value> value(aCx, aData[0]);
-       JS::Rooted<JSString*> jsString(aCx, JS::ToString(aCx, value));
-       if (!jsString) {
-         return false;
-       }
- 
-       nsAutoJSString key;
-       if (!key.init(aCx, jsString)) {
-         return false;
-       }
- 
--      timelines->AddMarkerForDocShell(docShell, std::move(
-+      timelines->AddMarkerForDocShell(docShell,
-         MakeUnique<ConsoleTimelineMarker>(
-           key, aMethodName == MethodTime ? MarkerTracingType::START
--                                         : MarkerTracingType::END)));
-+                                         : MarkerTracingType::END));
-     }
- 
-     return true;
-   }
- 
-   if (NS_IsMainThread()) {
-     double duration = (TimeStamp::Now() - mCreationTimeStamp).ToMilliseconds();
- 
-diff --git a/dom/events/EventListenerManager.cpp b/dom/events/EventListenerManager.cpp
---- a/dom/events/EventListenerManager.cpp
-+++ b/dom/events/EventListenerManager.cpp
-@@ -1228,19 +1228,19 @@ EventListenerManager::HandleEventInterna
-               docShell = nsContentUtils::GetDocShellForEventTarget(mTarget);
-               if (docShell) {
-                 if (timelines && timelines->HasConsumer(docShell)) {
-                   needsEndEventMarker = true;
-                   nsAutoString typeStr;
-                   (*aDOMEvent)->GetType(typeStr);
-                   uint16_t phase;
-                   (*aDOMEvent)->GetEventPhase(&phase);
--                  timelines->AddMarkerForDocShell(docShell, std::move(
-+                  timelines->AddMarkerForDocShell(docShell,
-                     MakeUnique<EventTimelineMarker>(
--                      typeStr, phase, MarkerTracingType::START)));
-+                      typeStr, phase, MarkerTracingType::START));
-                 }
-               }
-             }
- 
-             aEvent->mFlags.mInPassiveListener = listener->mFlags.mPassive;
-             Maybe<Listener> listenerHolder;
-             if (listener->mFlags.mOnce) {
-               // Move the listener to the stack before handling the event.
-diff --git a/dom/file/nsHostObjectProtocolHandler.cpp b/dom/file/nsHostObjectProtocolHandler.cpp
---- a/dom/file/nsHostObjectProtocolHandler.cpp
-+++ b/dom/file/nsHostObjectProtocolHandler.cpp
-@@ -569,17 +569,17 @@ private:
-   {
-     nsCOMPtr<nsIAsyncShutdownService> svc = services::GetAsyncShutdown();
-     NS_ENSURE_TRUE(!!svc, nullptr);
- 
-     nsCOMPtr<nsIAsyncShutdownClient> phase;
-     nsresult rv = svc->GetXpcomWillShutdown(getter_AddRefs(phase));
-     NS_ENSURE_SUCCESS(rv, nullptr);
- 
--    return std::move(phase);
-+    return phase;
-   }
- 
-   nsCString mURI;
-   bool mBroadcastToOtherProcesses;
- 
-   nsCOMPtr<nsITimer> mTimer;
- };
- 
-diff --git a/dom/geolocation/nsGeolocation.cpp b/dom/geolocation/nsGeolocation.cpp
---- a/dom/geolocation/nsGeolocation.cpp
-+++ b/dom/geolocation/nsGeolocation.cpp
-@@ -1221,17 +1221,17 @@ void
- Geolocation::GetCurrentPosition(PositionCallback& aCallback,
-                                 PositionErrorCallback* aErrorCallback,
-                                 const PositionOptions& aOptions,
-                                 CallerType aCallerType,
-                                 ErrorResult& aRv)
- {
-   nsresult rv = GetCurrentPosition(GeoPositionCallback(&aCallback),
-                                    GeoPositionErrorCallback(aErrorCallback),
--                                   std::move(CreatePositionOptionsCopy(aOptions)),
-+                                   CreatePositionOptionsCopy(aOptions),
-                                    aCallerType);
- 
-   if (NS_FAILED(rv)) {
-     aRv.Throw(rv);
-   }
- }
- 
- nsresult
-@@ -1289,17 +1289,17 @@ Geolocation::WatchPosition(PositionCallb
-                            PositionErrorCallback* aErrorCallback,
-                            const PositionOptions& aOptions,
-                            CallerType aCallerType,
-                            ErrorResult& aRv)
- {
-   int32_t ret = 0;
-   nsresult rv = WatchPosition(GeoPositionCallback(&aCallback),
-                               GeoPositionErrorCallback(aErrorCallback),
--                              std::move(CreatePositionOptionsCopy(aOptions)),
-+                              CreatePositionOptionsCopy(aOptions),
-                               aCallerType,
-                               &ret);
- 
-   if (NS_FAILED(rv)) {
-     aRv.Throw(rv);
-   }
- 
-   return ret;
-diff --git a/dom/ipc/ContentParent.cpp b/dom/ipc/ContentParent.cpp
---- a/dom/ipc/ContentParent.cpp
-+++ b/dom/ipc/ContentParent.cpp
-@@ -4251,18 +4251,18 @@ ContentParent::RecvNotifyTabDestroying(c
- {
-   NotifyTabDestroying(aTabId, aCpId);
-   return IPC_OK();
- }
- 
- nsTArray<TabContext>
- ContentParent::GetManagedTabContext()
- {
--  return std::move(ContentProcessManager::GetSingleton()->
--          GetTabContextByContentProcess(this->ChildID()));
-+  return ContentProcessManager::GetSingleton()->
-+    GetTabContextByContentProcess(this->ChildID());
- }
- 
- mozilla::docshell::POfflineCacheUpdateParent*
- ContentParent::AllocPOfflineCacheUpdateParent(const URIParams& aManifestURI,
-                                               const URIParams& aDocumentURI,
-                                               const PrincipalInfo& aLoadingPrincipalInfo,
-                                               const bool& aStickDocument)
- {
-diff --git a/dom/ipc/ContentProcessManager.cpp b/dom/ipc/ContentProcessManager.cpp
---- a/dom/ipc/ContentProcessManager.cpp
-+++ b/dom/ipc/ContentProcessManager.cpp
-@@ -120,26 +120,26 @@ nsTArray<ContentParentId>
- ContentProcessManager::GetAllChildProcessById(const ContentParentId& aParentCpId)
- {
-   MOZ_ASSERT(NS_IsMainThread());
- 
-   nsTArray<ContentParentId> cpIdArray;
-   auto iter = mContentParentMap.find(aParentCpId);
-   if (NS_WARN_IF(iter == mContentParentMap.end())) {
-     ASSERT_UNLESS_FUZZING();
--    return std::move(cpIdArray);
-+    return cpIdArray;
-   }
- 
-   for (auto cpIter = iter->second.mChildrenCpId.begin();
-        cpIter != iter->second.mChildrenCpId.end();
-        ++cpIter) {
-     cpIdArray.AppendElement(*cpIter);
-   }
- 
--  return std::move(cpIdArray);
-+  return cpIdArray;
- }
- 
- bool
- ContentProcessManager::RegisterRemoteFrame(const TabId& aTabId,
-                                            const ContentParentId& aOpenerCpId,
-                                            const TabId& aOpenerTabId,
-                                            const IPCTabContext& aContext,
-                                            const ContentParentId& aChildCpId)
-@@ -231,26 +231,26 @@ nsTArray<TabContext>
- ContentProcessManager::GetTabContextByContentProcess(const ContentParentId& aChildCpId)
- {
-   MOZ_ASSERT(NS_IsMainThread());
- 
-   nsTArray<TabContext> tabContextArray;
-   auto iter = mContentParentMap.find(aChildCpId);
-   if (NS_WARN_IF(iter == mContentParentMap.end())) {
-     ASSERT_UNLESS_FUZZING();
--    return std::move(tabContextArray);
-+    return tabContextArray;
-   }
- 
-   for (auto remoteFrameIter = iter->second.mRemoteFrames.begin();
-        remoteFrameIter != iter->second.mRemoteFrames.end();
-        ++remoteFrameIter) {
-     tabContextArray.AppendElement(remoteFrameIter->second.mContext);
-   }
- 
--  return std::move(tabContextArray);
-+  return tabContextArray;
- }
- 
- bool
- ContentProcessManager::GetRemoteFrameOpenerTabId(const ContentParentId& aChildCpId,
-                                                  const TabId& aChildTabId,
-                                                  /*out*/ContentParentId* aOpenerCpId,
-                                                  /*out*/TabId* aOpenerTabId)
- {
-@@ -332,26 +332,26 @@ nsTArray<TabId>
- ContentProcessManager::GetTabParentsByProcessId(const ContentParentId& aChildCpId)
- {
-   MOZ_ASSERT(NS_IsMainThread());
- 
-   nsTArray<TabId> tabIdList;
-   auto iter = mContentParentMap.find(aChildCpId);
-   if (NS_WARN_IF(iter == mContentParentMap.end())) {
-     ASSERT_UNLESS_FUZZING();
--    return std::move(tabIdList);
-+    return tabIdList;
-   }
- 
-   for (auto remoteFrameIter = iter->second.mRemoteFrames.begin();
-       remoteFrameIter != iter->second.mRemoteFrames.end();
-       ++remoteFrameIter) {
-     tabIdList.AppendElement(remoteFrameIter->first);
-   }
- 
--  return std::move(tabIdList);
-+  return tabIdList;
- }
- 
- uint32_t
- ContentProcessManager::GetTabParentCountByProcessId(const ContentParentId& aChildCpId)
- {
-   MOZ_ASSERT(NS_IsMainThread());
- 
-   auto iter = mContentParentMap.find(aChildCpId);
-diff --git a/dom/media/MediaStreamGraph.cpp b/dom/media/MediaStreamGraph.cpp
---- a/dom/media/MediaStreamGraph.cpp
-+++ b/dom/media/MediaStreamGraph.cpp
-@@ -2594,17 +2594,17 @@ MediaStream::SetTrackEnabledImpl(TrackID
-     }
-   } else {
-     for (const DisabledTrack& t : mDisabledTracks) {
-       if (aTrackID == t.mTrackID) {
-         NS_ERROR("Changing disabled track mode for a track is not allowed");
-         return;
-       }
-     }
--    mDisabledTracks.AppendElement(std::move(DisabledTrack(aTrackID, aMode)));
-+    mDisabledTracks.AppendElement(DisabledTrack(aTrackID, aMode));
-   }
- }
- 
- DisabledTrackMode
- MediaStream::GetDisabledTrackMode(TrackID aTrackID)
- {
-   for (const DisabledTrack& t : mDisabledTracks) {
-     if (t.mTrackID == aTrackID) {
-diff --git a/dom/media/eme/MediaKeySystemAccess.cpp b/dom/media/eme/MediaKeySystemAccess.cpp
---- a/dom/media/eme/MediaKeySystemAccess.cpp
-+++ b/dom/media/eme/MediaKeySystemAccess.cpp
-@@ -757,17 +757,17 @@ GetSupportedCapabilities(
-     if (!supportedCapabilities.AppendElement(capabilities, mozilla::fallible)) {
-       NS_WARNING("GetSupportedCapabilities: Malloc failure");
-       return Sequence<MediaKeySystemMediaCapability>();
-     }
- 
-     // Note: omitting steps 3.13.2, our robustness is not sophisticated enough
-     // to require considering all requirements together.
-   }
--  return std::move(supportedCapabilities);
-+  return supportedCapabilities;
- }
- 
- // "Get Supported Configuration and Consent" algorithm, steps 4-7 for
- // distinctive identifier, and steps 8-11 for persistent state. The steps
- // are the same for both requirements/features, so we factor them out into
- // a single function.
- static bool
- CheckRequirement(const MediaKeysRequirement aRequirement,
-diff --git a/dom/media/fake-cdm/cdm-test-decryptor.cpp b/dom/media/fake-cdm/cdm-test-decryptor.cpp
---- a/dom/media/fake-cdm/cdm-test-decryptor.cpp
-+++ b/dom/media/fake-cdm/cdm-test-decryptor.cpp
-@@ -106,17 +106,17 @@ Tokenize(const std::string& aString)
- static const string TruncateRecordId = "truncate-record-id";
- static const string TruncateRecordData = "I will soon be truncated";
- 
- template<class Continuation>
- class WriteRecordSuccessTask {
- public:
-   WriteRecordSuccessTask(string aId, Continuation aThen)
-     : mId(aId)
--    , mThen(move(aThen))
-+    , mThen(std::move(aThen))
-   {}
- 
-   void operator()()
-   {
-     ReadRecord(FakeDecryptor::sInstance->mHost, mId, mThen);
-   }
- 
-   string mId;
-diff --git a/dom/media/fake-cdm/cdm-test-storage.cpp b/dom/media/fake-cdm/cdm-test-storage.cpp
---- a/dom/media/fake-cdm/cdm-test-storage.cpp
-+++ b/dom/media/fake-cdm/cdm-test-storage.cpp
-@@ -14,18 +14,18 @@ using namespace std;
- 
- class WriteRecordClient : public FileIOClient
- {
- public:
-   WriteRecordClient(function<void()>&& aOnSuccess,
-                     function<void()>&& aOnFailure,
-                     const uint8_t* aData,
-                     uint32_t aDataSize)
--    : mOnSuccess(move(aOnSuccess))
--    , mOnFailure(move(aOnFailure))
-+    : mOnSuccess(std::move(aOnSuccess))
-+    , mOnFailure(std::move(aOnFailure))
-   {
-     mData.insert(mData.end(), aData, aData + aDataSize);
-   }
- 
-   void OnOpenComplete(Status aStatus) override
-   {
-     // If we hit an error, fail.
-     if (aStatus != Status::kSuccess) {
-@@ -85,43 +85,43 @@ void
- WriteRecord(Host_9* aHost,
-             const std::string& aRecordName,
-             const uint8_t* aData,
-             uint32_t aNumBytes,
-             function<void()>&& aOnSuccess,
-             function<void()>&& aOnFailure)
- {
-   // client will be delete in WriteRecordClient::Done
--  WriteRecordClient* client = new WriteRecordClient(move(aOnSuccess),
--                                                    move(aOnFailure),
-+  WriteRecordClient* client = new WriteRecordClient(std::move(aOnSuccess),
-+                                                    std::move(aOnFailure),
-                                                     aData,
-                                                     aNumBytes);
-   client->Do(aRecordName, aHost);
- }
- 
- void
- WriteRecord(Host_9* aHost,
-             const std::string& aRecordName,
-             const std::string& aData,
-             function<void()> &&aOnSuccess,
-             function<void()>&& aOnFailure)
- {
-   return WriteRecord(aHost,
-                      aRecordName,
-                      (const uint8_t*)aData.c_str(),
-                      aData.size(),
--                     move(aOnSuccess),
--                     move(aOnFailure));
-+                     std::move(aOnSuccess),
-+                     std::move(aOnFailure));
- }
- 
- class ReadRecordClient : public FileIOClient
- {
- public:
-   explicit ReadRecordClient(function<void(bool, const uint8_t*, uint32_t)>&& aOnReadComplete)
--    : mOnReadComplete(move(aOnReadComplete))
-+    : mOnReadComplete(std::move(aOnReadComplete))
-   {
-   }
- 
-   void OnOpenComplete(Status aStatus) override
-   {
-     auto err = aStatus;
-     if (aStatus != Status::kSuccess) {
-       Done(err, reinterpret_cast<const uint8_t*>(""), 0);
-@@ -176,25 +176,25 @@ private:
- };
- 
- void
- ReadRecord(Host_9* aHost,
-            const std::string& aRecordName,
-            function<void(bool, const uint8_t*, uint32_t)>&& aOnReadComplete)
- {
-   // client will be delete in ReadRecordClient::Done
--  ReadRecordClient* client = new ReadRecordClient(move(aOnReadComplete));
-+  ReadRecordClient* client = new ReadRecordClient(std::move(aOnReadComplete));
-   client->Do(aRecordName, aHost);
- }
- 
- class OpenRecordClient : public FileIOClient
- {
- public:
-   explicit OpenRecordClient(function<void(bool)>&& aOpenComplete)
--    : mOpenComplete(move(aOpenComplete))
-+    : mOpenComplete(std::move(aOpenComplete))
-   {
-   }
- 
-   void OnOpenComplete(Status aStatus) override
-   {
-     Done(aStatus);
-   }
- 
-@@ -242,11 +242,11 @@ private:
- };
- 
- void
- OpenRecord(Host_9* aHost,
-            const std::string& aRecordName,
-            function<void(bool)>&& aOpenComplete)
- {
-   // client will be delete in OpenRecordClient::Done
--  OpenRecordClient* client = new OpenRecordClient(move(aOpenComplete));
-+  OpenRecordClient* client = new OpenRecordClient(std::move(aOpenComplete));
-   client->Do(aRecordName, aHost);
- }
-diff --git a/dom/media/gmp/ChromiumCDMParent.cpp b/dom/media/gmp/ChromiumCDMParent.cpp
---- a/dom/media/gmp/ChromiumCDMParent.cpp
-+++ b/dom/media/gmp/ChromiumCDMParent.cpp
-@@ -1073,17 +1073,17 @@ ChromiumCDMParent::RecvDrainComplete()
- {
-   if (mIsShutdown) {
-     MOZ_ASSERT(mDecodePromise.IsEmpty());
-     return IPC_OK();
-   }
- 
-   MediaDataDecoder::DecodedData samples;
-   while (!mReorderQueue.IsEmpty()) {
--    samples.AppendElement(std::move(mReorderQueue.Pop()));
-+    samples.AppendElement(mReorderQueue.Pop());
-   }
- 
-   mDecodePromise.ResolveIfExists(std::move(samples), __func__);
-   return IPC_OK();
- }
- RefPtr<ShutdownPromise>
- ChromiumCDMParent::ShutdownVideoDecoder()
- {
-diff --git a/dom/media/gmp/GMPParent.cpp b/dom/media/gmp/GMPParent.cpp
---- a/dom/media/gmp/GMPParent.cpp
-+++ b/dom/media/gmp/GMPParent.cpp
-@@ -946,17 +946,17 @@ GMPParent::GetGMPContentParent(UniquePtr
-     }
-   }
- }
- 
- already_AddRefed<GMPContentParent>
- GMPParent::ForgetGMPContentParent()
- {
-   MOZ_ASSERT(mGetContentParentPromises.IsEmpty());
--  return std::move(mGMPContentParent.forget());
-+  return mGMPContentParent.forget();
- }
- 
- bool
- GMPParent::EnsureProcessLoaded(base::ProcessId* aID)
- {
-   if (!EnsureProcessLoaded()) {
-     return false;
-   }
-diff --git a/dom/media/gmp/GMPServiceParent.cpp b/dom/media/gmp/GMPServiceParent.cpp
---- a/dom/media/gmp/GMPServiceParent.cpp
-+++ b/dom/media/gmp/GMPServiceParent.cpp
-@@ -1879,17 +1879,17 @@ GMPServiceParent::ActorDestroy(ActorDest
-   // Make sure the IPC channel is closed before destroying mToDelete.
-   MonitorAutoLock lock(monitor);
-   RefPtr<Runnable> task = NewNonOwningRunnableMethod<Monitor*, bool*>(
-     "gmp::GMPServiceParent::CloseTransport",
-     this,
-     &GMPServiceParent::CloseTransport,
-     &monitor,
-     &completed);
--  XRE_GetIOMessageLoop()->PostTask(std::move(task.forget()));
-+  XRE_GetIOMessageLoop()->PostTask(task.forget());
- 
-   while (!completed) {
-     lock.Wait();
-   }
- 
-   // Dispatch a task to the current thread to ensure we don't delete the
-   // GMPServiceParent until the current calling context is finished with
-   // the object.
-diff --git a/dom/media/gmp/GMPStorageChild.cpp b/dom/media/gmp/GMPStorageChild.cpp
---- a/dom/media/gmp/GMPStorageChild.cpp
-+++ b/dom/media/gmp/GMPStorageChild.cpp
-@@ -21,17 +21,17 @@
-     } \
-   } while(false)
- 
- static nsTArray<uint8_t>
- ToArray(const uint8_t* aData, uint32_t aDataSize)
- {
-   nsTArray<uint8_t> data;
-   data.AppendElements(aData, aDataSize);
--  return std::move(data);
-+  return data;
- }
- 
- namespace mozilla {
- namespace gmp {
- 
- GMPRecordImpl::GMPRecordImpl(GMPStorageChild* aOwner,
-                              const nsCString& aName,
-                              GMPRecordClient* aClient)
-diff --git a/dom/media/ipc/VideoDecoderManagerParent.cpp b/dom/media/ipc/VideoDecoderManagerParent.cpp
---- a/dom/media/ipc/VideoDecoderManagerParent.cpp
-+++ b/dom/media/ipc/VideoDecoderManagerParent.cpp
-@@ -33,17 +33,17 @@ using namespace gfx;
- SurfaceDescriptorGPUVideo
- VideoDecoderManagerParent::StoreImage(Image* aImage, TextureClient* aTexture)
- {
-   SurfaceDescriptorGPUVideo ret;
-   aTexture->GPUVideoDesc(&ret);
- 
-   mImageMap[ret.handle()] = aImage;
-   mTextureMap[ret.handle()] = aTexture;
--  return std::move(ret);
-+  return ret;
- }
- 
- StaticRefPtr<nsIThread> sVideoDecoderManagerThread;
- StaticRefPtr<TaskQueue> sManagerTaskQueue;
- 
- class VideoDecoderManagerThreadHolder
- {
-   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoDecoderManagerThreadHolder)
-diff --git a/dom/media/platforms/apple/AppleVTDecoder.cpp b/dom/media/platforms/apple/AppleVTDecoder.cpp
---- a/dom/media/platforms/apple/AppleVTDecoder.cpp
-+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
-@@ -248,17 +248,17 @@ AppleVTDecoder::ProcessDrain()
-   AssertOnTaskQueueThread();
-   nsresult rv = WaitForAsynchronousFrames();
-   if (NS_FAILED(rv)) {
-     LOG("AppleVTDecoder::Drain failed waiting for platform decoder");
-   }
-   MonitorAutoLock mon(mMonitor);
-   DecodedData samples;
-   while (!mReorderQueue.IsEmpty()) {
--    samples.AppendElement(std::move(mReorderQueue.Pop()));
-+    samples.AppendElement(mReorderQueue.Pop());
-   }
-   return DecodePromise::CreateAndResolve(std::move(samples), __func__);
- }
- 
- AppleVTDecoder::AppleFrameRef*
- AppleVTDecoder::CreateAppleFrameRef(const MediaRawData* aSample)
- {
-   MOZ_ASSERT(aSample);
-diff --git a/dom/media/platforms/omx/OmxPlatformLayer.cpp b/dom/media/platforms/omx/OmxPlatformLayer.cpp
---- a/dom/media/platforms/omx/OmxPlatformLayer.cpp
-+++ b/dom/media/platforms/omx/OmxPlatformLayer.cpp
-@@ -172,17 +172,17 @@ ConfigForMime(const nsACString& aMimeTyp
-                 aMimeType.EqualsLiteral("audio/mpeg")) {
-       conf.reset(new OmxMp3Config());
-     } else if (aMimeType.EqualsLiteral("audio/3gpp")) {
-       conf.reset(new OmxAmrConfig<OmxAmrSampleRate::kNarrowBand>());
-     } else if (aMimeType.EqualsLiteral("audio/amr-wb")) {
-       conf.reset(new OmxAmrConfig<OmxAmrSampleRate::kWideBand>());
-     }
-   }
--  return std::move(conf);
-+  return conf;
- }
- 
- // There should be a better way to calculate it.
- #define MIN_VIDEO_INPUT_BUFFER_SIZE 64 * 1024
- 
- class OmxCommonVideoConfig : public OmxVideoConfig
- {
- public:
-@@ -230,17 +230,17 @@ template<>
- UniquePtr<OmxVideoConfig>
- ConfigForMime(const nsACString& aMimeType)
- {
-   UniquePtr<OmxVideoConfig> conf;
- 
-   if (OmxPlatformLayer::SupportsMimeType(aMimeType)) {
-     conf.reset(new OmxCommonVideoConfig());
-   }
--  return std::move(conf);
-+  return conf;
- }
- 
- OMX_ERRORTYPE
- OmxPlatformLayer::Config()
- {
-   MOZ_ASSERT(mInfo);
- 
-   OMX_PORT_PARAM_TYPE portParam;
-diff --git a/dom/u2f/U2F.cpp b/dom/u2f/U2F.cpp
---- a/dom/u2f/U2F.cpp
-+++ b/dom/u2f/U2F.cpp
-@@ -345,17 +345,17 @@ U2F::Register(const nsAString& aAppId,
-   WebAuthnMakeCredentialInfo info(rpIdHash,
-                                   clientDataHash,
-                                   adjustedTimeoutMillis,
-                                   excludeList,
-                                   extensions,
-                                   authSelection);
- 
-   MOZ_ASSERT(mTransaction.isNothing());
--  mTransaction = Some(U2FTransaction(clientData, std::move(AsVariant(callback))));
-+  mTransaction = Some(U2FTransaction(clientData, AsVariant(callback)));
-   mChild->SendRequestRegister(mTransaction.ref().mId, info);
- }
- 
- void
- U2F::FinishMakeCredential(const uint64_t& aTransactionId,
-                           const WebAuthnMakeCredentialResult& aResult)
- {
-   MOZ_ASSERT(NS_IsMainThread());
-@@ -486,17 +486,17 @@ U2F::Sign(const nsAString& aAppId,
-   WebAuthnGetAssertionInfo info(rpIdHash,
-                                 clientDataHash,
-                                 adjustedTimeoutMillis,
-                                 permittedList,
-                                 false, /* requireUserVerification */
-                                 extensions);
- 
-   MOZ_ASSERT(mTransaction.isNothing());
--  mTransaction = Some(U2FTransaction(clientData, std::move(AsVariant(callback))));
-+  mTransaction = Some(U2FTransaction(clientData, AsVariant(callback)));
-   mChild->SendRequestSign(mTransaction.ref().mId, info);
- }
- 
- void
- U2F::FinishGetAssertion(const uint64_t& aTransactionId,
-                         const WebAuthnGetAssertionResult& aResult)
- {
-   MOZ_ASSERT(NS_IsMainThread());
-diff --git a/dom/workers/ServiceWorkerRegistrar.cpp b/dom/workers/ServiceWorkerRegistrar.cpp
---- a/dom/workers/ServiceWorkerRegistrar.cpp
-+++ b/dom/workers/ServiceWorkerRegistrar.cpp
-@@ -1174,17 +1174,17 @@ ServiceWorkerRegistrar::GetShutdownPhase
-   // memory), and there's no point in continuing startup. Include as much
-   // information as possible in the crash report.
-   RELEASE_ASSERT_SUCCEEDED(rv, "async shutdown service");
- 
- 
-   nsCOMPtr<nsIAsyncShutdownClient> client;
-   rv = svc->GetProfileBeforeChange(getter_AddRefs(client));
-   RELEASE_ASSERT_SUCCEEDED(rv, "profileBeforeChange shutdown blocker");
--  return std::move(client);
-+  return client;
- }
- 
- #undef RELEASE_ASSERT_SUCCEEDED
- 
- void
- ServiceWorkerRegistrar::Shutdown()
- {
-   AssertIsOnBackgroundThread();
-diff --git a/editor/libeditor/HTMLAnonymousNodeEditor.cpp b/editor/libeditor/HTMLAnonymousNodeEditor.cpp
---- a/editor/libeditor/HTMLAnonymousNodeEditor.cpp
-+++ b/editor/libeditor/HTMLAnonymousNodeEditor.cpp
-@@ -250,17 +250,17 @@ HTMLEditor::CreateAnonymousElement(nsIAt
-   // sort of ok.
-   newContent->SetProperty(nsGkAtoms::restylableAnonymousNode,
- 			  reinterpret_cast<void*>(true));
- #endif // DEBUG
- 
-   // display the element
-   ps->PostRecreateFramesFor(newContent);
- 
--  return std::move(newContent);
-+  return newContent;
- }
- 
- // Removes event listener and calls DeleteRefToAnonymousNode.
- void
- HTMLEditor::RemoveListenerAndDeleteRef(const nsAString& aEvent,
-                                        nsIDOMEventListener* aListener,
-                                        bool aUseCapture,
-                                        ManualNACPtr aElement,
-diff --git a/editor/libeditor/HTMLEditRules.cpp b/editor/libeditor/HTMLEditRules.cpp
---- a/editor/libeditor/HTMLEditRules.cpp
-+++ b/editor/libeditor/HTMLEditRules.cpp
-@@ -4904,33 +4904,32 @@ HTMLEditRules::CreateStyleForInsertText(
-   NS_ENSURE_STATE(aSelection.GetRangeAt(0));
-   nsCOMPtr<nsINode> node = aSelection.GetRangeAt(0)->GetStartContainer();
-   int32_t offset = aSelection.GetRangeAt(0)->StartOffset();
- 
-   nsCOMPtr<Element> rootElement = aDoc.GetRootElement();
-   NS_ENSURE_STATE(rootElement);
- 
-   // process clearing any styles first
--  UniquePtr<PropItem> item =
--    std::move(mHTMLEditor->mTypeInState->TakeClearProperty());
-+  UniquePtr<PropItem> item = mHTMLEditor->mTypeInState->TakeClearProperty();
-   while (item && node != rootElement) {
-     NS_ENSURE_STATE(mHTMLEditor);
-     // XXX If we redesign ClearStyle(), we can use EditorDOMPoint in this
-     //     method.
-     nsresult rv =
-       mHTMLEditor->ClearStyle(address_of(node), &offset,
-                               item->tag, item->attr);
-     NS_ENSURE_SUCCESS(rv, rv);
--    item = std::move(mHTMLEditor->mTypeInState->TakeClearProperty());
-+    item = mHTMLEditor->mTypeInState->TakeClearProperty();
-     weDidSomething = true;
-   }
- 
-   // then process setting any styles
-   int32_t relFontSize = mHTMLEditor->mTypeInState->TakeRelativeFontSize();
--  item = std::move(mHTMLEditor->mTypeInState->TakeSetProperty());
-+  item = mHTMLEditor->mTypeInState->TakeSetProperty();
- 
-   if (item || relFontSize) {
-     // we have at least one style to add; make a new text node to insert style
-     // nodes above.
-     if (RefPtr<Text> text = node->GetAsText()) {
-       if (NS_WARN_IF(!mHTMLEditor)) {
-         return NS_ERROR_FAILURE;
-       }
-diff --git a/editor/libeditor/HTMLEditorObjectResizer.cpp b/editor/libeditor/HTMLEditorObjectResizer.cpp
---- a/editor/libeditor/HTMLEditorObjectResizer.cpp
-+++ b/editor/libeditor/HTMLEditorObjectResizer.cpp
-@@ -146,17 +146,17 @@ HTMLEditor::CreateResizer(int16_t aLocat
-     case nsIHTMLObjectResizer::eBottomRight:
-       locationStr = kBottomRight;
-       break;
-   }
- 
-   nsresult rv =
-     ret->SetAttr(kNameSpaceID_None, nsGkAtoms::anonlocation, locationStr, true);
-   NS_ENSURE_SUCCESS(rv, nullptr);
--  return std::move(ret);
-+  return ret;
- }
- 
- ManualNACPtr
- HTMLEditor::CreateShadow(nsIContent& aParentContent,
-                          Element& aOriginalObject)
- {
-   // let's create an image through the element factory
-   nsCOMPtr<nsIAtom> name;
-diff --git a/extensions/spellcheck/hunspell/glue/RemoteSpellCheckEngineChild.cpp b/extensions/spellcheck/hunspell/glue/RemoteSpellCheckEngineChild.cpp
---- a/extensions/spellcheck/hunspell/glue/RemoteSpellCheckEngineChild.cpp
-+++ b/extensions/spellcheck/hunspell/glue/RemoteSpellCheckEngineChild.cpp
-@@ -34,17 +34,17 @@ RemoteSpellcheckEngineChild::SetCurrentD
-   if (!SendSetDictionaryFromList(
-          aList,
-          reinterpret_cast<intptr_t>(promiseHolder.get()))) {
-     return GenericPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
-   }
-   RefPtr<GenericPromise> result = promiseHolder->Ensure(__func__);
-   // promiseHolder will removed by receive message
-   mResponsePromises.AppendElement(std::move(promiseHolder));
--  return std::move(result);
-+  return result;
- }
- 
- mozilla::ipc::IPCResult
- RemoteSpellcheckEngineChild::RecvNotifyOfCurrentDictionary(
-                                const nsString& aDictionary,
-                                const intptr_t& aId)
- {
-   MozPromiseHolder<GenericPromise>* promiseHolder =
-diff --git a/gfx/2d/SFNTData.cpp b/gfx/2d/SFNTData.cpp
---- a/gfx/2d/SFNTData.cpp
-+++ b/gfx/2d/SFNTData.cpp
-@@ -134,25 +134,25 @@ SFNTData::Create(const uint8_t *aFontDat
-     const BigEndianUint32* endOfOffsets = offset + numFonts;
-     while (offset != endOfOffsets) {
-       if (!sfntData->AddFont(aFontData, aDataLength, *offset)) {
-         return nullptr;
-       }
-       ++offset;
-     }
- 
--    return std::move(sfntData);
-+    return sfntData;
-   }
- 
-   UniquePtr<SFNTData> sfntData(new SFNTData);
-   if (!sfntData->AddFont(aFontData, aDataLength, 0)) {
-     return nullptr;
-   }
- 
--  return std::move(sfntData);
-+  return sfntData;
- }
- 
- /* static */
- uint64_t
- SFNTData::GetUniqueKey(const uint8_t *aFontData, uint32_t aDataLength,
-                        uint32_t aVarDataSize, const void* aVarData)
- {
-   uint64_t hash;
-diff --git a/gfx/gl/GLScreenBuffer.cpp b/gfx/gl/GLScreenBuffer.cpp
---- a/gfx/gl/GLScreenBuffer.cpp
-+++ b/gfx/gl/GLScreenBuffer.cpp
-@@ -43,28 +43,28 @@ UniquePtr<GLScreenBuffer>
- GLScreenBuffer::Create(GLContext* gl,
-                        const gfx::IntSize& size,
-                        const SurfaceCaps& caps)
- {
-     UniquePtr<GLScreenBuffer> ret;
-     if (caps.antialias &&
-         !gl->IsSupported(GLFeature::framebuffer_multisample))
-     {
--        return std::move(ret);
-+        return ret;
-     }
- 
-     layers::TextureFlags flags = layers::TextureFlags::ORIGIN_BOTTOM_LEFT;
-     if (!caps.premultAlpha) {
-         flags |= layers::TextureFlags::NON_PREMULTIPLIED;
-     }
- 
-     UniquePtr<SurfaceFactory> factory = MakeUnique<SurfaceFactory_Basic>(gl, caps, flags);
- 
-     ret.reset( new GLScreenBuffer(gl, caps, std::move(factory)) );
--    return std::move(ret);
-+    return ret;
- }
- 
- /* static */ UniquePtr<SurfaceFactory>
- GLScreenBuffer::CreateFactory(GLContext* gl,
-                               const SurfaceCaps& caps,
-                               KnowsCompositor* compositorConnection,
-                               const layers::TextureFlags& flags)
- {
-@@ -948,17 +948,17 @@ ReadBuffer::Create(GLContext* gl,
-     const bool isComplete = gl->IsFramebufferComplete(fb);
-     if (needsAcquire) {
-         surf->ProducerReadRelease();
-     }
- 
-     if (!isComplete)
-         return nullptr;
- 
--    return std::move(ret);
-+    return ret;
- }
- 
- ReadBuffer::~ReadBuffer()
- {
-     if (!mGL->MakeCurrent())
-         return;
- 
-     GLuint fb = mFB;
-diff --git a/gfx/gl/MozFramebuffer.cpp b/gfx/gl/MozFramebuffer.cpp
---- a/gfx/gl/MozFramebuffer.cpp
-+++ b/gfx/gl/MozFramebuffer.cpp
-@@ -129,17 +129,17 @@ MozFramebuffer::CreateWith(GLContext* co
-     }
- 
-     const auto status = gl->fCheckFramebufferStatus(LOCAL_GL_FRAMEBUFFER);
-     if (status != LOCAL_GL_FRAMEBUFFER_COMPLETE) {
-         MOZ_ASSERT(false);
-         return nullptr;
-     }
- 
--    return std::move(mozFB);
-+    return mozFB;
- }
- 
- ////////////////////
- 
- MozFramebuffer::MozFramebuffer(GLContext* const gl, const gfx::IntSize& size,
-                                const uint32_t samples, const bool depthStencil,
-                                const GLenum colorTarget, const GLuint colorName)
-     : mWeakGL(gl)
-diff --git a/gfx/gl/SharedSurface.cpp b/gfx/gl/SharedSurface.cpp
---- a/gfx/gl/SharedSurface.cpp
-+++ b/gfx/gl/SharedSurface.cpp
-@@ -327,17 +327,17 @@ SurfaceFactory::NewTexClient(const gfx::
-         if (cur->Surf()->mSize == size) {
-             cur->Surf()->WaitForBufferOwnership();
-             return cur.forget();
-         }
- 
-         StopRecycling(cur);
-     }
- 
--    UniquePtr<SharedSurface> surf = std::move(CreateShared(size));
-+    UniquePtr<SharedSurface> surf = CreateShared(size);
-     if (!surf)
-         return nullptr;
- 
-     RefPtr<layers::SharedSurfaceTextureClient> ret;
-     ret = layers::SharedSurfaceTextureClient::Create(std::move(surf), this, mAllocator, mFlags);
- 
-     StartRecycling(ret);
- 
-diff --git a/gfx/gl/SharedSurfaceEGL.cpp b/gfx/gl/SharedSurfaceEGL.cpp
---- a/gfx/gl/SharedSurfaceEGL.cpp
-+++ b/gfx/gl/SharedSurfaceEGL.cpp
-@@ -25,37 +25,37 @@ SharedSurface_EGLImage::Create(GLContext
- {
-     GLLibraryEGL* egl = &sEGLLibrary;
-     MOZ_ASSERT(egl);
-     MOZ_ASSERT(context);
- 
-     UniquePtr<SharedSurface_EGLImage> ret;
- 
-     if (!HasExtensions(egl, prodGL)) {
--        return std::move(ret);
-+        return ret;
-     }
- 
-     MOZ_ALWAYS_TRUE(prodGL->MakeCurrent());
-     GLuint prodTex = CreateTextureForOffscreen(prodGL, formats, size);
-     if (!prodTex) {
--        return std::move(ret);
-+        return ret;
-     }
- 
-     EGLClientBuffer buffer = reinterpret_cast<EGLClientBuffer>(uintptr_t(prodTex));
-     EGLImage image = egl->fCreateImage(egl->Display(), context,
-                                        LOCAL_EGL_GL_TEXTURE_2D, buffer,
-                                        nullptr);
-     if (!image) {
-         prodGL->fDeleteTextures(1, &prodTex);
--        return std::move(ret);
-+        return ret;
-     }
- 
-     ret.reset( new SharedSurface_EGLImage(prodGL, egl, size, hasAlpha,
-                                           formats, prodTex, image) );
--    return std::move(ret);
-+    return ret;
- }
- 
- bool
- SharedSurface_EGLImage::HasExtensions(GLLibraryEGL* egl, GLContext* gl)
- {
-     return egl->HasKHRImageBase() &&
-            egl->IsExtensionSupported(GLLibraryEGL::KHR_gl_texture_2D_image) &&
-            (gl->IsExtensionSupported(GLContext::OES_EGL_image_external) ||
-@@ -172,17 +172,17 @@ SurfaceFactory_EGLImage::Create(GLContex
-     typedef SurfaceFactory_EGLImage ptrT;
-     UniquePtr<ptrT> ret;
- 
-     GLLibraryEGL* egl = &sEGLLibrary;
-     if (SharedSurface_EGLImage::HasExtensions(egl, prodGL)) {
-         ret.reset( new ptrT(prodGL, caps, allocator, flags, context) );
-     }
- 
--    return std::move(ret);
-+    return ret;
- }
- 
- ////////////////////////////////////////////////////////////////////////
- 
- #ifdef MOZ_WIDGET_ANDROID
- 
- /*static*/ UniquePtr<SharedSurface_SurfaceTexture>
- SharedSurface_SurfaceTexture::Create(GLContext* prodGL,
-@@ -195,22 +195,22 @@ SharedSurface_SurfaceTexture::Create(GLC
- 
-     UniquePtr<SharedSurface_SurfaceTexture> ret;
- 
-     AndroidNativeWindow window(surface);
-     GLContextEGL* egl = GLContextEGL::Cast(prodGL);
-     MOZ_ASSERT(egl);
-     EGLSurface eglSurface = egl->CreateCompatibleSurface(window.NativeWindow());
-     if (!eglSurface) {
--        return std::move(ret);
-+        return ret;
-     }
- 
-     ret.reset(new SharedSurface_SurfaceTexture(prodGL, size, hasAlpha,
-                                                formats, surface, eglSurface));
--    return std::move(ret);
-+    return ret;
- }
- 
- SharedSurface_SurfaceTexture::SharedSurface_SurfaceTexture(GLContext* gl,
-                                                            const gfx::IntSize& size,
-                                                            bool hasAlpha,
-                                                            const GLFormats& formats,
-                                                            java::GeckoSurface::Param surface,
-                                                            EGLSurface eglSurface)
-@@ -287,17 +287,17 @@ SharedSurface_SurfaceTexture::ToSurfaceD
- 
- /*static*/ UniquePtr<SurfaceFactory_SurfaceTexture>
- SurfaceFactory_SurfaceTexture::Create(GLContext* prodGL, const SurfaceCaps& caps,
-                                       const RefPtr<layers::LayersIPCChannel>& allocator,
-                                       const layers::TextureFlags& flags)
- {
-     UniquePtr<SurfaceFactory_SurfaceTexture> ret(
-         new SurfaceFactory_SurfaceTexture(prodGL, caps, allocator, flags));
--    return std::move(ret);
-+    return ret;
- }
- 
- UniquePtr<SharedSurface>
- SurfaceFactory_SurfaceTexture::CreateShared(const gfx::IntSize& size)
- {
-     bool hasAlpha = mReadCaps.alpha;
- 
-     jni::Object::LocalRef surface = java::SurfaceAllocator::AcquireSurface(size.width, size.height, true);
-diff --git a/gfx/gl/SharedSurfaceGL.cpp b/gfx/gl/SharedSurfaceGL.cpp
---- a/gfx/gl/SharedSurfaceGL.cpp
-+++ b/gfx/gl/SharedSurfaceGL.cpp
-@@ -28,35 +28,35 @@ SharedSurface_Basic::Create(GLContext* g
- 
-     GLContext::LocalErrorScope localError(*gl);
-     GLuint tex = CreateTextureForOffscreen(gl, formats, size);
- 
-     GLenum err = localError.GetError();
-     MOZ_ASSERT_IF(err != LOCAL_GL_NO_ERROR, err == LOCAL_GL_OUT_OF_MEMORY);
-     if (err) {
-         gl->fDeleteTextures(1, &tex);
--        return std::move(ret);
-+        return ret;
-     }
- 
-     bool ownsTex = true;
-     ret.reset( new SharedSurface_Basic(gl, size, hasAlpha, tex, ownsTex) );
--    return std::move(ret);
-+    return ret;
- }
- 
- 
- /*static*/ UniquePtr<SharedSurface_Basic>
- SharedSurface_Basic::Wrap(GLContext* gl,
-                           const IntSize& size,
-                           bool hasAlpha,
-                           GLuint tex)
- {
-     bool ownsTex = false;
-     UniquePtr<SharedSurface_Basic> ret( new SharedSurface_Basic(gl, size, hasAlpha, tex,
-                                                                 ownsTex) );
--    return std::move(ret);
-+    return ret;
- }
- 
- SharedSurface_Basic::SharedSurface_Basic(GLContext* gl,
-                                          const IntSize& size,
-                                          bool hasAlpha,
-                                          GLuint tex,
-                                          bool ownsTex)
-     : SharedSurface(SharedSurfaceType::Basic,
-@@ -121,22 +121,22 @@ SharedSurface_GLTexture::Create(GLContex
-     GLContext::LocalErrorScope localError(*prodGL);
- 
-     GLuint tex = CreateTextureForOffscreen(prodGL, formats, size);
- 
-     GLenum err = localError.GetError();
-     MOZ_ASSERT_IF(err, err == LOCAL_GL_OUT_OF_MEMORY);
-     if (err) {
-         prodGL->fDeleteTextures(1, &tex);
--        return std::move(ret);
-+        return ret;
-     }
- 
-     ret.reset(new SharedSurface_GLTexture(prodGL, size,
-                                           hasAlpha, tex));
--    return std::move(ret);
-+    return ret;
- }
- 
- SharedSurface_GLTexture::~SharedSurface_GLTexture()
- {
-     if (!mGL->MakeCurrent())
-         return;
- 
-     if (mTex) {
-diff --git a/gfx/gl/SharedSurfaceGLX.cpp b/gfx/gl/SharedSurfaceGLX.cpp
---- a/gfx/gl/SharedSurfaceGLX.cpp
-+++ b/gfx/gl/SharedSurfaceGLX.cpp
-@@ -32,17 +32,17 @@ SharedSurface_GLXDrawable::Create(GLCont
-     Screen* screen = XDefaultScreenOfDisplay(display);
-     Visual* visual = gfxXlibSurface::FindVisual(screen, gfx::SurfaceFormat::A8R8G8B8_UINT32);
- 
-     RefPtr<gfxXlibSurface> surf = gfxXlibSurface::Create(screen, visual, size);
-     if (!deallocateClient)
-         surf->ReleasePixmap();
- 
-     ret.reset(new SharedSurface_GLXDrawable(prodGL, size, inSameProcess, surf));
--    return std::move(ret);
-+    return ret;
- }
- 
- 
- SharedSurface_GLXDrawable::SharedSurface_GLXDrawable(GLContext* gl,
-                                                      const gfx::IntSize& size,
-                                                      bool inSameProcess,
-                                                      const RefPtr<gfxXlibSurface>& xlibSurface)
-     : SharedSurface(SharedSurfaceType::GLXDrawable,
-@@ -124,17 +124,17 @@ SurfaceFactory_GLXDrawable::Create(GLCon
-                                    const RefPtr<layers::LayersIPCChannel>& allocator,
-                                    const layers::TextureFlags& flags)
- {
-     MOZ_ASSERT(caps.alpha, "GLX surfaces require an alpha channel!");
- 
-     typedef SurfaceFactory_GLXDrawable ptrT;
-     UniquePtr<ptrT> ret(new ptrT(prodGL, caps, allocator,
-                                  flags & ~layers::TextureFlags::ORIGIN_BOTTOM_LEFT));
--    return std::move(ret);
-+    return ret;
- }
- 
- UniquePtr<SharedSurface>
- SurfaceFactory_GLXDrawable::CreateShared(const gfx::IntSize& size)
- {
-     bool deallocateClient = !!(mFlags & layers::TextureFlags::DEALLOCATE_CLIENT);
-     return SharedSurface_GLXDrawable::Create(mGL, mCaps, size, deallocateClient,
-                                              mAllocator->IsSameProcess());
-diff --git a/gfx/gl/SharedSurfaceIO.cpp b/gfx/gl/SharedSurfaceIO.cpp
---- a/gfx/gl/SharedSurfaceIO.cpp
-+++ b/gfx/gl/SharedSurfaceIO.cpp
-@@ -21,17 +21,17 @@ SharedSurface_IOSurface::Create(const Re
- {
-     MOZ_ASSERT(ioSurf);
-     MOZ_ASSERT(gl);
- 
-     auto size = gfx::IntSize::Truncate(ioSurf->GetWidth(), ioSurf->GetHeight());
- 
-     typedef SharedSurface_IOSurface ptrT;
-     UniquePtr<ptrT> ret( new ptrT(ioSurf, gl, size, hasAlpha) );
--    return std::move(ret);
-+    return ret;
- }
- 
- void
- SharedSurface_IOSurface::ProducerReleaseImpl()
- {
-     mGL->MakeCurrent();
-     mGL->fFlush();
- }
-@@ -214,17 +214,17 @@ SurfaceFactory_IOSurface::Create(GLConte
-                                  const RefPtr<layers::LayersIPCChannel>& allocator,
-                                  const layers::TextureFlags& flags)
- {
-     auto maxDims = gfx::IntSize::Truncate(MacIOSurface::GetMaxWidth(),
-                                           MacIOSurface::GetMaxHeight());
- 
-     typedef SurfaceFactory_IOSurface ptrT;
-     UniquePtr<ptrT> ret( new ptrT(gl, caps, allocator, flags, maxDims) );
--    return std::move(ret);
-+    return ret;
- }
- 
- UniquePtr<SharedSurface>
- SurfaceFactory_IOSurface::CreateShared(const gfx::IntSize& size)
- {
-     if (size.width > mMaxDims.width ||
-         size.height > mMaxDims.height)
-     {
-diff --git a/gfx/layers/AnimationHelper.cpp b/gfx/layers/AnimationHelper.cpp
---- a/gfx/layers/AnimationHelper.cpp
-+++ b/gfx/layers/AnimationHelper.cpp
-@@ -231,18 +231,18 @@ AnimationHelper::SampleAnimationForEachN
-     TimingParams timing {
-       animation.duration(),
-       animation.delay(),
-       animation.endDelay(),
-       animation.iterations(),
-       animation.iterationStart(),
-       static_cast<dom::PlaybackDirection>(animation.direction()),
-       static_cast<dom::FillMode>(animation.fillMode()),
--      std::move(AnimationUtils::TimingFunctionToComputedTimingFunction(
--           animation.easingFunction()))
-+      AnimationUtils::TimingFunctionToComputedTimingFunction(
-+           animation.easingFunction())
-     };
- 
-     ComputedTiming computedTiming =
-       dom::AnimationEffectReadOnly::GetComputedTimingAt(
-         Nullable<TimeDuration>(elapsedDuration), timing,
-         animation.playbackRate());
- 
-     if (computedTiming.mProgress.IsNull()) {
-diff --git a/gfx/layers/LayerTreeInvalidation.cpp b/gfx/layers/LayerTreeInvalidation.cpp
---- a/gfx/layers/LayerTreeInvalidation.cpp
-+++ b/gfx/layers/LayerTreeInvalidation.cpp
-@@ -341,17 +341,17 @@ struct ContainerLayerProperties : public
- {
-   explicit ContainerLayerProperties(ContainerLayer* aLayer)
-     : LayerPropertiesBase(aLayer)
-     , mPreXScale(aLayer->GetPreXScale())
-     , mPreYScale(aLayer->GetPreYScale())
-   {
-     for (Layer* child = aLayer->GetFirstChild(); child; child = child->GetNextSibling()) {
-       child->CheckCanary();
--      mChildren.AppendElement(std::move(CloneLayerTreePropertiesInternal(child)));
-+      mChildren.AppendElement(CloneLayerTreePropertiesInternal(child));
-     }
-   }
- 
- protected:
-   ContainerLayerProperties(const ContainerLayerProperties& a) = delete;
-   ContainerLayerProperties& operator=(const ContainerLayerProperties& a) = delete;
- 
- public:
-diff --git a/gfx/layers/ipc/CompositorVsyncScheduler.cpp b/gfx/layers/ipc/CompositorVsyncScheduler.cpp
---- a/gfx/layers/ipc/CompositorVsyncScheduler.cpp
-+++ b/gfx/layers/ipc/CompositorVsyncScheduler.cpp
-@@ -139,17 +139,17 @@ CompositorVsyncScheduler::PostVRTask(Tim
-   MonitorAutoLock lockVR(mCurrentVRListenerTaskMonitor);
-   if (mCurrentVRListenerTask == nullptr && VRListenerThreadHolder::Loop()) {
-     RefPtr<Runnable> task = NewRunnableMethod<TimeStamp>(
-       "layers::CompositorVsyncScheduler::DispatchVREvents",
-       this,
-       &CompositorVsyncScheduler::DispatchVREvents,
-       aTimestamp);
-     mCurrentVRListenerTask = task;
--    VRListenerThreadHolder::Loop()->PostDelayedTask(std::move(task.forget()), 0);
-+    VRListenerThreadHolder::Loop()->PostDelayedTask(task.forget(), 0);
-   }
- }
- 
- void
- CompositorVsyncScheduler::ScheduleComposition()
- {
-   MOZ_ASSERT(CompositorThreadHolder::IsInCompositorThread());
-   if (!mVsyncObserver) {
-diff --git a/gfx/layers/ipc/UiCompositorControllerParent.cpp b/gfx/layers/ipc/UiCompositorControllerParent.cpp
---- a/gfx/layers/ipc/UiCompositorControllerParent.cpp
-+++ b/gfx/layers/ipc/UiCompositorControllerParent.cpp
-@@ -24,17 +24,17 @@ typedef CompositorBridgeParent::LayerTre
- /* static */ RefPtr<UiCompositorControllerParent>
- UiCompositorControllerParent::GetFromRootLayerTreeId(const LayersId& aRootLayerTreeId)
- {
-   RefPtr<UiCompositorControllerParent> controller;
-   CompositorBridgeParent::CallWithIndirectShadowTree(aRootLayerTreeId,
-     [&](LayerTreeState& aState) -> void {
-       controller = aState.mUiControllerParent;
-     });
--  return std::move(controller);
-+  return controller;
- }
- 
- /* static */ RefPtr<UiCompositorControllerParent>
- UiCompositorControllerParent::Start(const LayersId& aRootLayerTreeId, Endpoint<PUiCompositorControllerParent>&& aEndpoint)
- {
-   RefPtr<UiCompositorControllerParent> parent = new UiCompositorControllerParent(aRootLayerTreeId);
- 
-   RefPtr<Runnable> task =
-diff --git a/gfx/layers/wr/WebRenderCommandBuilder.cpp b/gfx/layers/wr/WebRenderCommandBuilder.cpp
---- a/gfx/layers/wr/WebRenderCommandBuilder.cpp
-+++ b/gfx/layers/wr/WebRenderCommandBuilder.cpp
-@@ -350,17 +350,17 @@ PaintByLayer(nsDisplayItem* aItem,
-              nsDisplayListBuilder* aDisplayListBuilder,
-              const RefPtr<BasicLayerManager>& aManager,
-              gfxContext* aContext,
-              const gfx::Size& aScale,
-              const std::function<void()>& aPaintFunc)
- {
-   UniquePtr<LayerProperties> props;
-   if (aManager->GetRoot()) {
--    props = std::move(LayerProperties::CloneFrom(aManager->GetRoot()));
-+    props = LayerProperties::CloneFrom(aManager->GetRoot());
-   }
-   FrameLayerBuilder* layerBuilder = new FrameLayerBuilder();
-   layerBuilder->Init(aDisplayListBuilder, aManager, nullptr, true);
-   layerBuilder->DidBeginRetainedLayerTransaction(aManager);
- 
-   aManager->SetDefaultTarget(aContext);
-   aManager->BeginTransactionWithTarget(aContext);
-   bool isInvalidated = false;
-diff --git a/gfx/thebes/gfxFont.cpp b/gfx/thebes/gfxFont.cpp
---- a/gfx/thebes/gfxFont.cpp
-+++ b/gfx/thebes/gfxFont.cpp
-@@ -2514,17 +2514,17 @@ gfxFont::Measure(const gfxTextRun *aText
- {
-     // If aBoundingBoxType is TIGHT_HINTED_OUTLINE_EXTENTS
-     // and the underlying cairo font may be antialiased,
-     // we need to create a copy in order to avoid getting cached extents.
-     // This is only used by MathML layout at present.
-     if (aBoundingBoxType == TIGHT_HINTED_OUTLINE_EXTENTS &&
-         mAntialiasOption != kAntialiasNone) {
-         if (!mNonAAFont) {
--            mNonAAFont = std::move(CopyWithAntialiasOption(kAntialiasNone));
-+            mNonAAFont = CopyWithAntialiasOption(kAntialiasNone);
-         }
-         // if font subclass doesn't implement CopyWithAntialiasOption(),
-         // it will return null and we'll proceed to use the existing font
-         if (mNonAAFont) {
-             return mNonAAFont->Measure(aTextRun, aStart, aEnd,
-                                        TIGHT_HINTED_OUTLINE_EXTENTS,
-                                        aRefDrawTarget, aSpacing, aOrientation);
-         }
-diff --git a/gfx/thebes/gfxPlatform.cpp b/gfx/thebes/gfxPlatform.cpp
---- a/gfx/thebes/gfxPlatform.cpp
-+++ b/gfx/thebes/gfxPlatform.cpp
-@@ -1818,17 +1818,17 @@ gfxPlatform::GetBackendPrefs()
-   data.mContentBitmask = BackendTypeBit(BackendType::CAIRO);
- #ifdef USE_SKIA
-   data.mCanvasBitmask |= BackendTypeBit(BackendType::SKIA);
-   data.mContentBitmask |= BackendTypeBit(BackendType::SKIA);
- #endif
-   data.mCanvasDefault = BackendType::CAIRO;
-   data.mContentDefault = BackendType::CAIRO;
- 
--  return std::move(data);
-+  return data;
- }
- 
- void
- gfxPlatform::InitBackendPrefs(BackendPrefsData&& aPrefsData)
- {
-     mPreferredCanvasBackend = GetCanvasBackendPref(aPrefsData.mCanvasBitmask);
-     if (mPreferredCanvasBackend == BackendType::NONE) {
-         mPreferredCanvasBackend = aPrefsData.mCanvasDefault;
-diff --git a/gfx/thebes/gfxPlatformMac.cpp b/gfx/thebes/gfxPlatformMac.cpp
---- a/gfx/thebes/gfxPlatformMac.cpp
-+++ b/gfx/thebes/gfxPlatformMac.cpp
-@@ -94,17 +94,17 @@ gfxPlatformMac::GetBackendPrefs()
- {
-   BackendPrefsData data;
- 
-   data.mCanvasBitmask = BackendTypeBit(BackendType::SKIA);
-   data.mContentBitmask = BackendTypeBit(BackendType::SKIA);
-   data.mCanvasDefault = BackendType::SKIA;
-   data.mContentDefault = BackendType::SKIA;
- 
--  return std::move(data);
-+  return data;
- }
- 
- bool
- gfxPlatformMac::UsesTiling() const
- {
-     // The non-tiling ContentClient requires CrossProcessSemaphore which
-     // isn't implemented for OSX.
-     return true;
-diff --git a/image/decoders/nsICODecoder.cpp b/image/decoders/nsICODecoder.cpp
---- a/image/decoders/nsICODecoder.cpp
-+++ b/image/decoders/nsICODecoder.cpp
-@@ -198,17 +198,17 @@ LexerTransition<ICOState>
- nsICODecoder::IterateUnsizedDirEntry()
- {
-   MOZ_ASSERT(!mUnsizedDirEntries.IsEmpty());
- 
-   if (!mDirEntry) {
-     // The first time we are here, there is no entry selected. We must prepare a
-     // new iterator for the contained decoder to advance as it wills. Cloning at
-     // this point ensures it will begin at the end of the dir entries.
--    mReturnIterator = std::move(mLexer.Clone(*mIterator, SIZE_MAX));
-+    mReturnIterator = mLexer.Clone(*mIterator, SIZE_MAX);
-     if (mReturnIterator.isNothing()) {
-       // If we cannot read further than this point, then there is no resource
-       // data to read.
-       return Transition::TerminateFailure();
-     }
-   } else {
-     // We have already selected an entry which means a metadata decoder has
-     // finished. Verify the size is valid and if so, add to the discovered
-@@ -218,17 +218,17 @@ nsICODecoder::IterateUnsizedDirEntry()
-     }
- 
-     // Remove the entry from the unsized list either way.
-     mDirEntry = nullptr;
-     mUnsizedDirEntries.RemoveElementAt(0);
- 
-     // Our iterator is at an unknown point, so reset it to the point that we
-     // saved.
--    mIterator = std::move(mLexer.Clone(*mReturnIterator, SIZE_MAX));
-+    mIterator = mLexer.Clone(*mReturnIterator, SIZE_MAX);
-     if (mIterator.isNothing()) {
-       MOZ_ASSERT_UNREACHABLE("Cannot re-clone return iterator");
-       return Transition::TerminateFailure();
-     }
-   }
- 
-   // There are no more unsized entries, so we can finally decide which entry to
-   // select for decoding.
-diff --git a/image/test/gtest/TestADAM7InterpolatingFilter.cpp b/image/test/gtest/TestADAM7InterpolatingFilter.cpp
---- a/image/test/gtest/TestADAM7InterpolatingFilter.cpp
-+++ b/image/test/gtest/TestADAM7InterpolatingFilter.cpp
-@@ -236,18 +236,18 @@ WriteUninterpolatedPixels(SurfaceFilter*
-                           uint8_t aPass,
-                           const vector<BGRAColor>& aColors)
- {
-   WriteState result = WriteState::NEED_MORE_DATA;
- 
-   for (int32_t row = 0; row < aSize.height; ++row) {
-     // Compute uninterpolated pixels for this row.
-     vector<BGRAColor> pixels =
--      std::move(ADAM7HorizontallyInterpolatedRow(aPass, row, aSize.width,
--                                            ShouldInterpolate::eNo, aColors));
-+      ADAM7HorizontallyInterpolatedRow(aPass, row, aSize.width,
-+                                       ShouldInterpolate::eNo, aColors);
- 
-     // Write them to the surface.
-     auto pixelIterator = pixels.cbegin();
-     result = aFilter->WritePixelsToRow<uint32_t>([&]{
-       return AsVariant((*pixelIterator++).AsPixel());
-     });
- 
-     if (result != WriteState::NEED_MORE_DATA) {
-@@ -270,18 +270,18 @@ CheckHorizontallyInterpolatedImage(Decod
-   for (int32_t row = 0; row < aSize.height; ++row) {
-     if (!IsImportantRow(row, aPass)) {
-       continue;  // Don't check rows which aren't important on this pass.
-     }
- 
-     // Compute the expected pixels, *with* interpolation to match what the
-     // filter should have done.
-     vector<BGRAColor> expectedPixels =
--      std::move(ADAM7HorizontallyInterpolatedRow(aPass, row, aSize.width,
--                                            ShouldInterpolate::eYes, aColors));
-+      ADAM7HorizontallyInterpolatedRow(aPass, row, aSize.width,
-+                                       ShouldInterpolate::eYes, aColors);
- 
-     if (!RowHasPixels(surface, row, expectedPixels)) {
-       return false;
-     }
-   }
- 
-   return true;
- }
-diff --git a/image/test/gtest/TestAnimationFrameBuffer.cpp b/image/test/gtest/TestAnimationFrameBuffer.cpp
---- a/image/test/gtest/TestAnimationFrameBuffer.cpp
-+++ b/image/test/gtest/TestAnimationFrameBuffer.cpp
-@@ -23,17 +23,17 @@ CreateEmptyFrame()
- }
- 
- static bool
- Fill(AnimationFrameBuffer& buffer, size_t aLength)
- {
-   bool keepDecoding = false;
-   for (size_t i = 0; i < aLength; ++i) {
-     RawAccessFrameRef frame = CreateEmptyFrame();
--    keepDecoding = buffer.Insert(std::move(frame->RawAccessRef()));
-+    keepDecoding = buffer.Insert(frame->RawAccessRef());
-   }
-   return keepDecoding;
- }
- 
- static void
- CheckFrames(const AnimationFrameBuffer& buffer, size_t aStart, size_t aEnd, bool aExpected)
- {
-   for (size_t i = aStart; i < aEnd; ++i) {
-@@ -128,17 +128,17 @@ TEST_F(ImageAnimationFrameBuffer, Finish
-   buffer.Initialize(kThreshold, kBatch, 0);
-   const auto& frames = buffer.Frames();
- 
-   EXPECT_EQ(kBatch * 2, buffer.PendingDecode());
- 
-   RawAccessFrameRef firstFrame;
-   for (size_t i = 0; i < 5; ++i) {
-     RawAccessFrameRef frame = CreateEmptyFrame();
--    bool keepDecoding = buffer.Insert(std::move(frame->RawAccessRef()));
-+    bool keepDecoding = buffer.Insert(frame->RawAccessRef());
-     EXPECT_TRUE(keepDecoding);
-     EXPECT_FALSE(buffer.SizeKnown());
- 
-     if (i == 4) {
-       EXPECT_EQ(size_t(15), buffer.PendingDecode());
-       keepDecoding = buffer.MarkComplete();
-       EXPECT_FALSE(keepDecoding);
-       EXPECT_TRUE(buffer.SizeKnown());
-diff --git a/image/test/gtest/TestSourceBuffer.cpp b/image/test/gtest/TestSourceBuffer.cpp
---- a/image/test/gtest/TestSourceBuffer.cpp
-+++ b/image/test/gtest/TestSourceBuffer.cpp
-@@ -589,30 +589,32 @@ TEST_F(ImageSourceBuffer, SourceBufferIt
-   auto GetIterator = [&]{
-     SourceBufferIterator lambdaIterator = mSourceBuffer->Iterator();
-     CheckedAdvanceIterator(lambdaIterator, chunkLength);
-     return lambdaIterator;
-   };
- 
-   // Move-construct |movedIterator| from the iterator returned from
-   // GetIterator() and check that its state is as we expect.
--  SourceBufferIterator movedIterator = std::move(GetIterator());
-+  SourceBufferIterator tmpIterator = GetIterator();
-+  SourceBufferIterator movedIterator(std::move(tmpIterator));
-   EXPECT_TRUE(movedIterator.Data());
-   EXPECT_EQ(chunkLength, movedIterator.Length());
-   ExpectChunkAndByteCount(movedIterator, 1, chunkLength);
- 
-   // Make sure that we can advance the iterator.
-   CheckedAdvanceIterator(movedIterator, chunkLength, 2, totalLength);
- 
-   // Make sure that the iterator handles completion properly.
-   CheckIteratorIsComplete(movedIterator, 2, totalLength);
- 
-   // Move-assign |movedIterator| from the iterator returned from
-   // GetIterator() and check that its state is as we expect.
--  movedIterator = std::move(GetIterator());
-+  tmpIterator = GetIterator();
-+  movedIterator = std::move(tmpIterator);
-   EXPECT_TRUE(movedIterator.Data());
-   EXPECT_EQ(chunkLength, movedIterator.Length());
-   ExpectChunkAndByteCount(movedIterator, 1, chunkLength);
- 
-   // Make sure that we can advance the iterator.
-   CheckedAdvanceIterator(movedIterator, chunkLength, 2, totalLength);
- 
-   // Make sure that the iterator handles completion properly.
-diff --git a/image/test/gtest/TestSurfacePipeIntegration.cpp b/image/test/gtest/TestSurfacePipeIntegration.cpp
---- a/image/test/gtest/TestSurfacePipeIntegration.cpp
-+++ b/image/test/gtest/TestSurfacePipeIntegration.cpp
-@@ -21,17 +21,17 @@ namespace mozilla {
- namespace image {
- 
- class TestSurfacePipeFactory
- {
- public:
-   static SurfacePipe SimpleSurfacePipe()
-   {
-     SurfacePipe pipe;
--    return std::move(pipe);
-+    return pipe;
-   }
- 
-   template <typename T>
-   static SurfacePipe SurfacePipeFromPipeline(T&& aPipeline)
-   {
-     return SurfacePipe { std::move(aPipeline) };
-   }
- 
-diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp
---- a/js/src/builtin/TestingFunctions.cpp
-+++ b/js/src/builtin/TestingFunctions.cpp
-@@ -3523,17 +3523,17 @@ struct FindPathHandler {
-         if (!first)
-             return true;
- 
-         // Record how we reached this node. This is the last edge on a
-         // shortest path to this node.
-         EdgeName edgeName = DuplicateString(cx, edge.name.get());
-         if (!edgeName)
-             return false;
--        *backEdge = std::move(BackEdge(origin, std::move(edgeName)));
-+        *backEdge = BackEdge(origin, std::move(edgeName));
- 
-         // Have we reached our final target node?
-         if (edge.referent == target) {
-             // Record the path that got us here, which must be a shortest path.
-             if (!recordPath(traversal))
-                 return false;
-             foundPath = true;
-             traversal.stop();
-diff --git a/js/src/ctypes/CTypes.cpp b/js/src/ctypes/CTypes.cpp
---- a/js/src/ctypes/CTypes.cpp
-+++ b/js/src/ctypes/CTypes.cpp
-@@ -5754,17 +5754,17 @@ ArrayType::BuildFFIType(JSContext* cx, J
-     JS_ReportAllocationOverflow(cx);
-     return nullptr;
-   }
- 
-   for (size_t i = 0; i < length; ++i)
-     ffiType->elements[i] = ffiBaseType;
-   ffiType->elements[length] = nullptr;
- 
--  return std::move(ffiType);
-+  return ffiType;
- }
- 
- bool
- ArrayType::IsArrayType(HandleValue v)
- {
-   if (!v.isObject())
-     return false;
-   JSObject* obj = &v.toObject();
-@@ -6302,17 +6302,17 @@ StructType::BuildFFIType(JSContext* cx, 
-   // Fill in the ffi_type's size and align fields. This makes libffi treat the
-   // type as initialized; it will not recompute the values. (We assume
-   // everything agrees; if it doesn't, we really want to know about it, which
-   // is the purpose of the above debug-only check.)
-   ffiType->size = structSize;
-   ffiType->alignment = structAlign;
- #endif
- 
--  return std::move(ffiType);
-+  return ffiType;
- }
- 
- bool
- StructType::Define(JSContext* cx, unsigned argc, Value* vp)
- {
-   CallArgs args = CallArgsFromVp(argc, vp);
-   RootedObject obj(cx, JS_THIS_OBJECT(cx, vp));
-   if (!obj)
-diff --git a/js/src/ds/LifoAlloc.cpp b/js/src/ds/LifoAlloc.cpp
---- a/js/src/ds/LifoAlloc.cpp
-+++ b/js/src/ds/LifoAlloc.cpp
-@@ -46,21 +46,21 @@ BumpChunk::canAlloc(size_t n)
- 
- } // namespace detail
- } // namespace js
- 
- void
- LifoAlloc::freeAll()
- {
-     while (!chunks_.empty()) {
--        BumpChunk bc = std::move(chunks_.popFirst());
-+        BumpChunk bc = chunks_.popFirst();
-         decrementCurSize(bc->computedSizeOfIncludingThis());
-     }
-     while (!unused_.empty()) {
--        BumpChunk bc = std::move(unused_.popFirst());
-+        BumpChunk bc = unused_.popFirst();
-         decrementCurSize(bc->computedSizeOfIncludingThis());
-     }
- 
-     // Nb: maintaining curSize_ correctly isn't easy.  Fortunately, this is an
-     // excellent sanity check.
-     MOZ_ASSERT(curSize_ == 0);
- }
- 
-@@ -100,27 +100,27 @@ LifoAlloc::newChunkWithCapacity(size_t n
- bool
- LifoAlloc::getOrCreateChunk(size_t n)
- {
-     // Look for existing unused BumpChunks to satisfy the request, and pick the
-     // first one which is large enough, and move it into the list of used
-     // chunks.
-     if (!unused_.empty()) {
-         if (unused_.begin()->canAlloc(n)) {
--            chunks_.append(std::move(unused_.popFirst()));
-+            chunks_.append(unused_.popFirst());
-             return true;
-         }
- 
-         BumpChunkList::Iterator e(unused_.end());
-         for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get(); ++i) {
-             detail::BumpChunk* elem = i->next();
-             MOZ_ASSERT(elem->empty());
-             if (elem->canAlloc(n)) {
--                BumpChunkList temp = std::move(unused_.splitAfter(i.get()));
--                chunks_.append(std::move(temp.popFirst()));
-+                BumpChunkList temp = unused_.splitAfter(i.get());
-+                chunks_.append(temp.popFirst());
-                 unused_.appendAll(std::move(temp));
-                 return true;
-             }
-         }
-     }
- 
-     // Allocate a new BumpChunk with enough space for the next allocation.
-     BumpChunk newChunk = newChunkWithCapacity(n);
-diff --git a/js/src/ds/LifoAlloc.h b/js/src/ds/LifoAlloc.h
---- a/js/src/ds/LifoAlloc.h
-+++ b/js/src/ds/LifoAlloc.h
-@@ -706,17 +706,17 @@ class LifoAlloc
-     void release(Mark mark) {
-         markCount--;
- 
-         // Move the blocks which are after the mark to the set of unused chunks.
-         BumpChunkList released;
-         if (!mark.markedChunk())
-             released = std::move(chunks_);
-         else
--            released = std::move(chunks_.splitAfter(mark.markedChunk()));
-+            released = chunks_.splitAfter(mark.markedChunk());
- 
-         // Release the content of all the blocks which are after the marks.
-         for (detail::BumpChunk& bc : released)
-             bc.release();
-         unused_.appendAll(std::move(released));
- 
-         // Release everything which follows the mark in the last chunk.
-         if (!chunks_.empty())
-diff --git a/js/src/vm/CodeCoverage.cpp b/js/src/vm/CodeCoverage.cpp
---- a/js/src/vm/CodeCoverage.cpp
-+++ b/js/src/vm/CodeCoverage.cpp
-@@ -517,17 +517,17 @@ LCovRealm::lookupOrAdd(JS::Realm* realm,
- 
-     char* source_name = js_strdup(name);
-     if (!source_name) {
-         outTN_.reportOutOfMemory();
-         return nullptr;
-     }
- 
-     // Allocate a new LCovSource for the current top-level.
--    if (!sources_->append(std::move(LCovSource(&alloc_, source_name)))) {
-+    if (!sources_->append(LCovSource(&alloc_, source_name))) {
-         outTN_.reportOutOfMemory();
-         return nullptr;
-     }
- 
-     return &sources_->back();
- }
- 
- void
-diff --git a/js/src/vm/UbiNode.cpp b/js/src/vm/UbiNode.cpp
---- a/js/src/vm/UbiNode.cpp
-+++ b/js/src/vm/UbiNode.cpp
-@@ -262,17 +262,17 @@ class EdgeVectorTracer : public JS::Call
-                 name16[i] = name[i];
-             name16[i] = '\0';
-         }
- 
-         // The simplest code is correct! The temporary Edge takes
-         // ownership of name; if the append succeeds, the vector element
-         // then takes ownership; if the append fails, then the temporary
-         // retains it, and its destructor will free it.
--        if (!vec->append(std::move(Edge(name16, Node(thing))))) {
-+        if (!vec->append(Edge(name16, Node(thing)))) {
-             okay = false;
-             return;
-         }
-     }
- 
-   public:
-     // True if no errors (OOM, say) have yet occurred.
-     bool okay;
-@@ -543,17 +543,17 @@ RootList::addRoot(Node node, const char1
- 
-     UniqueTwoByteChars name;
-     if (edgeName) {
-         name = js::DuplicateString(edgeName);
-         if (!name)
-             return false;
-     }
- 
--    return edges.append(std::move(Edge(name.release(), node)));
-+    return edges.append(Edge(name.release(), node));
- }
- 
- const char16_t Concrete<RootList>::concreteTypeName[] = u"JS::ubi::RootList";
- 
- UniquePtr<EdgeRange>
- Concrete<RootList>::edges(JSContext* cx, bool wantNames) const {
-     MOZ_ASSERT_IF(wantNames, get().wantNames);
-     return UniquePtr<EdgeRange>(js_new<PreComputedEdgeRange>(get().edges));
-diff --git a/js/src/vm/UbiNodeShortestPaths.cpp b/js/src/vm/UbiNodeShortestPaths.cpp
---- a/js/src/vm/UbiNodeShortestPaths.cpp
-+++ b/js/src/vm/UbiNodeShortestPaths.cpp
-@@ -23,17 +23,17 @@ BackEdge::clone() const
-         return nullptr;
- 
-     clone->predecessor_ = predecessor();
-     if (name()) {
-         clone->name_ = js::DuplicateString(name().get());
-         if (!clone->name_)
-             return nullptr;
-     }
--    return std::move(clone);
-+    return clone;
- }
- 
- #ifdef DEBUG
- 
- static void
- dumpNode(const JS::ubi::Node& node)
- {
-     fprintf(stderr, "    %p ", (void*) node.identifier());
-diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
---- a/js/src/wasm/WasmValidate.cpp
-+++ b/js/src/wasm/WasmValidate.cpp
-@@ -1596,17 +1596,17 @@ DecodeExportName(Decoder& d, CStringSet*
-     if (p) {
-         d.fail("duplicate export");
-         return nullptr;
-     }
- 
-     if (!dupSet->add(p, exportName.get()))
-         return nullptr;
- 
--    return std::move(exportName);
-+    return exportName;
- }
- 
- static bool
- DecodeExport(Decoder& d, ModuleEnvironment* env, CStringSet* dupSet)
- {
-     UniqueChars fieldName = DecodeExportName(d, dupSet);
-     if (!fieldName)
-         return false;
-diff --git a/js/xpconnect/loader/URLPreloader.cpp b/js/xpconnect/loader/URLPreloader.cpp
---- a/js/xpconnect/loader/URLPreloader.cpp
-+++ b/js/xpconnect/loader/URLPreloader.cpp
-@@ -612,21 +612,21 @@ URLPreloader::ShallowSizeOfIncludingThis
- 
- Result<FileLocation, nsresult>
- URLPreloader::CacheKey::ToFileLocation()
- {
-     if (mType == TypeFile) {
-         nsCOMPtr<nsIFile> file;
-         MOZ_TRY(NS_NewLocalFile(NS_ConvertUTF8toUTF16(mPath), false,
-                                 getter_AddRefs(file)));
--        return std::move(FileLocation(file));
-+        return FileLocation(file);
-     }
- 
-     RefPtr<nsZipArchive> zip = Archive();
--    return std::move(FileLocation(zip, mPath.get()));
-+    return FileLocation(zip, mPath.get());
- }
- 
- Result<const nsCString, nsresult>
- URLPreloader::URLEntry::Read()
- {
-     FileLocation location;
-     MOZ_TRY_VAR(location, ToFileLocation());
- 
-diff --git a/layout/base/PresShell.cpp b/layout/base/PresShell.cpp
---- a/layout/base/PresShell.cpp
-+++ b/layout/base/PresShell.cpp
-@@ -6227,17 +6227,17 @@ PresShell::Paint(nsView*         aViewTo
-       bool computeInvalidRect = computeInvalidFunc ||
-                                 (layerManager->GetBackendType() == LayersBackend::LAYERS_BASIC);
- 
-       UniquePtr<LayerProperties> props;
-       // For WR, the layermanager has no root layer. We want to avoid
-       // calling ComputeDifferences in that case because it assumes non-null
-       // and crashes.
-       if (computeInvalidRect && layerManager->GetRoot()) {
--        props = std::move(LayerProperties::CloneFrom(layerManager->GetRoot()));
-+        props = LayerProperties::CloneFrom(layerManager->GetRoot());
-       }
- 
-       MaybeSetupTransactionIdAllocator(layerManager, presContext);
- 
-       if (layerManager->EndEmptyTransaction((aFlags & PAINT_COMPOSITE) ?
-             LayerManager::END_DEFAULT : LayerManager::END_NO_COMPOSITE)) {
-         nsIntRegion invalid;
-         bool areaOverflowed = false;
-diff --git a/layout/painting/FrameLayerBuilder.cpp b/layout/painting/FrameLayerBuilder.cpp
---- a/layout/painting/FrameLayerBuilder.cpp
-+++ b/layout/painting/FrameLayerBuilder.cpp
-@@ -6192,18 +6192,18 @@ FrameLayerBuilder::DrawPaintedLayer(Pain
-     FlashPaint(aContext);
-   }
- 
-   if (presContext->GetDocShell() && isActiveLayerManager) {
-     nsDocShell* docShell = static_cast<nsDocShell*>(presContext->GetDocShell());
-     RefPtr<TimelineConsumers> timelines = TimelineConsumers::Get();
- 
-     if (timelines && timelines->HasConsumer(docShell)) {
--      timelines->AddMarkerForDocShell(docShell, std::move(
--        MakeUnique<LayerTimelineMarker>(aRegionToDraw)));
-+      timelines->AddMarkerForDocShell(docShell,
-+        MakeUnique<LayerTimelineMarker>(aRegionToDraw));
-     }
-   }
- 
-   if (!aRegionToInvalidate.IsEmpty()) {
-     aLayer->AddInvalidRect(aRegionToInvalidate.GetBounds());
-   }
- }
- 
-diff --git a/layout/painting/nsDisplayList.cpp b/layout/painting/nsDisplayList.cpp
---- a/layout/painting/nsDisplayList.cpp
-+++ b/layout/painting/nsDisplayList.cpp
-@@ -2663,17 +2663,17 @@ already_AddRefed<LayerManager> nsDisplay
- 
-   UniquePtr<LayerProperties> props;
- 
-   bool computeInvalidRect = (computeInvalidFunc ||
-                              (!layerManager->IsCompositingCheap() && layerManager->NeedsWidgetInvalidation())) &&
-                             widgetTransaction;
- 
-   if (computeInvalidRect) {
--    props = std::move(LayerProperties::CloneFrom(layerManager->GetRoot()));
-+    props = LayerProperties::CloneFrom(layerManager->GetRoot());
-   }
- 
-   if (doBeginTransaction) {
-     if (aCtx) {
-       if (!layerManager->BeginTransactionWithTarget(aCtx)) {
-         return nullptr;
-       }
-     } else {
-diff --git a/layout/style/nsAnimationManager.cpp b/layout/style/nsAnimationManager.cpp
---- a/layout/style/nsAnimationManager.cpp
-+++ b/layout/style/nsAnimationManager.cpp
-@@ -915,17 +915,17 @@ GeckoCSSAnimationBuilder::GetKeyframePro
-     DebugOnly<bool> uncomputeResult =
-       StyleAnimationValue::UncomputeValue(prop, std::move(computedValue),
-                                           propertyValue);
-     MOZ_ASSERT(uncomputeResult,
-                "Unable to get specified value from computed value");
-     MOZ_ASSERT(propertyValue.GetUnit() != eCSSUnit_Null,
-                "Not expecting to read invalid properties");
- 
--    result.AppendElement(std::move(PropertyValuePair(prop, std::move(propertyValue))));
-+    result.AppendElement(PropertyValuePair(prop, std::move(propertyValue)));
-     aAnimatedProperties.AddProperty(prop);
-   }
- 
-   return result;
- }
- 
- void
- GeckoCSSAnimationBuilder::FillInMissingKeyframeValues(
-@@ -985,22 +985,22 @@ GeckoCSSAnimationBuilder::FillInMissingK
-        prop < eCSSProperty_COUNT_no_shorthands;
-        prop = nsCSSPropertyID(prop + 1)) {
-     if (!aAnimatedProperties.HasProperty(prop)) {
-       continue;
-     }
- 
-     if (startKeyframe && !aPropertiesSetAtStart.HasProperty(prop)) {
-       // An uninitialized nsCSSValue represents the underlying value.
--      PropertyValuePair propertyValue(prop, std::move(nsCSSValue()));
-+      PropertyValuePair propertyValue(prop, nsCSSValue());
-       startKeyframe->mPropertyValues.AppendElement(std::move(propertyValue));
-     }
-     if (endKeyframe && !aPropertiesSetAtEnd.HasProperty(prop)) {
-       // An uninitialized nsCSSValue represents the underlying value.
--      PropertyValuePair propertyValue(prop, std::move(nsCSSValue()));
-+      PropertyValuePair propertyValue(prop, nsCSSValue());
-       endKeyframe->mPropertyValues.AppendElement(std::move(propertyValue));
-     }
-   }
- }
- #endif
- 
- template<class BuilderType>
- static nsAnimationManager::OwningCSSAnimationPtrArray
-diff --git a/layout/style/nsTransitionManager.cpp b/layout/style/nsTransitionManager.cpp
---- a/layout/style/nsTransitionManager.cpp
-+++ b/layout/style/nsTransitionManager.cpp
-@@ -794,27 +794,27 @@ AppendKeyframe(double aOffset,
- {
-   Keyframe& frame = *aKeyframes.AppendElement();
-   frame.mOffset.emplace(aOffset);
- 
-   if (aValue.mServo) {
-     RefPtr<RawServoDeclarationBlock> decl =
-       Servo_AnimationValue_Uncompute(aValue.mServo).Consume();
-     frame.mPropertyValues.AppendElement(
--      std::move(PropertyValuePair(aProperty, std::move(decl))));
-+      PropertyValuePair(aProperty, std::move(decl)));
-   } else {
- #ifdef MOZ_OLD_STYLE
-     nsCSSValue propertyValue;
-     DebugOnly<bool> uncomputeResult =
-       StyleAnimationValue::UncomputeValue(aProperty, std::move(aValue.mGecko),
-                                           propertyValue);
-     MOZ_ASSERT(uncomputeResult,
-                "Unable to get specified value from computed value");
-     frame.mPropertyValues.AppendElement(
--      std::move(PropertyValuePair(aProperty, std::move(propertyValue))));
-+      PropertyValuePair(aProperty, std::move(propertyValue)));
- #else
-     MOZ_CRASH("old style system disabled");
- #endif
-   }
-   return frame;
- }
- 
- static nsTArray<Keyframe>
-diff --git a/media/gmp-clearkey/0.1/ClearKeyPersistence.cpp b/media/gmp-clearkey/0.1/ClearKeyPersistence.cpp
---- a/media/gmp-clearkey/0.1/ClearKeyPersistence.cpp
-+++ b/media/gmp-clearkey/0.1/ClearKeyPersistence.cpp
-@@ -59,17 +59,17 @@ ClearKeyPersistence::ReadAllRecordsFromI
-     [self, aOnComplete] ()
-   {
-     CK_LOGD("ClearKeyPersistence: Failed to load index file (it might not exist");
-     self->mPersistentKeyState = PersistentKeyState::LOADED;
-     aOnComplete();
-   };
- 
-   string filename = "index";
--  ReadData(mHost, filename, move(onIndexSuccess), move(onIndexFailed));
-+  ReadData(mHost, filename, std::move(onIndexSuccess), std::move(onIndexFailed));
- }
- 
- void
- ClearKeyPersistence::WriteIndex() {
-   function <void()> onIndexSuccess =
-     [] ()
-   {
-     CK_LOGD("ClearKeyPersistence: Wrote index file");
-@@ -91,34 +91,34 @@ ClearKeyPersistence::WriteIndex() {
-   string dataString = ss.str();
-   uint8_t* dataArray = (uint8_t*)dataString.data();
-   vector<uint8_t> data(dataArray, dataArray + dataString.size());
- 
-   string filename = "index";
-   WriteData(mHost,
-             filename,
-             data,
--            move(onIndexSuccess),
--            move(onIndexFail));
-+            std::move(onIndexSuccess),
-+            std::move(onIndexFail));
- }
- 
- 
- ClearKeyPersistence::ClearKeyPersistence(Host_9* aHost)
- {
-   this->mHost = aHost;
- }
- 
- void
- ClearKeyPersistence::EnsureInitialized(bool aPersistentStateAllowed,
-                                        function<void()>&& aOnInitialized)
- {
-   if (aPersistentStateAllowed &&
-       mPersistentKeyState == PersistentKeyState::UNINITIALIZED) {
-     mPersistentKeyState = LOADING;
--    ReadAllRecordsFromIndex(move(aOnInitialized));
-+    ReadAllRecordsFromIndex(std::move(aOnInitialized));
-   } else {
-     mPersistentKeyState = PersistentKeyState::LOADED;
-     aOnInitialized();
-   }
- }
- 
- bool ClearKeyPersistence::IsLoaded() const
- {
-diff --git a/media/gmp-clearkey/0.1/ClearKeySessionManager.cpp b/media/gmp-clearkey/0.1/ClearKeySessionManager.cpp
---- a/media/gmp-clearkey/0.1/ClearKeySessionManager.cpp
-+++ b/media/gmp-clearkey/0.1/ClearKeySessionManager.cpp
-@@ -62,17 +62,17 @@ ClearKeySessionManager::Init(bool aDisti
-       function<void()> func = self->mDeferredInitialize.front();
-       self->mDeferredInitialize.pop();
- 
-       func();
-     }
-   };
- 
-   mPersistence->EnsureInitialized(aPersistentStateAllowed,
--                                  move(onPersistentStateLoaded));
-+                                  std::move(onPersistentStateLoaded));
- }
- 
- void
- ClearKeySessionManager::CreateSession(uint32_t aPromiseId,
-                                       InitDataType aInitDataType,
-                                       const uint8_t* aInitData,
-                                       uint32_t aInitDataSize,
-                                       SessionType aSessionType)
-@@ -89,17 +89,17 @@ ClearKeySessionManager::CreateSession(ui
-     self->CreateSession(aPromiseId,
-                         aInitDataType,
-                         initData.data(),
-                         initData.size(),
-                         aSessionType);
-   };
- 
-   // If we haven't loaded, don't do this yet
--  if (MaybeDeferTillInitialized(move(deferrer))) {
-+  if (MaybeDeferTillInitialized(std::move(deferrer))) {
-     CK_LOGD("Deferring CreateSession");
-     return;
-   }
- 
-   CK_LOGARRAY("ClearKeySessionManager::CreateSession initdata: ",
-               aInitData,
-               aInitDataSize);
- 
-@@ -195,17 +195,17 @@ ClearKeySessionManager::LoadSession(uint
-   // we try to use it.
-   RefPtr<ClearKeySessionManager> self(this);
-   function<void()> deferrer =
-     [self, aPromiseId, sessionId] ()
-   {
-     self->LoadSession(aPromiseId, sessionId.data(), sessionId.size());
-   };
- 
--  if (MaybeDeferTillInitialized(move(deferrer))) {
-+  if (MaybeDeferTillInitialized(std::move(deferrer))) {
-     CK_LOGD("Deferring LoadSession");
-     return;
-   }
- 
-   // If the SessionManager has been shutdown mHost will be null and we won't
-   // be able to resolve the promise.
-   if (!mHost) {
-     return;
-@@ -233,17 +233,17 @@ ClearKeySessionManager::LoadSession(uint
-   function<void()> failure = [self, aPromiseId] {
-     if (!self->mHost) {
-       return;
-     }
-     // As per the API described in ContentDecryptionModule_8
-     self->mHost->OnResolveNewSessionPromise(aPromiseId, nullptr, 0);
-   };
- 
--  ReadData(mHost, sessionId, move(success), move(failure));
-+  ReadData(mHost, sessionId, std::move(success), std::move(failure));
- }
- 
- void
- ClearKeySessionManager::PersistentSessionDataLoaded(uint32_t aPromiseId,
-                                                     const string& aSessionId,
-                                                     const uint8_t* aKeyData,
-                                                     uint32_t aKeyDataSize)
- {
-@@ -331,17 +331,17 @@ ClearKeySessionManager::UpdateSession(ui
-     self->UpdateSession(aPromiseId,
-                         sessionId.data(),
-                         sessionId.size(),
-                         response.data(),
-                         response.size());
-   };
- 
-   // If we haven't fully loaded, defer calling this method
--  if (MaybeDeferTillInitialized(move(deferrer))) {
-+  if (MaybeDeferTillInitialized(std::move(deferrer))) {
-     CK_LOGD("Deferring LoadSession");
-     return;
-   }
- 
-   // Make sure the SessionManager has not been shutdown before we try and
-   // resolve any promises.
-   if (!mHost) {
-     return;
-@@ -441,17 +441,17 @@ ClearKeySessionManager::UpdateSession(ui
-     static const char* message = "Couldn't store cenc key init data";
-     self->mHost->OnRejectPromise(aPromiseId,
-                                  Exception::kExceptionInvalidStateError,
-                                  0,
-                                  message,
-                                  strlen(message));
-   };
- 
--  WriteData(mHost, sessionId, keydata, move(resolve), move(reject));
-+  WriteData(mHost, sessionId, keydata, std::move(resolve), std::move(reject));
- }
- 
- void
- ClearKeySessionManager::Serialize(const ClearKeySession* aSession,
-                                   std::vector<uint8_t>& aOutKeyData)
- {
-   const std::vector<KeyId>& keyIds = aSession->GetKeyIds();
-   for (size_t i = 0; i < keyIds.size(); i++) {
-@@ -481,17 +481,17 @@ ClearKeySessionManager::CloseSession(uin
-   RefPtr<ClearKeySessionManager> self(this);
-   function<void()> deferrer =
-     [self, aPromiseId, sessionId] ()
-   {
-     self->CloseSession(aPromiseId, sessionId.data(), sessionId.size());
-   };
- 
-   // If we haven't loaded, call this method later.
--  if (MaybeDeferTillInitialized(move(deferrer))) {
-+  if (MaybeDeferTillInitialized(std::move(deferrer))) {
-     CK_LOGD("Deferring CloseSession");
-     return;
-   }
- 
-   // If DecryptingComplete has been called mHost will be null and we won't
-   // be able to resolve our promise.
-   if (!mHost) {
-     return;
-@@ -540,17 +540,17 @@ ClearKeySessionManager::RemoveSession(ui
-   RefPtr<ClearKeySessionManager> self(this);
-   function<void()> deferrer =
-     [self, aPromiseId, sessionId] ()
-   {
-     self->RemoveSession(aPromiseId, sessionId.data(), sessionId.size());
-   };
- 
-   // If we haven't fully loaded, defer calling this method.
--  if (MaybeDeferTillInitialized(move(deferrer))) {
-+  if (MaybeDeferTillInitialized(std::move(deferrer))) {
-     CK_LOGD("Deferring RemoveSession");
-     return;
-   }
- 
-   // Check that the SessionManager has not been shutdown before we try and
-   // resolve any promises.
-   if (!mHost) {
-     return;
-@@ -600,17 +600,17 @@ ClearKeySessionManager::RemoveSession(ui
-     static const char* message = "Could not remove session";
-     self->mHost->OnRejectPromise(aPromiseId,
-                                  Exception::kExceptionTypeError,
-                                  0,
-                                  message,
-                                  strlen(message));
-   };
- 
--  WriteData(mHost, sessionId, emptyKeydata, move(resolve), move(reject));
-+  WriteData(mHost, sessionId, emptyKeydata, std::move(resolve), std::move(reject));
- }
- 
- void
- ClearKeySessionManager::SetServerCertificate(uint32_t aPromiseId,
-                                              const uint8_t* aServerCert,
-                                              uint32_t aServerCertSize)
- {
-   // ClearKey CDM doesn't support this method by spec.
-@@ -669,11 +669,11 @@ ClearKeySessionManager::DecryptingComple
- }
- 
- bool ClearKeySessionManager::MaybeDeferTillInitialized(function<void()>&& aMaybeDefer)
- {
-   if (mPersistence->IsLoaded()) {
-     return false;
-   }
- 
--  mDeferredInitialize.emplace(move(aMaybeDefer));
-+  mDeferredInitialize.emplace(std::move(aMaybeDefer));
-   return true;
- }
-diff --git a/media/gmp-clearkey/0.1/ClearKeyStorage.cpp b/media/gmp-clearkey/0.1/ClearKeyStorage.cpp
---- a/media/gmp-clearkey/0.1/ClearKeyStorage.cpp
-+++ b/media/gmp-clearkey/0.1/ClearKeyStorage.cpp
-@@ -39,18 +39,18 @@ public:
-    */
-   static void Write(Host_9* aHost,
-                     string& aRecordName,
-                     const vector<uint8_t>& aData,
-                     function<void()>&& aOnSuccess,
-                     function<void()>&& aOnFailure)
- {
-     WriteRecordClient* client = new WriteRecordClient(aData,
--                                                      move(aOnSuccess),
--                                                      move(aOnFailure));
-+                                                      std::move(aOnSuccess),
-+                                                      std::move(aOnFailure));
-     client->Do(aRecordName, aHost);
-   }
- 
-   void OnOpenComplete(Status aStatus) override
-   {
-     // If we hit an error, fail.
-     if (aStatus != Status::kSuccess) {
-       Done(aStatus);
-@@ -73,18 +73,18 @@ public:
-     Done(aStatus);
-   }
- 
- private:
-   explicit WriteRecordClient(const vector<uint8_t>& aData,
-                              function<void()>&& aOnSuccess,
-                              function<void()>&& aOnFailure)
-     : mFileIO(nullptr)
--    , mOnSuccess(move(aOnSuccess))
--    , mOnFailure(move(aOnFailure))
-+    , mOnSuccess(std::move(aOnSuccess))
-+    , mOnFailure(std::move(aOnFailure))
-     , mData(aData) {}
- 
-   void Do(const string& aName, Host_9* aHost)
-   {
-     // Initialize the FileIO.
-     mFileIO = aHost->CreateFileIO(this);
-     mFileIO->Open(aName.c_str(), aName.size());
-   }
-@@ -122,34 +122,34 @@ WriteData(Host_9* aHost,
-           string& aRecordName,
-           const vector<uint8_t>& aData,
-           function<void()>&& aOnSuccess,
-           function<void()>&& aOnFailure)
- {
-   WriteRecordClient::Write(aHost,
-                            aRecordName,
-                            aData,
--                           move(aOnSuccess),
--                           move(aOnFailure));
-+                           std::move(aOnSuccess),
-+                           std::move(aOnFailure));
- }
- 
- class ReadRecordClient : public FileIOClient
- {
- public:
-   /*
-    * This function will take the memory ownership of the parameters and
-    * delete them when done.
-    */
-   static void Read(Host_9* aHost,
-                    string& aRecordName,
-                    function<void(const uint8_t*, uint32_t)>&& aOnSuccess,
-                    function<void()>&& aOnFailure)
-   {
- 
--    (new ReadRecordClient(move(aOnSuccess), move(aOnFailure)))->
-+    (new ReadRecordClient(std::move(aOnSuccess), std::move(aOnFailure)))->
-       Do(aRecordName, aHost);
-   }
- 
-   void OnOpenComplete(Status aStatus) override
-   {
-     auto err = aStatus;
-     if (aStatus != Status::kSuccess) {
-       Done(err, nullptr, 0);
-@@ -170,18 +170,18 @@ public:
-     // We should never reach here, this client only ever reads data.
-     assert(false);
-   }
- 
- private:
-   explicit ReadRecordClient(function<void(const uint8_t*, uint32_t)>&& aOnSuccess,
-                             function<void()>&& aOnFailure)
-     : mFileIO(nullptr)
--    , mOnSuccess(move(aOnSuccess))
--    , mOnFailure(move(aOnFailure))
-+    , mOnSuccess(std::move(aOnSuccess))
-+    , mOnFailure(std::move(aOnFailure))
-   {}
- 
-   void Do(const string& aName, Host_9* aHost)
-   {
-     mFileIO = aHost->CreateFileIO(this);
-     mFileIO->Open(aName.c_str(), aName.size());
-   }
- 
-@@ -216,11 +216,11 @@ private:
- void
- ReadData(Host_9* mHost,
-          string& aRecordName,
-          function<void(const uint8_t*, uint32_t)>&& aOnSuccess,
-          function<void()>&& aOnFailure)
- {
-   ReadRecordClient::Read(mHost,
-                          aRecordName,
--                         move(aOnSuccess),
--                         move(aOnFailure));
-+                         std::move(aOnSuccess),
-+                         std::move(aOnFailure));
- }
-diff --git a/media/webrtc/signaling/gtest/sdp_unittests.cpp.1465060.later b/media/webrtc/signaling/gtest/sdp_unittests.cpp.1465060.later
-new file mode 100644
---- /dev/null
-+++ b/media/webrtc/signaling/gtest/sdp_unittests.cpp.1465060.later
-@@ -0,0 +1,46 @@
-+--- sdp_unittests.cpp
-++++ sdp_unittests.cpp
-+@@ -1506,39 +1506,39 @@ class NewSdpTest : public ::testing::Tes
-+                    public ::testing::WithParamInterface<
-+                      ::testing::tuple<bool, bool> > {
-+   public:
-+     NewSdpTest() {}
-+ 
-+     void ParseSdp(const std::string &sdp, bool expectSuccess = true) {
-+       if (::testing::get<1>(GetParam())) {
-+         mSdpErrorHolder = &mSipccParser;
-+-        mSdp = std::move(mSipccParser.Parse(sdp));
-++        mSdp = mSipccParser.Parse(sdp);
-+       } else {
-+         mSdpErrorHolder = &mRustParser;
-+-        mSdp = std::move(mRustParser.Parse(sdp));
-++        mSdp = mRustParser.Parse(sdp);
-+       }
-+ 
-+       // Are we configured to do a parse and serialize before actually
-+       // running the test?
-+       if (::testing::get<0>(GetParam())) {
-+         std::stringstream os;
-+ 
-+         if (expectSuccess) {
-+           ASSERT_TRUE(!!mSdp) << "Parse failed on first pass: "
-+                               << GetParseErrors();
-+         }
-+ 
-+         if (mSdp) {
-+           // Serialize and re-parse
-+           mSdp->Serialize(os);
-+           if (::testing::get<1>(GetParam())) {
-+-            mSdp = std::move(mSipccParser.Parse(os.str()));
-++            mSdp = mSipccParser.Parse(os.str());
-+           } else {
-+-            mSdp = std::move(mRustParser.Parse(os.str()));
-++            mSdp = mRustParser.Parse(os.str());
-+           }
-+ 
-+           // Whether we expected the parse to work or not, it should
-+           // succeed the second time if it succeeded the first.
-+           ASSERT_TRUE(!!mSdp) << "Parse failed on second pass, SDP was: "
-+             << std::endl << os.str() <<  std::endl
-+             << "Errors were: " << GetParseErrors();
-+ 
-diff --git a/mfbt/tests/TestUniquePtr.cpp b/mfbt/tests/TestUniquePtr.cpp
---- a/mfbt/tests/TestUniquePtr.cpp
-+++ b/mfbt/tests/TestUniquePtr.cpp
-@@ -70,17 +70,17 @@ ReturnUniqueA()
- {
-   return UniqueA(new B);
- }
- 
- static UniqueA
- ReturnLocalA()
- {
-   UniqueA a(new A);
--  return std::move(a);
-+  return a;
- }
- 
- static void
- TestDeleterType()
- {
-   // Make sure UniquePtr will use its deleter's pointer type if it defines one.
-   typedef int* Ptr;
-   struct Deleter {
-@@ -363,17 +363,17 @@ SetMallocedInt(UniquePtr<int, FreeSignat
- }
- 
- static UniquePtr<int, FreeSignature>
- MallocedInt(int aI)
- {
-   UniquePtr<int, FreeSignature>
-     ptr(static_cast<int*>(malloc(sizeof(int))), free);
-   *ptr = aI;
--  return std::move(ptr);
-+  return ptr;
- }
- static bool
- TestFunctionReferenceDeleter()
- {
-   // Look for allocator mismatches and leaks to verify these bits
-   UniquePtr<int, FreeSignature> i1(MallocedInt(17));
-   CHECK(*i1 == 17);
- 
-diff --git a/security/sandbox/linux/broker/SandboxBroker.cpp b/security/sandbox/linux/broker/SandboxBroker.cpp
---- a/security/sandbox/linux/broker/SandboxBroker.cpp
-+++ b/security/sandbox/linux/broker/SandboxBroker.cpp
-@@ -72,17 +72,17 @@ SandboxBroker::Create(UniquePtr<const Po
-   // Can't use MakeUnique here because the constructor is private.
-   UniquePtr<SandboxBroker> rv(new SandboxBroker(std::move(aPolicy), aChildPid,
-                                                 clientFd));
-   if (clientFd < 0) {
-     rv = nullptr;
-   } else {
-     aClientFdOut = ipc::FileDescriptor(clientFd);
-   }
--  return std::move(rv);
-+  return rv;
- }
- 
- SandboxBroker::~SandboxBroker() {
-   // If the constructor failed, there's nothing to be done here.
-   if (mFileDesc < 0) {
-     return;
-   }
- 
-diff --git a/security/sandbox/linux/reporter/SandboxReporter.cpp b/security/sandbox/linux/reporter/SandboxReporter.cpp
---- a/security/sandbox/linux/reporter/SandboxReporter.cpp
-+++ b/security/sandbox/linux/reporter/SandboxReporter.cpp
-@@ -284,16 +284,12 @@ SandboxReporter::GetSnapshot()
-   snapshot.mOffset = start;
-   snapshot.mReports.Clear();
-   snapshot.mReports.SetCapacity(mCount - start);
-   for (size_t i = start; i < mCount; ++i) {
-     const SandboxReport* rep = &mBuffer[i % kSandboxReporterBufferSize];
-     MOZ_ASSERT(rep->IsValid());
-     snapshot.mReports.AppendElement(*rep);
-   }
--  // Named Return Value Optimization would apply here, but C++11
--  // doesn't require it; so, instead of possibly copying the entire
--  // array contents, invoke the move constructor and copy at most a
--  // few words.
--  return std::move(snapshot);
-+  return snapshot;
- }
- 
- } // namespace mozilla
-diff --git a/toolkit/components/extensions/webrequest/WebRequestService.cpp b/toolkit/components/extensions/webrequest/WebRequestService.cpp
---- a/toolkit/components/extensions/webrequest/WebRequestService.cpp
-+++ b/toolkit/components/extensions/webrequest/WebRequestService.cpp
-@@ -42,17 +42,17 @@ UniquePtr<WebRequestChannelEntry>
- WebRequestService::RegisterChannel(ChannelWrapper* aChannel)
- {
-   UniquePtr<ChannelEntry> entry(new ChannelEntry(aChannel));
- 
-   auto key = mChannelEntries.LookupForAdd(entry->mChannelId);
-   MOZ_DIAGNOSTIC_ASSERT(!key);
-   key.OrInsert([&entry]() { return entry.get(); });
- 
--  return std::move(entry);
-+  return entry;
- 
- }
- 
- already_AddRefed<nsITraceableChannel>
- WebRequestService::GetTraceableChannel(uint64_t aChannelId,
-                                        nsIAtom* aAddonId,
-                                        nsIContentParent* aContentParent)
- {
-diff --git a/toolkit/components/url-classifier/tests/gtest/TestClassifier.cpp b/toolkit/components/url-classifier/tests/gtest/TestClassifier.cpp
---- a/toolkit/components/url-classifier/tests/gtest/TestClassifier.cpp
-+++ b/toolkit/components/url-classifier/tests/gtest/TestClassifier.cpp
-@@ -17,17 +17,17 @@ GetClassifier()
- {
-   nsCOMPtr<nsIFile> file;
-   NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR, getter_AddRefs(file));
- 
-   UniquePtr<Classifier> classifier = MakeUnique<Classifier>();
-   nsresult rv = classifier->Open(*file);
-   EXPECT_TRUE(rv == NS_OK);
- 
--  return std::move(classifier);
-+  return classifier;
- }
- 
- static nsresult
- SetupLookupCacheV4(Classifier* classifier,
-                    const _PrefixArray& aPrefixArray,
-                    const nsACString& aTable)
- {
-   LookupCacheV4* lookupCache =
-diff --git a/xpcom/tests/gtest/TestPLDHash.cpp b/xpcom/tests/gtest/TestPLDHash.cpp
---- a/xpcom/tests/gtest/TestPLDHash.cpp
-+++ b/xpcom/tests/gtest/TestPLDHash.cpp
-@@ -192,17 +192,24 @@ static const PLDHashTableOps trivialOps 
- 
- TEST(PLDHashTableTest, MoveSemantics)
- {
-   PLDHashTable t1(&trivialOps, sizeof(PLDHashEntryStub));
-   t1.Add((const void*)88);
-   PLDHashTable t2(&trivialOps, sizeof(PLDHashEntryStub));
-   t2.Add((const void*)99);
- 
-+#if defined(__clang__)
-+#pragma clang diagnostic push
-+#pragma clang diagnostic ignored "-Wself-move"
-+#endif
-   t1 = std::move(t1);   // self-move
-+#if defined(__clang__)
-+#pragma clang diagnostic pop
-+#endif
- 
-   t1 = std::move(t2);   // empty overwritten with empty
- 
-   PLDHashTable t3(&trivialOps, sizeof(PLDHashEntryStub));
-   PLDHashTable t4(&trivialOps, sizeof(PLDHashEntryStub));
-   t3.Add((const void*)88);
- 
-   t3 = std::move(t4);   // non-empty overwritten with empty
-diff --git a/xpcom/tests/gtest/TestTArray.cpp b/xpcom/tests/gtest/TestTArray.cpp
---- a/xpcom/tests/gtest/TestTArray.cpp
-+++ b/xpcom/tests/gtest/TestTArray.cpp
-@@ -133,18 +133,26 @@ TEST(TArray, Assign)
- 
- TEST(TArray, AssignmentOperatorSelfAssignment)
- {
-   nsTArray<int> array;
-   array = DummyArray();
- 
-   array = array;
-   ASSERT_EQ(DummyArray(), array);
--  array = std::move(array);
-+
-+#if defined(__clang__)
-+#pragma clang diagnostic push
-+#pragma clang diagnostic ignored "-Wself-move"
-+#endif
-+  array = std::move(array); // self-move
-   ASSERT_EQ(DummyArray(), array);
-+#if defined(__clang__)
-+#pragma clang diagnostic pop
-+#endif
- }
- 
- TEST(TArray, CopyOverlappingForwards)
- {
-   const size_t rangeLength = 8;
-   const size_t initialLength = 2 * rangeLength;
-   uint32_t destructionCounters[initialLength];
-   nsTArray<Movable> array;
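 
A note on the return std::move(...) removals in the hunks above (and on the Named Return Value Optimization comment they delete from SandboxReporter.cpp): returning a named local by value is already eligible for copy elision, so wrapping the operand in std::move is at best redundant and at worst forces a move the compiler could have elided entirely. A minimal standalone C++ sketch of the two forms, independent of the tree's code:

    #include <utility>
    #include <vector>

    // Returning the named local by value leaves the compiler free to
    // construct the result directly in the caller's storage (NRVO).
    static std::vector<int> ReturnLocal() {
      std::vector<int> v(1000, 7);
      return v;
    }

    // Wrapping the operand in std::move yields an xvalue, which
    // disqualifies copy elision and forces an actual move; modern
    // compilers flag this with -Wpessimizing-move.
    static std::vector<int> ReturnMovedLocal() {
      std::vector<int> v(1000, 7);
      return std::move(v);
    }

    int main() {
      return ReturnLocal() == ReturnMovedLocal() ? 0 : 1;
    }

The -Wself-move pragma hunks in TestPLDHash.cpp and TestTArray.cpp are the mirror-image case: those tests exercise self-assignment on purpose, so the clang diagnostic has to be suppressed locally rather than "fixed".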

+ 139 - 0
frg/work-js/mozilla-release/patches/1471289-63a1.patch

@@ -0,0 +1,139 @@
+# HG changeset patch
+# User Lars T Hansen <lhansen@mozilla.com>
+# Date 1530173163 -7200
+# Node ID 7c3c2d6c17d7563fa2ea242ee2ae08be4685370e
+# Parent  de900ae11c0287126721a63a408e699cfcd05700
+Bug 1471289 - Handle 'new WebAssembly.Global' on i64 with default value. r=jseward
+
+As a tweak the wasm CG has decided to allow the special case where an i64 global is
+created from JS with the default zero value, while we're waiting for BigInt and
+more general i64 support.  This amounts to removing the error we would otherwise
+signal in this case.
+
+diff --git a/js/src/jit-test/tests/wasm/globals.js b/js/src/jit-test/tests/wasm/globals.js
+--- a/js/src/jit-test/tests/wasm/globals.js
++++ b/js/src/jit-test/tests/wasm/globals.js
+@@ -310,35 +310,42 @@ wasmAssert(`(module
+ // WebAssembly.Global
+ {
+     const Global = WebAssembly.Global;
+ 
+     // These types should work:
+     assertEq(new Global({value: "i32"}) instanceof Global, true);
+     assertEq(new Global({value: "f32"}) instanceof Global, true);
+     assertEq(new Global({value: "f64"}) instanceof Global, true);
++    assertEq(new Global({value: "i64"}) instanceof Global, true); // No initial value works
+ 
+     // These types should not work:
+-    assertErrorMessage(() => new Global({value: "i64"}),   TypeError, /bad type for a WebAssembly.Global/);
+-    assertErrorMessage(() => new Global({}),               TypeError, /bad type for a WebAssembly.Global/);
+-    assertErrorMessage(() => new Global({value: "fnord"}), TypeError, /bad type for a WebAssembly.Global/);
+-    assertErrorMessage(() => new Global(),                 TypeError, /Global requires more than 0 arguments/);
++    assertErrorMessage(() => new Global({}),                TypeError, /bad type for a WebAssembly.Global/);
++    assertErrorMessage(() => new Global({value: "fnord"}),  TypeError, /bad type for a WebAssembly.Global/);
++    assertErrorMessage(() => new Global(),                  TypeError, /Global requires more than 0 arguments/);
++    assertErrorMessage(() => new Global({value: "i64"}, 0), TypeError, /bad type for a WebAssembly.Global/); // Initial value does not work
+ 
+     // Coercion of init value; ".value" accessor
+     assertEq((new Global({value: "i32"}, 3.14)).value, 3);
+     assertEq((new Global({value: "f32"}, { valueOf: () => 33.5 })).value, 33.5);
+     assertEq((new Global({value: "f64"}, "3.25")).value, 3.25);
+ 
+     // Nothing special about NaN, it coerces just fine
+     assertEq((new Global({value: "i32"}, NaN)).value, 0);
+ 
+     // The default init value is zero.
+     assertEq((new Global({value: "i32"})).value, 0);
+     assertEq((new Global({value: "f32"})).value, 0);
+     assertEq((new Global({value: "f64"})).value, 0);
++    let mod = wasmEvalText(`(module
++                             (import "" "g" (global i64))
++                             (func (export "f") (result i32)
++                              (i64.eqz (get_global 0))))`,
++                           {"":{g: new Global({value: "i64"})}});
++    assertEq(mod.exports.f(), 1);
+ 
+     {
+         // "value" is enumerable
+         let x = new Global({value: "i32"});
+         let s = "";
+         for ( let i in x )
+             s = s + i + ",";
+         assertEq(s, "value,");
+diff --git a/js/src/jit-test/tests/wasm/spec/jsapi.js b/js/src/jit-test/tests/wasm/spec/jsapi.js
+--- a/js/src/jit-test/tests/wasm/spec/jsapi.js
++++ b/js/src/jit-test/tests/wasm/spec/jsapi.js
+@@ -687,24 +687,27 @@ test(() => {
+     const globalDesc = Object.getOwnPropertyDescriptor(WebAssembly, 'Global');
+     assert_equals(Global, globalDesc.value);
+     assert_equals(Global.length, 1);
+     assert_equals(Global.name, "Global");
+     assertThrows(() => Global(), TypeError);
+     assertThrows(() => new Global(1), TypeError);
+     assertThrows(() => new Global({}), TypeError);
+     assertThrows(() => new Global({value: 'foo'}), TypeError);
+-    assertThrows(() => new Global({value: 'i64'}), TypeError);
++    assertThrows(() => new Global({value: 'i64'}, 0), TypeError);
+     assert_equals(new Global({value:'i32'}) instanceof Global, true);
++    assert_equals(new Global({value:'i64'}) instanceof Global, true);
+     assert_equals(new Global({value:'f32'}) instanceof Global, true);
+     assert_equals(new Global({value:'f64'}) instanceof Global, true);
+     assert_equals(new Global({value:'i32', mutable: false}) instanceof Global, true);
++    assert_equals(new Global({value:'i64', mutable: false}) instanceof Global, true);
+     assert_equals(new Global({value:'f64', mutable: false}) instanceof Global, true);
+     assert_equals(new Global({value:'f64', mutable: false}) instanceof Global, true);
+     assert_equals(new Global({value:'i32', mutable: true}) instanceof Global, true);
++    assert_equals(new Global({value:'i64', mutable: true}) instanceof Global, true);
+     assert_equals(new Global({value:'f64', mutable: true}) instanceof Global, true);
+     assert_equals(new Global({value:'f64', mutable: true}) instanceof Global, true);
+     assert_equals(new Global({value:'i32'}, 0x132) instanceof Global, true);
+     assert_equals(new Global({value:'f32'}, 0xf32) instanceof Global, true);
+     assert_equals(new Global({value:'f64'}, 0xf64) instanceof Global, true);
+     assert_equals(new Global({value:'i32', mutable: false}, 0x132) instanceof Global, true);
+     assert_equals(new Global({value:'f32', mutable: false}, 0xf32) instanceof Global, true);
+     assert_equals(new Global({value:'f64', mutable: false}, 0xf64) instanceof Global, true);
+diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
+--- a/js/src/wasm/WasmJS.cpp
++++ b/js/src/wasm/WasmJS.cpp
+@@ -2177,16 +2177,20 @@ WasmGlobalObject::construct(JSContext* c
+ 
+     RootedLinearString typeLinearStr(cx, typeStr->ensureLinear(cx));
+     if (!typeLinearStr)
+         return false;
+ 
+     ValType globalType;
+     if (StringEqualsAscii(typeLinearStr, "i32")) {
+         globalType = ValType::I32;
++    } else if (args.length() == 1 && StringEqualsAscii(typeLinearStr, "i64")) {
++        // For the time being, i64 is allowed only if there is not an
++        // initializing value.
++        globalType = ValType::I64;
+     } else if (StringEqualsAscii(typeLinearStr, "f32")) {
+         globalType = ValType::F32;
+     } else if (StringEqualsAscii(typeLinearStr, "f64")) {
+         globalType = ValType::F64;
+     } else {
+         JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_GLOBAL_TYPE);
+         return false;
+     }
+@@ -2202,17 +2206,18 @@ WasmGlobalObject::construct(JSContext* c
+     Val globalVal = Val(uint32_t(0));
+     if (args.length() >= 2) {
+         RootedValue valueVal(cx, args.get(1));
+ 
+         if (!ToWebAssemblyValue(cx, globalType, valueVal, &globalVal))
+             return false;
+     } else {
+         switch (globalType.code()) {
+-          case ValType::I32: /* set above */ break;
++          case ValType::I32: /* set above */               break;
++          case ValType::I64: globalVal = Val(uint64_t(0)); break;
+           case ValType::F32: globalVal = Val(float(0.0));  break;
+           case ValType::F64: globalVal = Val(double(0.0)); break;
+           default: MOZ_CRASH();
+         }
+     }
+ 
+     WasmGlobalObject* global = WasmGlobalObject::create(cx, globalVal, isMutable);
+     if (!global)
+
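 
The rule this patch encodes, reduced to a sketch (the names below are illustrative stand-ins, not SpiderMonkey's API): constructing an i64 global from JS now succeeds only when no initializer argument is supplied, and the cell then defaults to zero bits, which is what the new globals.js test asserts via i64.eqz.

    #include <cstdint>
    #include <cstdio>

    enum class ValType { I32, I64, F32, F64 };

    // Decides whether a JS-side 'new Global' call is accepted and, if so,
    // what the default cell value is. i64 is rejected only when the caller
    // also passed an explicit initializer.
    static bool DefaultGlobalValue(ValType type, bool hasInitArg, uint64_t* out) {
      if (type == ValType::I64 && hasInitArg) {
        return false;  // "bad type for a WebAssembly.Global"
      }
      *out = 0;  // i32/i64/f32/f64 all default to zero bits
      return true;
    }

    int main() {
      uint64_t bits;
      std::printf("i64 without init: %s\n",
                  DefaultGlobalValue(ValType::I64, false, &bits) ? "ok" : "rejected");
      std::printf("i64 with init:    %s\n",
                  DefaultGlobalValue(ValType::I64, true, &bits) ? "ok" : "rejected");
      return 0;
    }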

+ 183 - 0
frg/work-js/mozilla-release/patches/1472974-63a1.patch

@@ -0,0 +1,183 @@
+# HG changeset patch
+# User Benjamin Bouvier <benj@benj.me>
+# Date 1530625416 -7200
+# Node ID db22593e5f457f0d4ddac095c5997a0ab3c03b08
+# Parent  5019176ab037fe17b5eb38e6cbb08f562fc102c0
+Bug 1472974: Add more ENABLE_WASM_GC in Spidermonkey; r=jseward;
+
+diff --git a/js/src/wasm/WasmBuiltins.cpp b/js/src/wasm/WasmBuiltins.cpp
+--- a/js/src/wasm/WasmBuiltins.cpp
++++ b/js/src/wasm/WasmBuiltins.cpp
+@@ -672,20 +672,22 @@ AddressOf(SymbolicAddress imm, ABIFuncti
+         *abiType = Args_General3;
+         return FuncCast(Instance::wake, *abiType);
+       case SymbolicAddress::MemCopy:
+         *abiType = Args_General4;
+         return FuncCast(Instance::memCopy, *abiType);
+       case SymbolicAddress::MemFill:
+         *abiType = Args_General4;
+         return FuncCast(Instance::memFill, *abiType);
++#ifdef ENABLE_WASM_GC
+       case SymbolicAddress::PostBarrier:
+         *abiType = Args_General2;
+         static_assert(sizeof(PostBarrierArg) == sizeof(uint32_t), "passed arg is a u32");
+         return FuncCast(Instance::postBarrier, *abiType);
++#endif
+ #if defined(JS_CODEGEN_MIPS32)
+       case SymbolicAddress::js_jit_gAtomic64Lock:
+         return &js::jit::gAtomic64Lock;
+ #endif
+       case SymbolicAddress::Limit:
+         break;
+     }
+ 
+@@ -754,17 +756,19 @@ wasm::NeedsBuiltinThunk(SymbolicAddress 
+       case SymbolicAddress::CurrentMemory:
+       case SymbolicAddress::WaitI32:
+       case SymbolicAddress::WaitI64:
+       case SymbolicAddress::Wake:
+       case SymbolicAddress::CoerceInPlace_JitEntry:
+       case SymbolicAddress::ReportInt64JSCall:
+       case SymbolicAddress::MemCopy:
+       case SymbolicAddress::MemFill:
++#ifdef ENABLE_WASM_GC
+       case SymbolicAddress::PostBarrier:
++#endif
+         return true;
+       case SymbolicAddress::Limit:
+         break;
+     }
+ 
+     MOZ_CRASH("unexpected symbolic address");
+ }
+ 
+diff --git a/js/src/wasm/WasmFrameIter.cpp b/js/src/wasm/WasmFrameIter.cpp
+--- a/js/src/wasm/WasmFrameIter.cpp
++++ b/js/src/wasm/WasmFrameIter.cpp
+@@ -1262,18 +1262,20 @@ ThunkedNativeToDescription(SymbolicAddre
+       case SymbolicAddress::CoerceInPlace_JitEntry:
+         return "out-of-line coercion for jit entry arguments (in wasm)";
+       case SymbolicAddress::ReportInt64JSCall:
+         return "jit call to int64 wasm function";
+       case SymbolicAddress::MemCopy:
+         return "call to native memory.copy function";
+       case SymbolicAddress::MemFill:
+         return "call to native memory.fill function";
++#ifdef ENABLE_WASM_GC
+       case SymbolicAddress::PostBarrier:
+         return "call to native GC postbarrier (in wasm)";
++#endif
+ #if defined(JS_CODEGEN_MIPS32)
+       case SymbolicAddress::js_jit_gAtomic64Lock:
+         MOZ_CRASH();
+ #endif
+       case SymbolicAddress::Limit:
+         break;
+     }
+     return "?";
+diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
+--- a/js/src/wasm/WasmInstance.cpp
++++ b/js/src/wasm/WasmInstance.cpp
+@@ -473,16 +473,17 @@ Instance::memFill(Instance* instance, ui
+         // else fall through to failure case
+     }
+ 
+     JSContext* cx = TlsContext.get();
+     JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_OUT_OF_BOUNDS);
+     return -1;
+ }
+ 
++#ifdef ENABLE_WASM_GC
+ /* static */ void
+ Instance::postBarrier(Instance* instance, PostBarrierArg arg)
+ {
+     gc::Cell** cell = nullptr;
+     switch (arg.type()) {
+       case PostBarrierArg::Type::Global: {
+         const GlobalDesc& global = instance->metadata().globals[arg.globalIndex()];
+         MOZ_ASSERT(!global.isConstant());
+@@ -494,16 +495,17 @@ Instance::postBarrier(Instance* instance
+         cell = (gc::Cell**) globalAddr;
+         break;
+       }
+     }
+ 
+     MOZ_ASSERT(cell);
+     TlsContext.get()->runtime()->gc.storeBuffer().putCell(cell);
+ }
++#endif // ENABLE_WASM_GC
+ 
+ Instance::Instance(JSContext* cx,
+                    Handle<WasmInstanceObject*> object,
+                    SharedCode code,
+                    UniqueDebugState debug,
+                    UniqueTlsData tlsDataIn,
+                    HandleWasmMemoryObject memory,
+                    SharedTableVector&& tables,
+@@ -657,17 +659,19 @@ Instance::init(JSContext* cx)
+         }
+     }
+ 
+     JitRuntime* jitRuntime = cx->runtime()->getJitRuntime(cx);
+     if (!jitRuntime)
+         return false;
+     jsJitArgsRectifier_ = jitRuntime->getArgumentsRectifier();
+     jsJitExceptionHandler_ = jitRuntime->getExceptionTail();
++#ifdef ENABLE_WASM_GC
+     preBarrierCode_ = jitRuntime->preBarrier(MIRType::Object);
++#endif
+     return true;
+ }
+ 
+ Instance::~Instance()
+ {
+     realm_->wasm.unregisterInstance(*this);
+ 
+     const FuncImportVector& funcImports = metadata(code().stableTier()).funcImports;
+diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
+--- a/js/src/wasm/WasmInstance.h
++++ b/js/src/wasm/WasmInstance.h
+@@ -174,17 +174,19 @@ class Instance
+     static int32_t callImport_ref(Instance*, int32_t, int32_t, uint64_t*);
+     static uint32_t growMemory_i32(Instance* instance, uint32_t delta);
+     static uint32_t currentMemory_i32(Instance* instance);
+     static int32_t wait_i32(Instance* instance, uint32_t byteOffset, int32_t value, int64_t timeout);
+     static int32_t wait_i64(Instance* instance, uint32_t byteOffset, int64_t value, int64_t timeout);
+     static int32_t wake(Instance* instance, uint32_t byteOffset, int32_t count);
+     static int32_t memCopy(Instance* instance, uint32_t destByteOffset, uint32_t srcByteOffset, uint32_t len);
+     static int32_t memFill(Instance* instance, uint32_t byteOffset, uint32_t value, uint32_t len);
++#ifdef ENABLE_WASM_GC
+     static void postBarrier(Instance* instance, PostBarrierArg arg);
++#endif
+ };
+ 
+ typedef UniquePtr<Instance> UniqueInstance;
+ 
+ } // namespace wasm
+ } // namespace js
+ 
+ #endif // wasm_instance_h
+diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
+--- a/js/src/wasm/WasmTypes.h
++++ b/js/src/wasm/WasmTypes.h
+@@ -1976,17 +1976,19 @@ enum class SymbolicAddress
+     Int64ToDouble,
+     GrowMemory,
+     CurrentMemory,
+     WaitI32,
+     WaitI64,
+     Wake,
+     MemCopy,
+     MemFill,
++#ifdef ENABLE_WASM_GC
+     PostBarrier,
++#endif
+ #if defined(JS_CODEGEN_MIPS32)
+     js_jit_gAtomic64Lock,
+ #endif
+     Limit
+ };
+ 
+ bool
+ IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode);
+
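 
The mechanical pattern in this patch is worth spelling out: once an enumerator such as PostBarrier is only compiled under ENABLE_WASM_GC, every switch case, thunk-table entry, and description string that names it must carry the identical guard, or configurations built without the feature stop compiling. A reduced, self-contained sketch of the shape (not the tree's actual code):

    // Toggle to mimic building with or without wasm GC support.
    #define ENABLE_WASM_GC

    enum class SymbolicAddress {
      MemCopy,
      MemFill,
    #ifdef ENABLE_WASM_GC
      PostBarrier,
    #endif
      Limit,
    };

    static const char* Describe(SymbolicAddress addr) {
      switch (addr) {
        case SymbolicAddress::MemCopy: return "call to native memory.copy function";
        case SymbolicAddress::MemFill: return "call to native memory.fill function";
    #ifdef ENABLE_WASM_GC
        // Without this guard, non-GC builds would name an enumerator that
        // does not exist in that configuration, and compilation stops here.
        case SymbolicAddress::PostBarrier: return "call to native GC postbarrier (in wasm)";
    #endif
        case SymbolicAddress::Limit: break;
      }
      return "?";
    }

    int main() { return Describe(SymbolicAddress::MemFill)[0] == 'c' ? 0 : 1; }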

+ 76 - 0
frg/work-js/mozilla-release/patches/1473956-63a1.patch

@@ -0,0 +1,76 @@
+# HG changeset patch
+# User Benjamin Bouvier <benj@benj.me>
+# Date 1531491927 -7200
+# Node ID 05c90ce07575bbaa9311d772240f43ff016aada7
+# Parent  4f6dddb432dae4dae41036ec23eb911f09200e09
+Bug 1473956: Report OOM when a wasm Global's cell couldn't be allocated; r=jseward
+
+diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
+--- a/js/src/wasm/WasmJS.cpp
++++ b/js/src/wasm/WasmJS.cpp
+@@ -2170,37 +2170,41 @@ WasmGlobalObject::trace(JSTracer* trc, J
+         MOZ_CRASH("Ref NYI");
+     }
+ }
+ 
+ /* static */ void
+ WasmGlobalObject::finalize(FreeOp*, JSObject* obj)
+ {
+     WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
+-    js_delete(global->cell());
++    if (!global->isNewborn())
++        js_delete(global->cell());
+ }
+ 
+ /* static */ WasmGlobalObject*
+ WasmGlobalObject::create(JSContext* cx, HandleVal hval, bool isMutable)
+ {
+     RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmGlobal).toObject());
+ 
+     AutoSetNewObjectMetadata metadata(cx);
+     RootedWasmGlobalObject obj(cx, NewObjectWithGivenProto<WasmGlobalObject>(cx, proto));
+     if (!obj)
+         return nullptr;
+ 
++    MOZ_ASSERT(obj->isNewborn());
+     MOZ_ASSERT(obj->isTenured(), "assumed by set_global post barriers");
+ 
+     // It's simpler to initialize the cell after the object has been created,
+     // to avoid needing to root the cell before the object creation.
+ 
+     Cell* cell = js_new<Cell>();
+-    if (!cell)
++    if (!cell) {
++        ReportOutOfMemory(cx);
+         return nullptr;
++    }
+ 
+     const Val& val = hval.get();
+     switch (val.type().code()) {
+       case ValType::I32:
+         cell->i32 = val.i32();
+         break;
+       case ValType::I64:
+         cell->i64 = val.i64();
+diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h
+--- a/js/src/wasm/WasmJS.h
++++ b/js/src/wasm/WasmJS.h
+@@ -149,16 +149,17 @@ class WasmGlobalObject : public NativeOb
+     static const unsigned RESERVED_SLOTS = 3;
+     static const Class class_;
+     static const JSPropertySpec properties[];
+     static const JSFunctionSpec methods[];
+     static const JSFunctionSpec static_methods[];
+     static bool construct(JSContext*, unsigned, Value*);
+ 
+     static WasmGlobalObject* create(JSContext* cx, wasm::HandleVal value, bool isMutable);
++    bool isNewborn() { return getReservedSlot(CELL_SLOT).isUndefined(); }
+ 
+     wasm::ValType type() const;
+     void val(wasm::MutableHandleVal outval) const;
+     bool isMutable() const;
+     // value() will MOZ_CRASH if the type is int64
+     Value value(JSContext* cx) const;
+     Cell* cell() const;
+ };
+
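 
The one-line behavioral fix here carries a general engine contract: a fallible internal allocation reached from a JS entry point must report the failure on the context before returning null, otherwise the caller sees a nullptr with no pending exception to go with it. A sketch of the pattern with stand-in types (not SpiderMonkey's real JSContext or Cell):

    #include <cstdint>
    #include <cstdio>
    #include <new>

    struct Context { bool pendingOOM = false; };  // stand-in for JSContext
    struct Cell { uint64_t i64 = 0; };

    static void ReportOutOfMemory(Context* cx) { cx->pendingOOM = true; }

    // On allocation failure, report OOM before returning null so the
    // caller observes a pending error, not a silent failure.
    static Cell* CreateCell(Context* cx) {
      Cell* cell = new (std::nothrow) Cell();
      if (!cell) {
        ReportOutOfMemory(cx);
        return nullptr;
      }
      return cell;
    }

    int main() {
      Context cx;
      Cell* cell = CreateCell(&cx);
      std::printf("cell=%p pendingOOM=%d\n", static_cast<void*>(cell), cx.pendingOOM);
      delete cell;
      return 0;
    }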

+ 65 - 0
frg/work-js/mozilla-release/patches/1475943-63a1.patch

@@ -0,0 +1,65 @@
+# HG changeset patch
+# User Benjamin Bouvier <benj@benj.me>
+# Date 1532022998 -7200
+# Node ID 5b31b8bf2d680fba2f46cfa99576f1027bed328f
+# Parent  d0cb9041aa08de83ab41909c913c788122de509f
+Bug 1475943: Don't trace a wasm global object if it's being created; r=jseward
+
+diff --git a/js/src/jit-test/tests/wasm/gc/fuzz-gc-while-allocating-global.js b/js/src/jit-test/tests/wasm/gc/fuzz-gc-while-allocating-global.js
+new file mode 100644
+--- /dev/null
++++ b/js/src/jit-test/tests/wasm/gc/fuzz-gc-while-allocating-global.js
+@@ -0,0 +1,7 @@
++if (!wasmGcEnabled()) {
++    quit(0);
++}
++
++enableShellAllocationMetadataBuilder();
++gczeal(9, 1);
++new WebAssembly.Global({ value: 'i32' }, 42);
+diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
+--- a/js/src/wasm/WasmJS.cpp
++++ b/js/src/wasm/WasmJS.cpp
+@@ -2150,16 +2150,22 @@ const Class WasmGlobalObject::class_ =
+     JSCLASS_BACKGROUND_FINALIZE,
+     &WasmGlobalObject::classOps_
+ };
+ 
+ /* static */ void
+ WasmGlobalObject::trace(JSTracer* trc, JSObject* obj)
+ {
+     WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
++    if (global->isNewborn()) {
++        // This can happen while we're allocating the object, in which case
++        // every single slot of the object is not defined yet. In particular,
++        // there's nothing to trace yet.
++        return;
++    }
+     switch (global->type().code()) {
+       case ValType::AnyRef:
+         if (global->cell()->ptr)
+             TraceManuallyBarrieredEdge(trc, &global->cell()->ptr, "wasm anyref global");
+         break;
+       case ValType::I32:
+       case ValType::F32:
+       case ValType::I64:
+@@ -2225,16 +2231,18 @@ WasmGlobalObject::create(JSContext* cx, 
+       case ValType::Ref:
+         MOZ_CRASH("Ref NYI");
+     }
+ 
+     obj->initReservedSlot(TYPE_SLOT, Int32Value(int32_t(val.type().bitsUnsafe())));
+     obj->initReservedSlot(MUTABLE_SLOT, JS::BooleanValue(isMutable));
+     obj->initReservedSlot(CELL_SLOT, PrivateValue(cell));
+ 
++    MOZ_ASSERT(!obj->isNewborn());
++
+     return obj;
+ }
+ 
+ /* static */ bool
+ WasmGlobalObject::construct(JSContext* cx, unsigned argc, Value* vp)
+ {
+     CallArgs args = CallArgsFromVp(argc, vp);
+ 
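 
This patch and bug 1473956 above close the same hazard from both ends: between allocating the object and initializing its reserved slots, a GC (forced in the new test by gczeal) can observe the half-built object, so the tracer in this patch and the finalizer in the previous one both have to treat the "newborn" state as empty. A reduced sketch of the guard, with stand-in types rather than the real WasmGlobalObject:

    #include <cassert>
    #include <cstdio>

    struct GlobalObject {
      int* cell = nullptr;  // the cell slot stays undefined until initialized

      bool isNewborn() const { return cell == nullptr; }

      // A GC can run while the object is still half-built, so the tracer
      // must tolerate the newborn state.
      void trace() const {
        if (isNewborn()) {
          return;  // nothing to trace yet
        }
        std::printf("tracing cell at %p\n", static_cast<const void*>(cell));
      }

      // The finalizer needs the same guard before touching the cell.
      void finalize() {
        if (!isNewborn()) {
          delete cell;
          cell = nullptr;
        }
      }
    };

    int main() {
      GlobalObject global;
      global.trace();             // safe while newborn
      global.cell = new int(42);  // slots initialized
      assert(!global.isNewborn());
      global.trace();
      global.finalize();
      return 0;
    }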
+

+ 2 - 2
frg/work-js/mozilla-release/patches/1476012-2-63a1.patch

@@ -3,7 +3,7 @@
 # Date 1531829163 -3600
 #      Tue Jul 17 13:06:03 2018 +0100
 # Node ID 01ab0200841fb028d3579f19996e9f16081657d4
-# Parent  9cf3721112a5598a47fa3b06ed8cf3b7da74b817
+# Parent  67721384b0f0497e84a025b9d3c36bc625210f28
 Bug 1476012 - Remove dependency of jit/BaselineJit.h on jit/MacroAssembler.h r=nbp
 
 diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp
@@ -170,8 +170,8 @@ diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
  #include "wasm/WasmBuiltins.h"
  #include "wasm/WasmBuiltins.h"
  #include "wasm/WasmModule.h"
  #include "wasm/WasmModule.h"
  
  
+ #include "gc/StoreBuffer-inl.h"
  #include "vm/ArrayBufferObject-inl.h"
  #include "vm/ArrayBufferObject-inl.h"
  #include "vm/JSObject-inl.h"
  #include "vm/JSObject-inl.h"
  
  
  using namespace js;
  using namespace js;
- using namespace js::jit;

+ 63 - 0
frg/work-js/mozilla-release/patches/1476953-63a1.patch

@@ -0,0 +1,63 @@
+# HG changeset patch
+# User Benjamin Bouvier <benj@benj.me>
+# Date 1532022257 -7200
+# Node ID f9769a072e13b591d962adfbec1f11152b6374b2
+# Parent  d3a864ec08368b36dc80d83c318e140b5205b1dd
+Bug 1476953: Update asm.js error message when atomics aren't enabled; r=jandem
+
+diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
+--- a/js/src/wasm/AsmJS.cpp
++++ b/js/src/wasm/AsmJS.cpp
+@@ -2105,18 +2105,20 @@ class MOZ_STACK_CLASS JS_HAZ_ROOTED Modu
+         AsmJSGlobal g(AsmJSGlobal::Constant, std::move(fieldChars));
+         g.pod.u.constant.value_ = constant;
+         g.pod.u.constant.kind_ = AsmJSGlobal::GlobalConstant;
+         return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+     }
+     bool addAtomicsBuiltinFunction(PropertyName* var, AsmJSAtomicsBuiltinFunction func,
+                                    PropertyName* field)
+     {
+-        if (!JitOptions.asmJSAtomicsEnable)
+-            return failCurrentOffset("asm.js Atomics only enabled when asmjs.atomics.enable is set");
++        if (!JitOptions.asmJSAtomicsEnable) {
++            return failCurrentOffset("asm.js Atomics only enabled when the environment variable "
++                                     "JIT_OPTION_asmJSAtomicsEnable is set to true");
++        }
+ 
+         atomicsPresent_ = true;
+ 
+         UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+         if (!fieldChars)
+             return false;
+ 
+         Global* global = validationLifo_.new_<Global>(Global::AtomicsBuiltinFunction);
+@@ -8115,20 +8117,24 @@ CheckBuffer(JSContext* cx, const AsmJSMe
+ #else
+         bool needGuard = metadata.usesSimd;
+ #endif
+         Rooted<ArrayBufferObject*> arrayBuffer(cx, &buffer->as<ArrayBufferObject>());
+         if (!ArrayBufferObject::prepareForAsmJS(cx, arrayBuffer, needGuard))
+             return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
+     } else {
+         if (!buffer->as<SharedArrayBufferObject>().isPreparedForAsmJS()) {
+-            if (buffer->as<SharedArrayBufferObject>().isWasm())
+-                return LinkFail(cx, "SharedArrayBuffer created for Wasm cannot be used for asm.js");
+-            if (!jit::JitOptions.asmJSAtomicsEnable)
+-                return LinkFail(cx, "Can link with SharedArrayBuffer only when asmjs.atomics.enable is set");
++            if (buffer->as<SharedArrayBufferObject>().isWasm()) {
++                return LinkFail(cx, "SharedArrayBuffer created for Wasm cannot be used for "
++                                    "asm.js");
++            }
++            if (!jit::JitOptions.asmJSAtomicsEnable) {
++                return LinkFail(cx, "Can link with SharedArrayBuffer only when the environment "
++                                    "variable JIT_OPTION_asmJSAtomicsEnable is set to true");
++            }
+             return LinkFail(cx, "Unable to prepare SharedArrayBuffer for asm.js use");
+         }
+     }
+ 
+     MOZ_ASSERT(buffer->isPreparedForAsmJS());
+     return true;
+ }
+ 
+

+ 4 - 6
frg/work-js/mozilla-release/patches/1489698-2-65a1.patch

@@ -3,7 +3,7 @@
 # Date 1536360464 14400
 #      Fri Sep 07 18:47:44 2018 -0400
 # Node ID 6a27ee7d892bbc85ce8b2223724929ce783c5a76
-# Parent  a657a2962e706bed82dd6df4b2d2ed76f28b1e28
+# Parent  6d3b3bcc2f2ebbb9c95f6d1f94c108123d264e0b
 Bug 1489698 - Move js/src/moz.build defines to own file. r=froydnj
 
 MozReview-Commit-ID: ImcZFI3YlVo
@@ -34,7 +34,7 @@ diff --git a/js/src/js-config.mozbuild b/js/src/js-config.mozbuild
 new file mode 100644
 --- /dev/null
 +++ b/js/src/js-config.mozbuild
-@@ -0,0 +1,39 @@
+@@ -0,0 +1,38 @@
 +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 +# vim: set filetype=python:
 +# This Source Code Form is subject to the terms of the Mozilla Public
@@ -44,7 +44,6 @@ new file mode 100644
 +# Nightly-only features
 +if CONFIG['NIGHTLY_BUILD']:
 +    DEFINES['ENABLE_BINARYDATA'] = True
-+    DEFINES['ENABLE_SIMD'] = True
 +    DEFINES['ENABLE_WASM_BULKMEM_OPS'] = True
 +    DEFINES['ENABLE_WASM_SATURATING_TRUNC_OPS'] = True
 +    DEFINES['ENABLE_WASM_THREAD_OPS'] = True
@@ -94,7 +93,7 @@ diff --git a/js/src/moz.build b/js/src/moz.build
  component_stl    = ('Core', 'JavaScript: Standard Library')
  
  FILES_PER_UNIFIED_FILE = 6
-@@ -687,43 +689,16 @@ ReservedWordsGenerated.inputs += [
+@@ -699,42 +701,16 @@ ReservedWordsGenerated.inputs += [
  ]
  
  DIRS += [
@@ -105,7 +104,6 @@ diff --git a/js/src/moz.build b/js/src/moz.build
  
 -if CONFIG['NIGHTLY_BUILD']:
 -    DEFINES['ENABLE_BINARYDATA'] = True
--    DEFINES['ENABLE_SIMD'] = True
 -    DEFINES['ENABLE_WASM_BULKMEM_OPS'] = True
 -    DEFINES['ENABLE_WASM_SATURATING_TRUNC_OPS'] = True
 -    DEFINES['ENABLE_WASM_THREAD_OPS'] = True
@@ -138,7 +136,7 @@ diff --git a/js/src/moz.build b/js/src/moz.build
      # C4661 ("no suitable definition provided for explicit template
      # instantiation request") is emitted for all Parser methods that
      # have a Parser<FullParseHandler> definition but no
-@@ -779,19 +754,16 @@ selfhosted.inputs = [
+@@ -790,19 +766,16 @@ selfhosted.inputs = [
      'builtin/TypedObject.js',
      'builtin/WeakMap.js',
      'builtin/WeakSet.js'

+ 6 - 6
frg/work-js/mozilla-release/patches/1489698-5-65a1.patch

@@ -3,7 +3,7 @@
 # Date 1536361555 14400
 #      Fri Sep 07 19:05:55 2018 -0400
 # Node ID a17dfbac6b10ec140dc69ed3053662987eba25ef
-# Parent  83ce73e2a4ec08fad943bd83a214419608be8a9b
+# Parent  5cb43e9b25a84ca0b5d097784e5be3289698b936
 Bug 1489698 - Add moz.build for js/src/jit. r=jandem,froydnj
 
 diff --git a/js/src/jit/moz.build b/js/src/jit/moz.build
@@ -68,7 +68,6 @@ new file mode 100644
 +    'CodeGenerator.cpp',
 +    'CompileWrappers.cpp',
 +    'Disassembler.cpp',
-+    'EagerSimdUnbox.cpp',
 +    'EdgeCaseAnalysis.cpp',
 +    'EffectiveAddressAnalysis.cpp',
 +    'ExecutableAllocator.cpp',
@@ -138,6 +137,7 @@ new file mode 100644
 +        'x86-shared/BaselineIC-x86-shared.cpp',
 +        'x86-shared/CodeGenerator-x86-shared.cpp',
 +        'x86-shared/Lowering-x86-shared.cpp',
++        'x86-shared/MacroAssembler-x86-shared-SIMD.cpp',
 +        'x86-shared/MacroAssembler-x86-shared.cpp',
 +        'x86-shared/MoveEmitter-x86-shared.cpp',
 +    ]
@@ -312,7 +312,7 @@ diff --git a/js/src/moz.build b/js/src/moz.build
      'js.msg',
      'jsapi.h',
      'jsfriendapi.h',
-@@ -236,91 +223,16 @@ UNIFIED_SOURCES += [
+@@ -251,90 +238,16 @@ UNIFIED_SOURCES += [
      'irregexp/NativeRegExpMacroAssembler.cpp',
      'irregexp/RegExpAST.cpp',
      'irregexp/RegExpCharacters.cpp',
@@ -343,7 +343,6 @@ diff --git a/js/src/moz.build b/js/src/moz.build
 -    'jit/CodeGenerator.cpp',
 -    'jit/CompileWrappers.cpp',
 -    'jit/Disassembler.cpp',
--    'jit/EagerSimdUnbox.cpp',
 -    'jit/EdgeCaseAnalysis.cpp',
 -    'jit/EffectiveAddressAnalysis.cpp',
 -    'jit/ExecutableAllocator.cpp',
@@ -404,7 +403,7 @@ diff --git a/js/src/moz.build b/js/src/moz.build
      'perf/jsperf.cpp',
      'proxy/BaseProxyHandler.cpp',
      'proxy/CrossCompartmentWrapper.cpp',
-@@ -478,173 +390,16 @@ if CONFIG['MOZ_INSTRUMENTS']:
+@@ -489,174 +402,16 @@ if CONFIG['MOZ_INSTRUMENTS']:
  
  if CONFIG['ENABLE_TRACE_LOGGING']:
      SOURCES += [
@@ -428,6 +427,7 @@ diff --git a/js/src/moz.build b/js/src/moz.build
 -        'jit/x86-shared/BaselineIC-x86-shared.cpp',
 -        'jit/x86-shared/CodeGenerator-x86-shared.cpp',
 -        'jit/x86-shared/Lowering-x86-shared.cpp',
+-        'jit/x86-shared/MacroAssembler-x86-shared-SIMD.cpp',
 -        'jit/x86-shared/MacroAssembler-x86-shared.cpp',
 -        'jit/x86-shared/MoveEmitter-x86-shared.cpp',
 -    ]
@@ -578,7 +578,7 @@ diff --git a/js/src/moz.build b/js/src/moz.build
  else:
      UNIFIED_SOURCES += [
          'threading/posix/CpuCount.cpp',
-@@ -686,16 +441,17 @@ GENERATED_FILES += ['frontend/ReservedWo
+@@ -698,16 +453,17 @@ GENERATED_FILES += ['frontend/ReservedWo
  ReservedWordsGenerated = GENERATED_FILES['frontend/ReservedWordsGenerated.h']
  ReservedWordsGenerated.script = 'frontend/GenerateReservedWords.py'
  ReservedWordsGenerated.inputs += [

+ 3 - 3
frg/work-js/mozilla-release/patches/1502159-2-65a1.patch

@@ -2,7 +2,7 @@
 # User Matthew Gaudet <mgaudet@mozilla.com>
 # Date 1540568623 0
 # Node ID 1c2afde752e7eb78e6d4b4ebe10b1f1e3c3842f7
-# Parent  b0ddf8cbae94a49cad17d17dc2b14d4e32ba05af
+# Parent  3384d70110c951e4a1c9458f9862df4fcdfabfac
 Bug 1502159 - Remove C1Spewer r=jandem
 
 It appears the C1 Spewer is not partciularly well used. Loading logs into
@@ -468,7 +468,7 @@ diff --git a/js/src/jit/JitSpewer.h b/js/src/jit/JitSpewer.h
 diff --git a/js/src/moz.build b/js/src/moz.build
 --- a/js/src/moz.build
 +++ b/js/src/moz.build
-@@ -265,17 +265,16 @@ UNIFIED_SOURCES += [
+@@ -264,17 +264,16 @@ UNIFIED_SOURCES += [
      'jit/BaselineDebugModeOSR.cpp',
      'jit/BaselineFrame.cpp',
      'jit/BaselineFrameInfo.cpp',
@@ -484,5 +484,5 @@ diff --git a/js/src/moz.build b/js/src/moz.build
      'jit/CodeGenerator.cpp',
      'jit/CompileWrappers.cpp',
      'jit/Disassembler.cpp',
-     'jit/EagerSimdUnbox.cpp',
      'jit/EdgeCaseAnalysis.cpp',
+     'jit/EffectiveAddressAnalysis.cpp',

+ 3 - 3
frg/work-js/mozilla-release/patches/1502886-1-65a1.patch

@@ -2,7 +2,7 @@
 # User Benjamin Bouvier <benj@benj.me>
 # Date 1540835717 -3600
 # Node ID ab04d8c725fd0cbd61e6c32e4096ee4caa53d55c
-# Parent  3b346840a52a7d1cbc890047a9c507f11a7d8cfe
+# Parent  39f7c24d11776e295348df75cb6fd7b0a00fa767
 Bug 1502886: Delete wasm breakpoints when the wasm instance's Zone is swept; r=jonco
 
 diff --git a/js/src/gc/Zone.cpp b/js/src/gc/Zone.cpp
@@ -312,8 +312,8 @@ diff --git a/js/src/wasm/WasmDebug.h b/js/src/wasm/WasmDebug.h
 diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
 --- a/js/src/wasm/WasmInstance.h
 +++ b/js/src/wasm/WasmInstance.h
-@@ -49,16 +49,18 @@ class Instance
-     jit::TrampolinePtr              jsJitExceptionHandler_;
+@@ -52,16 +52,18 @@ class Instance
+ #endif
      const SharedCode                code_;
      const UniqueDebugState          debug_;
      const UniqueTlsData             tlsData_;

+ 3 - 3
frg/work-js/mozilla-release/patches/1528028-67a1.patch

@@ -2,7 +2,7 @@
 # User Matthew Gaudet <mgaudet@mozilla.com>
 # Date 1550606208 0
 # Node ID ef3a903f83998034acb43924a423bf4d370ac382
-# Parent  313ee36e5cfe9e8dbc26c90d49064ce32659a606
+# Parent  73d88f2daed5342bb09a98e531da568b6791fb3b
 Bug 1528028 - Remove obsolete Shark profiling support r=tcampbell
 
 Differential Revision: https://phabricator.services.mozilla.com/D20323
@@ -294,8 +294,8 @@ deleted file mode 100644
 diff --git a/js/src/moz.build b/js/src/moz.build
 --- a/js/src/moz.build
 +++ b/js/src/moz.build
-@@ -189,17 +189,16 @@ UNIFIED_SOURCES += [
-     'builtin/SIMD.cpp',
+@@ -191,17 +191,16 @@ UNIFIED_SOURCES += [
+     'builtin/ReflectParse.cpp',
      'builtin/Stream.cpp',
      'builtin/String.cpp',
      'builtin/Symbol.cpp',

+ 9 - 9
frg/work-js/mozilla-release/patches/1590907-5-72a1.patch

@@ -2,7 +2,7 @@
 # User Philip Chimento <philip.chimento@gmail.com>
 # Date 1574753135 0
 # Node ID bfdd5f34f2f4a11b84742400bb3130192c89d4fb
-# Parent  96c5e6975b06b0771e7909ff0563583d80091aea
+# Parent  a2e9f9cb37ab7fc445cdcc7a3161459d6c7ec494
 Bug 1590907 - Make ENABLE_INTL_API and ENABLE_TYPED_OBJECTS into js-config macros. r=sfink,firefox-build-system-reviewers,mshal
 
 Whether ENABLE_INTL_API and ENABLE_TYPED_OBJECTS are defined, affects
@@ -578,7 +578,7 @@ diff --git a/js/src/builtin/String.js b/js/src/builtin/String.js
 diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp
 --- a/js/src/builtin/TestingFunctions.cpp
 +++ b/js/src/builtin/TestingFunctions.cpp
-@@ -271,17 +271,17 @@ GetBuildConfiguration(JSContext* cx, uns
+@@ -270,17 +270,17 @@ GetBuildConfiguration(JSContext* cx, uns
  #ifdef ENABLE_BINARYDATA
      value = BooleanValue(true);
  #else
@@ -863,8 +863,8 @@ diff --git a/js/src/jsdate.cpp b/js/src/jsdate.cpp
      return PositiveModulo(floor(t/msPerHour), HoursPerDay);
  }
  
-@@ -2679,17 +2679,17 @@ date_toJSON(JSContext* cx, unsigned argc
-                                           JSMSG_BAD_TOISOSTRING_PROP);
+@@ -2678,17 +2678,17 @@ date_toJSON(JSContext* cx, unsigned argc
+         JS_ReportErrorNumberASCII(cx, js::GetErrorMessage, nullptr, JSMSG_BAD_TOISOSTRING_PROP);
          return false;
      }
  
@@ -882,7 +882,7 @@ diff --git a/js/src/jsdate.cpp b/js/src/jsdate.cpp
          JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_DEFAULT_LOCALE_ERROR);
          return nullptr;
      }
-@@ -2782,17 +2782,17 @@ TimeZoneComment(JSContext* cx, double ut
+@@ -2781,17 +2781,17 @@ TimeZoneComment(JSContext* cx, double ut
              usetz = false;
  
          if (usetz)
@@ -901,7 +901,7 @@ diff --git a/js/src/jsdate.cpp b/js/src/jsdate.cpp
  };
  
  static bool
-@@ -2879,17 +2879,17 @@ FormatDate(JSContext* cx, double utcTime
+@@ -2878,17 +2878,17 @@ FormatDate(JSContext* cx, double utcTime
          if (!str)
              return false;
      }
@@ -920,7 +920,7 @@ diff --git a/js/src/jsdate.cpp b/js/src/jsdate.cpp
      char buf[100];
      if (!IsFinite(utcTime)) {
          strcpy(buf, js_InvalidDate_str);
-@@ -2993,17 +2993,17 @@ date_toLocaleTimeString_impl(JSContext* 
+@@ -2992,17 +2992,17 @@ date_toLocaleTimeString_impl(JSContext* 
  }
  
  static bool
@@ -939,7 +939,7 @@ diff --git a/js/src/jsdate.cpp b/js/src/jsdate.cpp
      return FormatDate(cx, args.thisv().toObject().as<DateObject>().UTCTime().toNumber(),
                        FormatSpec::Time, args.rval());
  }
-@@ -3149,17 +3149,17 @@ static const JSFunctionSpec date_methods
+@@ -3148,17 +3148,17 @@ static const JSFunctionSpec date_methods
      JS_FN("setUTCHours",         date_setUTCHours,        4,0),
      JS_FN("setMinutes",          date_setMinutes,         3,0),
      JS_FN("setUTCMinutes",       date_setUTCMinutes,      3,0),
@@ -1700,7 +1700,7 @@ diff --git a/js/src/vm/Runtime.h b/js/src/vm/Runtime.h
 diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
 --- a/js/src/vm/SelfHosting.cpp
 +++ b/js/src/vm/SelfHosting.cpp
-@@ -2403,17 +2403,17 @@ static const JSFunctionSpec intrinsic_fu
+@@ -2402,17 +2402,17 @@ static const JSFunctionSpec intrinsic_fu
      JS_INLINABLE_FN("std_String_toLowerCase",    str_toLowerCase,              0,0, StringToLowerCase),
      JS_INLINABLE_FN("std_String_toUpperCase",    str_toUpperCase,              0,0, StringToUpperCase),
  

+ 4 - 4
frg/work-js/mozilla-release/patches/1727374-914.patch

@@ -2,7 +2,7 @@
 # User Steve Fink <sfink@mozilla.com>
 # Date 1630609618 0
 # Node ID c250dd684d8e7e770d562bbb2c7bfac464dc6593
-# Parent  e68e1c5e7d93a26c65f6fb3db01a34f257178633
+# Parent  e857c8268e2fdafdc759ff09f1e2599dc2cffaa3
 Bug 1727374 - Root Init dictionaries to fix rooting hazards. r=peterv, a=RyanVM
 
 Differential Revision: https://phabricator.services.mozilla.com/D123633
@@ -181,7 +181,7 @@ new file mode 100644
 diff --git a/dom/workers/ServiceWorkerPrivate.cpp b/dom/workers/ServiceWorkerPrivate.cpp
 --- a/dom/workers/ServiceWorkerPrivate.cpp
 +++ b/dom/workers/ServiceWorkerPrivate.cpp
-@@ -931,17 +931,17 @@ public:
+@@ -932,17 +932,17 @@ public:
    WorkerRun(JSContext* aCx, WorkerPrivate* aWorkerPrivate) override
    {
      MOZ_ASSERT(aWorkerPrivate);
@@ -229,12 +229,12 @@ new file mode 100644
 diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
 --- a/js/src/wasm/WasmJS.cpp
 +++ b/js/src/wasm/WasmJS.cpp
-@@ -1048,27 +1048,31 @@ WasmInstanceObject::create(JSContext* cx
+@@ -1075,27 +1075,31 @@ WasmInstanceObject::create(JSContext* cx
                            HandleWasmMemoryObject memory,
                            SharedTableVector&& tables,
                            Handle<FunctionVector> funcImports,
                            const GlobalDescVector& globals,
-                            const ValVector& globalImportValues,
+                            HandleValVector globalImportValues,
                            const WasmGlobalObjectVector& globalObjs,
                            HandleObject proto)
  {

+ 7 - 7
frg/work-js/mozilla-release/patches/TOP-NOBUG-REGEXP-03-1537978-68a1-25318.patch

@@ -1,7 +1,7 @@
 # HG changeset patch
 # User Dmitry Butskoy <buc@buc.me>
 # Date 1690629335 -7200
-# Parent  6f2fb98fb9da227c60c100fdf1840905cfaa9d23
+# Parent  02fe021ade73ebec5f3f57b3b0b5d6688063890f
 No Bug - Import new regexp V8 engine. r=frg a=frg
 
  Bug 1537978 - Move regular expression-related functionality out of jsapi.h
@@ -30,7 +30,7 @@ diff --git a/dom/base/nsContentUtils.cpp b/dom/base/nsContentUtils.cpp
  #include "ImageOps.h"
  #include "mozAutoDocUpdate.h"
  #include "mozilla/ArrayUtils.h"
-@@ -7364,30 +7365,30 @@ nsContentUtils::IsPatternMatching(nsAStr
+@@ -7348,30 +7349,30 @@ nsContentUtils::IsPatternMatching(nsAStr
    // regexp evaluation, not actual script execution.
    JSAutoRealm ar(cx, xpc::UnprivilegedJunkScope());
  
@@ -372,12 +372,12 @@ diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp
  #include "builtin/Stream.h"
  #include "builtin/String.h"
  #include "builtin/Symbol.h"
- #ifdef ENABLE_SIMD
- # include "builtin/SIMDConstants.h"
- #endif
  #ifdef ENABLE_BINARYDATA
  # include "builtin/TypedObject.h"
-@@ -6636,148 +6635,16 @@ JS_ObjectIsDate(JSContext* cx, HandleObj
+ #endif
+ #include "frontend/BytecodeCompiler.h"
+ #include "frontend/FullParseHandler.h"  // for JS_BufferIsCompileableUnit
+@@ -6633,148 +6632,16 @@ JS_ObjectIsDate(JSContext* cx, HandleObj
          return false;
  
      *isDate = cls == ESClass::Date;
@@ -714,7 +714,7 @@ diff --git a/js/src/vm/RegExpObject.cpp b/js/src/vm/RegExpObject.cpp
      if (!regexp)
          return nullptr;
  
-@@ -1500,8 +1507,142 @@ js::RegExpToSharedNonInline(JSContext* c
+@@ -1499,8 +1506,142 @@ js::RegExpToSharedNonInline(JSContext* c
  }
  
  JS::ubi::Node::Size

+ 21 - 21
frg/work-js/mozilla-release/patches/TOP-NOBUG-REGEXP-07-1626713-76a1-25318.patch

@@ -1,7 +1,7 @@
 # HG changeset patch
 # User Dmitry Butskoy <buc@buc.me>
 # Date 1690629593 -7200
-# Parent  78e5e95d23f19e68807ed03e46fd1bd33ba5cdc2
+# Parent  92ff12078a07b83160ae09d2d1adcf772b6d416b
 No Bug - Import new regexp V8 engine. r=frg a=frg
 
 diff --git a/js/src/builtin/RegExp.cpp b/js/src/builtin/RegExp.cpp
@@ -81,7 +81,7 @@ diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctio
  #include "js/AutoByteString.h"
  #include "js/Debug.h"
  #include "js/HashTable.h"
-@@ -4522,17 +4524,17 @@ GetModuleEnvironmentValue(JSContext* cx,
+@@ -4483,17 +4485,17 @@ GetModuleEnvironmentValue(JSContext* cx,
      if (args.rval().isMagic(JS_UNINITIALIZED_LEXICAL)) {
          ReportRuntimeLexicalError(cx, JSMSG_UNINITIALIZED_LEXICAL, id);
          return false;
@@ -100,7 +100,7 @@ diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctio
          return "START_OF_LINE";
        case irregexp::RegExpAssertion::START_OF_INPUT:
          return "START_OF_INPUT";
-@@ -4883,17 +4885,17 @@ DisRegExp(JSContext* cx, unsigned argc, 
+@@ -4844,17 +4846,17 @@ DisRegExp(JSContext* cx, unsigned argc, 
      }
  
      if (!RegExpObject::dumpBytecode(cx, reobj, match_only, input))
@@ -119,7 +119,7 @@ diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctio
      RootedObject callee(cx, &args.callee());
  
      if (args.length() != 0) {
-@@ -6012,17 +6014,17 @@ gc::ZealModeHelpText),
+@@ -5969,17 +5971,17 @@ gc::ZealModeHelpText),
  "    baselineCompile();  for (var i=0; i<1; i++) {} ...\n"
  "  The interpreter will enter the new jitcode at the loop header unless\n"
  "  baselineCompile returned a string or threw an error.\n"),
@@ -217,7 +217,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
  #include "jit/JitcodeMap.h"
  #include "jit/JitSpewer.h"
  #include "jit/Linker.h"
-@@ -1355,35 +1357,66 @@ CodeGenerator::visitRegExp(LRegExp* lir)
+@@ -1354,35 +1356,66 @@ CodeGenerator::visitRegExp(LRegExp* lir)
          TemplateObject templateObject(source);
          masm.createGCObject(output, temp, templateObject, gc::DefaultHeap, ool->entry());
      } else {
@@ -287,7 +287,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
  PrepareAndExecuteRegExp(JSContext* cx, MacroAssembler& masm, Register regexp, Register input,
                          Register lastIndex,
                          Register temp1, Register temp2, Register temp3,
-@@ -1676,16 +1709,18 @@ PrepareAndExecuteRegExp(JSContext* cx, M
+@@ -1675,16 +1708,18 @@ PrepareAndExecuteRegExp(JSContext* cx, M
      if (mode == RegExpShared::MatchOnly) {
          // endIndex is passed via temp3.
          masm.load32(endIndexAddress, temp3);
@@ -306,7 +306,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
  {
      CharEncoding encoding_;
      Register string_;
-@@ -2224,17 +2259,17 @@ CodeGenerator::visitOutOfLineRegExpMatch
+@@ -2223,17 +2258,17 @@ CodeGenerator::visitOutOfLineRegExpMatch
  
      AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
      regs.take(lastIndex);
@@ -325,7 +325,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
  
      // We are not using oolCallVM because we are in a Call, and that live
      // registers are already saved by the the register allocator.
-@@ -2415,17 +2450,17 @@ CodeGenerator::visitOutOfLineRegExpSearc
+@@ -2414,17 +2449,17 @@ CodeGenerator::visitOutOfLineRegExpSearc
  
      AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
      regs.take(lastIndex);
@@ -344,7 +344,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
  
      // We are not using oolCallVM because we are in a Call, and that live
      // registers are already saved by the the register allocator.
-@@ -2484,17 +2519,17 @@ JitRealm::generateRegExpTesterStub(JSCon
+@@ -2483,17 +2518,17 @@ JitRealm::generateRegExpTesterStub(JSCon
      regs.take(input);
      regs.take(regexp);
      regs.take(lastIndex);
@@ -363,7 +363,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
                                   &notFound, &oolEntry))
      {
          return nullptr;
-@@ -2509,17 +2544,17 @@ JitRealm::generateRegExpTesterStub(JSCon
+@@ -2508,17 +2543,17 @@ JitRealm::generateRegExpTesterStub(JSCon
      masm.bind(&notFound);
      masm.move32(Imm32(RegExpTesterResultNotFound), result);
      masm.jump(&done);
@@ -385,7 +385,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
 diff --git a/js/src/js.msg b/js/src/js.msg
 --- a/js/src/js.msg
 +++ b/js/src/js.msg
-@@ -530,16 +530,24 @@ MSG_DEF(JSMSG_NOTHING_TO_REPEAT,       0
+@@ -529,16 +529,24 @@ MSG_DEF(JSMSG_NOTHING_TO_REPEAT,       0
  MSG_DEF(JSMSG_NUMBERS_OUT_OF_ORDER,    0, JSEXN_SYNTAXERR, "numbers out of order in {} quantifier.")
  MSG_DEF(JSMSG_RANGE_WITH_CLASS_ESCAPE, 0, JSEXN_SYNTAXERR, "character class escape cannot be used in class range in regular expression")
  MSG_DEF(JSMSG_RAW_BRACE_IN_REGEXP,     0, JSEXN_SYNTAXERR, "raw brace is not allowed in regular expression with unicode flag")
@@ -407,7 +407,7 @@ diff --git a/js/src/js.msg b/js/src/js.msg
  MSG_DEF(JSMSG_DEFAULT_LOCALE_ERROR,    0, JSEXN_ERR, "internal error getting the default locale")
  MSG_DEF(JSMSG_NO_SUCH_SELF_HOSTED_PROP,1, JSEXN_ERR, "No such property on self-hosted object: {0}")
  
- // Typed object / SIMD
+ // Typed object
  MSG_DEF(JSMSG_INVALID_PROTOTYPE,       0, JSEXN_TYPEERR, "prototype field is not an object")
  MSG_DEF(JSMSG_TYPEDOBJECT_BAD_ARGS,    0, JSEXN_TYPEERR, "invalid arguments")
 diff --git a/js/src/moz.build.1626713.later b/js/src/moz.build.1626713.later
@@ -743,15 +743,15 @@ diff --git a/js/src/new-regexp/moz.build b/js/src/new-regexp/moz.build
 diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
 --- a/js/src/vm/JSContext.cpp
 +++ b/js/src/vm/JSContext.cpp
-@@ -35,16 +35,19 @@
- #include "builtin/String.h"
- #include "gc/FreeOp.h"
- #include "gc/Marking.h"
- #include "jit/Ion.h"
- #include "jit/PcScriptCache.h"
- #include "js/AutoByteString.h"
+@@ -41,16 +41,19 @@
  #include "js/CharacterEncoding.h"
  #include "js/Printf.h"
+ #ifdef JS_SIMULATOR_ARM64
+ # include "jit/arm64/vixl/Simulator-vixl.h"
+ #endif
+ #ifdef JS_SIMULATOR_ARM
+ # include "jit/arm/Simulator-arm.h"
+ #endif
 +#ifdef JS_NEW_REGEXP
 +#  include "new-regexp/RegExpAPI.h"
 +#endif
@@ -763,7 +763,7 @@ diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
  #include "vm/HelperThreads.h"
  #include "vm/Iteration.h"
  #include "vm/JSAtom.h"
-@@ -97,18 +100,25 @@ js::AutoCycleDetector::~AutoCycleDetecto
+@@ -103,18 +106,25 @@ js::AutoCycleDetector::~AutoCycleDetecto
      }
  }
  
@@ -812,7 +812,7 @@ diff --git a/js/src/vm/JSContext.h b/js/src/vm/JSContext.h
  struct DtoaState;
  
  namespace js {
-@@ -350,18 +353,23 @@ struct JSContext : public JS::RootingCon
+@@ -351,18 +354,23 @@ struct JSContext : public JS::RootingCon
      mozilla::GenericErrorResult<JS::Error&> alreadyReportedError();
  
      /*

+ 28 - 28
frg/work-js/mozilla-release/patches/TOP-NOBUG-REGEXP-35-1435829-66a1-25318.patch

@@ -1,13 +1,13 @@
 # HG changeset patch
 # User Dmitry Butskoy <buc@buc.me>
 # Date 1690631058 -7200
-# Parent  67a32166b33a5511d6d6ec2000a436a5e3466e1f
+# Parent  63c2b290b5a4c651a13dff091cffd4107c95bdf9
 No Bug - Import new regexp V8 engine. r=frg a=frg
 
 diff --git a/js/public/Class.h b/js/public/Class.h
 --- a/js/public/Class.h
 +++ b/js/public/Class.h
-@@ -833,17 +833,17 @@ static const uint32_t JSCLASS_FOREGROUND
+@@ -857,17 +857,17 @@ static const uint32_t JSCLASS_FOREGROUND
  // with the following flags. Failure to use JSCLASS_GLOBAL_FLAGS was
  // previously allowed, but is now an ES5 violation and thus unsupported.
  //
@@ -29,7 +29,7 @@ diff --git a/js/public/Class.h b/js/public/Class.h
 diff --git a/js/src/builtin/RegExp.cpp b/js/src/builtin/RegExp.cpp
 --- a/js/src/builtin/RegExp.cpp
 +++ b/js/src/builtin/RegExp.cpp
-@@ -866,16 +866,17 @@ const JSPropertySpec js::regexp_properti
+@@ -870,16 +870,17 @@ const JSPropertySpec js::regexp_properti
  
  const JSFunctionSpec js::regexp_methods[] = {
      JS_SELF_HOSTED_FN(js_toSource_str, "RegExpToString", 0, 0),
@@ -330,7 +330,7 @@ diff --git a/js/src/builtin/SelfHostingDefines.h b/js/src/builtin/SelfHostingDef
 diff --git a/js/src/builtin/String.cpp b/js/src/builtin/String.cpp
 --- a/js/src/builtin/String.cpp
 +++ b/js/src/builtin/String.cpp
-@@ -3572,16 +3572,17 @@ static const JSFunctionSpec string_metho
+@@ -3564,16 +3564,17 @@ static const JSFunctionSpec string_metho
  #endif
      JS_SELF_HOSTED_FN("repeat", "String_repeat",      1,0),
  #if JS_HAS_INTL_API
@@ -398,7 +398,7 @@ diff --git a/js/src/builtin/String.js b/js/src/builtin/String.js
 diff --git a/js/src/jit/BaselineIC.cpp b/js/src/jit/BaselineIC.cpp
 --- a/js/src/jit/BaselineIC.cpp
 +++ b/js/src/jit/BaselineIC.cpp
-@@ -1880,16 +1880,21 @@ GetTemplateObjectForNative(JSContext* cx
+@@ -1814,16 +1814,21 @@ GetTemplateObjectForNative(JSContext* cx
          return !!res;
      }
  
@@ -412,18 +412,18 @@ diff --git a/js/src/jit/BaselineIC.cpp b/js/src/jit/BaselineIC.cpp
 +        return !!res;
 +    }
 +
-     if (JitSupportsSimd() && GetTemplateObjectForSimd(cx, target, res))
-        return !!res;
- 
      return true;
  }
  
  static bool
  GetTemplateObjectForClassHook(JSContext* cx, JSNative hook, CallArgs& args,
+                               MutableHandleObject templateObject)
+ {
+     if (hook == TypedObject::construct) {
 diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
 --- a/js/src/jit/CodeGenerator.cpp
 +++ b/js/src/jit/CodeGenerator.cpp
-@@ -6316,16 +6316,22 @@ CodeGenerator::visitNewArrayDynamicLengt
+@@ -6418,16 +6418,22 @@ CodeGenerator::visitNewArrayDynamicLengt
  typedef ArrayIteratorObject* (*NewArrayIteratorObjectFn)(JSContext*, NewObjectKind);
  static const VMFunction NewArrayIteratorObjectInfo =
      FunctionInfo<NewArrayIteratorObjectFn>(NewArrayIteratorObject, "NewArrayIteratorObject");
@@ -446,7 +446,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
  
      OutOfLineCode* ool;
      switch (lir->mir()->type()) {
-@@ -6334,16 +6340,22 @@ CodeGenerator::visitNewIterator(LNewIter
+@@ -6436,16 +6442,22 @@ CodeGenerator::visitNewIterator(LNewIter
                          ArgList(Imm32(GenericObject)),
                          StoreRegisterTo(objReg));
          break;
@@ -472,7 +472,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
 diff --git a/js/src/jit/InlinableNatives.h b/js/src/jit/InlinableNatives.h
 --- a/js/src/jit/InlinableNatives.h
 +++ b/js/src/jit/InlinableNatives.h
-@@ -132,25 +132,27 @@
+@@ -121,25 +121,27 @@
      _(IntrinsicObjectHasPrototype)  \
      _(IntrinsicFinishBoundFunctionInit) \
      _(IntrinsicIsPackedArray)       \
@@ -503,7 +503,7 @@ diff --git a/js/src/jit/InlinableNatives.h b/js/src/jit/InlinableNatives.h
 diff --git a/js/src/jit/MCallOptimize.cpp b/js/src/jit/MCallOptimize.cpp
 --- a/js/src/jit/MCallOptimize.cpp
 +++ b/js/src/jit/MCallOptimize.cpp
-@@ -221,16 +221,18 @@ IonBuilder::inlineNativeCall(CallInfo& c
+@@ -220,16 +220,18 @@ IonBuilder::inlineNativeCall(CallInfo& c
        case InlinableNative::IsPossiblyWrappedRegExpObject:
          return inlineIsPossiblyWrappedRegExpObject(callInfo);
        case InlinableNative::RegExpPrototypeOptimizable:
@@ -522,7 +522,7 @@ diff --git a/js/src/jit/MCallOptimize.cpp b/js/src/jit/MCallOptimize.cpp
          return inlineStrCharCodeAt(callInfo);
        case InlinableNative::StringFromCharCode:
          return inlineStrFromCharCode(callInfo);
-@@ -327,16 +329,18 @@ IonBuilder::inlineNativeCall(CallInfo& c
+@@ -304,16 +306,18 @@ IonBuilder::inlineNativeCall(CallInfo& c
        case InlinableNative::IntrinsicGuardToArrayIterator:
          return inlineGuardToClass(callInfo, &ArrayIteratorObject::class_);
        case InlinableNative::IntrinsicGuardToMapIterator:
@@ -541,7 +541,7 @@ diff --git a/js/src/jit/MCallOptimize.cpp b/js/src/jit/MCallOptimize.cpp
          return inlineIsPackedArray(callInfo);
  
        // Map intrinsics.
-@@ -1044,16 +1048,20 @@ IonBuilder::inlineNewIterator(CallInfo& 
+@@ -1018,16 +1022,20 @@ IonBuilder::inlineNewIterator(CallInfo& 
        case MNewIterator::ArrayIterator:
          templateObject = inspector->getTemplateObjectForNative(pc, js::intrinsic_NewArrayIterator);
          MOZ_ASSERT_IF(templateObject, templateObject->is<ArrayIteratorObject>());
@@ -565,7 +565,7 @@ diff --git a/js/src/jit/MCallOptimize.cpp b/js/src/jit/MCallOptimize.cpp
 diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
 --- a/js/src/jit/MIR.h
 +++ b/js/src/jit/MIR.h
-@@ -3640,16 +3640,17 @@ class MNewObject
+@@ -2529,16 +2529,17 @@ class MNewObject
  class MNewIterator
    : public MUnaryInstruction,
      public NoTypePolicy::Data
@@ -586,7 +586,7 @@ diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
 diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
 --- a/js/src/jit/Recover.cpp
 +++ b/js/src/jit/Recover.cpp
-@@ -1506,16 +1506,19 @@ RNewIterator::recover(JSContext* cx, Sna
+@@ -1505,16 +1505,19 @@ RNewIterator::recover(JSContext* cx, Sna
      JSObject* resultObject = nullptr;
      switch (MNewIterator::Type(type_)) {
        case MNewIterator::ArrayIterator:
@@ -609,7 +609,7 @@ diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
 diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h
 --- a/js/src/jit/VMFunctions.h
 +++ b/js/src/jit/VMFunctions.h
-@@ -336,16 +336,17 @@ template <> struct TypeToDataType<Native
+@@ -338,16 +338,17 @@ template <> struct TypeToDataType<Native
  template <> struct TypeToDataType<PlainObject*> { static const DataType result = Type_Object; };
  template <> struct TypeToDataType<InlineTypedObject*> { static const DataType result = Type_Object; };
  template <> struct TypeToDataType<NamedLambdaObject*> { static const DataType result = Type_Object; };
@@ -630,7 +630,7 @@ diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h
 diff --git a/js/src/jsapi.h b/js/src/jsapi.h
 --- a/js/src/jsapi.h
 +++ b/js/src/jsapi.h
-@@ -4764,17 +4764,18 @@ GetSymbolDescription(HandleSymbol symbol
+@@ -4693,17 +4693,18 @@ GetSymbolDescription(HandleSymbol symbol
      macro(replace) \
      macro(search) \
      macro(species) \
@@ -653,7 +653,7 @@ diff --git a/js/src/jsapi.h b/js/src/jsapi.h
 diff --git a/js/src/vm/CommonPropertyNames.h b/js/src/vm/CommonPropertyNames.h
 --- a/js/src/vm/CommonPropertyNames.h
 +++ b/js/src/vm/CommonPropertyNames.h
-@@ -349,16 +349,17 @@
+@@ -350,16 +350,17 @@
            ReadableStreamDefaultReader_read, \
            "ReadableStreamDefaultReader_read") \
      macro(ReadableStreamDefaultReader_releaseLock, \
@@ -771,7 +771,7 @@ diff --git a/js/src/vm/GlobalObject.cpp b/js/src/vm/GlobalObject.cpp
 diff --git a/js/src/vm/GlobalObject.h b/js/src/vm/GlobalObject.h
 --- a/js/src/vm/GlobalObject.h
 +++ b/js/src/vm/GlobalObject.h
-@@ -72,16 +72,17 @@ class GlobalObject : public NativeObject
+@@ -69,16 +69,17 @@ class GlobalObject : public NativeObject
          THROWTYPEERROR,
  
          /* One-off properties stored after slots for built-ins. */
@@ -789,7 +789,7 @@ diff --git a/js/src/vm/GlobalObject.h b/js/src/vm/GlobalObject.h
          ASYNC_ITERATOR_PROTO,
          ASYNC_FROM_SYNC_ITERATOR_PROTO,
          ASYNC_GENERATOR,
-@@ -570,16 +571,22 @@ class GlobalObject : public NativeObject
+@@ -556,16 +557,22 @@ class GlobalObject : public NativeObject
  
      static NativeObject*
      getOrCreateStringIteratorPrototype(JSContext* cx, Handle<GlobalObject*> global) {
@@ -812,7 +812,7 @@ diff --git a/js/src/vm/GlobalObject.h b/js/src/vm/GlobalObject.h
  
      static JSObject*
      getOrCreateGeneratorFunctionPrototype(JSContext* cx, Handle<GlobalObject*> global) {
-@@ -749,16 +756,17 @@ class GlobalObject : public NativeObject
+@@ -735,16 +742,17 @@ class GlobalObject : public NativeObject
  
      // Infallibly test whether the given value is the eval function for this global.
      bool valueIsEval(const Value& val);
@@ -851,7 +851,7 @@ diff --git a/js/src/vm/Iteration.cpp b/js/src/vm/Iteration.cpp
  #include "vm/GeneratorObject.h"
  #include "vm/GlobalObject.h"
  #include "vm/Interpreter.h"
-@@ -1130,16 +1131,88 @@ js::NewStringIteratorObject(JSContext* c
+@@ -1131,16 +1132,88 @@ js::NewStringIteratorObject(JSContext* c
  {
      RootedObject proto(cx, GlobalObject::getOrCreateStringIteratorPrototype(cx, cx->global()));
      if (!proto)
@@ -940,7 +940,7 @@ diff --git a/js/src/vm/Iteration.cpp b/js/src/vm/Iteration.cpp
          /* Common case. */
          obj = &vp.toObject();
      } else if (vp.isNullOrUndefined()) {
-@@ -1463,8 +1536,34 @@ GlobalObject::initStringIteratorProto(JS
+@@ -1464,8 +1537,34 @@ GlobalObject::initStringIteratorProto(JS
          !DefineToStringTag(cx, proto, cx->names().StringIterator))
      {
          return false;
@@ -978,7 +978,7 @@ diff --git a/js/src/vm/Iteration.cpp b/js/src/vm/Iteration.cpp
 diff --git a/js/src/vm/Iteration.h b/js/src/vm/Iteration.h
 --- a/js/src/vm/Iteration.h
 +++ b/js/src/vm/Iteration.h
-@@ -356,16 +356,25 @@ JSObject*
+@@ -355,16 +355,25 @@ JSObject*
  GetIterator(JSContext* cx, HandleObject obj);
  
  PropertyIteratorObject*
@@ -1007,7 +1007,7 @@ diff --git a/js/src/vm/Iteration.h b/js/src/vm/Iteration.h
 diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
 --- a/js/src/vm/SelfHosting.cpp
 +++ b/js/src/vm/SelfHosting.cpp
-@@ -840,16 +840,31 @@ js::intrinsic_NewStringIterator(JSContex
+@@ -842,16 +842,31 @@ js::intrinsic_NewStringIterator(JSContex
      JSObject* obj = NewStringIteratorObject(cx);
      if (!obj)
          return false;
@@ -1039,7 +1039,7 @@ diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
  
      RootedFunction fun(cx, &args[0].toObject().as<JSFunction>());
      MOZ_ASSERT(fun->isSelfHostedBuiltin());
-@@ -2487,16 +2502,20 @@ static const JSFunctionSpec intrinsic_fu
+@@ -2478,16 +2493,20 @@ static const JSFunctionSpec intrinsic_fu
                      IntrinsicGuardToMapIterator),
      JS_INLINABLE_FN("GuardToSetIterator",
                      intrinsic_GuardToBuiltin<SetIteratorObject>,   1,0,
@@ -1060,7 +1060,7 @@ diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
  
      JS_FN("_CreateSetIterationResult", intrinsic_CreateSetIterationResult, 0, 0),
      JS_INLINABLE_FN("_GetNextSetEntryForIterator", intrinsic_GetNextSetEntryForIterator, 2,0,
-@@ -2505,16 +2524,22 @@ static const JSFunctionSpec intrinsic_fu
+@@ -2496,16 +2515,22 @@ static const JSFunctionSpec intrinsic_fu
            CallNonGenericSelfhostedMethod<Is<SetIteratorObject>>,        2,0),
  
  

+ 22 - 16
frg/work-js/mozilla-release/patches/TOP-NOBUG-REGEXP-37-1642493-79a1-25318.patch

@@ -1,7 +1,7 @@
 # HG changeset patch
 # User Dmitry Butskoy <buc@buc.me>
 # Date 1690631112 -7200
-# Parent  0d1a9489f19b1283fbe802df310ebf8d9ff40886
+# Parent  5e0066d417942fa0d04d9dc3cee75e43d0833dfe
 No Bug - Import new regexp V8 engine. r=frg a=frg
 
 diff --git a/.clang-format-ignore b/.clang-format-ignore
@@ -230,7 +230,7 @@ diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctio
  #include "js/AutoByteString.h"
  #include "js/Debug.h"
  #include "js/HashTable.h"
-@@ -4524,370 +4519,16 @@ GetModuleEnvironmentValue(JSContext* cx,
+@@ -4485,370 +4480,16 @@ GetModuleEnvironmentValue(JSContext* cx,
      if (args.rval().isMagic(JS_UNINITIALIZED_LEXICAL)) {
          ReportRuntimeLexicalError(cx, JSMSG_UNINITIALIZED_LEXICAL, id);
          return false;
@@ -601,7 +601,7 @@ diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctio
  
      if (args.length() != 0) {
          ReportUsageErrorASCII(cx, callee, "Wrong number of arguments");
-@@ -6005,26 +5646,16 @@ gc::ZealModeHelpText),
+@@ -5962,26 +5603,16 @@ gc::ZealModeHelpText),
  "    baselineCompile();  for (var i=0; i<1; i++) {} ...\n"
  "  The interpreter will enter the new jitcode at the loop header unless\n"
  "  baselineCompile returned a string or threw an error.\n"),
@@ -23890,7 +23890,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
  #include "vm/RegExpStatics.h"
  #include "vm/StringType.h"
  #include "vm/TraceLogging.h"
-@@ -1500,18 +1495,16 @@ static void UpdateRegExpStatics(MacroAss
+@@ -1499,18 +1494,16 @@ static void UpdateRegExpStatics(MacroAss
                                     RegExpObject::PRIVATE_SLOT)),
                 temp1);
    masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
@@ -23909,7 +23909,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
                                      Register regexp, Register input,
                                      Register lastIndex, Register temp1,
                                      Register temp2, Register temp3,
-@@ -1711,216 +1704,16 @@ static bool PrepareAndExecuteRegExp(JSCo
+@@ -1710,216 +1703,16 @@ static bool PrepareAndExecuteRegExp(JSCo
    }
    masm.movePtr(ImmPtr(res), temp1);
    UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
@@ -24126,7 +24126,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
  {
      CharEncoding encoding_;
      Register string_;
-@@ -2237,27 +2030,25 @@ JitRealm::generateRegExpMatcherStub(JSCo
+@@ -2236,27 +2029,25 @@ JitRealm::generateRegExpMatcherStub(JSCo
      Label notFound, oolEntry;
      if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex,
                                   temp1, temp2, temp3, inputOutputDataStartOffset,
@@ -24154,7 +24154,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
      masm.bind(&matchResultJoin);
  
      // Initialize slots of result object.
-@@ -2272,21 +2063,19 @@ JitRealm::generateRegExpMatcherStub(JSCo
+@@ -2271,21 +2062,19 @@ JitRealm::generateRegExpMatcherStub(JSCo
                  
      // Initialize the slots of the result object with the dummy values
      // defined in createMatchResultTemplateObject.
@@ -24179,7 +24179,7 @@ diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
 diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
 --- a/js/src/jit/JitOptions.cpp
 +++ b/js/src/jit/JitOptions.cpp
-@@ -153,21 +153,19 @@ DefaultJitOptions::DefaultJitOptions()
+@@ -150,21 +150,19 @@ DefaultJitOptions::DefaultJitOptions()
  
      // Whether to enable extra code to perform dynamic validations.
      SET_DEFAULT(runExtraChecks, false);
@@ -24204,7 +24204,7 @@ diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
      // Number of bailouts without invalidation before we set
      // JSScript::hadFrequentBailouts and invalidate.
      SET_DEFAULT(frequentBailoutThreshold, 10);
-@@ -275,19 +273,17 @@ DefaultJitOptions::enableGvn(bool enable
+@@ -272,19 +270,17 @@ DefaultJitOptions::enableGvn(bool enable
      disableGvn = !enable;
  }
  
@@ -24227,7 +24227,7 @@ diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
 diff --git a/js/src/jit/JitOptions.h b/js/src/jit/JitOptions.h
 --- a/js/src/jit/JitOptions.h
 +++ b/js/src/jit/JitOptions.h
-@@ -70,19 +70,17 @@ struct DefaultJitOptions
+@@ -69,19 +69,17 @@ struct DefaultJitOptions
      bool forceInlineCaches;
      bool fullDebugChecks;
      bool limitScriptSize;
@@ -24273,7 +24273,7 @@ diff --git a/js/src/jit/arm64/Assembler-arm64.h b/js/src/jit/arm64/Assembler-arm
 diff --git a/js/src/js.msg b/js/src/js.msg
 --- a/js/src/js.msg
 +++ b/js/src/js.msg
-@@ -511,30 +511,28 @@ MSG_DEF(JSMSG_INVALID_KEY,             1
+@@ -510,30 +510,28 @@ MSG_DEF(JSMSG_INVALID_KEY,             1
  MSG_DEF(JSMSG_INVALID_LANGUAGE_TAG,    1, JSEXN_RANGEERR, "invalid language tag: {0}")
  MSG_DEF(JSMSG_INVALID_LOCALES_ELEMENT, 0, JSEXN_TYPEERR, "invalid element in locales argument")
  MSG_DEF(JSMSG_INVALID_LOCALE_MATCHER,  1, JSEXN_RANGEERR, "invalid locale matcher in supportedLocalesOf(): {0}")
@@ -41975,7 +41975,7 @@ diff --git a/js/src/shell/js.cpp b/js/src/shell/js.cpp
 diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
 --- a/js/src/vm/JSContext.cpp
 +++ b/js/src/vm/JSContext.cpp
-@@ -30,24 +30,22 @@
+@@ -30,30 +30,28 @@
  
  #include "jsexn.h"
  #include "jspubtd.h"
@@ -41990,6 +41990,12 @@ diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
  #include "js/AutoByteString.h"
  #include "js/CharacterEncoding.h"
  #include "js/Printf.h"
+ #ifdef JS_SIMULATOR_ARM64
+ # include "jit/arm64/vixl/Simulator-vixl.h"
+ #endif
+ #ifdef JS_SIMULATOR_ARM
+ # include "jit/arm/Simulator-arm.h"
+ #endif
 -#ifdef JS_NEW_REGEXP
 -#  include "new-regexp/RegExpAPI.h"
 -#endif
@@ -42001,7 +42007,7 @@ diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
  #include "vm/HelperThreads.h"
  #include "vm/Iteration.h"
  #include "vm/JSAtom.h"
-@@ -100,40 +98,33 @@ js::AutoCycleDetector::~AutoCycleDetecto
+@@ -106,40 +104,33 @@ js::AutoCycleDetector::~AutoCycleDetecto
      }
  }
  
@@ -42042,7 +42048,7 @@ diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
      return true;
  }
  
-@@ -1326,19 +1317,17 @@ JSContext::~JSContext()
+@@ -1318,19 +1309,17 @@ JSContext::~JSContext()
      js::jit::Simulator::Destroy(simulator_);
  #endif
  
@@ -42091,7 +42097,7 @@ diff --git a/js/src/vm/JSContext.h b/js/src/vm/JSContext.h
  struct DtoaState;
  
  namespace js {
-@@ -353,23 +351,18 @@ struct JSContext : public JS::RootingCon
+@@ -354,23 +352,18 @@ struct JSContext : public JS::RootingCon
      mozilla::GenericErrorResult<JS::Error&> alreadyReportedError();
  
      /*
@@ -42922,7 +42928,7 @@ diff --git a/js/src/vm/RegExpObject.cpp b/js/src/vm/RegExpObject.cpp
      AddTypePropertyId(cx, templateObject, JSID_VOID, TypeSet::UndefinedType());
  
      matchResultTemplateObject_.set(templateObject);
-@@ -1940,23 +1341,18 @@ JS_PUBLIC_API(bool) JS::CheckRegExpSynta
+@@ -1939,23 +1340,18 @@ JS_PUBLIC_API(bool) JS::CheckRegExpSynta
                                           MutableHandleValue error) {
    AssertHeapIsIdle();
    CHECK_REQUEST(cx);

+ 15 - 1
frg/work-js/mozilla-release/patches/series

@@ -4987,6 +4987,7 @@ taken-out-stuff-comes-here.patch
 1467438-2-62a1.patch
 1467438-3-62a1.patch
 1467438-5-62a1.patch
+1450261-1-62a1.patch
 1466626-2no1-63a1.patch
 1466626-3-63a1.patch
 1398839-63a1.patch
@@ -5019,12 +5020,21 @@ taken-out-stuff-comes-here.patch
 1470250-1-63a1.patch
 1464782-1-63a1.patch
 1464782-2no34-63a1.patch
+1459900-4-63a1.patch
+1459900-5-63a1.patch
+1459900-6-63a1.patch
+1471289-63a1.patch
 1470522-63a1.patch
 1447591-1-63a1.patch
 1447591-2-63a1.patch
 1471272-63a1.patch
+1450261-2-63a1.patch
+1450261-3-63a1.patch
+1450261-4-63a1.patch
+1472974-63a1.patch
 1471841-63a1.patch
 1472734-63a1.patch
+1473956-63a1.patch
 1412200-63a1.patch
 1471931-1-63a1.patch
 1471931-2-63a1.patch
@@ -5252,7 +5262,7 @@ NOBUG-20180720-tokenstream-63a1.patch
 1476866-15-63a1.patch
 1456006-1-63a1.patch
 1456006-2-63a1.patch
-1477157-63a1.patch
+1475943-63a1.patch
 1323381-63a1.patch
 1477579-1-63a1.patch
 1477579-2-63a1.patch
@@ -5319,6 +5329,9 @@ NOBUG-20180720-tokenstream-63a1.patch
 1478036-63a1.patch
 1472211-1-63a1.patch
 1472211-2-63a1.patch
+1476953-63a1.patch
+1416723-1-63a1.patch
+1416723-2-63a1.patch
 1478587-01-63a1.patch
 1478587-02-63a1.patch
 1478587-03-63a1.patch
@@ -5330,6 +5343,7 @@ NOBUG-20180720-tokenstream-63a1.patch
 1478587-09-63a1.patch
 1478587-10-63a1.patch
 1478892-1-63a1.patch
+1477157-63a1.patch
 1477090-63a1.patch
 1469004-63a1.patch
 1480720-63a1.patch

+ 17 - 1
frg/work-js/mozilla-release/patches/series-test

@@ -4986,6 +4986,7 @@ NOBUG-20231031-formattingfixes-253.patch
 1467438-2-62a1.patch
 1467438-3-62a1.patch
 1467438-5-62a1.patch
+1450261-1-62a1.patch
 1466626-2no1-63a1.patch
 1466626-3-63a1.patch
 1398839-63a1.patch
@@ -5018,12 +5019,21 @@ NOBUG-20231031-formattingfixes-253.patch
 1470250-1-63a1.patch
 1464782-1-63a1.patch
 1464782-2no34-63a1.patch
+1459900-4-63a1.patch
+1459900-5-63a1.patch
+1459900-6-63a1.patch
+1471289-63a1.patch
 1470522-63a1.patch
 1447591-1-63a1.patch
 1447591-2-63a1.patch
 1471272-63a1.patch
+1450261-2-63a1.patch
+1450261-3-63a1.patch
+1450261-4-63a1.patch
+1472974-63a1.patch
 1471841-63a1.patch
 1472734-63a1.patch
+1473956-63a1.patch
 1412200-63a1.patch
 1471931-1-63a1.patch
 1471931-2-63a1.patch
@@ -5246,7 +5256,7 @@ NOBUG-20180720-tokenstream-63a1.patch
 1476866-15-63a1.patch
 1456006-1-63a1.patch
 1456006-2-63a1.patch
-1477157-63a1.patch
+1475943-63a1.patch
 1323381-63a1.patch
 1477579-1-63a1.patch
 1477579-2-63a1.patch
@@ -5313,6 +5323,9 @@ NOBUG-20180720-tokenstream-63a1.patch
 1478036-63a1.patch
 1472211-1-63a1.patch
 1472211-2-63a1.patch
+1476953-63a1.patch
+1416723-1-63a1.patch
+1416723-2-63a1.patch
 1478587-01-63a1.patch
 1478587-02-63a1.patch
 1478587-03-63a1.patch
@@ -5324,6 +5337,7 @@ NOBUG-20180720-tokenstream-63a1.patch
 1478587-09-63a1.patch
 1478587-10-63a1.patch
 1478892-1-63a1.patch
+1477157-63a1.patch
 1477090-63a1.patch
 1469004-63a1.patch
 1480720-63a1.patch
@@ -5808,6 +5822,8 @@ NOBUG-20180824-buildsetting-63a1.patch
 1488217-64a1.patch
 1489454-libmar-64a1.patch
 1458129-64a1.patch
+1502886-1-65a1.patch
+1502886-2-65a1.patch
 1499844-65a1.patch
 1498072-65a1.patch
 1501712-65a1.patch