Mirror of https://gitlab.winehq.org/wine/wine-gecko.git, synced 2024-09-13 09:24:08 -07:00

Bug 1118084 - Remove self-hosted and user-exposed PJS methods. (r=lth)

Commit 50c6db483c (parent 3bda017935)
@@ -313,7 +313,6 @@ selfhosting:: selfhosted.out.h
|
||||
|
||||
selfhosting_srcs := \
|
||||
$(srcdir)/builtin/Utilities.js \
|
||||
$(srcdir)/builtin/ParallelUtilities.js \
|
||||
$(srcdir)/builtin/Array.js \
|
||||
$(srcdir)/builtin/Date.js \
|
||||
$(srcdir)/builtin/Error.js \
|
||||
|
@@ -782,526 +782,3 @@ function ArrayFrom(arrayLike, mapfn=undefined, thisArg=undefined) {
|
||||
A.length = k;
|
||||
return A;
|
||||
}
|
||||
|
||||
#ifdef ENABLE_PARALLEL_JS
|
||||
|
||||
/*
|
||||
* Strawman spec:
|
||||
* http://wiki.ecmascript.org/doku.php?id=strawman:data_parallelism
|
||||
*/
|
||||
|
||||
/**
|
||||
* Creates a new array by applying |func(e, i, self)| for each element |e|
|
||||
* with index |i|.
|
||||
*/
|
||||
function ArrayMapPar(func, mode) {
|
||||
if (!IsCallable(func))
|
||||
ThrowError(JSMSG_NOT_FUNCTION, DecompileArg(0, func));
|
||||
|
||||
var self = ToObject(this);
|
||||
var length = self.length;
|
||||
var buffer = NewDenseArray(length);
|
||||
|
||||
parallel: for (;;) {
|
||||
// Avoid parallel compilation if we are already nested in another
|
||||
// parallel section or the user told us not to parallelize. The
|
||||
// use of a for (;;) loop is working around some ion limitations:
|
||||
//
|
||||
// - Breaking out of named blocks does not currently work (bug 684384);
|
||||
// - Unreachable Code Elim. can't properly handle if (a && b) (bug 669796)
|
||||
if (ShouldForceSequential())
|
||||
break parallel;
|
||||
if (!TRY_PARALLEL(mode))
|
||||
break parallel;
|
||||
|
||||
var slicesInfo = ComputeSlicesInfo(length);
|
||||
ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode), buffer);
|
||||
return buffer;
|
||||
}
|
||||
|
||||
// Sequential fallback:
|
||||
ASSERT_SEQUENTIAL_IS_OK(mode);
|
||||
for (var i = 0; i < length; i++)
|
||||
UnsafePutElements(buffer, i, func(self[i], i, self));
|
||||
return buffer;
|
||||
|
||||
function mapThread(workerId, sliceStart, sliceEnd) {
|
||||
var sliceShift = slicesInfo.shift;
|
||||
var sliceId;
|
||||
while (GET_SLICE(sliceStart, sliceEnd, sliceId)) {
|
||||
var indexStart = SLICE_START_INDEX(sliceShift, sliceId);
|
||||
var indexEnd = SLICE_END_INDEX(sliceShift, indexStart, length);
|
||||
for (var i = indexStart; i < indexEnd; i++)
|
||||
UnsafePutElements(buffer, i, func(self[i], i, self));
|
||||
}
|
||||
return sliceId;
|
||||
}
|
||||
}
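// A minimal plain-JS model of the control flow above, assuming ordinary
// JavaScript (the name |mapParModel| and the |canGoParallel| flag are
// illustrative, not part of this patch): try the parallel path inside the
// labeled for(;;), and fall through to the element-by-element loop on
// |break parallel|, just as ArrayMapPar does.
function mapParModel(self, func, canGoParallel) {
  var length = self.length;
  var buffer = new Array(length);
  parallel: for (;;) {
    if (!canGoParallel)
      break parallel;                  // plays the role of ShouldForceSequential()/TRY_PARALLEL
    for (var i = 0; i < length; i++)   // stands in for ForkJoin over the computed slices
      buffer[i] = func(self[i], i, self);
    return buffer;
  }
  // Sequential fallback.
  for (var i = 0; i < length; i++)
    buffer[i] = func(self[i], i, self);
  return buffer;
}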
|
||||
|
||||
/**
|
||||
* Reduces the elements in an array in parallel. Order is not fixed and |func|
|
||||
* is assumed to be associative.
|
||||
*/
|
||||
function ArrayReducePar(func, mode) {
|
||||
if (!IsCallable(func))
|
||||
ThrowError(JSMSG_NOT_FUNCTION, DecompileArg(0, func));
|
||||
|
||||
var self = ToObject(this);
|
||||
var length = self.length;
|
||||
|
||||
if (length === 0)
|
||||
ThrowError(JSMSG_EMPTY_ARRAY_REDUCE);
|
||||
|
||||
parallel: for (;;) { // see ArrayMapPar() to explain why for(;;) etc
|
||||
if (ShouldForceSequential())
|
||||
break parallel;
|
||||
if (!TRY_PARALLEL(mode))
|
||||
break parallel;
|
||||
|
||||
var slicesInfo = ComputeSlicesInfo(length);
|
||||
var numSlices = slicesInfo.count;
|
||||
var subreductions = NewDenseArray(numSlices);
|
||||
|
||||
ForkJoin(reduceThread, 0, numSlices, ForkJoinMode(mode), subreductions);
|
||||
|
||||
var accumulator = subreductions[0];
|
||||
for (var i = 1; i < numSlices; i++)
|
||||
accumulator = func(accumulator, subreductions[i]);
|
||||
return accumulator;
|
||||
}
|
||||
|
||||
// Sequential fallback:
|
||||
ASSERT_SEQUENTIAL_IS_OK(mode);
|
||||
var accumulator = self[0];
|
||||
for (var i = 1; i < length; i++)
|
||||
accumulator = func(accumulator, self[i]);
|
||||
return accumulator;
|
||||
|
||||
function reduceThread(workerId, sliceStart, sliceEnd) {
|
||||
var sliceShift = slicesInfo.shift;
|
||||
var sliceId;
|
||||
while (GET_SLICE(sliceStart, sliceEnd, sliceId)) {
|
||||
var indexStart = SLICE_START_INDEX(sliceShift, sliceId);
|
||||
var indexEnd = SLICE_END_INDEX(sliceShift, indexStart, length);
|
||||
var accumulator = self[indexStart];
|
||||
for (var i = indexStart + 1; i < indexEnd; i++)
|
||||
accumulator = func(accumulator, self[i]);
|
||||
UnsafePutElements(subreductions, sliceId, accumulator);
|
||||
}
|
||||
return sliceId;
|
||||
}
|
||||
}
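// Plain-JS illustration of the associativity requirement stated above (the
// helper name and the uniform slice size are assumptions of this sketch):
// each slice is reduced independently and the per-slice results are folded
// together afterwards, so the grouping of |func| applications differs from
// a strict left-to-right reduce.
function reduceParModel(elems, func, numSlices) {
  var sliceSize = Math.ceil(elems.length / numSlices);
  var subreductions = [];
  for (var s = 0; s < numSlices; s++) {
    var start = s * sliceSize;
    var end = Math.min(start + sliceSize, elems.length);
    if (start >= end)
      continue;
    var acc = elems[start];
    for (var i = start + 1; i < end; i++)
      acc = func(acc, elems[i]);
    subreductions.push(acc);
  }
  var result = subreductions[0];
  for (var s = 1; s < subreductions.length; s++)
    result = func(result, subreductions[s]);
  return result;
}
// reduceParModel([1, 2, 3, 4, 5, 6], (a, b) => a + b, 3) === 21, matching a
// sequential reduce because addition is associative.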
|
||||
|
||||
/**
|
||||
* Returns an array [s_0, ..., s_N] where |s_i| is equal to the reduction (as
|
||||
* per |reduce()|) of elements |0..i|. This is the generalization of partial
|
||||
* sum.
|
||||
*/
|
||||
function ArrayScanPar(func, mode) {
|
||||
if (!IsCallable(func))
|
||||
ThrowError(JSMSG_NOT_FUNCTION, DecompileArg(0, func));
|
||||
|
||||
var self = ToObject(this);
|
||||
var length = self.length;
|
||||
|
||||
if (length === 0)
|
||||
ThrowError(JSMSG_EMPTY_ARRAY_REDUCE);
|
||||
|
||||
// We need two buffers because phase2() will read an intermediate result and
|
||||
// write a final result; that is safe against bailout-and-restart only if
|
||||
// the intermediate and final buffers are distinct. (Bug 1023755)
|
||||
// Obviously paying for a second buffer is undesirable.
|
||||
var buffer = NewDenseArray(length);
|
||||
var buffer2 = NewDenseArray(length);
|
||||
|
||||
parallel: for (;;) { // see ArrayMapPar() to explain why for(;;) etc
|
||||
if (ShouldForceSequential())
|
||||
break parallel;
|
||||
if (!TRY_PARALLEL(mode))
|
||||
break parallel;
|
||||
|
||||
var slicesInfo = ComputeSlicesInfo(length);
|
||||
var numSlices = slicesInfo.count;
|
||||
|
||||
// Scan slices individually (see comment on phase1()).
|
||||
ForkJoin(phase1, 0, numSlices, ForkJoinMode(mode), buffer);
|
||||
|
||||
// Compute intermediates array (see comment on phase2()).
|
||||
var intermediates = [];
|
||||
var accumulator = buffer[finalElement(0)];
|
||||
ARRAY_PUSH(intermediates, accumulator);
|
||||
for (var i = 1; i < numSlices - 1; i++) {
|
||||
accumulator = func(accumulator, buffer[finalElement(i)]);
|
||||
ARRAY_PUSH(intermediates, accumulator);
|
||||
}
|
||||
|
||||
// Complete each slice using intermediates array (see comment on phase2()).
|
||||
//
|
||||
// Slice 0 must be handled specially - it's just a copy - since we don't
|
||||
// have an identity value for the operation.
|
||||
for ( var k=0, limit=finalElement(0) ; k <= limit ; k++ )
|
||||
buffer2[k] = buffer[k];
|
||||
ForkJoin(phase2, 1, numSlices, ForkJoinMode(mode), buffer2);
|
||||
return buffer2;
|
||||
}
|
||||
|
||||
// Sequential fallback:
|
||||
ASSERT_SEQUENTIAL_IS_OK(mode);
|
||||
scan(self[0], 0, length);
|
||||
return buffer;
|
||||
|
||||
function scan(accumulator, start, end) {
|
||||
UnsafePutElements(buffer, start, accumulator);
|
||||
for (var i = start + 1; i < end; i++) {
|
||||
accumulator = func(accumulator, self[i]);
|
||||
UnsafePutElements(buffer, i, accumulator);
|
||||
}
|
||||
return accumulator;
|
||||
}
|
||||
|
||||
/**
|
||||
* In phase 1, we divide the source array into |numSlices| slices and
|
||||
* compute scan on each slice sequentially as if it were the entire
|
||||
* array. This function is responsible for computing one of those
|
||||
* slices.
|
||||
*
|
||||
* So, if we have an array [A,B,C,D,E,F,G,H,I], |numSlices === 3|,
|
||||
* and our function |func| is sum, then we would wind up computing a
|
||||
* result array like:
|
||||
*
|
||||
* [A, A+B, A+B+C, D, D+E, D+E+F, G, G+H, G+H+I]
|
||||
* ^~~~~~~~~~~~^ ^~~~~~~~~~~~^ ^~~~~~~~~~~~~^
|
||||
* Slice 0 Slice 1 Slice 2
|
||||
*
|
||||
* Read on in phase2 to see what we do next!
|
||||
*/
|
||||
function phase1(workerId, sliceStart, sliceEnd) {
|
||||
var sliceShift = slicesInfo.shift;
|
||||
var sliceId;
|
||||
while (GET_SLICE(sliceStart, sliceEnd, sliceId)) {
|
||||
var indexStart = SLICE_START_INDEX(sliceShift, sliceId);
|
||||
var indexEnd = SLICE_END_INDEX(sliceShift, indexStart, length);
|
||||
scan(self[indexStart], indexStart, indexEnd);
|
||||
}
|
||||
return sliceId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes the index of the final element computed by the slice |sliceId|.
|
||||
*/
|
||||
function finalElement(sliceId) {
|
||||
var sliceShift = slicesInfo.shift;
|
||||
return SLICE_END_INDEX(sliceShift, SLICE_START_INDEX(sliceShift, sliceId), length) - 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* After computing the phase1 results, we compute an
|
||||
* |intermediates| array. |intermediates[i]| contains the result
|
||||
* of reducing the final value from each preceding slice j<i with
|
||||
* the final value of slice i. So, to continue our previous
|
||||
* example, the intermediates array would contain:
|
||||
*
|
||||
* [A+B+C, (A+B+C)+(D+E+F), ((A+B+C)+(D+E+F))+(G+H+I)]
|
||||
*
|
||||
* Here I have used parenthesization to make clear the order of
|
||||
* evaluation in each case.
|
||||
*
|
||||
* An aside: currently the intermediates array is computed
|
||||
* sequentially. In principle, we could compute it in parallel,
|
||||
* at the cost of doing duplicate work. This did not seem
|
||||
* particularly advantageous to me, particularly as the number
|
||||
* of slices is typically quite small (one per core), so I opted
|
||||
* to just compute it sequentially.
|
||||
*
|
||||
* Phase 2 combines the results of phase1 with the intermediates
|
||||
* array to produce the final scan results. The idea is to
|
||||
* reiterate over each element S[i] in the slice |sliceId|, which
|
||||
* currently contains the result of reducing with S[0]...S[i]
|
||||
* (where S0 is the first thing in the slice), and combine that
|
||||
* with |intermediate[sliceId-1]|, which represents the result of
|
||||
* reducing everything in the input array prior to the slice.
|
||||
*
|
||||
* To continue with our example, in phase 1 we computed slice 1 to
|
||||
* be [D, D+E, D+E+F]. We will combine those results with
|
||||
* |intermediates[1-1]|, which is |A+B+C|, so that the final
|
||||
* result is [(A+B+C)+D, (A+B+C)+(D+E), (A+B+C)+(D+E+F)]. Again I
|
||||
* am using parentheses to clarify how these results were reduced.
|
||||
*/
|
||||
function phase2(workerId, sliceStart, sliceEnd) {
|
||||
var sliceShift = slicesInfo.shift;
|
||||
var sliceId;
|
||||
while (GET_SLICE(sliceStart, sliceEnd, sliceId)) {
|
||||
var indexPos = SLICE_START_INDEX(sliceShift, sliceId);
|
||||
var indexEnd = SLICE_END_INDEX(sliceShift, indexPos, length);
|
||||
var intermediate = intermediates[sliceId - 1];
|
||||
for (; indexPos < indexEnd; indexPos++)
|
||||
UnsafePutElements(buffer2, indexPos, func(intermediate, buffer[indexPos]));
|
||||
}
|
||||
return sliceId;
|
||||
}
|
||||
}
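// The worked example above, as a runnable plain-JS model (hypothetical
// helper; it uses equally sized slices instead of the shift-based slices
// from ComputeSlicesInfo). With elems = [1,2,3,4,5,6,7,8,9], numSlices = 3
// and addition:
//   phase 1       -> [1,3,6, 4,9,15, 7,15,24]
//   intermediates -> [6, 21]
//   phase 2       -> [1,3,6, 10,15,21, 28,36,45]   (the full prefix sums)
function scanParModel(elems, func, numSlices) {
  var n = elems.length;
  var sliceSize = Math.ceil(n / numSlices);
  // Phase 1: scan each slice as if it were the whole array.
  var phase1 = elems.slice();
  for (var s = 0; s < numSlices; s++) {
    var start = s * sliceSize, end = Math.min(start + sliceSize, n);
    for (var i = start + 1; i < end; i++)
      phase1[i] = func(phase1[i - 1], elems[i]);
  }
  // Intermediates: running reduction of each slice's final element.
  var intermediates = [];
  var acc = phase1[Math.min(sliceSize, n) - 1];
  intermediates.push(acc);
  for (var s = 1; s < numSlices - 1; s++) {
    acc = func(acc, phase1[Math.min((s + 1) * sliceSize, n) - 1]);
    intermediates.push(acc);
  }
  // Phase 2: slice 0 is already final; each later slice folds in the
  // intermediate that summarizes everything before it.
  var result = phase1.slice();
  for (var s = 1; s < numSlices; s++) {
    var start = s * sliceSize, end = Math.min(start + sliceSize, n);
    for (var i = start; i < end; i++)
      result[i] = func(intermediates[s - 1], phase1[i]);
  }
  return result;
}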
|
||||
|
||||
/**
|
||||
* |scatter()| redistributes the elements in the array into a new array.
|
||||
*
|
||||
* - targets: The index targets[i] indicates where the ith element
|
||||
* should appear in the result.
|
||||
*
|
||||
* - defaultValue: what value to use for indices in the output array that
|
||||
* are never targeted.
|
||||
*
|
||||
* - conflictFunc: The conflict function. Used to resolve what
|
||||
* happens if two indices i and j in the source array are targeted
|
||||
* as the same destination (i.e., targets[i] === targets[j]), then
|
||||
* the final result is determined by applying func(targets[i],
|
||||
* targets[j]). If no conflict function is provided, it is an error
|
||||
* if targets[i] === targets[j].
|
||||
*
|
||||
* - length: length of the output array (if not specified, uses the
|
||||
* length of the input).
|
||||
*
|
||||
* - mode: internal debugging specification.
|
||||
*/
|
||||
function ArrayScatterPar(targets, defaultValue, conflictFunc, length, mode) {
|
||||
if (conflictFunc && !IsCallable(conflictFunc))
|
||||
ThrowError(JSMSG_NOT_FUNCTION, DecompileArg(2, conflictFunc));
|
||||
|
||||
var self = ToObject(this);
|
||||
|
||||
if (length === undefined)
|
||||
length = self.length;
|
||||
|
||||
var targetsLength = std_Math_min(targets.length, self.length);
|
||||
|
||||
if (!IS_UINT32(targetsLength) || !IS_UINT32(length))
|
||||
ThrowError(JSMSG_BAD_ARRAY_LENGTH);
|
||||
|
||||
// FIXME: Bug 965609: Find a better parallel strategy for scatter.
|
||||
|
||||
// Sequential fallback:
|
||||
ASSERT_SEQUENTIAL_IS_OK(mode);
|
||||
return seq();
|
||||
|
||||
function seq() {
|
||||
var buffer = NewDenseArray(length);
|
||||
var conflicts = NewDenseArray(length);
|
||||
|
||||
for (var i = 0; i < length; i++) {
|
||||
UnsafePutElements(buffer, i, defaultValue);
|
||||
UnsafePutElements(conflicts, i, false);
|
||||
}
|
||||
|
||||
for (var i = 0; i < targetsLength; i++) {
|
||||
var x = self[i];
|
||||
var t = checkTarget(i, targets[i]);
|
||||
if (conflicts[t])
|
||||
x = collide(x, buffer[t]);
|
||||
|
||||
UnsafePutElements(buffer, t, x, conflicts, t, true);
|
||||
}
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
function collide(elem1, elem2) {
|
||||
if (conflictFunc === undefined)
|
||||
ThrowError(JSMSG_PAR_ARRAY_SCATTER_CONFLICT);
|
||||
|
||||
return conflictFunc(elem1, elem2);
|
||||
}
|
||||
|
||||
function checkTarget(i, t) {
|
||||
if (TO_INT32(t) !== t)
|
||||
ThrowError(JSMSG_PAR_ARRAY_SCATTER_BAD_TARGET, i);
|
||||
|
||||
if (t < 0 || t >= length)
|
||||
ThrowError(JSMSG_PAR_ARRAY_SCATTER_BOUNDS);
|
||||
|
||||
// It's not enough to return t, as -0 | 0 === -0.
|
||||
return TO_INT32(t);
|
||||
}
|
||||
}
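// Plain-JS model of the sequential seq() path above (the name |scatterModel|
// is hypothetical and the integer/bounds checks from checkTarget() are
// omitted), followed by the histogram example that also appears in the
// tests removed by this patch.
function scatterModel(self, targets, defaultValue, conflictFunc, length) {
  if (length === undefined)
    length = self.length;
  var buffer = new Array(length).fill(defaultValue);
  var conflicts = new Array(length).fill(false);
  var targetsLength = Math.min(targets.length, self.length);
  for (var i = 0; i < targetsLength; i++) {
    var x = self[i];
    var t = targets[i];
    if (conflicts[t]) {
      if (conflictFunc === undefined)
        throw new TypeError("scatter conflict");   // stands in for JSMSG_PAR_ARRAY_SCATTER_CONFLICT
      x = conflictFunc(x, buffer[t]);
    }
    buffer[t] = x;
    conflicts[t] = true;
  }
  return buffer;
}
// scatterModel([1, 10, 100, 1000, 10000], [1, 1, 2, 1, 0], 0, (a, b) => a + b, 3)
//   ==> [10000, 1011, 100]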
|
||||
|
||||
/**
|
||||
* The filter operation applied in parallel.
|
||||
*/
|
||||
function ArrayFilterPar(func, mode) {
|
||||
if (!IsCallable(func))
|
||||
ThrowError(JSMSG_NOT_FUNCTION, DecompileArg(0, func));
|
||||
|
||||
var self = ToObject(this);
|
||||
var length = self.length;
|
||||
|
||||
parallel: for (;;) { // see ArrayMapPar() to explain why for(;;) etc
|
||||
if (ShouldForceSequential())
|
||||
break parallel;
|
||||
if (!TRY_PARALLEL(mode))
|
||||
break parallel;
|
||||
|
||||
var slicesInfo = ComputeSlicesInfo(length);
|
||||
|
||||
// Step 1. Compute which items from each slice of the result buffer should
|
||||
// be preserved. When we're done, we have a uint8 array |survivors|
|
||||
// containing 0 or 1 for each source element, indicating which members of
|
||||
// the chunk survived. We also keep an array |counts| containing the total
|
||||
// number of items that are being preserved from within one slice.
|
||||
var numSlices = slicesInfo.count;
|
||||
var counts = NewDenseArray(numSlices);
|
||||
for (var i = 0; i < numSlices; i++)
|
||||
UnsafePutElements(counts, i, 0);
|
||||
|
||||
var survivors = new Uint8Array(length);
|
||||
ForkJoin(findSurvivorsThread, 0, numSlices, ForkJoinMode(mode), survivors);
|
||||
|
||||
// Step 2. Compress the slices into one contiguous set.
|
||||
var count = 0;
|
||||
for (var i = 0; i < numSlices; i++)
|
||||
count += counts[i];
|
||||
var buffer = NewDenseArray(count);
|
||||
if (count > 0)
|
||||
ForkJoin(copySurvivorsThread, 0, numSlices, ForkJoinMode(mode), buffer);
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
// Sequential fallback:
|
||||
ASSERT_SEQUENTIAL_IS_OK(mode);
|
||||
var buffer = [];
|
||||
for (var i = 0; i < length; i++) {
|
||||
var elem = self[i];
|
||||
if (func(elem, i, self))
|
||||
ARRAY_PUSH(buffer, elem);
|
||||
}
|
||||
return buffer;
|
||||
|
||||
/**
|
||||
* As described above, our goal is to determine which items we will preserve
|
||||
 * from a given slice, storing a "to-keep" flag for each source element in
 * the |survivors| array.
|
||||
*/
|
||||
function findSurvivorsThread(workerId, sliceStart, sliceEnd) {
|
||||
var sliceShift = slicesInfo.shift;
|
||||
var sliceId;
|
||||
while (GET_SLICE(sliceStart, sliceEnd, sliceId)) {
|
||||
var count = 0;
|
||||
var indexStart = SLICE_START_INDEX(sliceShift, sliceId);
|
||||
var indexEnd = SLICE_END_INDEX(sliceShift, indexStart, length);
|
||||
for (var indexPos = indexStart; indexPos < indexEnd; indexPos++) {
|
||||
var keep = !!func(self[indexPos], indexPos, self);
|
||||
UnsafePutElements(survivors, indexPos, keep);
|
||||
count += keep;
|
||||
}
|
||||
UnsafePutElements(counts, sliceId, count);
|
||||
}
|
||||
return sliceId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Copies the survivors from this slice into the correct position. Note
|
||||
* that this is an idempotent operation that does not invoke user
|
||||
 * code. Therefore, we don't expect bailouts and make no effort to proceed
|
||||
* chunk by chunk or avoid duplicating work.
|
||||
*/
|
||||
function copySurvivorsThread(workerId, sliceStart, sliceEnd) {
|
||||
var sliceShift = slicesInfo.shift;
|
||||
var sliceId;
|
||||
while (GET_SLICE(sliceStart, sliceEnd, sliceId)) {
|
||||
// Total up the items preserved by previous slices.
|
||||
var total = 0;
|
||||
for (var i = 0; i < sliceId + 1; i++)
|
||||
total += counts[i];
|
||||
|
||||
// Are we done?
|
||||
var count = total - counts[sliceId];
|
||||
if (count === total)
|
||||
continue;
|
||||
|
||||
var indexStart = SLICE_START_INDEX(sliceShift, sliceId);
|
||||
var indexEnd = SLICE_END_INDEX(sliceShift, indexStart, length);
|
||||
for (var indexPos = indexStart; indexPos < indexEnd; indexPos++) {
|
||||
if (survivors[indexPos]) {
|
||||
UnsafePutElements(buffer, count++, self[indexPos]);
|
||||
if (count == total)
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return sliceId;
|
||||
}
|
||||
}
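// Plain-JS model of the two-step strategy described above (hypothetical
// helper, equally sized slices, no intrinsics): step 1 records a 0/1
// survivor flag per element plus a per-slice count; step 2 turns the counts
// into starting offsets and copies each survivor to its final position.
function filterParModel(elems, pred, numSlices) {
  var n = elems.length;
  var sliceSize = Math.ceil(n / numSlices);
  var survivors = new Uint8Array(n);
  var counts = new Array(numSlices).fill(0);
  for (var s = 0; s < numSlices; s++) {
    var start = s * sliceSize, end = Math.min(start + sliceSize, n);
    for (var i = start; i < end; i++) {
      var keep = !!pred(elems[i], i, elems);
      survivors[i] = keep;
      counts[s] += keep;
    }
  }
  var result = [];
  for (var s = 0; s < numSlices; s++) {
    var pos = 0;
    for (var j = 0; j < s; j++)
      pos += counts[j];                // offset where this slice's survivors land
    var start = s * sliceSize, end = Math.min(start + sliceSize, n);
    for (var i = start; i < end; i++)
      if (survivors[i])
        result[pos++] = elems[i];
  }
  return result;
}
// filterParModel([1, 2, 3, 4, 5, 6], x => x % 2 === 0, 3) ==> [2, 4, 6]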
|
||||
|
||||
/**
|
||||
* "Comprehension form": This is the function invoked for
|
||||
 * |Array.{build,buildPar}(len, fn)|. It creates a new array with length |len|
|
||||
* where index |i| is equal to |fn(i)|.
|
||||
*
|
||||
* The final |mode| argument is an internal argument used only during our
|
||||
* unit-testing.
|
||||
*/
|
||||
function ArrayStaticBuild(length, func) {
|
||||
if (!IS_UINT32(length))
|
||||
ThrowError(JSMSG_BAD_ARRAY_LENGTH);
|
||||
if (!IsCallable(func))
|
||||
ThrowError(JSMSG_NOT_FUNCTION, DecompileArg(1, func));
|
||||
|
||||
var buffer = NewDenseArray(length);
|
||||
|
||||
for (var i = 0; i < length; i++)
|
||||
UnsafePutElements(buffer, i, func(i));
|
||||
|
||||
return buffer;
|
||||
}
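// Usage sketch for the comprehension form described above (pre-removal API):
//
//   Array.build(5, i => i * i)      // ==> [0, 1, 4, 9, 16]
//   Array.buildPar(5, i => i * i)   // same result, attempted in parallel
//
// Both throw JSMSG_BAD_ARRAY_LENGTH for a non-uint32 |length| and
// JSMSG_NOT_FUNCTION when the callback is not callable.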
|
||||
|
||||
function ArrayStaticBuildPar(length, func, mode) {
|
||||
if (!IS_UINT32(length))
|
||||
ThrowError(JSMSG_BAD_ARRAY_LENGTH);
|
||||
if (!IsCallable(func))
|
||||
ThrowError(JSMSG_NOT_FUNCTION, DecompileArg(1, func));
|
||||
|
||||
var buffer = NewDenseArray(length);
|
||||
|
||||
parallel: for (;;) {
|
||||
if (ShouldForceSequential())
|
||||
break parallel;
|
||||
if (!TRY_PARALLEL(mode))
|
||||
break parallel;
|
||||
|
||||
var slicesInfo = ComputeSlicesInfo(length);
|
||||
ForkJoin(constructThread, 0, slicesInfo.count, ForkJoinMode(mode), buffer);
|
||||
return buffer;
|
||||
}
|
||||
|
||||
// Sequential fallback:
|
||||
ASSERT_SEQUENTIAL_IS_OK(mode);
|
||||
for (var i = 0; i < length; i++)
|
||||
UnsafePutElements(buffer, i, func(i));
|
||||
return buffer;
|
||||
|
||||
function constructThread(workerId, sliceStart, sliceEnd) {
|
||||
var sliceShift = slicesInfo.shift;
|
||||
var sliceId;
|
||||
while (GET_SLICE(sliceStart, sliceEnd, sliceId)) {
|
||||
var indexStart = SLICE_START_INDEX(sliceShift, sliceId);
|
||||
var indexEnd = SLICE_END_INDEX(sliceShift, indexStart, length);
|
||||
for (var i = indexStart; i < indexEnd; i++)
|
||||
UnsafePutElements(buffer, i, func(i));
|
||||
}
|
||||
return sliceId;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Mark the main operations as clone-at-callsite for better precision.
|
||||
* This is slightly overkill, as all that we really need is to
|
||||
* specialize to the receiver and the elemental function, but in
|
||||
* practice this is likely not so different, since element functions
|
||||
* are often used in exactly one place.
|
||||
*/
|
||||
SetScriptHints(ArrayMapPar, { cloneAtCallsite: true });
|
||||
SetScriptHints(ArrayReducePar, { cloneAtCallsite: true });
|
||||
SetScriptHints(ArrayScanPar, { cloneAtCallsite: true });
|
||||
SetScriptHints(ArrayScatterPar, { cloneAtCallsite: true });
|
||||
SetScriptHints(ArrayFilterPar, { cloneAtCallsite: true });
|
||||
SetScriptHints(ArrayStaticBuildPar, { cloneAtCallsite: true });
|
||||
|
||||
#endif /* ENABLE_PARALLEL_JS */
|
||||
|
@@ -1,72 +0,0 @@
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
// Shared utility functions and macros for parallel operations in `Array.js`
|
||||
// and `TypedObject.js`.
|
||||
|
||||
#ifdef ENABLE_PARALLEL_JS
|
||||
|
||||
/* Macros for the |mode| options object used by the parallel tests. */
|
||||
#define TRY_PARALLEL(MODE) \
|
||||
((!MODE || MODE.mode !== "seq"))
|
||||
#define ASSERT_SEQUENTIAL_IS_OK(MODE) \
|
||||
do { if (MODE) AssertSequentialIsOK(MODE) } while(false)
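// How these expand for the |mode| objects the parallel tests pass in (the
// values follow directly from the definitions above):
//
//   TRY_PARALLEL(undefined)         // true  - no mode given, the parallel path is attempted
//   TRY_PARALLEL({ mode: "par" })   // true
//   TRY_PARALLEL({ mode: "seq" })   // false - caller forces the sequential fallback
//
//   ASSERT_SEQUENTIAL_IS_OK({ mode: "par" })  // calls AssertSequentialIsOK(), which throws
//                                             // when parallel execution was expected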
|
||||
|
||||
/**
|
||||
* The ParallelSpew intrinsic is only defined in debug mode, so define a dummy
|
||||
* if debug is not on.
|
||||
*/
|
||||
#ifndef DEBUG
|
||||
#define ParallelSpew(args)
|
||||
#endif
|
||||
|
||||
#define MAX_SLICE_SHIFT 6
|
||||
#define MAX_SLICE_SIZE 64
|
||||
#define MAX_SLICES_PER_WORKER 8
|
||||
|
||||
/**
|
||||
* Macros to help compute the start and end indices of slices based on id. Use
|
||||
 * with the object returned by ComputeSlicesInfo.
|
||||
*/
|
||||
#define SLICE_START_INDEX(shift, id) \
|
||||
(id << shift)
|
||||
#define SLICE_END_INDEX(shift, start, length) \
|
||||
std_Math_min(start + (1 << shift), length)
|
||||
|
||||
/**
|
||||
* ForkJoinGetSlice acts as identity when we are not in a parallel section, so
|
||||
* pass in the next sequential value when we are in sequential mode. The
|
||||
* reason for this odd API is because intrinsics *need* to be called during
|
||||
* ForkJoin's warmup to fill the TI info.
|
||||
*/
|
||||
#define GET_SLICE(sliceStart, sliceEnd, id) \
|
||||
((id = ForkJoinGetSlice((InParallelSection() ? -1 : sliceStart++) | 0)) < sliceEnd)
|
||||
|
||||
/**
|
||||
* Determine the number and size of slices. The info object has the following
|
||||
* properties:
|
||||
*
|
||||
* - shift: amount to shift by to compute indices
|
||||
* - count: number of slices
|
||||
*/
|
||||
function ComputeSlicesInfo(length) {
|
||||
var count = length >>> MAX_SLICE_SHIFT;
|
||||
var numWorkers = ForkJoinNumWorkers();
|
||||
if (count < numWorkers)
|
||||
count = numWorkers;
|
||||
else if (count >= numWorkers * MAX_SLICES_PER_WORKER)
|
||||
count = numWorkers * MAX_SLICES_PER_WORKER;
|
||||
|
||||
// Round the slice size to be a power of 2.
|
||||
var shift = std_Math_max(std_Math_log2(length / count) | 0, 1);
|
||||
|
||||
// Recompute count with the rounded size.
|
||||
count = length >>> shift;
|
||||
if (count << shift !== length)
|
||||
count += 1;
|
||||
|
||||
return { shift: shift, count: count };
|
||||
}
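// Worked example, following the definitions above: with length = 1000 and
// ForkJoinNumWorkers() === 4,
//
//   count = 1000 >>> 6 = 15                  // MAX_SLICE_SHIFT = 6
//   4 <= 15 < 4 * MAX_SLICES_PER_WORKER      // estimate kept as-is
//   shift = max(log2(1000 / 15) | 0, 1) = 6
//   count = 1000 >>> 6 = 15; 15 << 6 = 960 !== 1000, so count = 16
//
// i.e. { shift: 6, count: 16 }: sixteen slices of 64 elements (the last one
// short), which SLICE_START_INDEX/SLICE_END_INDEX turn into the index
// ranges [id << 6, min((id + 1) << 6, 1000)).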
|
||||
|
||||
#endif // ENABLE_PARALLEL_JS
|
@@ -169,14 +169,6 @@ GetBuildConfiguration(JSContext *cx, unsigned argc, jsval *vp)
|
||||
if (!JS_SetProperty(cx, info, "oom-backtraces", value))
|
||||
return false;
|
||||
|
||||
#ifdef ENABLE_PARALLEL_JS
|
||||
value = BooleanValue(true);
|
||||
#else
|
||||
value = BooleanValue(false);
|
||||
#endif
|
||||
if (!JS_SetProperty(cx, info, "parallelJS", value))
|
||||
return false;
|
||||
|
||||
#ifdef ENABLE_BINARYDATA
|
||||
value = BooleanValue(true);
|
||||
#else
|
||||
|
@@ -505,9 +505,7 @@ const JSFunctionSpec ArrayMetaTypeDescr::typeObjectMethods[] = {
|
||||
JS_SELF_HOSTED_FN("toSource", "DescrToSource", 0, 0),
|
||||
{"equivalent", {nullptr, nullptr}, 1, 0, "TypeDescrEquivalent"},
|
||||
JS_SELF_HOSTED_FN("build", "TypedObjectArrayTypeBuild", 3, 0),
|
||||
JS_SELF_HOSTED_FN("buildPar", "TypedObjectArrayTypeBuildPar", 3, 0),
|
||||
JS_SELF_HOSTED_FN("from", "TypedObjectArrayTypeFrom", 3, 0),
|
||||
JS_SELF_HOSTED_FN("fromPar", "TypedObjectArrayTypeFromPar", 3, 0),
|
||||
JS_FS_END
|
||||
};
|
||||
|
||||
@@ -519,13 +517,8 @@ const JSFunctionSpec ArrayMetaTypeDescr::typedObjectMethods[] = {
|
||||
{"forEach", {nullptr, nullptr}, 1, 0, "ArrayForEach"},
|
||||
{"redimension", {nullptr, nullptr}, 1, 0, "TypedObjectArrayRedimension"},
|
||||
JS_SELF_HOSTED_FN("map", "TypedObjectArrayMap", 2, 0),
|
||||
JS_SELF_HOSTED_FN("mapPar", "TypedObjectArrayMapPar", 2, 0),
|
||||
JS_SELF_HOSTED_FN("reduce", "TypedObjectArrayReduce", 2, 0),
|
||||
JS_SELF_HOSTED_FN("reducePar", "TypedObjectArrayReducePar", 2, 0),
|
||||
JS_SELF_HOSTED_FN("scatter", "TypedObjectArrayScatter", 4, 0),
|
||||
JS_SELF_HOSTED_FN("scatterPar", "TypedObjectArrayScatterPar", 4, 0),
|
||||
JS_SELF_HOSTED_FN("filter", "TypedObjectArrayFilter", 1, 0),
|
||||
JS_SELF_HOSTED_FN("filterPar", "TypedObjectArrayFilterPar", 1, 0),
|
||||
JS_FS_END
|
||||
};
|
||||
|
||||
|
@@ -646,25 +646,6 @@ function TypedObjectArrayMap(a, b) {
|
||||
ThrowError(JSMSG_TYPEDOBJECT_BAD_ARGS);
|
||||
}
|
||||
|
||||
// Warning: user exposed!
|
||||
function TypedObjectArrayMapPar(a, b) {
|
||||
// Arguments: [depth], func
|
||||
|
||||
// Defer to the sequential variant for error cases or
|
||||
// when not working with typed objects.
|
||||
if (!IsObject(this) || !ObjectIsTypedObject(this))
|
||||
return callFunction(TypedObjectArrayMap, this, a, b);
|
||||
var thisType = TypedObjectTypeDescr(this);
|
||||
if (!TypeDescrIsArrayType(thisType))
|
||||
return callFunction(TypedObjectArrayMap, this, a, b);
|
||||
|
||||
if (typeof a === "number" && IsCallable(b))
|
||||
return MapTypedParImpl(this, a, thisType, b);
|
||||
else if (IsCallable(a))
|
||||
return MapTypedParImpl(this, 1, thisType, a);
|
||||
return callFunction(TypedObjectArrayMap, this, a, b);
|
||||
}
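// Usage sketch for the argument dispatch above (pre-removal API; the type
// and variable names are only examples): |mapPar| accepts either (func) or
// (depth, func).
//
//   var Row  = new TypedObject.ArrayType(TypedObject.int32, 4);
//   var Grid = new TypedObject.ArrayType(Row, 4);
//   var g = new Grid();
//   g.mapPar(row => row)       // depth defaults to 1: the callback sees each Row
//   g.mapPar(2, x => x + 1)    // explicit depth 2: the callback sees each int32
//
// Any other argument shape, or a receiver that is not a typed array object,
// falls through to the sequential TypedObjectArrayMap.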
|
||||
|
||||
// Warning: user exposed!
|
||||
function TypedObjectArrayReduce(a, b) {
|
||||
// Arguments: func, [initial]
|
||||
@@ -681,24 +662,6 @@ function TypedObjectArrayReduce(a, b) {
|
||||
return ReduceTypedSeqImpl(this, outputType, a, b);
|
||||
}
|
||||
|
||||
// Warning: user exposed!
|
||||
function TypedObjectArrayScatter(a, b, c, d) {
|
||||
// Arguments: outputArrayType, indices, defaultValue, conflictFunction
|
||||
if (!IsObject(this) || !ObjectIsTypedObject(this))
|
||||
ThrowError(JSMSG_TYPEDOBJECT_BAD_ARGS);
|
||||
var thisType = TypedObjectTypeDescr(this);
|
||||
if (!TypeDescrIsArrayType(thisType))
|
||||
ThrowError(JSMSG_TYPEDOBJECT_BAD_ARGS);
|
||||
|
||||
if (!IsObject(a) || !ObjectIsTypeDescr(a) || !TypeDescrIsArrayType(a))
|
||||
ThrowError(JSMSG_TYPEDOBJECT_BAD_ARGS);
|
||||
|
||||
if (d !== undefined && typeof d !== "function")
|
||||
ThrowError(JSMSG_TYPEDOBJECT_BAD_ARGS);
|
||||
|
||||
return ScatterTypedSeqImpl(this, a, b, c, d);
|
||||
}
|
||||
|
||||
// Warning: user exposed!
|
||||
function TypedObjectArrayFilter(func) {
|
||||
// Arguments: predicate
|
||||
@@ -714,47 +677,6 @@ function TypedObjectArrayFilter(func) {
|
||||
return FilterTypedSeqImpl(this, func);
|
||||
}
|
||||
|
||||
// placeholders
|
||||
|
||||
// Warning: user exposed!
|
||||
function TypedObjectArrayTypeBuildPar(a,b,c) {
|
||||
return callFunction(TypedObjectArrayTypeBuild, this, a, b, c);
|
||||
}
|
||||
|
||||
// Warning: user exposed!
|
||||
function TypedObjectArrayTypeFromPar(a,b,c) {
|
||||
// Arguments: arrayLike, [depth], func
|
||||
|
||||
// Use the sequential version for error cases or when arrayLike is
|
||||
// not a typed object.
|
||||
if (!IsObject(this) || !ObjectIsTypeDescr(this) || !TypeDescrIsArrayType(this))
|
||||
return callFunction(TypedObjectArrayTypeFrom, this, a, b, c);
|
||||
if (!IsObject(a) || !ObjectIsTypedObject(a))
|
||||
return callFunction(TypedObjectArrayTypeFrom, this, a, b, c);
|
||||
|
||||
// Detect whether an explicit depth is supplied.
|
||||
if (typeof b === "number" && IsCallable(c))
|
||||
return MapTypedParImpl(a, b, this, c);
|
||||
if (IsCallable(b))
|
||||
return MapTypedParImpl(a, 1, this, b);
|
||||
return callFunction(TypedObjectArrayTypeFrom, this, a, b, c);
|
||||
}
|
||||
|
||||
// Warning: user exposed!
|
||||
function TypedObjectArrayReducePar(a, b) {
|
||||
return callFunction(TypedObjectArrayReduce, this, a, b);
|
||||
}
|
||||
|
||||
// Warning: user exposed!
|
||||
function TypedObjectArrayScatterPar(a, b, c, d) {
|
||||
return callFunction(TypedObjectArrayScatter, this, a, b, c, d);
|
||||
}
|
||||
|
||||
// Warning: user exposed!
|
||||
function TypedObjectArrayFilterPar(func) {
|
||||
return callFunction(TypedObjectArrayFilter, this, func);
|
||||
}
|
||||
|
||||
// should eventually become macros
|
||||
function NUM_BYTES(bits) {
|
||||
return (bits + 7) >> 3;
|
||||
@@ -1025,208 +947,6 @@ function MapTypedSeqImpl(inArray, depth, outputType, func) {
|
||||
return DoMapTypedSeqDepthN();
|
||||
}
|
||||
|
||||
// Implements |map| and |from| methods for typed |inArray|.
|
||||
function MapTypedParImpl(inArray, depth, outputType, func) {
|
||||
assert(IsObject(outputType) && ObjectIsTypeDescr(outputType),
|
||||
"Map/From called on non-type-object outputType");
|
||||
assert(IsObject(inArray) && ObjectIsTypedObject(inArray),
|
||||
"Map/From called on non-object or untyped input array.");
|
||||
assert(TypeDescrIsArrayType(outputType),
|
||||
"Map/From called on non array-type outputType");
|
||||
assert(typeof depth === "number",
|
||||
"Map/From called with non-numeric depth");
|
||||
assert(IsCallable(func),
|
||||
"Map/From called on something not callable");
|
||||
|
||||
var inArrayType = TypeOfTypedObject(inArray);
|
||||
|
||||
if (ShouldForceSequential() ||
|
||||
depth <= 0 ||
|
||||
TO_INT32(depth) !== depth ||
|
||||
!TypeDescrIsArrayType(inArrayType) ||
|
||||
!TypeDescrIsArrayType(outputType))
|
||||
{
|
||||
// defer error cases to seq implementation:
|
||||
return MapTypedSeqImpl(inArray, depth, outputType, func);
|
||||
}
|
||||
|
||||
switch (depth) {
|
||||
case 1:
|
||||
return MapTypedParImplDepth1(inArray, inArrayType, outputType, func);
|
||||
default:
|
||||
return MapTypedSeqImpl(inArray, depth, outputType, func);
|
||||
}
|
||||
}
|
||||
|
||||
function RedirectPointer(typedObj, offset, outputIsScalar) {
|
||||
if (!outputIsScalar || !InParallelSection()) {
|
||||
// ^ Subtle note: always check InParallelSection() last, because
|
||||
// otherwise the other if conditions will not execute during
|
||||
// sequential mode and we will not gather enough type
|
||||
// information.
|
||||
|
||||
// Here `typedObj` represents the input or output pointer we will
|
||||
// pass to the user function. Ideally, we will just update the
|
||||
// offset of `typedObj` in place so that it moves along the
|
||||
// input/output buffer without incurring any allocation costs. But
|
||||
// we can only do this if these changes are invisible to the user.
|
||||
//
|
||||
// Under normal uses, such changes *should* be invisible -- the
|
||||
// in/out pointers are only intended to be used during the
|
||||
// callback and then discarded, but of course in the general case
|
||||
// nothing prevents them from escaping.
|
||||
//
|
||||
// However, if we are in parallel mode, we know that the pointers
|
||||
// will not escape into global state. They could still escape by
|
||||
// being returned into the resulting array, but even that avenue
|
||||
// is impossible if the result array cannot contain objects.
|
||||
//
|
||||
// Therefore, we reuse a pointer if we are both in parallel mode
|
||||
// and we have a transparent output type. It'd be nice to loosen
|
||||
// this condition later by using fancy ion optimizations that
|
||||
// assume the value won't escape and copy it if it does. But those
|
||||
// don't exist yet. Moreover, checking if the type is transparent
|
||||
// is an overapproximation: users can manually declare opaque
|
||||
// types that nonetheless only contain scalar data.
|
||||
|
||||
typedObj = NewDerivedTypedObject(TypedObjectTypeDescr(typedObj),
|
||||
typedObj, 0);
|
||||
}
|
||||
|
||||
SetTypedObjectOffset(typedObj, offset);
|
||||
return typedObj;
|
||||
}
|
||||
SetScriptHints(RedirectPointer, { inline: true });
|
||||
|
||||
function MapTypedParImplDepth1(inArray, inArrayType, outArrayType, func) {
|
||||
assert(IsObject(inArrayType) && ObjectIsTypeDescr(inArrayType) &&
|
||||
TypeDescrIsArrayType(inArrayType),
|
||||
"DoMapTypedParDepth1: invalid inArrayType");
|
||||
assert(IsObject(outArrayType) && ObjectIsTypeDescr(outArrayType) &&
|
||||
TypeDescrIsArrayType(outArrayType),
|
||||
"DoMapTypedParDepth1: invalid outArrayType");
|
||||
assert(IsObject(inArray) && ObjectIsTypedObject(inArray),
|
||||
"DoMapTypedParDepth1: invalid inArray");
|
||||
|
||||
if (!TypedObjectIsAttached(inArray))
|
||||
ThrowError(JSMSG_TYPEDOBJECT_HANDLE_UNATTACHED);
|
||||
|
||||
// Determine the grain types of the input and output.
|
||||
const inGrainType = inArrayType.elementType;
|
||||
const outGrainType = outArrayType.elementType;
|
||||
const inGrainTypeSize = DESCR_SIZE(inGrainType);
|
||||
const outGrainTypeSize = DESCR_SIZE(outGrainType);
|
||||
const inGrainTypeIsComplex = !TypeDescrIsSimpleType(inGrainType);
|
||||
const outGrainTypeIsComplex = !TypeDescrIsSimpleType(outGrainType);
|
||||
|
||||
const length = inArray.length;
|
||||
const mode = undefined;
|
||||
|
||||
const outArray = new outArrayType();
|
||||
if (length === 0)
|
||||
return outArray;
|
||||
|
||||
if (outArray.length != length)
|
||||
ThrowError(JSMSG_TYPEDOBJECT_ARRAYTYPE_BAD_ARGS);
|
||||
|
||||
const outGrainTypeIsTransparent = ObjectIsTransparentTypedObject(outArray);
|
||||
|
||||
// Construct the slices and initial pointers for each worker:
|
||||
const slicesInfo = ComputeSlicesInfo(length);
|
||||
const numWorkers = ForkJoinNumWorkers();
|
||||
assert(numWorkers > 0, "Should have at least the main thread");
|
||||
const pointers = [];
|
||||
for (var i = 0; i < numWorkers; i++) {
|
||||
const inTypedObject = TypedObjectGetDerivedIf(inGrainType, inArray, 0,
|
||||
inGrainTypeIsComplex);
|
||||
const outTypedObject = TypedObjectGetOpaqueIf(outGrainType, outArray, 0,
|
||||
outGrainTypeIsComplex);
|
||||
ARRAY_PUSH(pointers, ({ inTypedObject: inTypedObject,
|
||||
outTypedObject: outTypedObject }));
|
||||
}
|
||||
|
||||
// Below we will be adjusting offsets within the input to point at
|
||||
// successive entries; we'll need to know the offset of inArray
|
||||
// relative to its owner (which is often but not always 0).
|
||||
const inBaseOffset = TypedObjectByteOffset(inArray);
|
||||
|
||||
ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode), outArray);
|
||||
return outArray;
|
||||
|
||||
function mapThread(workerId, sliceStart, sliceEnd) {
|
||||
assert(TO_INT32(workerId) === workerId,
|
||||
"workerId not int: " + workerId);
|
||||
assert(workerId < pointers.length,
|
||||
"workerId too large: " + workerId + " >= " + pointers.length);
|
||||
|
||||
var pointerIndex = InParallelSection() ? workerId : 0;
|
||||
assert(!!pointers[pointerIndex],
|
||||
"no pointer data for workerId: " + workerId);
|
||||
|
||||
const { inTypedObject, outTypedObject } = pointers[pointerIndex];
|
||||
const sliceShift = slicesInfo.shift;
|
||||
var sliceId;
|
||||
|
||||
while (GET_SLICE(sliceStart, sliceEnd, sliceId)) {
|
||||
const indexStart = SLICE_START_INDEX(sliceShift, sliceId);
|
||||
const indexEnd = SLICE_END_INDEX(sliceShift, indexStart, length);
|
||||
|
||||
var inOffset = inBaseOffset + std_Math_imul(inGrainTypeSize, indexStart);
|
||||
var outOffset = std_Math_imul(outGrainTypeSize, indexStart);
|
||||
|
||||
// Set the target region so that user is only permitted to write
|
||||
// within the range set aside for this slice. This prevents user
|
||||
// from writing to typed objects that escaped from prior slices
|
||||
// during sequential iteration. Note that, for any particular
|
||||
// iteration of the loop below, it's only valid to write to the
|
||||
// memory range corresponding to the index `i` -- however, since
|
||||
// the different iterations cannot communicate typed object
|
||||
// pointers to one another during parallel exec, we need only
|
||||
// fear escaped typed objects from *other* slices, so we can
|
||||
// just set the target region once.
|
||||
const endOffset = std_Math_imul(outGrainTypeSize, indexEnd);
|
||||
SetForkJoinTargetRegion(outArray, outOffset, endOffset);
|
||||
|
||||
for (var i = indexStart; i < indexEnd; i++) {
|
||||
var inVal = (inGrainTypeIsComplex
|
||||
? RedirectPointer(inTypedObject, inOffset,
|
||||
outGrainTypeIsTransparent)
|
||||
: inArray[i]);
|
||||
var outVal = (outGrainTypeIsComplex
|
||||
? RedirectPointer(outTypedObject, outOffset,
|
||||
outGrainTypeIsTransparent)
|
||||
: undefined);
|
||||
const r = func(inVal, i, inArray, outVal);
|
||||
if (r !== undefined) {
|
||||
if (outGrainTypeIsComplex)
|
||||
SetTypedObjectValue(outGrainType, outArray, outOffset, r);
|
||||
else
|
||||
UnsafePutElements(outArray, i, r);
|
||||
}
|
||||
inOffset += inGrainTypeSize;
|
||||
outOffset += outGrainTypeSize;
|
||||
|
||||
#ifndef JSGC_FJGENERATIONAL
|
||||
// A transparent result type cannot contain references, and
|
||||
// hence there is no way for a pointer to a thread-local object
|
||||
// to escape.
|
||||
//
|
||||
// This has been disabled for the PJS generational collector
|
||||
// as it probably has little effect in that setting and adds
|
||||
// per-iteration cost.
|
||||
if (outGrainTypeIsTransparent)
|
||||
ClearThreadLocalArenas();
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
return sliceId;
|
||||
}
|
||||
|
||||
return undefined;
|
||||
}
|
||||
SetScriptHints(MapTypedParImplDepth1, { cloneAtCallsite: true });
|
||||
|
||||
function ReduceTypedSeqImpl(array, outputType, func, initial) {
|
||||
assert(IsObject(array) && ObjectIsTypedObject(array), "Reduce called on non-object or untyped input array.");
|
||||
assert(IsObject(outputType) && ObjectIsTypeDescr(outputType), "Reduce called on non-type-object outputType");
|
||||
@@ -1268,36 +988,6 @@ function ReduceTypedSeqImpl(array, outputType, func, initial) {
|
||||
return value;
|
||||
}
|
||||
|
||||
function ScatterTypedSeqImpl(array, outputType, indices, defaultValue, conflictFunc) {
|
||||
assert(IsObject(array) && ObjectIsTypedObject(array), "Scatter called on non-object or untyped input array.");
|
||||
assert(IsObject(outputType) && ObjectIsTypeDescr(outputType), "Scatter called on non-type-object outputType");
|
||||
assert(TypeDescrIsArrayType(outputType), "Scatter called on non-array type");
|
||||
assert(conflictFunc === undefined || typeof conflictFunc === "function", "Scatter called with invalid conflictFunc");
|
||||
|
||||
var result = new outputType();
|
||||
var bitvec = new Uint8Array(result.length);
|
||||
var elemType = outputType.elementType;
|
||||
var i, j;
|
||||
if (defaultValue !== elemType(undefined)) {
|
||||
for (i = 0; i < result.length; i++) {
|
||||
result[i] = defaultValue;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < indices.length; i++) {
|
||||
j = indices[i];
|
||||
if (!GET_BIT(bitvec, j)) {
|
||||
result[j] = array[i];
|
||||
SET_BIT(bitvec, j);
|
||||
} else if (conflictFunc === undefined) {
|
||||
ThrowError(JSMSG_PAR_ARRAY_SCATTER_CONFLICT);
|
||||
} else {
|
||||
result[j] = conflictFunc(result[j], elemType(array[i]));
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
function FilterTypedSeqImpl(array, func) {
|
||||
assert(IsObject(array) && ObjectIsTypedObject(array), "Filter called on non-object or untyped input array.");
|
||||
assert(typeof func === "function", "Filter called with non-function predicate");
|
||||
|
@@ -107,34 +107,3 @@ function ToLength(v) {
|
||||
function SameValueZero(x, y) {
|
||||
return x === y || (x !== x && y !== y);
|
||||
}
|
||||
|
||||
/********** Testing code **********/
|
||||
|
||||
#ifdef ENABLE_PARALLEL_JS
|
||||
|
||||
/**
|
||||
* Internal debugging tool: checks that the given `mode` permits
|
||||
* sequential execution
|
||||
*/
|
||||
function AssertSequentialIsOK(mode) {
|
||||
if (mode && mode.mode && mode.mode !== "seq" && ParallelTestsShouldPass())
|
||||
ThrowError(JSMSG_WRONG_VALUE, "parallel execution", "sequential was forced");
|
||||
}
|
||||
|
||||
function ForkJoinMode(mode) {
|
||||
// WARNING: this must match the enum ForkJoinMode in ForkJoin.cpp
|
||||
if (!mode || !mode.mode) {
|
||||
return 0;
|
||||
} else if (mode.mode === "compile") {
|
||||
return 1;
|
||||
} else if (mode.mode === "par") {
|
||||
return 2;
|
||||
} else if (mode.mode === "recover") {
|
||||
return 3;
|
||||
} else if (mode.mode === "bailout") {
|
||||
return 4;
|
||||
}
|
||||
ThrowError(JSMSG_PAR_ARRAY_BAD_ARG);
|
||||
}
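// How the *Par methods use this: their optional trailing |mode| argument is
// a test-only options object whose tag is translated into the integer that
// the ForkJoin() intrinsic expects, e.g.
//
//   ForkJoinMode(undefined)             // 0 (default behaviour)
//   ForkJoinMode({ mode: "compile" })   // 1
//   ForkJoinMode({ mode: "par" })       // 2
//   ForkJoinMode({ mode: "recover" })   // 3
//   ForkJoinMode({ mode: "bailout" })   // 4
//   ForkJoinMode({ mode: "bogus" })     // throws JSMSG_PAR_ARRAY_BAD_ARG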
|
||||
|
||||
#endif
|
||||
|
@@ -1,42 +0,0 @@
|
||||
/* -*- tab-width: 8; indent-tabs-mode: nil; js-indent-level: 2 -*-
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
/* Testing TypeDescrIsSizedArrayType() is fairly straightforward:
|
||||
* it's used early in the sequential scatter() method on TypedObject
|
||||
* arrays, and is applied to the first argument (not to "this").
|
||||
*
|
||||
* Run this with IONFLAGS=logs, generate pdfs with iongraph, and then
|
||||
* try running "pdfgrep TypeDescrIsSizedArrayType func*pass00*.pdf", this
|
||||
* might net a function that is a likely candidate for manual inspection.
|
||||
*
|
||||
* (It is sometimes useful to neuter the assert() macro in the
|
||||
* self-hosted code.)
|
||||
*/
|
||||
|
||||
if (!this.TypedObject) {
|
||||
print("No TypedObject, skipping");
|
||||
quit();
|
||||
}
|
||||
|
||||
var T = TypedObject;
|
||||
var IT = new T.ArrayType(T.int32, 100);
|
||||
var ix = IT.build(x => x == 0 ? 99 : x-1); // [99, 0, 1, ..., 98]
|
||||
|
||||
// This is a left-rotate by one place
|
||||
function check(v) {
|
||||
return v.scatter(IT, ix);
|
||||
}
|
||||
|
||||
function test() {
|
||||
var w = IT.build(x => x);
|
||||
for ( var i=0 ; i < 77 ; i++ )
|
||||
w = check(w);
|
||||
return w;
|
||||
}
|
||||
|
||||
w = test();
|
||||
assertEq(w.length, 100);
|
||||
assertEq(w[0], 77);
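// (check() rotates left by one place, so after 77 rotations of [0, 1, ..., 99]
// the element at index 0 is 77.)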
|
||||
print("Done");
|
@@ -444,12 +444,6 @@ MSG_DEF(JSMSG_SHARED_TYPED_ARRAY_BAD_ARGS, 0, JSEXN_RANGEERR, "bad combinatio
|
||||
MSG_DEF(JSMSG_SHARED_TYPED_ARRAY_ARG_RANGE, 1, JSEXN_RANGEERR, "argument {0} out of range")
|
||||
MSG_DEF(JSMSG_SHARED_TYPED_ARRAY_BAD_LENGTH, 0, JSEXN_TYPEERR, "length argument must not be an object")
|
||||
|
||||
// Parallel array
|
||||
MSG_DEF(JSMSG_PAR_ARRAY_BAD_ARG, 0, JSEXN_RANGEERR, "invalid parallel method argument")
|
||||
MSG_DEF(JSMSG_PAR_ARRAY_SCATTER_BAD_TARGET, 1, JSEXN_ERR, "target for index {0} is not an integer")
|
||||
MSG_DEF(JSMSG_PAR_ARRAY_SCATTER_BOUNDS,0, JSEXN_ERR, "index in scatter vector out of bounds")
|
||||
MSG_DEF(JSMSG_PAR_ARRAY_SCATTER_CONFLICT, 0, JSEXN_ERR, "no conflict resolution function provided")
|
||||
|
||||
// Reflect
|
||||
MSG_DEF(JSMSG_BAD_PARSE_NODE, 0, JSEXN_INTERNALERR, "bad parse node")
|
||||
|
||||
|
@@ -3209,15 +3209,6 @@ static const JSFunctionSpec array_methods[] = {
|
||||
JS_SELF_HOSTED_FN("some", "ArraySome", 1,0),
|
||||
JS_SELF_HOSTED_FN("every", "ArrayEvery", 1,0),
|
||||
|
||||
#ifdef ENABLE_PARALLEL_JS
|
||||
/* Parallelizable and pure methods. */
|
||||
JS_SELF_HOSTED_FN("mapPar", "ArrayMapPar", 2,0),
|
||||
JS_SELF_HOSTED_FN("reducePar", "ArrayReducePar", 2,0),
|
||||
JS_SELF_HOSTED_FN("scanPar", "ArrayScanPar", 2,0),
|
||||
JS_SELF_HOSTED_FN("scatterPar", "ArrayScatterPar", 5,0),
|
||||
JS_SELF_HOSTED_FN("filterPar", "ArrayFilterPar", 2,0),
|
||||
#endif
|
||||
|
||||
/* ES6 additions */
|
||||
JS_SELF_HOSTED_FN("find", "ArrayFind", 1,0),
|
||||
JS_SELF_HOSTED_FN("findIndex", "ArrayFindIndex", 1,0),
|
||||
@@ -3250,12 +3241,6 @@ static const JSFunctionSpec array_static_methods[] = {
|
||||
JS_SELF_HOSTED_FN("from", "ArrayFrom", 3,0),
|
||||
JS_FN("of", array_of, 0,0),
|
||||
|
||||
#ifdef ENABLE_PARALLEL_JS
|
||||
JS_SELF_HOSTED_FN("build", "ArrayStaticBuild", 2,0),
|
||||
/* Parallelizable and pure static methods. */
|
||||
JS_SELF_HOSTED_FN("buildPar", "ArrayStaticBuildPar", 3,0),
|
||||
#endif
|
||||
|
||||
JS_FS_END
|
||||
};
|
||||
|
||||
|
@@ -1,95 +0,0 @@
|
||||
// |reftest| skip-if(!this.hasOwnProperty("TypedObject"))
|
||||
var BUGNUMBER = 939715;
|
||||
var summary = 'method instance.scatter';
|
||||
|
||||
/*
|
||||
* Any copyright is dedicated to the Public Domain.
|
||||
* http://creativecommons.org/licenses/publicdomain/
|
||||
*/
|
||||
|
||||
var ArrayType = TypedObject.ArrayType;
|
||||
var StructType = TypedObject.StructType;
|
||||
var uint8 = TypedObject.uint8;
|
||||
var uint16 = TypedObject.uint16;
|
||||
var uint32 = TypedObject.uint32;
|
||||
var uint8Clamped = TypedObject.uint8Clamped;
|
||||
var int8 = TypedObject.int8;
|
||||
var int16 = TypedObject.int16;
|
||||
var int32 = TypedObject.int32;
|
||||
var float32 = TypedObject.float32;
|
||||
var float64 = TypedObject.float64;
|
||||
|
||||
function scatterUint8sPermute() {
|
||||
var uint8Array = uint8.array(5);
|
||||
var array = new uint8Array([124, 120, 122, 123, 121]);
|
||||
|
||||
var perm = array.scatter(uint8Array, [4, 0, 2, 3, 1]);
|
||||
assertTypedEqual(uint8Array, perm, [120, 121, 122, 123, 124]);
|
||||
}
|
||||
|
||||
function scatterUint8sPermuteIncomplete() {
|
||||
var uint8Array4 = uint8.array(4);
|
||||
var uint8Array5 = uint8.array(5);
|
||||
var array = new uint8Array4([124, 120, 122, 123]);
|
||||
|
||||
var perm;
|
||||
perm = array.scatter(uint8Array5, [4, 0, 2, 3]);
|
||||
assertTypedEqual(uint8Array5, perm, [120, 0, 122, 123, 124]);
|
||||
|
||||
perm = array.scatter(uint8Array5, [4, 0, 2, 3], 77);
|
||||
assertTypedEqual(uint8Array5, perm, [120, 77, 122, 123, 124]);
|
||||
}
|
||||
|
||||
function scatterUint8sHistogram() {
|
||||
var uint32Array5 = uint32.array(5);
|
||||
var uint32Array3 = uint32.array(3);
|
||||
var array = new uint32Array5([1, 10, 100, 1000, 10000]);
|
||||
|
||||
var hist = array.scatter(uint32Array3, [1, 1, 2, 1, 0], 0, (a,b) => a+b);
|
||||
assertTypedEqual(uint32Array3, hist, [10000, 1011, 100]);
|
||||
}
|
||||
|
||||
function scatterUint8sCollisionThrows() {
|
||||
var uint32Array5 = uint32.array(5);
|
||||
var uint32Array3 = uint32.array(3);
|
||||
var array = new uint32Array5([1, 10, 100, 1000, 10000]);
|
||||
|
||||
var unset_nonce = new Object();
|
||||
var unset = unset_nonce;
|
||||
try {
|
||||
unset = array.scatter(uint32Array3, [1, 1, 2, 1, 0], 0);
|
||||
} catch (e) {
|
||||
assertEq(unset, unset_nonce);
|
||||
}
|
||||
}
|
||||
|
||||
function scatterUint8sConflictIsAssocNonCommute() {
|
||||
var uint32Array5 = uint32.array(5);
|
||||
var uint32Array3 = uint32.array(3);
|
||||
var array = new uint32Array5([1, 10, 100, 1000, 10000]);
|
||||
|
||||
// FIXME strawman spec says conflict must be associative, but does
|
||||
// not dictate commutative. Yet, strawman spec does not appear to
|
||||
// specify operation order; must address incongruence.
|
||||
|
||||
var lfts = array.scatter(uint32Array3, [1, 1, 2, 1, 0], 0, (a,b) => a);
|
||||
assertTypedEqual(uint32Array3, lfts, [10000, 1, 100]);
|
||||
var rgts = array.scatter(uint32Array3, [1, 1, 2, 1, 0], 0, (a,b) => b);
|
||||
assertTypedEqual(uint32Array3, rgts, [10000, 1000, 100]);
|
||||
}
|
||||
|
||||
function runTests() {
|
||||
print(BUGNUMBER + ": " + summary);
|
||||
|
||||
scatterUint8sPermute();
|
||||
scatterUint8sPermuteIncomplete();
|
||||
scatterUint8sHistogram();
|
||||
scatterUint8sCollisionThrows();
|
||||
scatterUint8sConflictIsAssocNonCommute();
|
||||
|
||||
if (typeof reportCompare === "function")
|
||||
reportCompare(true, true);
|
||||
print("Tests complete");
|
||||
}
|
||||
|
||||
runTests();
|
@@ -304,122 +304,6 @@ intrinsic_SetScriptHints(JSContext *cx, unsigned argc, Value *vp)
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* Dump(val): Dumps a value for debugging, even in parallel mode.
|
||||
*/
|
||||
bool
|
||||
intrinsic_Dump(ThreadSafeContext *cx, unsigned argc, Value *vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
js_DumpValue(args[0]);
|
||||
if (args[0].isObject()) {
|
||||
fprintf(stderr, "\n");
|
||||
js_DumpObject(&args[0].toObject());
|
||||
}
|
||||
args.rval().setUndefined();
|
||||
return true;
|
||||
}
|
||||
|
||||
JS_JITINFO_NATIVE_PARALLEL_THREADSAFE(intrinsic_Dump_jitInfo, intrinsic_Dump_jitInfo,
|
||||
intrinsic_Dump);
|
||||
|
||||
bool
|
||||
intrinsic_ParallelSpew(ThreadSafeContext *cx, unsigned argc, Value *vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
MOZ_ASSERT(args.length() == 1);
|
||||
MOZ_ASSERT(args[0].isString());
|
||||
|
||||
AutoCheckCannotGC nogc;
|
||||
ScopedThreadSafeStringInspector inspector(args[0].toString());
|
||||
if (!inspector.ensureChars(cx, nogc))
|
||||
return false;
|
||||
|
||||
ScopedJSFreePtr<char> bytes;
|
||||
if (inspector.hasLatin1Chars())
|
||||
bytes = JS::CharsToNewUTF8CharsZ(cx, inspector.latin1Range()).c_str();
|
||||
else
|
||||
bytes = JS::CharsToNewUTF8CharsZ(cx, inspector.twoByteRange()).c_str();
|
||||
|
||||
parallel::Spew(parallel::SpewOps, bytes);
|
||||
|
||||
args.rval().setUndefined();
|
||||
return true;
|
||||
}
|
||||
|
||||
JS_JITINFO_NATIVE_PARALLEL_THREADSAFE(intrinsic_ParallelSpew_jitInfo, intrinsic_ParallelSpew_jitInfo,
|
||||
intrinsic_ParallelSpew);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* ForkJoin(func, sliceStart, sliceEnd, mode, updatable): Invokes |func| many times in parallel.
|
||||
*
|
||||
* If "func" will update a pre-existing object then that object /must/ be passed
|
||||
* as the object "updatable". It is /not/ correct to pass an object that
|
||||
* references the updatable objects indirectly.
|
||||
*
|
||||
* See ForkJoin.cpp for details and ParallelArray.js for examples.
|
||||
*/
|
||||
static bool
|
||||
intrinsic_ForkJoin(JSContext *cx, unsigned argc, Value *vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
return ForkJoin(cx, args);
|
||||
}
|
||||
|
||||
/*
|
||||
* ForkJoinWorkerNumWorkers(): Returns the number of workers in the fork join
|
||||
* thread pool, including the main thread.
|
||||
*/
|
||||
static bool
|
||||
intrinsic_ForkJoinNumWorkers(JSContext *cx, unsigned argc, Value *vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
args.rval().setInt32(cx->runtime()->threadPool.numWorkers());
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* ForkJoinGetSlice(id): Returns the id of the next slice to be worked
|
||||
* on.
|
||||
*
|
||||
* Acts as the identity function when called from outside of a ForkJoin
|
||||
* thread. This odd API is because intrinsics must be called during the
|
||||
* parallel warm up phase to populate observed type sets, so we must call it
|
||||
* even during sequential execution. But since there is no thread pool during
|
||||
* sequential execution, the selfhosted code is responsible for computing the
|
||||
* next sequential slice id and passing it in itself.
|
||||
*/
|
||||
bool
|
||||
js::intrinsic_ForkJoinGetSlice(JSContext *cx, unsigned argc, Value *vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
MOZ_ASSERT(args.length() == 1);
|
||||
MOZ_ASSERT(args[0].isInt32());
|
||||
args.rval().set(args[0]);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_ForkJoinGetSlicePar(ForkJoinContext *cx, unsigned argc, Value *vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
MOZ_ASSERT(args.length() == 1);
|
||||
MOZ_ASSERT(args[0].isInt32());
|
||||
|
||||
uint16_t sliceId;
|
||||
if (cx->getSlice(&sliceId))
|
||||
args.rval().setInt32(sliceId);
|
||||
else
|
||||
args.rval().setInt32(ThreadPool::MAX_SLICE_ID);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
JS_JITINFO_NATIVE_PARALLEL(intrinsic_ForkJoinGetSlice_jitInfo,
|
||||
intrinsic_ForkJoinGetSlicePar);
|
||||
|
||||
/*
|
||||
* NewDenseArray(length): Allocates and returns a new dense array with
|
||||
* the given length where all values are initialized to holes.
|
||||
@@ -896,59 +780,6 @@ intrinsic_IsWeakSet(JSContext *cx, unsigned argc, Value *vp)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* ParallelTestsShouldPass(): Returns false if we are running in a
|
||||
* mode (such as --ion-eager) that is known to cause additional
|
||||
* bailouts or disqualifications for parallel array tests.
|
||||
*
|
||||
* This is needed because the parallel tests generally assert that,
|
||||
* under normal conditions, they will run without bailouts or
|
||||
* compilation failures, but this does not hold under "stress-testing"
|
||||
* conditions like --ion-eager or --no-ti. However, running the tests
|
||||
* under those conditions HAS exposed bugs and thus we do not wish to
|
||||
* disable them entirely. Instead, we simply disable the assertions
|
||||
* that state that no bailouts etc should occur.
|
||||
*/
|
||||
static bool
|
||||
intrinsic_ParallelTestsShouldPass(JSContext *cx, unsigned argc, Value *vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
args.rval().setBoolean(ParallelTestsShouldPass(cx));
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* ShouldForceSequential(): Returns true if parallel ops should take
|
||||
* the sequential fallback path.
|
||||
*/
|
||||
bool
|
||||
js::intrinsic_ShouldForceSequential(JSContext *cx, unsigned argc, Value *vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
args.rval().setBoolean(cx->runtime()->forkJoinWarmup ||
|
||||
InParallelSection());
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
js::intrinsic_InParallelSection(JSContext *cx, unsigned argc, Value *vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
args.rval().setBoolean(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_InParallelSectionPar(ForkJoinContext *cx, unsigned argc, Value *vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
args.rval().setBoolean(true);
|
||||
return true;
|
||||
}
|
||||
|
||||
JS_JITINFO_NATIVE_PARALLEL(intrinsic_InParallelSection_jitInfo,
|
||||
intrinsic_InParallelSectionPar);
|
||||
|
||||
/* These wrappers are needed in order to recognize the function
|
||||
* pointers within the JIT, and the raw js:: functions can't be used
|
||||
* directly because they take a ThreadSafeContext* argument.
|
||||
@@ -1159,23 +990,7 @@ static const JSFunctionSpec intrinsic_functions[] = {
|
||||
|
||||
JS_FN("IsWeakSet", intrinsic_IsWeakSet, 1,0),
|
||||
|
||||
JS_FN("ForkJoin", intrinsic_ForkJoin, 5,0),
|
||||
JS_FN("ForkJoinNumWorkers", intrinsic_ForkJoinNumWorkers, 0,0),
|
||||
JS_FN("NewDenseArray", intrinsic_NewDenseArray, 1,0),
|
||||
JS_FN("ShouldForceSequential", intrinsic_ShouldForceSequential, 0,0),
|
||||
JS_FN("ParallelTestsShouldPass", intrinsic_ParallelTestsShouldPass, 0,0),
|
||||
JS_FNINFO("ClearThreadLocalArenas",
|
||||
intrinsic_ClearThreadLocalArenas,
|
||||
&intrinsic_ClearThreadLocalArenasInfo, 0,0),
|
||||
JS_FNINFO("SetForkJoinTargetRegion",
|
||||
intrinsic_SetForkJoinTargetRegion,
|
||||
&intrinsic_SetForkJoinTargetRegionInfo, 2, 0),
|
||||
JS_FNINFO("ForkJoinGetSlice",
|
||||
intrinsic_ForkJoinGetSlice,
|
||||
&intrinsic_ForkJoinGetSlice_jitInfo, 1, 0),
|
||||
JS_FNINFO("InParallelSection",
|
||||
intrinsic_InParallelSection,
|
||||
&intrinsic_InParallelSection_jitInfo, 0, 0),
|
||||
|
||||
// See builtin/TypedObject.h for descriptors of the typedobj functions.
|
||||
JS_FN("NewOpaqueTypedObject",
|
||||
@@ -1264,16 +1079,6 @@ static const JSFunctionSpec intrinsic_functions[] = {
|
||||
JS_FN("regexp_exec_no_statics", regexp_exec_no_statics, 2,0),
|
||||
JS_FN("regexp_test_no_statics", regexp_test_no_statics, 2,0),
|
||||
|
||||
#ifdef DEBUG
|
||||
JS_FNINFO("Dump",
|
||||
JSNativeThreadSafeWrapper<intrinsic_Dump>,
|
||||
&intrinsic_Dump_jitInfo, 1,0),
|
||||
|
||||
JS_FNINFO("ParallelSpew",
|
||||
JSNativeThreadSafeWrapper<intrinsic_ParallelSpew>,
|
||||
&intrinsic_ParallelSpew_jitInfo, 1,0),
|
||||
#endif
|
||||
|
||||
JS_FS_END
|
||||
};
|
||||
|
||||
|
@@ -35,7 +35,7 @@ namespace js {
|
||||
* Nightly) and without (all others). FIXME: Bug 1066322 - Enable ES6 symbols
|
||||
* in all builds.
|
||||
*/
|
||||
static const uint32_t XDR_BYTECODE_VERSION_SUBTRAHEND = 224;
|
||||
static const uint32_t XDR_BYTECODE_VERSION_SUBTRAHEND = 226;
|
||||
static_assert(XDR_BYTECODE_VERSION_SUBTRAHEND % 2 == 0, "see the comment above");
|
||||
static const uint32_t XDR_BYTECODE_VERSION =
|
||||
uint32_t(0xb973c0de - (XDR_BYTECODE_VERSION_SUBTRAHEND
|
||||
@@ -44,7 +44,7 @@ static const uint32_t XDR_BYTECODE_VERSION =
|
||||
#endif
|
||||
));
|
||||
|
||||
static_assert(JSErr_Limit == 369,
|
||||
static_assert(JSErr_Limit == 365,
|
||||
"GREETINGS, POTENTIAL SUBTRAHEND INCREMENTER! If you added or "
|
||||
"removed MSG_DEFs from js.msg, you should increment "
|
||||
"XDR_BYTECODE_VERSION_SUBTRAHEND and update this assertion's "
|
||||
|