Mirror of https://gitlab.winehq.org/wine/wine-gecko.git (synced 2024-09-13 09:24:08 -07:00)

Bug 1140752 - Land code to self-host %TypedArray%.prototype.set, but don't enable it yet, pending perf-testing. This shouldn't make any changes to the build, except in terms of adding more code to it and affecting binary layout. r=jandem, also much feedback from till that effectively amounts to a review as well

commit 3886e5a7ec
parent 029a9f89dc
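For reference, the user-visible behavior being self-hosted here is the two overloads of %TypedArray%.prototype.set. A minimal plain-JS sketch of what they do (illustrative only, not code from this patch):

```js
// set(typedArray[, offset]) and set(arrayLike[, offset]) both copy the
// source elements into the target starting at |offset|, and reject the
// call up front if the copy would run past the end of the target.
var target = new Uint8Array(8);

// Overload taking another typed array.
target.set(new Uint8Array([1, 2, 3]), 2);
// target is now [0, 0, 1, 2, 3, 0, 0, 0]

// Overload taking a plain array-like; each element goes through ToNumber
// and then the target's element conversion.
target.set([4.9, "5", true], 5);
// target is now [0, 0, 1, 2, 3, 4, 5, 1]

// Out-of-range requests throw before any element is written.
try {
  target.set([1, 2, 3], 7);  // 7 + 3 > 8
} catch (e) {
  // e is a RangeError; nothing was written to |target|.
}
```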
@@ -2,6 +2,8 @@
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "TypedObjectConstants.h"

// ES6 draft 20150304 %TypedArray%.prototype.copyWithin
function TypedArrayCopyWithin(target, start, end = undefined) {
    // This function is not generic.
@@ -649,6 +651,142 @@ function TypedArrayReverse() {
    return O;
}

function ViewedArrayBufferIfReified(tarray) {
    assert(IsTypedArray(tarray), "non-typed array asked for its buffer");

    var buf = UnsafeGetReservedSlot(tarray, JS_TYPEDARRAYLAYOUT_BUFFER_SLOT);
    assert(buf === null || (IsObject(buf) && IsArrayBuffer(buf)),
           "unexpected value in buffer slot");
    return buf;
}

function IsDetachedBuffer(buffer) {
    // Typed arrays whose buffers are null use inline storage and can't have
    // been neutered.
    if (buffer === null)
        return false;

    assert(IsArrayBuffer(buffer),
           "non-ArrayBuffer passed to IsDetachedBuffer");

    var flags = UnsafeGetInt32FromReservedSlot(buffer, JS_ARRAYBUFFER_FLAGS_SLOT);
    return (flags & JS_ARRAYBUFFER_NEUTERED_FLAG) !== 0;
}
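IsDetachedBuffer above is what lets the self-hosted code notice a target whose buffer has been neutered (in current terminology, detached). A plain-JS sketch of the observable effect, assuming an engine that exposes ArrayBuffer.prototype.transfer() to detach a buffer (a modern API; in 2015 detaching was only reachable through internal or embedding APIs):

```js
var buf = new ArrayBuffer(8);
var view = new Uint8Array(buf);

buf.transfer(); // detaches |buf|; |view| now has length 0 and no storage

try {
  view.set([1, 2, 3]);
} catch (e) {
  // The implementation in this patch reports the detached buffer with a
  // TypeError (JSMSG_TYPED_ARRAY_DETACHED); the exact error type may vary
  // by engine and spec revision.
}
```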

// ES6 draft 20150220 22.2.3.22.1 %TypedArray%.prototype.set(array [, offset])
function SetFromNonTypedArray(target, array, targetOffset, targetLength, targetBuffer) {
    assert(!IsPossiblyWrappedTypedArray(array),
           "typed arrays must be passed to SetFromTypedArray");

    // Steps 1-11 provided by caller.

    // Steps 16-17.
    var src = ToObject(array);

    // Steps 18-19.
    var srcLength = ToLength(src.length);

    // Step 20.
    var limitOffset = targetOffset + srcLength;
    if (limitOffset > targetLength)
        ThrowRangeError(JSMSG_BAD_INDEX);

    // Step 22.
    var k = 0;

    // Steps 12-15, 21, 23-24.
    while (targetOffset < limitOffset) {
        // Steps 24a-c.
        var kNumber = ToNumber(src[k]);

        // Step 24d. This explicit check will be unnecessary when we implement
        // throw-on-getting/setting-element-in-detached-buffer semantics.
        if (targetBuffer === null) {
            // A typed array previously using inline storage may acquire a
            // buffer, so we must check with the source.
            targetBuffer = ViewedArrayBufferIfReified(target);
        }
        if (IsDetachedBuffer(targetBuffer))
            ThrowTypeError(JSMSG_TYPED_ARRAY_DETACHED);

        // Step 24e.
        target[targetOffset] = kNumber;

        // Steps 24f-g.
        k++;
        targetOffset++;
    }

    // Step 25.
    return undefined;
}
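The loop in SetFromNonTypedArray applies ToNumber to each source element (steps 24a-c) before re-checking the target's buffer and writing. That per-element coercion is observable from script; a small plain-JS sketch using an array-like with side-effecting valueOf methods:

```js
var order = [];
var source = {
  length: 3,
  0: { valueOf() { order.push("a"); return 10; } },
  1: { valueOf() { order.push("b"); return 20; } },
  2: { valueOf() { order.push("c"); return 30; } },
};

var target = new Int32Array(4);
target.set(source, 1);

// order  is ["a", "b", "c"]   -- ToNumber ran once per element, in order
// target is [0, 10, 20, 30]   -- elements written at offsets 1..3
```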

// ES6 draft 20150220 22.2.3.22.2 %TypedArray%.prototype.set(typedArray [, offset])
function SetFromTypedArray(target, typedArray, targetOffset, targetLength) {
    assert(IsPossiblyWrappedTypedArray(typedArray),
           "only typed arrays may be passed to this method");

    // Steps 1-11 provided by caller.

    // Steps 12-24.
    var res = SetFromTypedArrayApproach(target, typedArray, targetOffset,
                                        targetLength | 0);
    assert(res === JS_SETTYPEDARRAY_SAME_TYPE ||
           res === JS_SETTYPEDARRAY_OVERLAPPING ||
           res === JS_SETTYPEDARRAY_DISJOINT,
           "intrinsic didn't return one of its enumerated return values");

    // If the elements had the same type, then SetFromTypedArrayApproach also
    // performed step 29.
    if (res == JS_SETTYPEDARRAY_SAME_TYPE)
        return undefined; // Step 25: done.

    // Otherwise, all checks and side effects except the actual element-writing
    // happened. Either we're assigning from one range to a non-overlapping
    // second range, or we're not.

    if (res === JS_SETTYPEDARRAY_DISJOINT) {
        SetDisjointTypedElements(target, targetOffset | 0, typedArray);
        return undefined; // Step 25: done.
    }

    // Now the hard case: overlapping memory ranges. Delegate to yet another
    // intrinsic.
    SetOverlappingTypedElements(target, targetOffset | 0, typedArray);

    // Step 25.
    return undefined;
}
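SetFromTypedArrayApproach classifies the copy as same-type, disjoint, or overlapping because both views can share one ArrayBuffer; an overlapping copy must behave as if the whole source were read before any element is written. A plain-JS sketch of the case the overlapping path has to get right:

```js
var buf = new ArrayBuffer(8);
var a = new Uint8Array(buf, 0, 6); // bytes 0..5
var b = new Uint8Array(buf, 2, 6); // bytes 2..7, overlapping |a|

a.set([1, 2, 3, 4, 5, 6]);
b.set(a); // source and destination ranges overlap in memory

// Required result (as if |a| were snapshotted first):
//   buffer bytes are [1, 2, 1, 2, 3, 4, 5, 6]
// A naive forward element-by-element copy would instead produce
//   [1, 2, 1, 2, 1, 2, 1, 2], because later reads would see earlier writes.
```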

// ES6 draft 20150304 %TypedArray%.prototype.set
function TypedArraySet(overloaded, offset) {
    // Steps 2-5, either algorithm.
    var target = this;
    if (!IsObject(target) || !IsTypedArray(target)) {
        return callFunction(CallTypedArrayMethodIfWrapped,
                            target, overloaded, offset, "TypedArraySet");
    }

    // Steps 6-8, either algorithm.
    var targetOffset = ToInteger(offset);
    if (targetOffset < 0)
        ThrowRangeError(JSMSG_TYPED_ARRAY_NEGATIVE_ARG, "2");

    // Steps 9-10.
    var targetBuffer = ViewedArrayBufferIfReified(target);
    if (IsDetachedBuffer(targetBuffer))
        ThrowTypeError(JSMSG_TYPED_ARRAY_DETACHED);

    // Step 11.
    var targetLength = TypedArrayLength(target);

    // Steps 12 et seq.
    if (IsPossiblyWrappedTypedArray(overloaded))
        return SetFromTypedArray(target, overloaded, targetOffset, targetLength);

    return SetFromNonTypedArray(target, overloaded, targetOffset, targetLength, targetBuffer);
}

// ES6 draft rev32 (2015-02-02) 22.2.3.23 %TypedArray%.prototype.slice(start, end).
function TypedArraySlice(start, end) {
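When the source and target element types differ, the intrinsic only classifies the ranges and the copy itself converts value by value (CopyValues in the C++ half of the patch). The conversion semantics are the ordinary typed-array element conversions, sketched here in plain JS:

```js
var src = new Float64Array([1.5, -1, 256.7, NaN]);

var i8 = new Int8Array(4);
i8.set(src);   // [1, -1, 0, 0]    -- truncate toward zero, then wrap mod 2^8

var u8 = new Uint8Array(4);
u8.set(src);   // [1, 255, 0, 0]   -- -1 wraps to 255; NaN becomes 0

var f32 = new Float32Array(4);
f32.set(src);  // [1.5, -1, ~256.70001, NaN] -- rounded to nearest float32
```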
@@ -9,6 +9,27 @@
#ifndef builtin_TypedObjectConstants_h
#define builtin_TypedObjectConstants_h

///////////////////////////////////////////////////////////////////////////
// Values to be returned by SetFromTypedArrayApproach

#define JS_SETTYPEDARRAY_SAME_TYPE   0
#define JS_SETTYPEDARRAY_OVERLAPPING 1
#define JS_SETTYPEDARRAY_DISJOINT    2

///////////////////////////////////////////////////////////////////////////
// Slots for objects using the typed array layout

#define JS_TYPEDARRAYLAYOUT_BUFFER_SLOT     0
#define JS_TYPEDARRAYLAYOUT_LENGTH_SLOT     1
#define JS_TYPEDARRAYLAYOUT_BYTEOFFSET_SLOT 2

///////////////////////////////////////////////////////////////////////////
// Slots and flags for ArrayBuffer objects

#define JS_ARRAYBUFFER_FLAGS_SLOT 3

#define JS_ARRAYBUFFER_NEUTERED_FLAG 0x4

///////////////////////////////////////////////////////////////////////////
// Slots for typed prototypes
@@ -4974,6 +4974,22 @@ CodeGenerator::visitTypedArrayElements(LTypedArrayElements* lir)
    masm.loadPtr(Address(obj, TypedArrayLayout::dataOffset()), out);
}

void
CodeGenerator::visitSetDisjointTypedElements(LSetDisjointTypedElements* lir)
{
    Register target = ToRegister(lir->target());
    Register targetOffset = ToRegister(lir->targetOffset());
    Register source = ToRegister(lir->source());

    Register temp = ToRegister(lir->temp());

    masm.setupUnalignedABICall(3, temp);
    masm.passABIArg(target);
    masm.passABIArg(targetOffset);
    masm.passABIArg(source);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::SetDisjointTypedElements));
}

void
CodeGenerator::visitTypedObjectDescr(LTypedObjectDescr* lir)
{
@@ -180,6 +180,7 @@ class CodeGenerator : public CodeGeneratorSpecific
    void visitSetArrayLength(LSetArrayLength* lir);
    void visitTypedArrayLength(LTypedArrayLength* lir);
    void visitTypedArrayElements(LTypedArrayElements* lir);
    void visitSetDisjointTypedElements(LSetDisjointTypedElements* lir);
    void visitTypedObjectElements(LTypedObjectElements* lir);
    void visitSetTypedObjectOffset(LSetTypedObjectOffset* lir);
    void visitTypedObjectDescr(LTypedObjectDescr* ins);
@@ -803,8 +803,12 @@ class IonBuilder
                                MIRType knownValueType);

    // TypedArray intrinsics.
    enum WrappingBehavior { AllowWrappedTypedArrays, RejectWrappedTypedArrays };
    InliningStatus inlineIsTypedArrayHelper(CallInfo& callInfo, WrappingBehavior wrappingBehavior);
    InliningStatus inlineIsTypedArray(CallInfo& callInfo);
    InliningStatus inlineIsPossiblyWrappedTypedArray(CallInfo& callInfo);
    InliningStatus inlineTypedArrayLength(CallInfo& callInfo);
    InliningStatus inlineSetDisjointTypedElements(CallInfo& callInfo);

    // TypedObject intrinsics and natives.
    InliningStatus inlineObjectIsTypeDescr(CallInfo& callInfo);
@ -4235,6 +4235,43 @@ class LTypedArrayElements : public LInstructionHelper<1, 1, 0>
|
||||
}
|
||||
};
|
||||
|
||||
// Assign
|
||||
//
|
||||
// target[targetOffset..targetOffset + source.length] = source[0..source.length]
|
||||
//
|
||||
// where the source element range doesn't overlap the target element range in
|
||||
// memory.
|
||||
class LSetDisjointTypedElements : public LCallInstructionHelper<0, 3, 1>
|
||||
{
|
||||
public:
|
||||
LIR_HEADER(SetDisjointTypedElements)
|
||||
|
||||
explicit LSetDisjointTypedElements(const LAllocation& target, const LAllocation& targetOffset,
|
||||
const LAllocation& source, const LDefinition& temp)
|
||||
{
|
||||
setOperand(0, target);
|
||||
setOperand(1, targetOffset);
|
||||
setOperand(2, source);
|
||||
setTemp(0, temp);
|
||||
}
|
||||
|
||||
const LAllocation* target() {
|
||||
return getOperand(0);
|
||||
}
|
||||
|
||||
const LAllocation* targetOffset() {
|
||||
return getOperand(1);
|
||||
}
|
||||
|
||||
const LAllocation* source() {
|
||||
return getOperand(2);
|
||||
}
|
||||
|
||||
const LDefinition* temp() {
|
||||
return getTemp(0);
|
||||
}
|
||||
};
|
||||
|
||||
// Load a typed object's descriptor.
|
||||
class LTypedObjectDescr : public LInstructionHelper<1, 1, 0>
|
||||
{
|
||||
|
@@ -284,6 +284,7 @@
    _(SetArrayLength)             \
    _(TypedArrayLength)           \
    _(TypedArrayElements)         \
    _(SetDisjointTypedElements)   \
    _(TypedObjectDescr)           \
    _(TypedObjectElements)        \
    _(SetTypedObjectOffset)       \
@ -2433,6 +2433,27 @@ LIRGenerator::visitTypedArrayElements(MTypedArrayElements* ins)
|
||||
define(new(alloc()) LTypedArrayElements(useRegisterAtStart(ins->object())), ins);
|
||||
}
|
||||
|
||||
void
|
||||
LIRGenerator::visitSetDisjointTypedElements(MSetDisjointTypedElements* ins)
|
||||
{
|
||||
MOZ_ASSERT(ins->type() == MIRType_None);
|
||||
|
||||
MDefinition* target = ins->target();
|
||||
MOZ_ASSERT(target->type() == MIRType_Object);
|
||||
|
||||
MDefinition* targetOffset = ins->targetOffset();
|
||||
MOZ_ASSERT(targetOffset->type() == MIRType_Int32);
|
||||
|
||||
MDefinition* source = ins->source();
|
||||
MOZ_ASSERT(source->type() == MIRType_Object);
|
||||
|
||||
auto lir = new(alloc()) LSetDisjointTypedElements(useRegister(target),
|
||||
useRegister(targetOffset),
|
||||
useRegister(source),
|
||||
temp());
|
||||
add(lir, ins);
|
||||
}
|
||||
|
||||
void
|
||||
LIRGenerator::visitTypedObjectDescr(MTypedObjectDescr* ins)
|
||||
{
|
||||
|
@@ -181,6 +181,7 @@ class LIRGenerator : public LIRGeneratorSpecific
    void visitSetArrayLength(MSetArrayLength* ins);
    void visitTypedArrayLength(MTypedArrayLength* ins);
    void visitTypedArrayElements(MTypedArrayElements* ins);
    void visitSetDisjointTypedElements(MSetDisjointTypedElements* ins);
    void visitTypedObjectElements(MTypedObjectElements* ins);
    void visitSetTypedObjectOffset(MSetTypedObjectOffset* ins);
    void visitTypedObjectDescr(MTypedObjectDescr* ins);
@ -217,8 +217,12 @@ IonBuilder::inlineNativeCall(CallInfo& callInfo, JSFunction* target)
|
||||
// TypedArray intrinsics.
|
||||
if (native == intrinsic_IsTypedArray)
|
||||
return inlineIsTypedArray(callInfo);
|
||||
if (native == intrinsic_IsPossiblyWrappedTypedArray)
|
||||
return inlineIsPossiblyWrappedTypedArray(callInfo);
|
||||
if (native == intrinsic_TypedArrayLength)
|
||||
return inlineTypedArrayLength(callInfo);
|
||||
if (native == intrinsic_SetDisjointTypedElements)
|
||||
return inlineSetDisjointTypedElements(callInfo);
|
||||
|
||||
// TypedObject intrinsics.
|
||||
if (native == js::ObjectIsTypedObject)
|
||||
@ -2219,10 +2223,11 @@ IonBuilder::inlineHasClass(CallInfo& callInfo,
|
||||
}
|
||||
|
||||
IonBuilder::InliningStatus
|
||||
IonBuilder::inlineIsTypedArray(CallInfo& callInfo)
|
||||
IonBuilder::inlineIsTypedArrayHelper(CallInfo& callInfo, WrappingBehavior wrappingBehavior)
|
||||
{
|
||||
MOZ_ASSERT(!callInfo.constructing());
|
||||
MOZ_ASSERT(callInfo.argc() == 1);
|
||||
|
||||
if (callInfo.getArg(0)->type() != MIRType_Object)
|
||||
return InliningStatus_NotInlined;
|
||||
if (getInlineReturnType() != MIRType_Boolean)
|
||||
@ -2238,12 +2243,22 @@ IonBuilder::inlineIsTypedArray(CallInfo& callInfo)
|
||||
bool result = false;
|
||||
switch (types->forAllClasses(constraints(), IsTypedArrayClass)) {
|
||||
case TemporaryTypeSet::ForAllResult::ALL_FALSE:
|
||||
case TemporaryTypeSet::ForAllResult::EMPTY:
|
||||
case TemporaryTypeSet::ForAllResult::EMPTY: {
|
||||
// Wrapped typed arrays won't appear to be typed arrays per a
|
||||
// |forAllClasses| query. If wrapped typed arrays are to be considered
|
||||
// typed arrays, a negative answer is not conclusive. Don't inline in
|
||||
// that case.
|
||||
if (wrappingBehavior == AllowWrappedTypedArrays)
|
||||
return InliningStatus_NotInlined;
|
||||
|
||||
result = false;
|
||||
break;
|
||||
}
|
||||
|
||||
case TemporaryTypeSet::ForAllResult::ALL_TRUE:
|
||||
result = true;
|
||||
break;
|
||||
|
||||
case TemporaryTypeSet::ForAllResult::MIXED:
|
||||
return InliningStatus_NotInlined;
|
||||
}
|
||||
@ -2254,6 +2269,18 @@ IonBuilder::inlineIsTypedArray(CallInfo& callInfo)
|
||||
return InliningStatus_Inlined;
|
||||
}
|
||||
|
||||
IonBuilder::InliningStatus
|
||||
IonBuilder::inlineIsTypedArray(CallInfo& callInfo)
|
||||
{
|
||||
return inlineIsTypedArrayHelper(callInfo, RejectWrappedTypedArrays);
|
||||
}
|
||||
|
||||
IonBuilder::InliningStatus
|
||||
IonBuilder::inlineIsPossiblyWrappedTypedArray(CallInfo& callInfo)
|
||||
{
|
||||
return inlineIsTypedArrayHelper(callInfo, AllowWrappedTypedArrays);
|
||||
}
|
||||
|
||||
IonBuilder::InliningStatus
|
||||
IonBuilder::inlineTypedArrayLength(CallInfo& callInfo)
|
||||
{
|
||||
@ -2274,6 +2301,58 @@ IonBuilder::inlineTypedArrayLength(CallInfo& callInfo)
|
||||
return InliningStatus_Inlined;
|
||||
}
|
||||
|
||||
IonBuilder::InliningStatus
|
||||
IonBuilder::inlineSetDisjointTypedElements(CallInfo& callInfo)
|
||||
{
|
||||
MOZ_ASSERT(!callInfo.constructing());
|
||||
MOZ_ASSERT(callInfo.argc() == 3);
|
||||
|
||||
// Initial argument requirements.
|
||||
|
||||
MDefinition* target = callInfo.getArg(0);
|
||||
if (target->type() != MIRType_Object)
|
||||
return InliningStatus_NotInlined;
|
||||
|
||||
if (getInlineReturnType() != MIRType_Undefined)
|
||||
return InliningStatus_NotInlined;
|
||||
|
||||
MDefinition* targetOffset = callInfo.getArg(1);
|
||||
MOZ_ASSERT(targetOffset->type() == MIRType_Int32);
|
||||
|
||||
MDefinition* sourceTypedArray = callInfo.getArg(2);
|
||||
if (sourceTypedArray->type() != MIRType_Object)
|
||||
return InliningStatus_NotInlined;
|
||||
|
||||
// Only attempt to optimize if |target| and |sourceTypedArray| are both
|
||||
// definitely typed arrays. (The former always is. The latter is not,
|
||||
// necessarily, because of wrappers.)
|
||||
|
||||
MDefinition* arrays[] = { target, sourceTypedArray };
|
||||
|
||||
for (MDefinition* def : arrays) {
|
||||
TemporaryTypeSet* types = def->resultTypeSet();
|
||||
if (!types)
|
||||
return InliningStatus_NotInlined;
|
||||
|
||||
if (types->forAllClasses(constraints(), IsTypedArrayClass) !=
|
||||
TemporaryTypeSet::ForAllResult::ALL_TRUE)
|
||||
{
|
||||
return InliningStatus_NotInlined;
|
||||
}
|
||||
}
|
||||
|
||||
auto sets = MSetDisjointTypedElements::New(alloc(), target, targetOffset, sourceTypedArray);
|
||||
current->add(sets);
|
||||
|
||||
pushConstant(UndefinedValue());
|
||||
|
||||
if (!resumeAfter(sets))
|
||||
return InliningStatus_Error;
|
||||
|
||||
callInfo.setImplicitlyUsedUnchecked();
|
||||
return InliningStatus_Inlined;
|
||||
}
|
||||
|
||||
IonBuilder::InliningStatus
|
||||
IonBuilder::inlineObjectIsTypeDescr(CallInfo& callInfo)
|
||||
{
|
||||
|
@ -7944,6 +7944,49 @@ class MTypedArrayElements
|
||||
ALLOW_CLONE(MTypedArrayElements)
|
||||
};
|
||||
|
||||
class MSetDisjointTypedElements
|
||||
: public MTernaryInstruction,
|
||||
public NoTypePolicy::Data
|
||||
{
|
||||
explicit MSetDisjointTypedElements(MDefinition* target, MDefinition* targetOffset,
|
||||
MDefinition* source)
|
||||
: MTernaryInstruction(target, targetOffset, source)
|
||||
{
|
||||
MOZ_ASSERT(target->type() == MIRType_Object);
|
||||
MOZ_ASSERT(targetOffset->type() == MIRType_Int32);
|
||||
MOZ_ASSERT(source->type() == MIRType_Object);
|
||||
setResultType(MIRType_None);
|
||||
}
|
||||
|
||||
public:
|
||||
INSTRUCTION_HEADER(SetDisjointTypedElements)
|
||||
|
||||
static MSetDisjointTypedElements*
|
||||
New(TempAllocator& alloc, MDefinition* target, MDefinition* targetOffset,
|
||||
MDefinition* source)
|
||||
{
|
||||
return new(alloc) MSetDisjointTypedElements(target, targetOffset, source);
|
||||
}
|
||||
|
||||
MDefinition* target() const {
|
||||
return getOperand(0);
|
||||
}
|
||||
|
||||
MDefinition* targetOffset() const {
|
||||
return getOperand(1);
|
||||
}
|
||||
|
||||
MDefinition* source() const {
|
||||
return getOperand(2);
|
||||
}
|
||||
|
||||
AliasSet getAliasSet() const override {
|
||||
return AliasSet::Store(AliasSet::UnboxedElement);
|
||||
}
|
||||
|
||||
ALLOW_CLONE(MSetDisjointTypedElements)
|
||||
};
|
||||
|
||||
// Load a binary data object's "elements", which is just its opaque
|
||||
// binary data space. Eventually this should probably be
|
||||
// unified with `MTypedArrayElements`.
|
||||
|
@@ -172,6 +172,7 @@ namespace jit {
    _(SetArrayLength)             \
    _(TypedArrayLength)           \
    _(TypedArrayElements)         \
    _(SetDisjointTypedElements)   \
    _(TypedObjectDescr)           \
    _(TypedObjectElements)        \
    _(SetTypedObjectOffset)       \
@ -840,13 +840,19 @@ bool intrinsic_IsSuspendedStarGenerator(JSContext* cx, unsigned argc, Value* vp)
|
||||
bool intrinsic_IsArrayIterator(JSContext* cx, unsigned argc, Value* vp);
|
||||
bool intrinsic_IsStringIterator(JSContext* cx, unsigned argc, Value* vp);
|
||||
|
||||
bool intrinsic_IsArrayBuffer(JSContext* cx, unsigned argc, Value* vp);
|
||||
|
||||
bool intrinsic_IsTypedArray(JSContext* cx, unsigned argc, Value* vp);
|
||||
bool intrinsic_IsPossiblyWrappedTypedArray(JSContext* cx, unsigned argc, Value* vp);
|
||||
bool intrinsic_TypedArrayBuffer(JSContext* cx, unsigned argc, Value* vp);
|
||||
bool intrinsic_TypedArrayByteOffset(JSContext* cx, unsigned argc, Value* vp);
|
||||
bool intrinsic_TypedArrayElementShift(JSContext* cx, unsigned argc, Value* vp);
|
||||
bool intrinsic_TypedArrayLength(JSContext* cx, unsigned argc, Value* vp);
|
||||
|
||||
bool intrinsic_MoveTypedArrayElements(JSContext* cx, unsigned argc, Value* vp);
|
||||
bool intrinsic_SetFromTypedArrayApproach(JSContext* cx, unsigned argc, Value* vp);
|
||||
bool intrinsic_SetDisjointTypedElements(JSContext* cx, unsigned argc, Value* vp);
|
||||
bool intrinsic_SetOverlappingTypedElements(JSContext* cx, unsigned argc, Value* vp);
|
||||
|
||||
class AutoLockForExclusiveAccess
|
||||
{
|
||||
|
@ -1433,8 +1433,9 @@ GetSCOffset(JSStructuredCloneWriter* writer);
|
||||
|
||||
namespace Scalar {
|
||||
|
||||
/* Scalar types which can appear in typed arrays and typed objects. The enum
|
||||
* values need to be kept in sync with the JS_SCALARTYPEREPR_ constants, as
|
||||
/*
|
||||
* Scalar types that can appear in typed arrays and typed objects. The enum
|
||||
* values must to be kept in sync with the JS_SCALARTYPEREPR_ constants, as
|
||||
* well as the TypedArrayObject::classes and TypedArrayObject::protoClasses
|
||||
* definitions.
|
||||
*/
|
||||
|
@ -118,6 +118,10 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared
|
||||
|
||||
static const size_t ARRAY_BUFFER_ALIGNMENT = 8;
|
||||
|
||||
static_assert(FLAGS_SLOT == JS_ARRAYBUFFER_FLAGS_SLOT,
|
||||
"self-hosted code with burned-in constants must get the "
|
||||
"right flags slot");
|
||||
|
||||
public:
|
||||
|
||||
enum OwnsState {
|
||||
@ -163,6 +167,9 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared
|
||||
TYPED_OBJECT_VIEWS = 0x20
|
||||
};
|
||||
|
||||
static_assert(JS_ARRAYBUFFER_NEUTERED_FLAG == NEUTERED,
|
||||
"self-hosted code with burned-in constants must use the "
|
||||
"correct NEUTERED bit value");
|
||||
public:
|
||||
|
||||
class BufferContents {
|
||||
|
@ -6,6 +6,7 @@
|
||||
|
||||
#include "vm/SelfHosting.h"
|
||||
|
||||
#include "mozilla/ArrayUtils.h"
|
||||
#include "mozilla/Casting.h"
|
||||
#include "mozilla/DebugOnly.h"
|
||||
|
||||
@ -42,6 +43,8 @@ using namespace js;
|
||||
using namespace js::selfhosted;
|
||||
|
||||
using JS::AutoCheckCannotGC;
|
||||
using mozilla::IsInRange;
|
||||
using mozilla::PodMove;
|
||||
using mozilla::UniquePtr;
|
||||
|
||||
static void
|
||||
@ -670,6 +673,17 @@ intrinsic_GeneratorSetClosed(JSContext* cx, unsigned argc, Value* vp)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
js::intrinsic_IsArrayBuffer(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
MOZ_ASSERT(args.length() == 1);
|
||||
MOZ_ASSERT(args[0].isObject());
|
||||
|
||||
args.rval().setBoolean(args[0].toObject().is<ArrayBufferObject>());
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
js::intrinsic_IsTypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
@ -681,6 +695,27 @@ js::intrinsic_IsTypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
js::intrinsic_IsPossiblyWrappedTypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
MOZ_ASSERT(args.length() == 1);
|
||||
|
||||
bool isTypedArray = false;
|
||||
if (args[0].isObject()) {
|
||||
JSObject* obj = CheckedUnwrap(&args[0].toObject());
|
||||
if (!obj) {
|
||||
JS_ReportError(cx, "Permission denied to access object");
|
||||
return false;
|
||||
}
|
||||
|
||||
isTypedArray = obj->is<TypedArrayObject>();
|
||||
}
|
||||
|
||||
args.rval().setBoolean(isTypedArray);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
js::intrinsic_TypedArrayBuffer(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
@ -779,7 +814,362 @@ js::intrinsic_MoveTypedArrayElements(JSContext* cx, unsigned argc, Value* vp)
|
||||
#endif
|
||||
|
||||
uint8_t* data = static_cast<uint8_t*>(tarray->viewData());
|
||||
mozilla::PodMove(&data[byteDest], &data[byteSrc], byteSize);
|
||||
PodMove(&data[byteDest], &data[byteSrc], byteSize);
|
||||
|
||||
args.rval().setUndefined();
|
||||
return true;
|
||||
}
|
||||
|
||||
// Extract the TypedArrayObject* underlying |obj| and return it. This method,
|
||||
// in a TOTALLY UNSAFE manner, completely violates the normal compartment
|
||||
// boundaries, returning an object not necessarily in the current compartment
|
||||
// or in |obj|'s compartment.
|
||||
//
|
||||
// All callers of this method are expected to sigil this TypedArrayObject*, and
|
||||
// all values and information derived from it, with an "unsafe" prefix, to
|
||||
// indicate the extreme caution required when dealing with such values.
|
||||
//
|
||||
// If calling code discipline ever fails to be maintained, it's gonna have a
|
||||
// bad time.
|
||||
static TypedArrayObject*
|
||||
DangerouslyUnwrapTypedArray(JSContext* cx, JSObject* obj)
|
||||
{
|
||||
// An unwrapped pointer to an object potentially on the other side of a
|
||||
// compartment boundary! Isn't this such fun?
|
||||
JSObject* unwrapped = CheckedUnwrap(obj);
|
||||
if (!unwrapped->is<TypedArrayObject>()) {
|
||||
// By *appearances* this can't happen, as self-hosted TypedArraySet
|
||||
// checked this. But. Who's to say a GC couldn't happen between
|
||||
// the check that this value was a typed array, and this extraction
|
||||
// occurring? A GC might turn a cross-compartment wrapper |obj| into
|
||||
// |unwrapped == obj|, a dead object no longer connected its typed
|
||||
// array.
|
||||
//
|
||||
// Yeah, yeah, it's pretty unlikely. Are you willing to stake a
|
||||
// sec-critical bug on that assessment, now and forever, against
|
||||
// all changes those pesky GC and JIT people might make?
|
||||
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_DEAD_OBJECT);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Be super-duper careful using this, as we've just punched through
|
||||
// the compartment boundary, and things like buffer() on this aren't
|
||||
// same-compartment with anything else in the calling method.
|
||||
return &unwrapped->as<TypedArrayObject>();
|
||||
}
|
||||
|
||||
// ES6 draft 20150403 22.2.3.22.2, steps 12-24, 29.
|
||||
bool
|
||||
js::intrinsic_SetFromTypedArrayApproach(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
MOZ_ASSERT(args.length() == 4);
|
||||
|
||||
Rooted<TypedArrayObject*> target(cx, &args[0].toObject().as<TypedArrayObject>());
|
||||
MOZ_ASSERT(!target->hasBuffer() || !target->buffer()->isNeutered(),
|
||||
"something should have defended against a neutered target");
|
||||
|
||||
// As directed by |DangerouslyUnwrapTypedArray|, sigil this pointer and all
|
||||
// variables derived from it to counsel extreme caution here.
|
||||
Rooted<TypedArrayObject*> unsafeTypedArrayCrossCompartment(cx);
|
||||
unsafeTypedArrayCrossCompartment = DangerouslyUnwrapTypedArray(cx, &args[1].toObject());
|
||||
if (!unsafeTypedArrayCrossCompartment)
|
||||
return false;
|
||||
|
||||
double doubleTargetOffset = args[2].toNumber();
|
||||
MOZ_ASSERT(doubleTargetOffset >= 0, "caller failed to ensure |targetOffset >= 0|");
|
||||
|
||||
uint32_t targetLength = uint32_t(args[3].toInt32());
|
||||
|
||||
// Handle all checks preceding the actual element-setting. A visual skim
|
||||
// of 22.2.3.22.2 should confirm these are the only steps after steps 1-11
|
||||
// that might abort processing (other than for reason of internal error.)
|
||||
|
||||
// Steps 12-13.
|
||||
if (unsafeTypedArrayCrossCompartment->hasBuffer() &&
|
||||
unsafeTypedArrayCrossCompartment->buffer()->isNeutered())
|
||||
{
|
||||
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_DETACHED);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Steps 21, 23.
|
||||
uint32_t unsafeSrcLengthCrossCompartment = unsafeTypedArrayCrossCompartment->length();
|
||||
if (unsafeSrcLengthCrossCompartment + doubleTargetOffset > targetLength) {
|
||||
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Now that that's confirmed, we can use |targetOffset| of a sane type.
|
||||
uint32_t targetOffset = uint32_t(doubleTargetOffset);
|
||||
|
||||
// The remaining steps are unobservable *except* through their effect on
|
||||
// which elements are copied and how.
|
||||
|
||||
Scalar::Type targetType = target->type();
|
||||
Scalar::Type unsafeSrcTypeCrossCompartment = unsafeTypedArrayCrossCompartment->type();
|
||||
|
||||
size_t targetElementSize = TypedArrayElemSize(targetType);
|
||||
uint8_t* targetData =
|
||||
static_cast<uint8_t*>(target->viewData()) + targetOffset * targetElementSize;
|
||||
|
||||
uint8_t* unsafeSrcDataCrossCompartment =
|
||||
static_cast<uint8_t*>(unsafeTypedArrayCrossCompartment->viewData());
|
||||
|
||||
uint32_t unsafeSrcElementSizeCrossCompartment =
|
||||
TypedArrayElemSize(unsafeSrcTypeCrossCompartment);
|
||||
uint32_t unsafeSrcByteLengthCrossCompartment =
|
||||
unsafeSrcLengthCrossCompartment * unsafeSrcElementSizeCrossCompartment;
|
||||
|
||||
// Step 29.
|
||||
//
|
||||
// The same-type case requires exact copying preserving the bit-level
|
||||
// encoding of the source data, so move the values. (We could PodCopy if
|
||||
// we knew the buffers differed, but it's doubtful the work to check
|
||||
// wouldn't swap any minor wins PodCopy would afford. Because of the
|
||||
// TOTALLY UNSAFE CROSS-COMPARTMENT NONSENSE here, comparing buffer
|
||||
// pointers directly could give an incorrect answer.) If this occurs,
|
||||
// the %TypedArray%.prototype.set operation is completely finished.
|
||||
if (targetType == unsafeSrcTypeCrossCompartment) {
|
||||
PodMove(targetData, unsafeSrcDataCrossCompartment, unsafeSrcByteLengthCrossCompartment);
|
||||
args.rval().setInt32(JS_SETTYPEDARRAY_SAME_TYPE);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Every other bit of element-copying is handled by step 28. Indicate
|
||||
// whether such copying must take care not to overlap, so that self-hosted
|
||||
// code may correctly perform the copying.
|
||||
|
||||
uint8_t* unsafeSrcDataLimitCrossCompartment =
|
||||
unsafeSrcDataCrossCompartment + unsafeSrcByteLengthCrossCompartment;
|
||||
uint8_t* targetDataLimit =
|
||||
static_cast<uint8_t*>(target->viewData()) + targetLength * targetElementSize;
|
||||
|
||||
// Step 24 test (but not steps 24a-d -- the caller handles those).
|
||||
bool overlap =
|
||||
IsInRange(targetData, unsafeSrcDataCrossCompartment, unsafeSrcDataLimitCrossCompartment) ||
|
||||
IsInRange(unsafeSrcDataCrossCompartment, targetData, targetDataLimit);
|
||||
|
||||
args.rval().setInt32(overlap ? JS_SETTYPEDARRAY_OVERLAPPING : JS_SETTYPEDARRAY_DISJOINT);
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename From, typename To>
|
||||
static void
|
||||
CopyValues(To* dest, const From* src, uint32_t count)
|
||||
{
|
||||
#ifdef DEBUG
|
||||
void* destVoid = static_cast<void*>(dest);
|
||||
void* destVoidEnd = static_cast<void*>(dest + count);
|
||||
const void* srcVoid = static_cast<const void*>(src);
|
||||
const void* srcVoidEnd = static_cast<const void*>(src + count);
|
||||
MOZ_ASSERT(!IsInRange(destVoid, srcVoid, srcVoidEnd));
|
||||
MOZ_ASSERT(!IsInRange(srcVoid, destVoid, destVoidEnd));
|
||||
#endif
|
||||
|
||||
for (; count > 0; count--)
|
||||
*dest++ = To(*src++);
|
||||
}
|
||||
|
||||
struct DisjointElements
|
||||
{
|
||||
template <typename To>
|
||||
static void
|
||||
copy(To* dest, const void* src, Scalar::Type fromType, uint32_t count) {
|
||||
switch (fromType) {
|
||||
case Scalar::Int8:
|
||||
CopyValues(dest, static_cast<const int8_t*>(src), count);
|
||||
return;
|
||||
|
||||
case Scalar::Uint8:
|
||||
CopyValues(dest, static_cast<const uint8_t*>(src), count);
|
||||
return;
|
||||
|
||||
case Scalar::Int16:
|
||||
CopyValues(dest, static_cast<const int16_t*>(src), count);
|
||||
return;
|
||||
|
||||
case Scalar::Uint16:
|
||||
CopyValues(dest, static_cast<const uint16_t*>(src), count);
|
||||
return;
|
||||
|
||||
case Scalar::Int32:
|
||||
CopyValues(dest, static_cast<const int32_t*>(src), count);
|
||||
return;
|
||||
|
||||
case Scalar::Uint32:
|
||||
CopyValues(dest, static_cast<const uint32_t*>(src), count);
|
||||
return;
|
||||
|
||||
case Scalar::Float32:
|
||||
CopyValues(dest, static_cast<const float*>(src), count);
|
||||
return;
|
||||
|
||||
case Scalar::Float64:
|
||||
CopyValues(dest, static_cast<const double*>(src), count);
|
||||
return;
|
||||
|
||||
case Scalar::Uint8Clamped:
|
||||
CopyValues(dest, static_cast<const uint8_clamped*>(src), count);
|
||||
return;
|
||||
|
||||
default:
|
||||
MOZ_CRASH("NonoverlappingSet with bogus from-type");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static void
|
||||
CopyToDisjointArray(TypedArrayObject* target, uint32_t targetOffset, const void* src,
|
||||
Scalar::Type srcType, uint32_t count)
|
||||
{
|
||||
Scalar::Type destType = target->type();
|
||||
void* dest =
|
||||
static_cast<uint8_t*>(target->viewData()) + targetOffset * TypedArrayElemSize(destType);
|
||||
|
||||
switch (destType) {
|
||||
case Scalar::Int8: {
|
||||
int8_t* dst = reinterpret_cast<int8_t*>(dest);
|
||||
DisjointElements::copy(dst, src, srcType, count);
|
||||
break;
|
||||
}
|
||||
|
||||
case Scalar::Uint8: {
|
||||
uint8_t* dst = reinterpret_cast<uint8_t*>(dest);
|
||||
DisjointElements::copy(dst, src, srcType, count);
|
||||
break;
|
||||
}
|
||||
|
||||
case Scalar::Int16: {
|
||||
int16_t* dst = reinterpret_cast<int16_t*>(dest);
|
||||
DisjointElements::copy(dst, src, srcType, count);
|
||||
break;
|
||||
}
|
||||
|
||||
case Scalar::Uint16: {
|
||||
uint16_t* dst = reinterpret_cast<uint16_t*>(dest);
|
||||
DisjointElements::copy(dst, src, srcType, count);
|
||||
break;
|
||||
}
|
||||
|
||||
case Scalar::Int32: {
|
||||
int32_t* dst = reinterpret_cast<int32_t*>(dest);
|
||||
DisjointElements::copy(dst, src, srcType, count);
|
||||
break;
|
||||
}
|
||||
|
||||
case Scalar::Uint32: {
|
||||
uint32_t* dst = reinterpret_cast<uint32_t*>(dest);
|
||||
DisjointElements::copy(dst, src, srcType, count);
|
||||
break;
|
||||
}
|
||||
|
||||
case Scalar::Float32: {
|
||||
float* dst = reinterpret_cast<float*>(dest);
|
||||
DisjointElements::copy(dst, src, srcType, count);
|
||||
break;
|
||||
}
|
||||
|
||||
case Scalar::Float64: {
|
||||
double* dst = reinterpret_cast<double*>(dest);
|
||||
DisjointElements::copy(dst, src, srcType, count);
|
||||
break;
|
||||
}
|
||||
|
||||
case Scalar::Uint8Clamped: {
|
||||
uint8_clamped* dst = reinterpret_cast<uint8_clamped*>(dest);
|
||||
DisjointElements::copy(dst, src, srcType, count);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
MOZ_CRASH("setFromAnyTypedArray with a typed array with bogus type");
|
||||
}
|
||||
}
|
||||
|
||||
// |unsafeSrcCrossCompartment| is produced by |DangerouslyUnwrapTypedArray|,
|
||||
// counseling extreme caution when using it. As directed by
|
||||
// |DangerouslyUnwrapTypedArray|, sigil this pointer and all variables derived
|
||||
// from it to counsel extreme caution here.
|
||||
void
|
||||
js::SetDisjointTypedElements(TypedArrayObject* target, uint32_t targetOffset,
|
||||
TypedArrayObject* unsafeSrcCrossCompartment)
|
||||
{
|
||||
Scalar::Type unsafeSrcTypeCrossCompartment = unsafeSrcCrossCompartment->type();
|
||||
|
||||
const void* unsafeSrcDataCrossCompartment = unsafeSrcCrossCompartment->viewData();
|
||||
uint32_t count = unsafeSrcCrossCompartment->length();
|
||||
|
||||
CopyToDisjointArray(target, targetOffset,
|
||||
unsafeSrcDataCrossCompartment, unsafeSrcTypeCrossCompartment, count);
|
||||
}
|
||||
|
||||
bool
|
||||
js::intrinsic_SetDisjointTypedElements(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
MOZ_ASSERT(args.length() == 3);
|
||||
|
||||
Rooted<TypedArrayObject*> target(cx, &args[0].toObject().as<TypedArrayObject>());
|
||||
MOZ_ASSERT(!target->hasBuffer() || !target->buffer()->isNeutered(),
|
||||
"a neutered typed array has no elements to set, so "
|
||||
"it's nonsensical to be setting them");
|
||||
|
||||
uint32_t targetOffset = uint32_t(args[1].toInt32());
|
||||
|
||||
// As directed by |DangerouslyUnwrapTypedArray|, sigil this pointer and all
|
||||
// variables derived from it to counsel extreme caution here.
|
||||
Rooted<TypedArrayObject*> unsafeSrcCrossCompartment(cx);
|
||||
unsafeSrcCrossCompartment = DangerouslyUnwrapTypedArray(cx, &args[2].toObject());
|
||||
if (!unsafeSrcCrossCompartment)
|
||||
return false;
|
||||
|
||||
SetDisjointTypedElements(target, targetOffset, unsafeSrcCrossCompartment);
|
||||
|
||||
args.rval().setUndefined();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
js::intrinsic_SetOverlappingTypedElements(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
MOZ_ASSERT(args.length() == 3);
|
||||
|
||||
Rooted<TypedArrayObject*> target(cx, &args[0].toObject().as<TypedArrayObject>());
|
||||
MOZ_ASSERT(!target->hasBuffer() || !target->buffer()->isNeutered(),
|
||||
"shouldn't be setting elements if neutered");
|
||||
|
||||
uint32_t targetOffset = uint32_t(args[1].toInt32());
|
||||
|
||||
// As directed by |DangerouslyUnwrapTypedArray|, sigil this pointer and all
|
||||
// variables derived from it to counsel extreme caution here.
|
||||
Rooted<TypedArrayObject*> unsafeSrcCrossCompartment(cx);
|
||||
unsafeSrcCrossCompartment = DangerouslyUnwrapTypedArray(cx, &args[2].toObject());
|
||||
if (!unsafeSrcCrossCompartment)
|
||||
return false;
|
||||
|
||||
// Smarter algorithms exist to perform overlapping transfers of the sort
|
||||
// this method performs (for example, v8's self-hosted implementation).
|
||||
// But it seems likely deliberate overlapping transfers are rare enough
|
||||
// that it's not worth the trouble to implement one (and worry about its
|
||||
// safety/correctness!). Make a copy and do a disjoint set from that.
|
||||
uint32_t count = unsafeSrcCrossCompartment->length();
|
||||
Scalar::Type unsafeSrcTypeCrossCompartment = unsafeSrcCrossCompartment->type();
|
||||
size_t sourceByteLen = count * TypedArrayElemSize(unsafeSrcTypeCrossCompartment);
|
||||
|
||||
const void* unsafeSrcDataCrossCompartment = unsafeSrcCrossCompartment->viewData();
|
||||
|
||||
auto copyOfSrcData = target->zone()->make_pod_array<uint8_t>(sourceByteLen);
|
||||
if (!copyOfSrcData)
|
||||
return false;
|
||||
|
||||
mozilla::PodCopy(copyOfSrcData.get(),
|
||||
static_cast<const uint8_t*>(unsafeSrcDataCrossCompartment),
|
||||
sourceByteLen);
|
||||
|
||||
CopyToDisjointArray(target, targetOffset, copyOfSrcData.get(),
|
||||
unsafeSrcTypeCrossCompartment, count);
|
||||
|
||||
args.rval().setUndefined();
|
||||
return true;
|
||||
@@ -1022,13 +1412,19 @@ static const JSFunctionSpec intrinsic_functions[] = {
    JS_FN("GeneratorIsRunning", intrinsic_GeneratorIsRunning, 1,0),
    JS_FN("GeneratorSetClosed", intrinsic_GeneratorSetClosed, 1,0),

    JS_FN("IsArrayBuffer", intrinsic_IsArrayBuffer, 1,0),

    JS_FN("IsTypedArray", intrinsic_IsTypedArray, 1,0),
    JS_FN("IsPossiblyWrappedTypedArray",intrinsic_IsPossiblyWrappedTypedArray,1,0),
    JS_FN("TypedArrayBuffer", intrinsic_TypedArrayBuffer, 1,0),
    JS_FN("TypedArrayByteOffset", intrinsic_TypedArrayByteOffset, 1,0),
    JS_FN("TypedArrayElementShift", intrinsic_TypedArrayElementShift, 1,0),
    JS_FN("TypedArrayLength", intrinsic_TypedArrayLength, 1,0),

    JS_FN("MoveTypedArrayElements", intrinsic_MoveTypedArrayElements, 4,0),
    JS_FN("SetFromTypedArrayApproach",intrinsic_SetFromTypedArrayApproach, 4, 0),
    JS_FN("SetDisjointTypedElements",intrinsic_SetDisjointTypedElements,3,0),
    JS_FN("SetOverlappingTypedElements",intrinsic_SetOverlappingTypedElements,3,0),

    JS_FN("CallTypedArrayMethodIfWrapped",
          CallNonGenericSelfhostedMethod<Is<TypedArrayObject>>, 2, 0),
@@ -784,7 +784,11 @@ TypedArrayObject::set(JSContext* cx, unsigned argc, Value* vp)
/* static */ const JSFunctionSpec
TypedArrayObject::protoFunctions[] = {
    JS_SELF_HOSTED_FN("subarray", "TypedArraySubarray", 2, 0),
#if 0 /* disabled until perf-testing is completed */
    JS_SELF_HOSTED_FN("set", "TypedArraySet", 2, 0),
#else
    JS_FN("set", TypedArrayObject::set, 2, 0),
#endif
    JS_SELF_HOSTED_FN("copyWithin", "TypedArrayCopyWithin", 3, 0),
    JS_SELF_HOSTED_FN("every", "TypedArrayEvery", 2, 0),
    JS_SELF_HOSTED_FN("fill", "TypedArrayFill", 3, 0),
@ -47,12 +47,21 @@ class TypedArrayLayout
|
||||
|
||||
// Underlying (Shared)ArrayBufferObject.
|
||||
static const size_t BUFFER_SLOT = 0;
|
||||
static_assert(BUFFER_SLOT == JS_TYPEDARRAYLAYOUT_BUFFER_SLOT,
|
||||
"self-hosted code with burned-in constants must get the "
|
||||
"right buffer slot");
|
||||
|
||||
// Slot containing length of the view in number of typed elements.
|
||||
static const size_t LENGTH_SLOT = 1;
|
||||
static_assert(LENGTH_SLOT == JS_TYPEDARRAYLAYOUT_LENGTH_SLOT,
|
||||
"self-hosted code with burned-in constants must get the "
|
||||
"right length slot");
|
||||
|
||||
// Offset of view within underlying (Shared)ArrayBufferObject.
|
||||
static const size_t BYTEOFFSET_SLOT = 2;
|
||||
static_assert(BYTEOFFSET_SLOT == JS_TYPEDARRAYLAYOUT_BYTEOFFSET_SLOT,
|
||||
"self-hosted code with burned-in constants must get the "
|
||||
"right byteOffset slot");
|
||||
|
||||
static const size_t RESERVED_SLOTS = 3;
|
||||
|
||||
@ -316,6 +325,19 @@ TypedArrayElemSize(Scalar::Type viewType)
|
||||
return 1u << TypedArrayShift(viewType);
|
||||
}
|
||||
|
||||
// Assign
|
||||
//
|
||||
// target[targetOffset] = unsafeSrcCrossCompartment[0]
|
||||
// ...
|
||||
// target[targetOffset + unsafeSrcCrossCompartment.length - 1] =
|
||||
// unsafeSrcCrossCompartment[unsafeSrcCrossCompartment.length - 1]
|
||||
//
|
||||
// where the source element range doesn't overlap the target element range in
|
||||
// memory.
|
||||
extern void
|
||||
SetDisjointTypedElements(TypedArrayObject* target, uint32_t targetOffset,
|
||||
TypedArrayObject* unsafeSrcCrossCompartment);
|
||||
|
||||
extern JSObject*
|
||||
InitDataViewClass(JSContext* cx, HandleObject obj);
|
||||
|
||||
|
@ -92,22 +92,23 @@ ArrayEnd(const Array<T, N>& aArr)
|
||||
|
||||
namespace detail {
|
||||
|
||||
template<typename AlignType, typename Pointee>
|
||||
template<typename AlignType, typename Pointee,
|
||||
typename = EnableIf<!IsVoid<AlignType>::value>>
|
||||
struct AlignedChecker
|
||||
{
|
||||
static void
|
||||
test(Pointee* aPtr)
|
||||
test(const Pointee* aPtr)
|
||||
{
|
||||
MOZ_ASSERT((uintptr_t(aPtr) % MOZ_ALIGNOF(AlignType)) == 0,
|
||||
"performing a range-check with a misaligned pointer");
|
||||
}
|
||||
};
|
||||
|
||||
template<typename Pointee>
|
||||
struct AlignedChecker<void, Pointee>
|
||||
template<typename AlignType, typename Pointee>
|
||||
struct AlignedChecker<AlignType, Pointee>
|
||||
{
|
||||
static void
|
||||
test(Pointee* aPtr)
|
||||
test(const Pointee* aPtr)
|
||||
{
|
||||
}
|
||||
};
|
||||
@ -132,13 +133,14 @@ inline typename EnableIf<IsSame<T, U>::value ||
|
||||
IsBaseOf<T, U>::value ||
|
||||
IsVoid<T>::value,
|
||||
bool>::Type
|
||||
IsInRange(T* aPtr, U* aBegin, U* aEnd)
|
||||
IsInRange(const T* aPtr, const U* aBegin, const U* aEnd)
|
||||
{
|
||||
MOZ_ASSERT(aBegin <= aEnd);
|
||||
detail::AlignedChecker<U, T>::test(aPtr);
|
||||
detail::AlignedChecker<U, U>::test(aBegin);
|
||||
detail::AlignedChecker<U, U>::test(aEnd);
|
||||
return aBegin <= static_cast<U*>(aPtr) && static_cast<U*>(aPtr) < aEnd;
|
||||
return aBegin <= reinterpret_cast<const U*>(aPtr) &&
|
||||
reinterpret_cast<const U*>(aPtr) < aEnd;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -148,10 +150,11 @@ IsInRange(T* aPtr, U* aBegin, U* aEnd)
|
||||
*/
|
||||
template<typename T>
|
||||
inline bool
|
||||
IsInRange(T* aPtr, uintptr_t aBegin, uintptr_t aEnd)
|
||||
IsInRange(const T* aPtr, uintptr_t aBegin, uintptr_t aEnd)
|
||||
{
|
||||
return IsInRange(aPtr,
|
||||
reinterpret_cast<T*>(aBegin), reinterpret_cast<T*>(aEnd));
|
||||
reinterpret_cast<const T*>(aBegin),
|
||||
reinterpret_cast<const T*>(aEnd));
|
||||
}
|
||||
|
||||
namespace detail {
|
||||
|