Merge tracemonkey to mozilla-central. a=blockers

Robert Sayre 2010-12-06 16:10:01 -05:00
commit b788d0255b
69 changed files with 1559 additions and 484 deletions

View File

@ -932,6 +932,13 @@ namespace JSC {
return m_buffer.sizeOfConstantPool();
}
#ifdef DEBUG
void allowPoolFlush(bool allowFlush)
{
m_buffer.allowPoolFlush(allowFlush);
}
#endif
JmpDst label()
{
JmpDst label(m_buffer.size());

View File

@ -37,6 +37,7 @@
#include "AssemblerBuffer.h"
#include "assembler/wtf/SegmentedVector.h"
#include "assembler/wtf/Assertions.h"
#define ASSEMBLER_HAS_CONSTANT_POOL 1
@ -103,6 +104,9 @@ public:
, m_numConsts(0)
, m_maxDistance(maxPoolSize)
, m_lastConstDelta(0)
#ifdef DEBUG
, m_allowFlush(true)
#endif
{
m_pool = static_cast<uint32_t*>(malloc(maxPoolSize));
m_mask = static_cast<char*>(malloc(maxPoolSize / sizeof(uint32_t)));
@ -235,6 +239,15 @@ public:
return m_numConsts;
}
#ifdef DEBUG
// Guard constant pool flushes to ensure that they don't occur during
// regions where offsets into the code have to be maintained (such as PICs).
void allowPoolFlush(bool allowFlush)
{
m_allowFlush = allowFlush;
}
#endif
private:
void correctDeltas(int insnSize)
{
@ -254,6 +267,7 @@ private:
void flushConstantPool(bool useBarrier = true)
{
ASSERT(m_allowFlush);
if (m_numConsts == 0)
return;
int alignPool = (AssemblerBuffer::size() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
@ -313,6 +327,10 @@ private:
int m_numConsts;
int m_maxDistance;
int m_lastConstDelta;
#ifdef DEBUG
bool m_allowFlush;
#endif
};
} // namespace JSC
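The m_allowFlush guard added above is debug-only bookkeeping: code that needs stable offsets (the PIC regions mentioned in the comment) calls allowPoolFlush(false) before emitting and allowPoolFlush(true) when it is done, and any constant-pool flush inside that window now trips the new ASSERT. A minimal standalone sketch of that protocol, using a hypothetical PoolBuffer class in place of the real JSC assembler buffer:

#include <cassert>
#include <cstdio>

// Hypothetical stand-in for AssemblerBufferWithConstantPool; only the
// flush-guard protocol introduced by this patch is modeled here.
class PoolBuffer {
    int numConsts;
    bool allowFlush;                  // mirrors the DEBUG-only m_allowFlush
public:
    PoolBuffer() : numConsts(0), allowFlush(true) {}
    void allowPoolFlush(bool allow) { allowFlush = allow; }
    void putConstant() { ++numConsts; }
    void flushConstantPool() {
        assert(allowFlush && "constant pool flushed inside a guarded region");
        numConsts = 0;
    }
};

int main() {
    PoolBuffer buf;
    buf.allowPoolFlush(false);        // start of a region with fixed code offsets
    buf.putConstant();                // emitting here must not force a flush
    buf.allowPoolFlush(true);         // region closed, flushing is legal again
    buf.flushConstantPool();
    std::puts("guarded region respected");
    return 0;
}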

View File

@ -1078,6 +1078,13 @@ public:
m_assembler.forceFlushConstantPool();
}
#ifdef DEBUG
void allowPoolFlush(bool allowFlush)
{
m_assembler.allowPoolFlush(allowFlush);
}
#endif
protected:
ARMAssembler::Condition ARMCondition(Condition cond)
{

View File

@ -380,6 +380,12 @@ public:
m_assembler.movzbl_rr(dest, dest);
}
void setPtr(Condition cond, RegisterID left, ImmPtr right, RegisterID dest)
{
move(right, scratchRegister);
setPtr(cond, left, scratchRegister, dest);
}
Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
{
m_assembler.cmpq_rr(right, left);

View File

@ -0,0 +1,9 @@
// |jit-test| error: TypeError
// don't assert
print(this.watch("x",
function() {
Object.defineProperty(this, "x", ({
get: (Int8Array)
}))
}))(x = /x/)

View File

@ -0,0 +1,2 @@
var s = [undefined, undefined].sort();
assertEq(s.length, 2);

View File

@ -0,0 +1,185 @@
// test dense -> slow array transitions during the recording and on trace
// for various array functions and property accessors
function test_set_elem() {
function f() {
var bag = [];
for (var i = 0; i != 100; ++i) {
var a = [0];
a[100*100] = i;
bag.push(a);
}
for (var i = 0; i != 100; ++i) {
var a = [0];
a[200 + i] = i;
bag.push(a);
}
return bag;
}
var bag = f();
for (var i = 0; i != 100; ++i) {
var a = bag[i];
assertEq(a.length, 100 * 100 + 1);
assertEq(a[100*100], i);
assertEq(a[0], 0);
assertEq(1 + i in a, false);
}
for (var i = 0; i != 100; ++i) {
var a = bag[100 + i];
assertEq(a.length, 200 + i + 1);
assertEq(a[200 + i], i);
assertEq(a[0], 0);
assertEq(1 + i in a, false);
}
}
function test_reverse() {
function prepare_arays() {
var bag = [];
var base_index = 245;
for (var i = 0; i != 50; ++i) {
var a = [1, 2, 3, 4, 5];
a.length = i + base_index;
bag.push(a);
}
return bag;
}
function test(bag) {
for (var i = 0; i != bag.length; ++i) {
var a = bag[i];
a.reverse();
a[0] = 1;
}
}
var bag = prepare_arays();
test(bag);
for (var i = 0; i != bag.length; ++i) {
var a = bag[i];
assertEq(a[0], 1);
for (var j = 1; j <= 5; ++j) {
assertEq(a[a.length - j], j);
}
for (var j = 1; j < a.length - 5; ++j) {
assertEq(j in a, false);
}
}
}
function test_push() {
function prepare_arays() {
var bag = [];
var base_index = 245;
for (var i = 0; i != 50; ++i) {
var a = [0];
a.length = i + base_index;
bag.push(a);
}
return bag;
}
function test(bag) {
for (var i = 0; i != bag.length; ++i) {
var a = bag[i];
a.push(2);
a[0] = 1;
}
}
var bag = prepare_arays();
test(bag);
for (var i = 0; i != bag.length; ++i) {
var a = bag[i];
assertEq(a[0], 1);
assertEq(a[a.length - 1], 2);
for (var j = 1; j < a.length - 1; ++j) {
assertEq(j in a, false);
}
}
}
function test_unshift() {
function prepare_arays() {
var bag = [];
var base_index = 245;
for (var i = 0; i != 50; ++i) {
var a = [0];
a.length = i + base_index;
bag.push(a);
}
return bag;
}
function test(bag) {
for (var i = 0; i != bag.length; ++i) {
var a = bag[i];
a.unshift(1);
a[2] = 2;
}
}
var bag = prepare_arays();
test(bag);
for (var i = 0; i != bag.length; ++i) {
var a = bag[i];
assertEq(a[0], 1);
assertEq(a[1], 0);
assertEq(a[2], 2);
for (var j = 3; j < a.length; ++j) {
assertEq(j in a, false);
}
}
}
function test_splice() {
function prepare_arays() {
var bag = [];
var base_index = 245;
for (var i = 0; i != 50; ++i) {
var a = [1, 2];
a.length = i + base_index;
bag.push(a);
}
return bag;
}
function test(bag) {
for (var i = 0; i != bag.length; ++i) {
var a = bag[i];
a.splice(1, 0, "a", "b", "c");
a[2] = 100;
}
}
var bag = prepare_arays();
test(bag);
for (var i = 0; i != bag.length; ++i) {
var a = bag[i];
assertEq(a[0], 1);
assertEq(a[1], "a");
assertEq(a[2], 100);
assertEq(a[3], "c");
assertEq(a[4], 2);
for (var j = 5; j < a.length; ++j) {
assertEq(j in a, false);
}
}
}
test_set_elem();
test_reverse();
test_push();
test_unshift();
test_splice();

View File

@ -0,0 +1,14 @@
// vim: set ts=4 sw=4 tw=99 et:
function F() {
var T = { };
try {
throw 12;
} catch (e) {
T.x = 5;
return T;
}
}
assertEq((new F()).x, 5);

View File

@ -0,0 +1,5 @@
Array.prototype.__proto__ = null;
for (var r = 0; r < 3; ++r) [][0] = 1;
// Don't crash.

View File

@ -0,0 +1,9 @@
// |jit-test| error: ReferenceError
// vim: set ts=4 sw=4 tw=99 et:
try {
(function () {
__proto__ = Uint32Array()
}())
} catch (e) {}(function () {
length, ([eval()] ? x : 7)
})()

View File

@ -49,6 +49,7 @@ PROGRAM = jsapi-tests$(BIN_SUFFIX)
CPPSRCS = \
tests.cpp \
selfTest.cpp \
testBug604087.cpp \
testClassGetter.cpp \
testCloneScript.cpp \
testConservativeGC.cpp \
@ -69,7 +70,7 @@ CPPSRCS = \
testSameValue.cpp \
testScriptObject.cpp \
testSetPropertyWithNativeGetterStubSetter.cpp \
testBug604087.cpp \
testThreadGC.cpp \
testThreads.cpp \
testTrap.cpp \
testUTF8.cpp \

View File

@ -6,9 +6,49 @@
BEGIN_TEST(testDeepFreeze_bug535703)
{
JSObject *obj = JS_NewObject(cx, NULL, NULL, NULL);
CHECK(obj);
JS_DeepFreezeObject(cx, obj); // don't crash
jsval v;
EVAL("var x = {}; x;", &v);
CHECK(JS_DeepFreezeObject(cx, JSVAL_TO_OBJECT(v))); // don't crash
EVAL("Object.isFrozen(x)", &v);
CHECK_SAME(v, JSVAL_TRUE);
return true;
}
END_TEST(testDeepFreeze_bug535703)
BEGIN_TEST(testDeepFreeze_deep)
{
jsval a, o;
EXEC("var a = {}, o = a;\n"
"for (var i = 0; i < 10000; i++)\n"
" a = {x: a, y: a};\n");
EVAL("a", &a);
EVAL("o", &o);
CHECK(JS_DeepFreezeObject(cx, JSVAL_TO_OBJECT(a)));
jsval b;
EVAL("Object.isFrozen(a)", &b);
CHECK_SAME(b, JSVAL_TRUE);
EVAL("Object.isFrozen(o)", &b);
CHECK_SAME(b, JSVAL_TRUE);
return true;
}
END_TEST(testDeepFreeze_deep)
BEGIN_TEST(testDeepFreeze_loop)
{
jsval x, y;
EXEC("var x = [], y = {x: x}; y.y = y; x.push(x, y);");
EVAL("x", &x);
EVAL("y", &y);
CHECK(JS_DeepFreezeObject(cx, JSVAL_TO_OBJECT(x)));
jsval b;
EVAL("Object.isFrozen(x)", &b);
CHECK_SAME(b, JSVAL_TRUE);
EVAL("Object.isFrozen(y)", &b);
CHECK_SAME(b, JSVAL_TRUE);
return true;
}
END_TEST(testDeepFreeze_loop)

View File

@ -0,0 +1,195 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sw=4 et tw=99:
*/
#ifdef JS_THREADSAFE
#include "tests.h"
#include "prthread.h"
#include "jscntxt.h"
/*
 * We test that if a GC callback cancels the GC on a child thread, the GC can
 * still proceed on the main thread even if the child thread continues to
 * run uninterrupted.
 */
struct SharedData {
enum ChildState {
CHILD_STARTING,
CHILD_RUNNING,
CHILD_DONE,
CHILD_ERROR
};
JSRuntime *const runtime;
PRThread *const mainThread;
PRLock *const lock;
PRCondVar *const signal;
ChildState childState;
bool childShouldStop;
JSContext *childContext;
SharedData(JSRuntime *rt, bool *ok)
: runtime(rt),
mainThread(PR_GetCurrentThread()),
lock(PR_NewLock()),
signal(lock ? PR_NewCondVar(lock) : NULL),
childState(CHILD_STARTING),
childShouldStop(false),
childContext(NULL)
{
JS_ASSERT(!*ok);
*ok = !!signal;
}
~SharedData() {
if (signal)
PR_DestroyCondVar(signal);
if (lock)
PR_DestroyLock(lock);
}
};
static SharedData *shared;
static JSBool
CancelNonMainThreadGCCallback(JSContext *cx, JSGCStatus status)
{
return status != JSGC_BEGIN || PR_GetCurrentThread() == shared->mainThread;
}
static JSBool
StopChildOperationCallback(JSContext *cx)
{
bool shouldStop;
PR_Lock(shared->lock);
shouldStop = shared->childShouldStop;
PR_Unlock(shared->lock);
return !shouldStop;
}
static JSBool
NotifyMainThreadAboutBusyLoop(JSContext *cx, uintN argc, jsval *vp)
{
PR_Lock(shared->lock);
JS_ASSERT(shared->childState == SharedData::CHILD_STARTING);
shared->childState = SharedData::CHILD_RUNNING;
shared->childContext = cx;
PR_NotifyCondVar(shared->signal);
PR_Unlock(shared->lock);
return true;
}
static void
ChildThreadMain(void *arg)
{
JS_ASSERT(!arg);
bool error = true;
JSContext *cx = JS_NewContext(shared->runtime, 8192);
if (cx) {
JS_SetOperationCallback(cx, StopChildOperationCallback);
JSAutoRequest ar(cx);
JSObject *global = JS_NewCompartmentAndGlobalObject(cx, JSAPITest::basicGlobalClass(),
NULL);
if (global) {
JS_SetGlobalObject(cx, global);
if (JS_InitStandardClasses(cx, global) &&
JS_DefineFunction(cx, global, "notify", NotifyMainThreadAboutBusyLoop, 0, 0)) {
jsval rval;
static const char code[] = "var i = 0; notify(); for (var i = 0; ; ++i);";
JSBool ok = JS_EvaluateScript(cx, global, code, strlen(code),
__FILE__, __LINE__, &rval);
if (!ok && !JS_IsExceptionPending(cx)) {
/* Evaluate should only return via the callback cancellation. */
error = false;
}
}
}
}
PR_Lock(shared->lock);
shared->childState = error ? SharedData::CHILD_ERROR : SharedData::CHILD_DONE;
shared->childContext = NULL;
PR_NotifyCondVar(shared->signal);
PR_Unlock(shared->lock);
if (cx)
JS_DestroyContextNoGC(cx);
}
BEGIN_TEST(testThreadGC_bug590533)
{
/*
* Test the child thread busy running while the current thread calls
* the GC both with JSRuntime->gcIsNeeded set and unset.
*/
bool ok = TestChildThread(true);
CHECK(ok);
ok = TestChildThread(false);
CHECK(ok);
return ok;
}
bool TestChildThread(bool setGCIsNeeded)
{
bool ok = false;
shared = new SharedData(rt, &ok);
CHECK(ok);
JSGCCallback oldGCCallback = JS_SetGCCallback(cx, CancelNonMainThreadGCCallback);
PRThread *thread =
PR_CreateThread(PR_USER_THREAD, ChildThreadMain, NULL,
PR_PRIORITY_NORMAL, PR_LOCAL_THREAD, PR_JOINABLE_THREAD, 0);
if (!thread)
return false;
PR_Lock(shared->lock);
while (shared->childState == SharedData::CHILD_STARTING)
PR_WaitCondVar(shared->signal, PR_INTERVAL_NO_TIMEOUT);
JS_ASSERT(shared->childState != SharedData::CHILD_DONE);
ok = (shared->childState == SharedData::CHILD_RUNNING);
PR_Unlock(shared->lock);
CHECK(ok);
if (setGCIsNeeded) {
/*
* Use JS internal API to set the GC trigger flag after we know
* that the child is in a request and is about to run an infinite
* loop. Then run the GC with JSRuntime->gcIsNeeded flag set.
*/
js::AutoLockGC lock(rt);
js::TriggerGC(rt);
}
JS_GC(cx);
PR_Lock(shared->lock);
shared->childShouldStop = true;
while (shared->childState == SharedData::CHILD_RUNNING) {
JS_TriggerOperationCallback(shared->childContext);
PR_WaitCondVar(shared->signal, PR_INTERVAL_NO_TIMEOUT);
}
JS_ASSERT(shared->childState != SharedData::CHILD_STARTING);
ok = (shared->childState == SharedData::CHILD_DONE);
PR_Unlock(shared->lock);
JS_SetGCCallback(cx, oldGCCallback);
PR_JoinThread(thread);
delete shared;
shared = NULL;
return true;
}
END_TEST(testThreadGC_bug590533)
#endif

View File

@ -3049,7 +3049,7 @@ JS_DeepFreezeObject(JSContext *cx, JSObject *obj)
assertSameCompartment(cx, obj);
/* Assume that non-extensible objects are already deep-frozen, to avoid divergence. */
if (obj->isExtensible())
if (!obj->isExtensible())
return true;
if (!obj->freeze(cx))
@ -3954,7 +3954,6 @@ JS_PUBLIC_API(JSBool)
JS_NextProperty(JSContext *cx, JSObject *iterobj, jsid *idp)
{
jsint i;
JSObject *obj;
const Shape *shape;
JSIdArray *ida;
@ -3963,15 +3962,9 @@ JS_NextProperty(JSContext *cx, JSObject *iterobj, jsid *idp)
i = iterobj->getSlot(JSSLOT_ITER_INDEX).toInt32();
if (i < 0) {
/* Native case: private data is a property tree node pointer. */
obj = iterobj->getParent();
JS_ASSERT(obj->isNative());
JS_ASSERT(iterobj->getParent()->isNative());
shape = (Shape *) iterobj->getPrivate();
/*
* If the next property mapped by obj in the property tree ancestor
* line is not enumerable, or it's an alias, skip it and keep on trying
* to find an enumerable property that is still in obj.
*/
while (shape->previous() && (!shape->enumerable() || shape->isAlias()))
shape = shape->previous();

View File

@ -62,9 +62,9 @@
*
* Arrays are converted to use js_SlowArrayClass when any of these conditions
* are met:
* - the load factor (COUNT / capacity) is less than 0.25, and there are
* more than MIN_SPARSE_INDEX slots total
* - a property is set that is not indexed (and not "length"); or
* - there are more than MIN_SPARSE_INDEX slots total
* - the load factor (COUNT / capacity) is less than 0.25
* - a property is set that is not indexed (and not "length")
* - a property is defined that has non-default property attributes.
*
* Dense arrays do not track property creation order, so unlike other native
@ -115,30 +115,6 @@ using namespace js::gc;
#define MAXINDEX 4294967295u
#define MAXSTR "4294967295"
/*
* Use the limit on number of object slots for sanity and consistency (see the
* assertion in JSObject::makeDenseArraySlow).
*/
static inline bool
INDEX_TOO_BIG(jsuint index)
{
return index >= JSObject::NSLOTS_LIMIT;
}
static inline bool
INDEX_TOO_SPARSE(JSObject *array, jsuint index)
{
/* Small arrays with less than 256 elements are dense, no matter what. */
if (index < 256)
return false;
/*
* Otherwise if the index becomes too large or is more than 256 past
* the current capacity, we have to slowify.
*/
return INDEX_TOO_BIG(index) || (index > array->getDenseArrayCapacity() + 256);
}
static inline bool
ENSURE_SLOW_ARRAY(JSContext *cx, JSObject *obj)
{
@ -310,6 +286,34 @@ BigIndexToId(JSContext *cx, JSObject *obj, jsuint index, JSBool createAtom,
return JS_TRUE;
}
bool
JSObject::willBeSparseDenseArray(uintN requiredCapacity, uintN newElementsHint)
{
JS_ASSERT(isDenseArray());
JS_ASSERT(requiredCapacity > MIN_SPARSE_INDEX);
uintN cap = numSlots();
JS_ASSERT(requiredCapacity >= cap);
if (requiredCapacity >= JSObject::NSLOTS_LIMIT)
return true;
uintN minimalDenseCount = requiredCapacity / 4;
if (newElementsHint >= minimalDenseCount)
return false;
minimalDenseCount -= newElementsHint;
if (minimalDenseCount > cap)
return true;
Value *elems = getDenseArrayElements();
for (uintN i = 0; i < cap; i++) {
if (!elems[i].isMagic(JS_ARRAY_HOLE) && !--minimalDenseCount)
return false;
}
return true;
}
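As a rough worked example of the heuristic above (a standalone sketch, not engine code: the slots limit and the hole flags are stand-ins), growing a 300-slot array to a required capacity of 400 needs 400/4 - 1 = 99 existing non-hole elements to stay dense after crediting one hinted insertion; a fully populated array passes, while one holding only 10 elements would slowify:

#include <cstdio>
#include <vector>

// Standalone model of willBeSparseDenseArray: isHole[i] marks a hole slot.
static bool willBeSparse(size_t requiredCapacity, size_t newElementsHint,
                         const std::vector<bool> &isHole)
{
    const size_t SLOTS_LIMIT = size_t(1) << 24;      // illustrative NSLOTS_LIMIT
    if (requiredCapacity >= SLOTS_LIMIT)
        return true;
    size_t minimalDenseCount = requiredCapacity / 4; // keep load factor >= 0.25
    if (newElementsHint >= minimalDenseCount)
        return false;
    minimalDenseCount -= newElementsHint;
    if (minimalDenseCount > isHole.size())
        return true;
    for (size_t i = 0; i < isHole.size(); i++) {
        if (!isHole[i] && --minimalDenseCount == 0)
            return false;
    }
    return true;
}

int main() {
    std::vector<bool> full(300, false);                // every slot holds a value
    std::printf("%d\n", willBeSparse(400, 1, full));   // 0: stays dense
    std::vector<bool> holes(300, true);                // only 10 of 300 slots filled
    for (int i = 0; i < 10; i++) holes[i] = false;
    std::printf("%d\n", willBeSparse(400, 1, holes));  // 1: would be made slow
    return 0;
}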
static bool
ReallyBigIndexToId(JSContext* cx, jsdouble index, jsid* idp)
{
@ -439,19 +443,23 @@ SetArrayElement(JSContext *cx, JSObject *obj, jsdouble index, const Value &v)
if (obj->isDenseArray()) {
/* Predicted/prefetched code should favor the remains-dense case. */
if (index <= jsuint(-1)) {
JSObject::EnsureDenseResult result = JSObject::ED_SPARSE;
do {
if (index > jsuint(-1))
break;
jsuint idx = jsuint(index);
if (!INDEX_TOO_SPARSE(obj, idx)) {
JS_ASSERT(idx + 1 > idx);
if (!obj->ensureDenseArrayElements(cx, idx + 1))
return JS_FALSE;
if (idx >= obj->getArrayLength())
obj->setArrayLength(idx + 1);
obj->setDenseArrayElement(idx, v);
return JS_TRUE;
}
}
result = obj->ensureDenseArrayElements(cx, idx, 1);
if (result != JSObject::ED_OK)
break;
if (idx >= obj->getArrayLength())
obj->setArrayLength(idx + 1);
obj->setDenseArrayElement(idx, v);
return true;
} while (false);
if (result == JSObject::ED_FAILED)
return false;
JS_ASSERT(result == JSObject::ED_SPARSE);
if (!obj->makeDenseArraySlow(cx))
return JS_FALSE;
}
@ -474,13 +482,7 @@ js_EnsureDenseArrayCapacity(JSContext *cx, JSObject *obj, jsint i)
Class *origObjClasp = obj->clasp;
#endif
jsuint u = jsuint(i);
jsuint capacity = obj->getDenseArrayCapacity();
if (u < capacity)
return true;
if (INDEX_TOO_SPARSE(obj, u))
return false;
JSBool ret = obj->ensureDenseArrayElements(cx, u + 1);
JSBool ret = (obj->ensureDenseArrayElements(cx, u, 1) == JSObject::ED_OK);
/* Partially check the CallInfo's storeAccSet is correct. */
JS_ASSERT(obj->clasp == origObjClasp);
@ -801,20 +803,29 @@ array_setProperty(JSContext *cx, JSObject *obj, jsid id, Value *vp, JSBool stric
if (!obj->isDenseArray())
return js_SetProperty(cx, obj, id, vp, strict);
if (!js_IdIsIndex(id, &i) || js_PrototypeHasIndexedProperties(cx, obj) ||
INDEX_TOO_SPARSE(obj, i)) {
if (!obj->makeDenseArraySlow(cx))
return false;
return js_SetProperty(cx, obj, id, vp, strict);
}
do {
if (!js_IdIsIndex(id, &i))
break;
if (js_PrototypeHasIndexedProperties(cx, obj))
break;
if (!obj->ensureDenseArrayElements(cx, i + 1))
JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, i, 1);
if (result != JSObject::ED_OK) {
if (result == JSObject::ED_FAILED)
return false;
JS_ASSERT(result == JSObject::ED_SPARSE);
break;
}
if (i >= obj->getArrayLength())
obj->setArrayLength(i + 1);
obj->setDenseArrayElement(i, *vp);
return true;
} while (false);
if (!obj->makeDenseArraySlow(cx))
return false;
if (i >= obj->getArrayLength())
obj->setArrayLength(i + 1);
obj->setDenseArrayElement(i, *vp);
return true;
return js_SetProperty(cx, obj, id, vp, strict);
}
static JSBool
@ -861,7 +872,7 @@ array_defineProperty(JSContext *cx, JSObject *obj, jsid id, const Value *value,
return JS_TRUE;
isIndex = js_IdIsIndex(id, &i);
if (!isIndex || attrs != JSPROP_ENUMERATE || !obj->isDenseArray() || INDEX_TOO_SPARSE(obj, i)) {
if (!isIndex || attrs != JSPROP_ENUMERATE) {
if (!ENSURE_SLOW_ARRAY(cx, obj))
return JS_FALSE;
return js_DefineProperty(cx, obj, id, value, getter, setter, attrs);
@ -915,20 +926,9 @@ array_trace(JSTracer *trc, JSObject *obj)
{
JS_ASSERT(obj->isDenseArray());
size_t holes = 0;
uint32 capacity = obj->getDenseArrayCapacity();
for (uint32 i = 0; i < capacity; i++) {
Value v = obj->getDenseArrayElement(i);
if (v.isMagic(JS_ARRAY_HOLE))
++holes;
else
MarkValue(trc, obj->getDenseArrayElement(i), "dense_array_elems");
}
if (IS_GC_MARKING_TRACER(trc) && holes > MIN_SPARSE_INDEX && holes > capacity / 4 * 3) {
/* This might fail, in which case we don't slowify it. */
static_cast<GCMarker *>(trc)->arraysToSlowify.append(obj);
}
for (uint32 i = 0; i < capacity; i++)
MarkValue(trc, obj->getDenseArrayElement(i), "dense_array_elems");
}
static JSBool
@ -1379,22 +1379,28 @@ InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint count, Valu
* Optimize for dense arrays so long as adding the given set of elements
* wouldn't otherwise make the array slow.
*/
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
start <= MAXINDEX - count && !INDEX_TOO_BIG(start + count)) {
do {
if (!obj->isDenseArray())
break;
if (js_PrototypeHasIndexedProperties(cx, obj))
break;
JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, start, count);
if (result != JSObject::ED_OK) {
if (result == JSObject::ED_FAILED)
return false;
JS_ASSERT(result == JSObject::ED_SPARSE);
break;
}
jsuint newlen = start + count;
JS_ASSERT(jsdouble(start) + count == jsdouble(newlen));
if (!obj->ensureDenseArrayElements(cx, newlen))
return JS_FALSE;
if (newlen > obj->getArrayLength())
obj->setArrayLength(newlen);
JS_ASSERT(count < uint32(-1) / sizeof(Value));
memcpy(obj->getDenseArrayElements() + start, vector, sizeof(jsval) * count);
JS_ASSERT_IF(count != 0, !obj->getDenseArrayElement(newlen - 1).isMagic(JS_ARRAY_HOLE));
return JS_TRUE;
}
return true;
} while (false);
Value* end = vector + count;
while (vector != end && start < MAXINDEX) {
@ -1436,7 +1442,9 @@ InitArrayObject(JSContext *cx, JSObject *obj, jsuint length, const Value *vector
obj->setArrayLength(length);
if (!vector || !length)
return true;
if (!obj->ensureDenseArrayElements(cx, length))
/* Avoid ensureDenseArrayElements to skip sparse array checks there. */
if (!obj->ensureSlots(cx, length))
return false;
memcpy(obj->getDenseArrayElements(), vector, length * sizeof(Value));
return true;
@ -1470,7 +1478,12 @@ array_reverse(JSContext *cx, uintN argc, Value *vp)
return JS_FALSE;
vp->setObject(*obj);
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj)) {
do {
if (!obj->isDenseArray())
break;
if (js_PrototypeHasIndexedProperties(cx, obj))
break;
/* An empty array or an array with no elements is already reversed. */
if (len == 0 || obj->getDenseArrayCapacity() == 0)
return JS_TRUE;
@ -1484,8 +1497,13 @@ array_reverse(JSContext *cx, uintN argc, Value *vp)
* holes in the array at its start) and ensure that the capacity is
* sufficient to hold all the elements in the array if it were full.
*/
if (!obj->ensureDenseArrayElements(cx, len))
return JS_FALSE;
JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, len, 0);
if (result != JSObject::ED_OK) {
if (result == JSObject::ED_FAILED)
return false;
JS_ASSERT(result == JSObject::ED_SPARSE);
break;
}
uint32 lo = 0, hi = len - 1;
for (; lo < hi; lo++, hi--) {
@ -1500,7 +1518,7 @@ array_reverse(JSContext *cx, uintN argc, Value *vp)
* holes).
*/
return JS_TRUE;
}
} while (false);
AutoValueRooter tvr(cx);
for (jsuint i = 0, half = len / 2; i < half; i++) {
@ -1843,8 +1861,10 @@ js::array_sort(JSContext *cx, uintN argc, Value *vp)
++newlen;
}
if (newlen == 0)
if (newlen == 0) {
vp->setObject(*obj);
return true; /* The array has only holes and undefs. */
}
/*
* The first newlen elements of vec are copied from the array object
@ -2003,21 +2023,27 @@ static JSBool
array_push1_dense(JSContext* cx, JSObject* obj, const Value &v, Value *rval)
{
uint32 length = obj->getArrayLength();
if (INDEX_TOO_SPARSE(obj, length)) {
if (!obj->makeDenseArraySlow(cx))
return JS_FALSE;
Value tmp = v;
return array_push_slowly(cx, obj, 1, &tmp, rval);
}
do {
JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, length, 1);
if (result != JSObject::ED_OK) {
if (result == JSObject::ED_FAILED)
return false;
JS_ASSERT(result == JSObject::ED_SPARSE);
break;
}
if (!obj->ensureDenseArrayElements(cx, length + 1))
return JS_FALSE;
obj->setArrayLength(length + 1);
obj->setArrayLength(length + 1);
JS_ASSERT(obj->getDenseArrayElement(length).isMagic(JS_ARRAY_HOLE));
obj->setDenseArrayElement(length, v);
rval->setNumber(obj->getArrayLength());
return JS_TRUE;
JS_ASSERT(obj->getDenseArrayElement(length).isMagic(JS_ARRAY_HOLE));
obj->setDenseArrayElement(length, v);
rval->setNumber(obj->getArrayLength());
return true;
} while (false);
if (!obj->makeDenseArraySlow(cx))
return false;
Value tmp = v;
return array_push_slowly(cx, obj, 1, &tmp, rval);
}
JS_ALWAYS_INLINE JSBool
@ -2034,8 +2060,13 @@ ArrayCompPushImpl(JSContext *cx, JSObject *obj, const Value &v)
return JS_FALSE;
}
if (!obj->ensureDenseArrayElements(cx, length + 1))
return JS_FALSE;
/*
* Array comprehension cannot add holes to the array and never leaks
* the array before it is fully initialized. So we can use ensureSlots
* instead of ensureDenseArrayElements.
*/
if (!obj->ensureSlots(cx, length + 1))
return false;
}
obj->setArrayLength(length + 1);
obj->setDenseArrayElement(length, v);
@ -2193,16 +2224,27 @@ array_unshift(JSContext *cx, uintN argc, Value *vp)
/* Slide up the array to make room for argc at the bottom. */
argv = JS_ARGV(cx, vp);
if (length > 0) {
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
!INDEX_TOO_SPARSE(obj, unsigned(newlen + argc))) {
JS_ASSERT(newlen + argc == length + argc);
if (!obj->ensureDenseArrayElements(cx, length + argc))
return JS_FALSE;
bool optimized = false;
do {
if (!obj->isDenseArray())
break;
if (js_PrototypeHasIndexedProperties(cx, obj))
break;
JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, length, argc);
if (result != JSObject::ED_OK) {
if (result == JSObject::ED_FAILED)
return false;
JS_ASSERT(result == JSObject::ED_SPARSE);
break;
}
Value *elems = obj->getDenseArrayElements();
memmove(elems + argc, elems, length * sizeof(jsval));
for (uint32 i = 0; i < argc; i++)
obj->setDenseArrayElement(i, MagicValue(JS_ARRAY_HOLE));
} else {
optimized = true;
} while (false);
if (!optimized) {
last = length;
jsdouble upperIndex = last + argc;
AutoValueRooter tvr(cx);
@ -2322,12 +2364,23 @@ array_splice(JSContext *cx, uintN argc, Value *vp)
if (argc > count) {
delta = (jsuint)argc - count;
last = length;
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
length <= obj->getDenseArrayCapacity() &&
(length == 0 || !obj->getDenseArrayElement(length - 1).isMagic(JS_ARRAY_HOLE))) {
if (!obj->ensureDenseArrayElements(cx, length + delta))
return JS_FALSE;
bool optimized = false;
do {
if (!obj->isDenseArray())
break;
if (js_PrototypeHasIndexedProperties(cx, obj))
break;
if (length > obj->getDenseArrayCapacity())
break;
if (length != 0 && obj->getDenseArrayElement(length - 1).isMagic(JS_ARRAY_HOLE))
break;
JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, length, delta);
if (result != JSObject::ED_OK) {
if (result == JSObject::ED_FAILED)
return false;
JS_ASSERT(result == JSObject::ED_SPARSE);
break;
}
Value *arraybeg = obj->getDenseArrayElements();
Value *srcbeg = arraybeg + last - 1;
Value *srcend = arraybeg + end - 1;
@ -2336,7 +2389,10 @@ array_splice(JSContext *cx, uintN argc, Value *vp)
*dst = *src;
obj->setArrayLength(obj->getArrayLength() + delta);
} else {
optimized = true;
} while (false);
if (!optimized) {
/* (uint) end could be 0, so we can't use a vanilla >= test. */
while (last-- > end) {
if (!JS_CHECK_OPERATION_LIMIT(cx) ||
@ -2977,7 +3033,9 @@ js_NewPreallocatedArray(JSContext* cx, JSObject* proto, int32 len)
JSObject *obj = js_NewEmptyArray(cx, proto, len);
if (!obj)
return NULL;
if (!obj->ensureDenseArrayElements(cx, len))
/* Avoid ensureDenseArrayElements to skip sparse array checks there. */
if (!obj->ensureSlots(cx, len))
return NULL;
return obj;
}

View File

@ -46,6 +46,46 @@
#include "jspubtd.h"
#include "jsobj.h"
/* Small arrays are dense, no matter what. */
const uintN MIN_SPARSE_INDEX = 256;
inline JSObject::EnsureDenseResult
JSObject::ensureDenseArrayElements(JSContext *cx, uintN index, uintN extra)
{
JS_ASSERT(isDenseArray());
uintN currentCapacity = numSlots();
uintN requiredCapacity;
if (extra == 1) {
/* Optimize for the common case. */
if (index < currentCapacity)
return ED_OK;
requiredCapacity = index + 1;
if (requiredCapacity == 0) {
/* Overflow. */
return ED_SPARSE;
}
} else {
requiredCapacity = index + extra;
if (requiredCapacity < index) {
/* Overflow. */
return ED_SPARSE;
}
if (requiredCapacity <= currentCapacity)
return ED_OK;
}
/*
 * We also use the extra argument as a hint about the number of non-hole
 * elements to be inserted.
 */
if (requiredCapacity > MIN_SPARSE_INDEX &&
willBeSparseDenseArray(requiredCapacity, extra)) {
return ED_SPARSE;
}
return growSlots(cx, requiredCapacity) ? ED_OK : ED_FAILED;
}
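Callers in jsarray.cpp all follow the same three-way protocol for the result: propagate ED_FAILED as an error, fall back to makeDenseArraySlow on ED_SPARSE, and take the dense fast path on ED_OK. A compilable sketch of that calling pattern with stand-in types (the sparseness cutoff and the slow-path printf are illustrative only):

#include <cstdio>

enum EnsureDenseResult { ED_OK, ED_FAILED, ED_SPARSE };

// Stand-in for JSObject::ensureDenseArrayElements with a fixed cutoff.
static EnsureDenseResult ensureDense(unsigned index, unsigned extra)
{
    unsigned required = index + extra;
    if (required < index)               // overflow is always treated as sparse
        return ED_SPARSE;
    if (required > (1u << 20))          // illustrative sparseness cutoff
        return ED_SPARSE;
    return ED_OK;
}

static bool setElement(unsigned index)
{
    do {
        EnsureDenseResult result = ensureDense(index, 1);
        if (result != ED_OK) {
            if (result == ED_FAILED)
                return false;           // e.g. OOM: report the failure
            break;                      // ED_SPARSE: leave the fast path
        }
        std::printf("dense store at %u\n", index);
        return true;
    } while (false);
    std::printf("slow store at %u\n", index);   // stands in for makeDenseArraySlow
    return true;
}

int main() {
    return setElement(10) && setElement(1u << 30) ? 0 : 1;
}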
extern JSBool
js_StringIsIndex(JSString *str, jsuint *indexp);
@ -144,9 +184,6 @@ js_NewArrayObject(JSContext *cx, jsuint length, const js::Value *vector);
extern JSObject *
js_NewSlowArrayObject(JSContext *cx);
/* Minimum size at which a dense array can be made sparse. */
const uint32 MIN_SPARSE_INDEX = 256;
extern JSBool
js_GetLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp);

View File

@ -79,6 +79,9 @@ enum StructuredDataType {
SCTAG_ARRAY_OBJECT,
SCTAG_OBJECT_OBJECT,
SCTAG_ARRAY_BUFFER_OBJECT,
SCTAG_BOOLEAN_OBJECT,
SCTAG_STRING_OBJECT,
SCTAG_NUMBER_OBJECT,
SCTAG_TYPED_ARRAY_MIN = 0xFFFF0100,
SCTAG_TYPED_ARRAY_MAX = SCTAG_TYPED_ARRAY_MIN + TypedArray::TYPE_MAX - 1,
SCTAG_END_OF_BUILTIN_TYPES
@ -345,12 +348,12 @@ SCOutput::extractBuffer(uint64_t **datap, size_t *sizep)
JS_STATIC_ASSERT(JSString::MAX_LENGTH < UINT32_MAX);
bool
JSStructuredCloneWriter::writeString(JSString *str)
JSStructuredCloneWriter::writeString(uint32_t tag, JSString *str)
{
const jschar *chars;
size_t length;
str->getCharsAndLength(chars, length);
return out.writePair(SCTAG_STRING, uint32_t(length)) && out.writeChars(chars, length);
return out.writePair(tag, uint32_t(length)) && out.writeChars(chars, length);
}
bool
@ -359,7 +362,7 @@ JSStructuredCloneWriter::writeId(jsid id)
if (JSID_IS_INT(id))
return out.writePair(SCTAG_INDEX, uint32_t(JSID_TO_INT(id)));
JS_ASSERT(JSID_IS_STRING(id));
return writeString(JSID_TO_STRING(id));
return writeString(SCTAG_STRING, JSID_TO_STRING(id));
}
inline void
@ -496,7 +499,7 @@ bool
JSStructuredCloneWriter::startWrite(const js::Value &v)
{
if (v.isString()) {
return writeString(v.toString());
return writeString(SCTAG_STRING, v.toString());
} else if (v.isNumber()) {
return out.writeDouble(v.toNumber());
} else if (v.isBoolean()) {
@ -510,7 +513,7 @@ JSStructuredCloneWriter::startWrite(const js::Value &v)
if (obj->isRegExp()) {
RegExp *re = RegExp::extractFrom(obj);
return out.writePair(SCTAG_REGEXP_OBJECT, re->getFlags()) &&
writeString(re->getSource());
writeString(SCTAG_STRING, re->getSource());
} else if (obj->isDate()) {
jsdouble d = js_DateGetMsecSinceEpoch(context(), obj);
return out.writePair(SCTAG_DATE_OBJECT, 0) && out.writeDouble(d);
@ -520,6 +523,13 @@ JSStructuredCloneWriter::startWrite(const js::Value &v)
return writeTypedArray(obj);
} else if (js_IsArrayBuffer(obj) && ArrayBuffer::fromJSObject(obj)) {
return writeArrayBuffer(obj);
} else if (obj->isBoolean()) {
return out.writePair(SCTAG_BOOLEAN_OBJECT, obj->getPrimitiveThis().toBoolean());
} else if (obj->isNumber()) {
return out.writePair(SCTAG_NUMBER_OBJECT, 0) &&
out.writeDouble(obj->getPrimitiveThis().toNumber());
} else if (obj->isString()) {
return writeString(SCTAG_STRING_OBJECT, obj->getPrimitiveThis().toString());
}
const JSStructuredCloneCallbacks *cb = context()->runtime->structuredCloneCallbacks;
@ -574,6 +584,17 @@ JSStructuredCloneWriter::write(const Value &v)
return true;
}
bool
JSStructuredCloneReader::checkDouble(jsdouble d)
{
if (IsNonCanonicalizedNaN(d)) {
JS_ReportErrorNumber(context(), js_GetErrorMessage, NULL,
JSMSG_SC_BAD_SERIALIZED_DATA, "unrecognized NaN");
return false;
}
return true;
}
class Chars {
JSContext *cx;
jschar *p;
@ -669,21 +690,42 @@ JSStructuredCloneReader::startRead(Value *vp)
break;
case SCTAG_BOOLEAN:
case SCTAG_BOOLEAN_OBJECT:
vp->setBoolean(!!data);
if (tag == SCTAG_BOOLEAN_OBJECT && !js_PrimitiveToObject(context(), vp))
return false;
break;
case SCTAG_STRING: {
case SCTAG_STRING:
case SCTAG_STRING_OBJECT: {
JSString *str = readString(data);
if (!str)
return false;
vp->setString(str);
if (tag == SCTAG_STRING_OBJECT && !js_PrimitiveToObject(context(), vp))
return false;
break;
}
case SCTAG_NUMBER_OBJECT: {
jsdouble d;
if (!in.readDouble(&d) || !checkDouble(d))
return false;
vp->setDouble(d);
if (!js_PrimitiveToObject(context(), vp))
return false;
break;
}
case SCTAG_DATE_OBJECT: {
jsdouble d;
if (!in.readDouble(&d))
if (!in.readDouble(&d) || !checkDouble(d))
return false;
if (d == d && d != TIMECLIP(d)) {
JS_ReportErrorNumber(context(), js_GetErrorMessage, NULL, JSMSG_SC_BAD_SERIALIZED_DATA,
"date");
return false;
}
JSObject *obj = js_NewDateObjectMsec(context(), d);
if (!obj)
return false;
@ -730,11 +772,8 @@ JSStructuredCloneReader::startRead(Value *vp)
default: {
if (tag <= SCTAG_FLOAT_MAX) {
jsdouble d = ReinterpretPairAsDouble(tag, data);
if (IsNonCanonicalizedNaN(d)) {
JS_ReportErrorNumber(context(), js_GetErrorMessage, NULL,
JSMSG_SC_BAD_SERIALIZED_DATA, "unrecognized NaN");
if (!checkDouble(d))
return false;
}
vp->setNumber(d);
break;
}
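checkDouble exists because the reader must not accept arbitrary NaN bit patterns from serialized data: with the engine's NaN-boxed value representation, a non-canonical NaN payload could otherwise be mistaken for a tagged value. A standalone sketch of such a check; the canonical bit pattern used here is illustrative, the real test lives behind IsNonCanonicalizedNaN:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reject NaNs whose bit pattern is not the single canonical quiet NaN.
static bool isNonCanonicalNaN(double d)
{
    if (d == d)                         // not a NaN at all
        return false;
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return bits != 0x7FF8000000000000ULL;   // illustrative canonical pattern
}

int main() {
    uint64_t payload = 0x7FF8000000000001ULL;   // NaN with extra payload bits
    double weird;
    std::memcpy(&weird, &payload, sizeof weird);
    std::printf("%d %d\n", isNonCanonicalNaN(0.5), isNonCanonicalNaN(weird)); // 0 1
    return 0;
}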

View File

@ -118,6 +118,7 @@ struct JSStructuredCloneReader {
private:
JSContext *context() { return in.context(); }
bool checkDouble(jsdouble d);
JSString *readString(uint32_t nchars);
bool readTypedArray(uint32_t tag, uint32_t nelems, js::Value *vp);
bool readArrayBuffer(uint32_t nbytes, js::Value *vp);
@ -145,7 +146,7 @@ struct JSStructuredCloneWriter {
private:
JSContext *context() { return out.context(); }
bool writeString(JSString *str);
bool writeString(uint32_t tag, JSString *str);
bool writeId(jsid id);
bool writeArrayBuffer(JSObject *obj);
bool writeTypedArray(JSObject *obj);

View File

@ -1845,9 +1845,9 @@ js_InvokeOperationCallback(JSContext *cx)
JS_ASSERT(td->interruptFlags != 0);
/*
* Reset the callback counter first, then yield. If another thread is racing
* us here we will accumulate another callback request which will be
* serviced at the next opportunity.
* Reset the callback counter first, then run GC and yield. If another
* thread is racing us here we will accumulate another callback request
* which will be serviced at the next opportunity.
*/
JS_LOCK_GC(rt);
td->interruptFlags = 0;
@ -1856,13 +1856,6 @@ js_InvokeOperationCallback(JSContext *cx)
#endif
JS_UNLOCK_GC(rt);
/*
* Unless we are going to run the GC, we automatically yield the current
* context every time the operation callback is hit since we might be
* called as a result of an impending GC, which would deadlock if we do
* not yield. Operation callbacks are supposed to happen rarely (seconds,
* not milliseconds) so it is acceptable to yield at every callback.
*/
if (rt->gcIsNeeded) {
js_GC(cx, GC_NORMAL);
@ -1879,10 +1872,19 @@ js_InvokeOperationCallback(JSContext *cx)
return false;
}
}
#ifdef JS_THREADSAFE
else {
JS_YieldRequest(cx);
}
/*
* We automatically yield the current context every time the operation
* callback is hit since we might be called as a result of an impending
* GC on another thread, which would deadlock if we do not yield.
* Operation callbacks are supposed to happen rarely (seconds, not
* milliseconds) so it is acceptable to yield at every callback.
*
* As the GC can be canceled before it does any request checks we yield
* even if rt->gcIsNeeded was true above. See bug 590533.
*/
JS_YieldRequest(cx);
#endif
JSOperationCallback cb = cx->operationCallback;

View File

@ -928,7 +928,7 @@ struct TraceMonitor {
TraceNativeStorage *storage;
/*
* There are 5 allocators here. This might seem like overkill, but they
* There are 4 allocators here. This might seem like overkill, but they
* have different lifecycles, and by keeping them separate we keep the
* amount of retained memory down significantly. They are flushed (ie.
* all the allocated memory is freed) periodically.
@ -946,10 +946,6 @@ struct TraceMonitor {
* used to store LIR code and for all other elements in the LIR
* pipeline.
*
* - reTempAlloc is just like tempAlloc, but is used for regexp
* compilation in RegExpNativeCompiler rather than normal compilation in
* TraceRecorder.
*
* - codeAlloc has the same lifetime as dataAlloc, but its API is
* different (CodeAlloc vs. VMAllocator). It's used for native code.
* It's also a good idea to keep code and data separate to avoid I-cache
@ -958,7 +954,6 @@ struct TraceMonitor {
VMAllocator* dataAlloc;
VMAllocator* traceAlloc;
VMAllocator* tempAlloc;
VMAllocator* reTempAlloc;
nanojit::CodeAlloc* codeAlloc;
nanojit::Assembler* assembler;
FrameInfoCache* frameCache;

View File

@ -152,7 +152,6 @@ using namespace js;
* Supporting functions - ECMA 15.9.1.*
*/
#define HalfTimeDomain 8.64e15
#define HoursPerDay 24.0
#define MinutesPerDay (HoursPerDay * MinutesPerHour)
#define MinutesPerHour 60.0
@ -482,10 +481,6 @@ msFromTime(jsdouble t)
return result;
}
#define TIMECLIP(d) ((JSDOUBLE_IS_FINITE(d) \
&& !((d < 0 ? -d : d) > HalfTimeDomain)) \
? js_DoubleToInteger(d + (+0.)) : js_NaN)
/**
* end of ECMA 'support' functions
*/

View File

@ -54,6 +54,12 @@ JSObject::isDate() const
return getClass() == &js_DateClass;
}
#define HalfTimeDomain 8.64e15
#define TIMECLIP(d) ((JSDOUBLE_IS_FINITE(d) \
&& !((d < 0 ? -d : d) > HalfTimeDomain)) \
? js_DoubleToInteger(d + (+0.)) : js_NaN)
extern JSObject *
js_InitDateClass(JSContext *cx, JSObject *obj);
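Moving TIMECLIP into this header lets the structured-clone reader reuse the same clipping the Date code applies: finite values inside ECMA's +/-8.64e15 millisecond domain are truncated to an integer, everything else becomes NaN. A rough standalone equivalent, assuming js_DoubleToInteger truncates toward zero (the + (+0.) in the macro folds -0 into +0):

#include <cmath>
#include <cstdio>

static double timeClip(double d)
{
    const double HalfTimeDomain = 8.64e15;
    if (!std::isfinite(d) || std::fabs(d) > HalfTimeDomain)
        return std::nan("");            // out of the representable time range
    return std::trunc(d + 0.0);         // truncate toward zero, normalize -0
}

int main() {
    std::printf("%.0f\n", timeClip(1291670000000.7));  // in range: 1291670000000
    std::printf("%f\n", timeClip(9e15));               // beyond the domain: nan
    return 0;
}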

View File

@ -115,6 +115,28 @@ JS_SetRuntimeDebugMode(JSRuntime *rt, JSBool debug)
rt->debugMode = debug;
}
static void
PurgeCallICs(JSContext *cx, JSScript *start)
{
#ifdef JS_METHODJIT
for (JSScript *script = start;
&script->links != &cx->compartment->scripts;
script = (JSScript *)script->links.next)
{
// Debug mode does not use call ICs.
if (script->debugMode)
continue;
JS_ASSERT(!IsScriptLive(cx, script));
if (script->jitNormal)
script->jitNormal->nukeScriptDependentICs();
if (script->jitCtor)
script->jitCtor->nukeScriptDependentICs();
}
#endif
}
JS_FRIEND_API(JSBool)
js_SetDebugMode(JSContext *cx, JSBool debug)
{
@ -134,6 +156,12 @@ js_SetDebugMode(JSContext *cx, JSBool debug)
*/
js::mjit::Recompiler recompiler(cx, script);
if (!recompiler.recompile()) {
/*
* If recompilation failed, we could be in a state where
* remaining compiled scripts hold call IC references that
* have been destroyed by recompilation. Clear those ICs now.
*/
PurgeCallICs(cx, script);
cx->compartment->debugMode = JS_FALSE;
return JS_FALSE;
}

View File

@ -6809,12 +6809,8 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
}
#endif /* JS_HAS_GENERATORS */
/*
* Use the slower NEWINIT for arrays in scripts containing sharps, and when
* the array length exceeds MIN_SPARSE_INDEX and can be slowified during GC.
* :FIXME: bug 607825 handle slowify case.
*/
if (cg->hasSharps() || pn->pn_count >= MIN_SPARSE_INDEX) {
/* Use the slower NEWINIT for arrays in scripts containing sharps. */
if (cg->hasSharps()) {
if (!EmitNewInit(cx, cg, JSProto_Array, pn, sharpnum))
return JS_FALSE;
} else {

View File

@ -1374,16 +1374,6 @@ GCMarker::markDelayedChildren()
JS_ASSERT(!unmarkedArenaStackTop);
}
void
GCMarker::slowifyArrays()
{
while (!arraysToSlowify.empty()) {
JSObject *obj = arraysToSlowify.back();
arraysToSlowify.popBack();
if (obj->isMarked())
obj->makeDenseArraySlow(context);
}
}
} /* namespace js */
static void
@ -2243,9 +2233,6 @@ MarkAndSweep(JSContext *cx, JSGCInvocationKind gckind GCTIMER_PARAM)
*/
js_SweepScriptFilenames(rt);
/* Slowify arrays we have accumulated. */
gcmarker.slowifyArrays();
/*
* Destroy arenas after we finished the sweeping so finalizers can safely
* use js_IsAboutToBeFinalized().

View File

@ -983,8 +983,6 @@ struct GCMarker : public JSTracer {
void dumpConservativeRoots();
#endif
js::Vector<JSObject *, 0, js::SystemAllocPolicy> arraysToSlowify;
public:
explicit GCMarker(JSContext *cx);
~GCMarker();
@ -1005,8 +1003,6 @@ struct GCMarker : public JSTracer {
void delayMarkingChildren(void *thing);
JS_FRIEND_API(void) markDelayedChildren();
void slowifyArrays();
};
void

View File

@ -5911,7 +5911,8 @@ BEGIN_CASE(JSOP_NEWARRAY)
unsigned count = GET_UINT24(regs.pc);
JSObject *obj = js_NewArrayObject(cx, count, NULL);
if (!obj || !obj->ensureDenseArrayElements(cx, count))
/* Avoid ensureDenseArrayElements to skip sparse array checks there. */
if (!obj || !obj->ensureSlots(cx, count))
goto error;
PUSH_OBJECT(*obj);

View File

@ -752,9 +752,24 @@ struct JSObject : js::gc::Cell {
inline const js::Value &getDenseArrayElement(uintN idx);
inline js::Value* addressOfDenseArrayElement(uintN idx);
inline void setDenseArrayElement(uintN idx, const js::Value &val);
inline bool ensureDenseArrayElements(JSContext *cx, uintN cap);
inline void shrinkDenseArrayElements(JSContext *cx, uintN cap);
/*
* ensureDenseArrayElements ensures that the dense array can hold at least
* index + extra elements. It returns ED_OK on success, ED_FAILED on
* failure to grow the array, ED_SPARSE when the array is too sparse to
* grow (this includes the case of index + extra overflow). In the last
* two cases the array is kept intact.
*/
enum EnsureDenseResult { ED_OK, ED_FAILED, ED_SPARSE };
inline EnsureDenseResult ensureDenseArrayElements(JSContext *cx, uintN index, uintN extra);
/*
 * Check whether the dense array would become too sparse after growing it.
 * newElementsHint is an estimated number of elements to be added.
 */
bool willBeSparseDenseArray(uintN requiredCapacity, uintN newElementsHint);
JSBool makeDenseArraySlow(JSContext *cx);
/*

View File

@ -190,11 +190,13 @@ inline bool
JSObject::methodWriteBarrier(JSContext *cx, const js::Shape &shape, const js::Value &v)
{
if (flags & (BRANDED | METHOD_BARRIER)) {
const js::Value &prev = nativeGetSlot(shape.slot);
if (shape.slot != SHAPE_INVALID_SLOT) {
const js::Value &prev = nativeGetSlot(shape.slot);
if (ChangesMethodValue(prev, v)) {
JS_FUNCTION_METER(cx, mwritebarrier);
return methodShapeChange(cx, shape);
if (ChangesMethodValue(prev, v)) {
JS_FUNCTION_METER(cx, mwritebarrier);
return methodShapeChange(cx, shape);
}
}
}
return true;
@ -329,13 +331,6 @@ JSObject::setDenseArrayElement(uintN idx, const js::Value &val)
setSlot(idx, val);
}
inline bool
JSObject::ensureDenseArrayElements(JSContext *cx, uintN cap)
{
JS_ASSERT(isDenseArray());
return ensureSlots(cx, cap);
}
inline void
JSObject::shrinkDenseArrayElements(JSContext *cx, uintN cap)
{

View File

@ -91,6 +91,14 @@ typedef JSIntn intn;
*/
#if defined(AIX) && defined(HAVE_SYS_INTTYPES_H)
#include <sys/inttypes.h>
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
typedef JSInt64 int64;
/* Explicit signed keyword for bitfield types is required. */
/* Some compilers may treat them as unsigned without it. */
typedef signed int int32;
typedef signed short int16;
typedef signed char int8;
#else
typedef JSInt64 int64;
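The explicit signed keyword matters because the signedness of a bit-field declared with a plain integer type is implementation-defined, and Sun Studio picks unsigned; a small illustration with a hypothetical struct (not from the tree):

#include <cstdio>

struct Flags {
    signed char definitelySigned : 3;   // always holds -4..3
    char        maybeUnsigned    : 3;   // signedness is implementation-defined
};

int main() {
    Flags f;
    f.definitelySigned = -1;
    f.maybeUnsigned = -1;
    // Prints "-1 -1" on most compilers but "-1 7" where plain bit-fields are
    // unsigned -- exactly the case the explicit typedefs above guard against.
    std::printf("%d %d\n", int(f.definitelySigned), int(f.maybeUnsigned));
    return 0;
}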

View File

@ -140,7 +140,7 @@ KidsChunk::destroy(JSContext *cx, KidsChunk *chunk)
/*
* NB: Called with cx->runtime->gcLock held, always.
* On failure, return null after unlocking the GC and reporting out of memory.
* On failure, return false after unlocking the GC and reporting out of memory.
*/
bool
PropertyTree::insertChild(JSContext *cx, Shape *parent, Shape *child)
@ -219,8 +219,11 @@ PropertyTree::insertChild(JSContext *cx, Shape *parent, Shape *child)
KidsHash *hash = kidp->toHash();
KidsHash::AddPtr addPtr = hash->lookupForAdd(child);
if (!addPtr) {
if (!hash->add(addPtr, child))
if (!hash->add(addPtr, child)) {
JS_UNLOCK_GC(cx->runtime);
JS_ReportOutOfMemory(cx);
return false;
}
} else {
// FIXME ignore duplicate child case here, going thread-local soon!
}

View File

@ -222,7 +222,6 @@ RegExp::checkMatchPairs(JSString *input, int *buf, size_t matchItemCount)
{
#if DEBUG
size_t inputLength = input->length();
int largestStartSeen = 0;
for (size_t i = 0; i < matchItemCount; i += 2) {
int start = buf[i];
int limit = buf[i + 1];
@ -231,9 +230,6 @@ RegExp::checkMatchPairs(JSString *input, int *buf, size_t matchItemCount)
continue;
JS_ASSERT(start >= 0);
JS_ASSERT(size_t(limit) <= inputLength);
/* Test the monotonically increasing nature of left parens. */
JS_ASSERT(start >= largestStartSeen);
largestStartSeen = start;
}
#endif
}

View File

@ -1701,12 +1701,6 @@ TokenStream::getTokenInternal()
tokenbuf.clear();
for (;;) {
c = getChar();
if (c == '\n' || c == EOF) {
ungetChar(c);
ReportCompileErrorNumber(cx, this, NULL, JSREPORT_ERROR,
JSMSG_UNTERMINATED_REGEXP);
goto error;
}
if (c == '\\') {
if (!tokenbuf.append(c))
goto error;
@ -1719,6 +1713,12 @@ TokenStream::getTokenInternal()
/* For compat with IE, allow unescaped / in char classes. */
break;
}
if (c == '\n' || c == EOF) {
ungetChar(c);
ReportCompileErrorNumber(cx, this, NULL, JSREPORT_ERROR,
JSMSG_UNTERMINATED_REGEXP);
goto error;
}
if (!tokenbuf.append(c))
goto error;
}

View File

@ -910,7 +910,7 @@ JSScript *
JSScript::NewScript(JSContext *cx, uint32 length, uint32 nsrcnotes, uint32 natoms,
uint32 nobjects, uint32 nupvars, uint32 nregexps,
uint32 ntrynotes, uint32 nconsts, uint32 nglobals,
uint32 nClosedArgs, uint32 nClosedVars)
uint16 nClosedArgs, uint16 nClosedVars)
{
size_t size, vectorSize;
JSScript *script;
@ -1160,12 +1160,15 @@ JSScript::NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg)
skip_empty:
CG_COUNT_FINAL_SRCNOTES(cg, nsrcnotes);
uint16 nClosedArgs = uint16(cg->closedArgs.length());
JS_ASSERT(nClosedArgs == cg->closedArgs.length());
uint16 nClosedVars = uint16(cg->closedVars.length());
JS_ASSERT(nClosedVars == cg->closedVars.length());
script = NewScript(cx, prologLength + mainLength, nsrcnotes,
cg->atomList.count, cg->objectList.length,
cg->upvarList.count, cg->regexpList.length,
cg->ntrynotes, cg->constList.length(),
cg->globalUses.length(), cg->closedArgs.length(),
cg->closedVars.length());
cg->globalUses.length(), nClosedArgs, nClosedVars);
if (!script)
return NULL;

View File

@ -208,7 +208,7 @@ struct JSScript {
static JSScript *NewScript(JSContext *cx, uint32 length, uint32 nsrcnotes, uint32 natoms,
uint32 nobjects, uint32 nupvars, uint32 nregexps,
uint32 ntrynotes, uint32 nconsts, uint32 nglobals,
uint32 nClosedArgs, uint32 nClosedVars);
uint16 nClosedArgs, uint16 nClosedVars);
static JSScript *NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg);

View File

@ -2736,7 +2736,6 @@ TraceMonitor::flush()
traceAlloc->reset();
codeAlloc->reset();
tempAlloc->reset();
reTempAlloc->reset();
oracle->clear();
loopProfiles->clear();
@ -7590,7 +7589,6 @@ InitJIT(TraceMonitor *tm)
tm->dataAlloc = new VMAllocator();
tm->traceAlloc = new VMAllocator();
tm->tempAlloc = new VMAllocator();
tm->reTempAlloc = new VMAllocator();
tm->codeAlloc = new CodeAlloc();
tm->frameCache = new FrameInfoCache(tm->dataAlloc);
tm->storage = new TraceNativeStorage();
@ -7727,11 +7725,6 @@ FinishJIT(TraceMonitor *tm)
tm->tempAlloc = NULL;
}
if (tm->reTempAlloc) {
delete tm->reTempAlloc;
tm->reTempAlloc = NULL;
}
if (tm->storage) {
delete tm->storage;
tm->storage = NULL;

View File

@ -207,7 +207,7 @@ class Repatcher : public JSC::RepatchBuffer
#ifdef JS_CPU_ARM
class AutoReserveICSpace {
typedef Assembler::Label Label;
static const size_t reservedSpace = 64;
static const size_t reservedSpace = 68;
Assembler &masm;
#ifdef DEBUG
@ -219,6 +219,11 @@ class AutoReserveICSpace {
masm.ensureSpace(reservedSpace);
#ifdef DEBUG
startLabel = masm.label();
/* Assert that the constant pool is not flushed until we reach a safe point. */
masm.allowPoolFlush(false);
JaegerSpew(JSpew_Insns, " -- BEGIN CONSTANT-POOL-FREE REGION -- \n");
#endif
}
@ -226,8 +231,18 @@ class AutoReserveICSpace {
#ifdef DEBUG
Label endLabel = masm.label();
int spaceUsed = masm.differenceBetween(startLabel, endLabel);
/* Spew the space used, to help tuning of reservedSpace. */
JaegerSpew(JSpew_Insns,
" -- END CONSTANT-POOL-FREE REGION: %u bytes used of %u reserved. -- \n",
spaceUsed, reservedSpace);
/* Assert that we didn't emit more code than we protected. */
JS_ASSERT(spaceUsed >= 0);
JS_ASSERT(size_t(spaceUsed) <= reservedSpace);
/* Allow the pool to be flushed. */
masm.allowPoolFlush(true);
#endif
}
};

View File

@ -2177,8 +2177,12 @@ mjit::Compiler::fixPrimitiveReturn(Assembler *masm, FrameEntry *fe)
bool ool = (masm != &this->masm);
Address thisv(JSFrameReg, JSStackFrame::offsetOfThis(fun));
// Easy cases - no return value, or known primitive, so just return thisv.
if (!fe || (fe->isTypeKnown() && fe->getKnownType() != JSVAL_TYPE_OBJECT)) {
// We can just load |thisv| if either of the following is true:
// (1) There is no explicit return value, AND fp->rval is not used.
// (2) There is an explicit return value, and it's known to be primitive.
if ((!fe && !analysis->usesReturnValue()) ||
(fe && fe->isTypeKnown() && fe->getKnownType() != JSVAL_TYPE_OBJECT))
{
if (ool)
masm->loadValueAsComponents(thisv, JSReturnReg_Type, JSReturnReg_Data);
else
@ -2187,7 +2191,7 @@ mjit::Compiler::fixPrimitiveReturn(Assembler *masm, FrameEntry *fe)
}
// If the type is known to be an object, just load the return value as normal.
if (fe->isTypeKnown() && fe->getKnownType() == JSVAL_TYPE_OBJECT) {
if (fe && fe->isTypeKnown() && fe->getKnownType() == JSVAL_TYPE_OBJECT) {
loadReturnValue(masm, fe);
return;
}

View File

@ -914,6 +914,53 @@ mjit::Compiler::jsop_typeof()
}
}
JSOp fused = JSOp(PC[JSOP_TYPEOF_LENGTH]);
if (fused == JSOP_STRING && !fe->isTypeKnown()) {
JSOp op = JSOp(PC[JSOP_TYPEOF_LENGTH + JSOP_STRING_LENGTH]);
if (op == JSOP_STRICTEQ || op == JSOP_EQ || op == JSOP_STRICTNE || op == JSOP_NE) {
JSAtom *atom = script->getAtom(fullAtomIndex(PC + JSOP_TYPEOF_LENGTH));
JSRuntime *rt = cx->runtime;
JSValueType type = JSVAL_TYPE_UNINITIALIZED;
Assembler::Condition cond = (op == JSOP_STRICTEQ || op == JSOP_EQ)
? Assembler::Equal
: Assembler::NotEqual;
if (atom == rt->atomState.typeAtoms[JSTYPE_VOID]) {
type = JSVAL_TYPE_UNDEFINED;
} else if (atom == rt->atomState.typeAtoms[JSTYPE_STRING]) {
type = JSVAL_TYPE_STRING;
} else if (atom == rt->atomState.typeAtoms[JSTYPE_BOOLEAN]) {
type = JSVAL_TYPE_BOOLEAN;
} else if (atom == rt->atomState.typeAtoms[JSTYPE_NUMBER]) {
type = JSVAL_TYPE_INT32;
/* JSVAL_TYPE_DOUBLE is 0x0 and JSVAL_TYPE_INT32 is 0x1, use <= or > to match both */
cond = (cond == Assembler::Equal) ? Assembler::BelowOrEqual : Assembler::Above;
}
if (type != JSVAL_TYPE_UNINITIALIZED) {
PC += JSOP_STRING_LENGTH;
PC += JSOP_EQ_LENGTH;
RegisterID result = frame.allocReg(Registers::SingleByteRegs);
#if defined JS_NUNBOX32
if (frame.shouldAvoidTypeRemat(fe))
masm.set32(cond, masm.tagOf(frame.addressOf(fe)), ImmType(type), result);
else
masm.set32(cond, frame.tempRegForType(fe), ImmType(type), result);
#elif defined JS_PUNBOX64
masm.setPtr(cond, frame.tempRegForType(fe), ImmType(type), result);
#endif
frame.pop();
frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, result);
return;
}
}
}
prepareStubCall(Uses(1));
INLINE_STUBCALL(stubs::TypeOf);
frame.pop();
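The fused typeof/compare path leans on the tag numbering called out in the comment above: JSVAL_TYPE_DOUBLE is 0x0 and JSVAL_TYPE_INT32 is 0x1, so a single unsigned below-or-equal test against the INT32 tag answers typeof x == "number" for both numeric representations. A minimal model of that comparison; tag values other than 0x0 and 0x1 are illustrative:

#include <cstdio>

enum Tag { TAG_DOUBLE = 0x0, TAG_INT32 = 0x1, TAG_STRING = 0x5 /* illustrative */ };

// One unsigned comparison covers both numeric tags, just as the emitted
// set32/setPtr with Assembler::BelowOrEqual does.
static bool typeofIsNumber(unsigned tag) { return tag <= TAG_INT32; }

int main() {
    std::printf("%d %d %d\n",
                typeofIsNumber(TAG_DOUBLE),   // 1
                typeofIsNumber(TAG_INT32),    // 1
                typeofIsNumber(TAG_STRING));  // 0
    return 0;
}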
@ -1049,98 +1096,59 @@ mjit::Compiler::jsop_andor(JSOp op, jsbytecode *target)
void
mjit::Compiler::jsop_localinc(JSOp op, uint32 slot, bool popped)
{
bool post = (op == JSOP_LOCALINC || op == JSOP_LOCALDEC);
int32 amt = (op == JSOP_INCLOCAL || op == JSOP_LOCALINC) ? 1 : -1;
if (popped || (op == JSOP_INCLOCAL || op == JSOP_DECLOCAL)) {
int amt = (op == JSOP_LOCALINC || op == JSOP_INCLOCAL) ? -1 : 1;
frame.pushLocal(slot);
// Before:
// After: V
frame.pushLocal(slot);
FrameEntry *fe = frame.peek(-1);
// Before: V
// After: V 1
frame.push(Int32Value(amt));
// Note, SUB will perform integer conversion for us.
// Before: V 1
// After: N+1
jsop_binary(JSOP_SUB, stubs::Sub);
// Before: N+1
// After: N+1
frame.storeLocal(slot, popped);
if (fe->isConstant() && fe->getValue().isPrimitive()) {
Value v = fe->getValue();
double d;
ValueToNumber(cx, v, &d);
if (post) {
frame.push(NumberValue(d + amt));
frame.storeLocal(slot);
frame.pop();
} else {
frame.pop();
frame.push(NumberValue(d + amt));
frame.storeLocal(slot);
}
if (popped)
frame.pop();
return;
}
} else {
int amt = (op == JSOP_LOCALINC || op == JSOP_INCLOCAL) ? 1 : -1;
/*
* If the local variable is not known to be an int32, or the pre-value
* is observed, then do the simple thing and decompose x++ into simpler
* opcodes.
*/
if (fe->isNotType(JSVAL_TYPE_INT32) || (post && !popped)) {
/* V */
// Before:
// After: V
frame.pushLocal(slot);
// Before: V
// After: N
jsop_pos();
/* N */
if (post && !popped) {
frame.dup();
/* N N */
}
// Before: N
// After: N N
frame.dup();
frame.push(Int32Value(1));
/* N? N 1 */
// Before: N N
// After: N N 1
frame.push(Int32Value(amt));
if (amt == 1)
jsop_binary(JSOP_ADD, stubs::Add);
else
jsop_binary(JSOP_SUB, stubs::Sub);
/* N? N+1 */
// Before: N N 1
// After: N N+1
jsop_binary(JSOP_ADD, stubs::Add);
frame.storeLocal(slot, post || popped);
/* N? N+1 */
// Before: N N+1
// After: N N+1
frame.storeLocal(slot, true);
if (post || popped)
frame.pop();
return;
}
/* If the pre value is not observed, we can emit better code. */
if (!fe->isTypeKnown()) {
Jump intFail = frame.testInt32(Assembler::NotEqual, fe);
stubcc.linkExit(intFail, Uses(1));
}
RegisterID reg = frame.copyDataIntoReg(fe);
Jump ovf;
if (amt > 0)
ovf = masm.branchAdd32(Assembler::Overflow, Imm32(1), reg);
else
ovf = masm.branchSub32(Assembler::Overflow, Imm32(1), reg);
stubcc.linkExit(ovf, Uses(1));
/* Note, stub call will push the original value again no matter what. */
stubcc.leave();
stubcc.masm.move(Imm32(slot), Registers::ArgReg1);
if (op == JSOP_LOCALINC || op == JSOP_INCLOCAL)
OOL_STUBCALL(stubs::IncLocal);
else
OOL_STUBCALL(stubs::DecLocal);
frame.pop();
frame.pushTypedPayload(JSVAL_TYPE_INT32, reg);
frame.storeLocal(slot, popped, false);
if (popped)
// Before: N N+1
// After: N
frame.pop();
else
frame.forgetType(frame.peek(-1));
stubcc.rejoin(Changes(0));
}
}
void

View File

@ -87,7 +87,7 @@ struct VMFrame
# ifdef JS_NO_FASTCALL
inline void** returnAddressLocation() {
return reinterpret_cast<void**>(this) - 3;
return reinterpret_cast<void**>(this) - 5;
}
# else
inline void** returnAddressLocation() {
@ -332,6 +332,7 @@ struct JITScript {
return jcheck >= jitcode && jcheck < jitcode + code.m_size;
}
void nukeScriptDependentICs();
void sweepCallICs();
void purgeMICs();
void purgePICs();

View File

@ -1097,6 +1097,26 @@ ic::PurgeMICs(JSContext *cx, JSScript *script)
script->jitCtor->purgeMICs();
}
void
JITScript::nukeScriptDependentICs()
{
if (!nCallICs)
return;
Repatcher repatcher(this);
for (uint32 i = 0; i < nCallICs; i++) {
ic::CallICInfo &ic = callICs[i];
if (!ic.fastGuardedObject)
continue;
repatcher.repatch(ic.funGuard, NULL);
repatcher.relink(ic.funJump, ic.slowPathStart);
ic.releasePool(CallICInfo::Pool_ClosureStub);
ic.fastGuardedObject = NULL;
ic.hasJsFunCheck = false;
}
}
void
JITScript::sweepCallICs()
{

View File

@ -1477,17 +1477,16 @@ class ScopeNameCompiler : public PICStubCompiler
return false;
}
if (!obj->isNative() || !holder->isNative()) {
if (!obj->getProperty(cx, ATOM_TO_JSID(atom), vp))
return false;
} else {
const Shape *shape = getprop.shape;
JS_ASSERT(shape);
JSObject *normalized = obj;
if (obj->getClass() == &js_WithClass && !shape->hasDefaultGetter())
normalized = js_UnwrapWithObject(cx, obj);
NATIVE_GET(cx, normalized, holder, shape, JSGET_METHOD_BARRIER, vp, return false);
}
// If the property was found, but we decided not to cache it, then
// take a slow path and do a full property fetch.
if (!getprop.shape)
return obj->getProperty(cx, ATOM_TO_JSID(atom), vp);
const Shape *shape = getprop.shape;
JSObject *normalized = obj;
if (obj->getClass() == &js_WithClass && !shape->hasDefaultGetter())
normalized = js_UnwrapWithObject(cx, obj);
NATIVE_GET(cx, normalized, holder, shape, JSGET_METHOD_BARRIER, vp, return false);
return true;
}
@ -1551,6 +1550,8 @@ class BindNameCompiler : public PICStubCompiler
masm.loadShape(pic.objReg, pic.shapeReg);
Jump shapeTest = masm.branch32(Assembler::NotEqual, pic.shapeReg,
Imm32(tobj->shape()));
if (!fails.append(shapeTest))
return error();
tobj = tobj->getParent();
}
if (tobj != obj)
@ -2373,24 +2374,21 @@ SetElementIC::attachHoleStub(JSContext *cx, JSObject *obj, int32 keyval)
Assembler masm;
// Test for indexed properties in Array.prototype. It is safe to bake in
// this pointer because changing __proto__ will slowify.
JSObject *arrayProto = obj->getProto();
masm.move(ImmPtr(arrayProto), objReg);
Jump extendedArray = masm.branchTest32(Assembler::NonZero,
Address(objReg, offsetof(JSObject, flags)),
Imm32(JSObject::INDEXED));
Vector<Jump, 4> fails(cx);
// Text for indexed properties in Object.prototype. Guard that
// Array.prototype doesn't change, too.
JSObject *objProto = arrayProto->getProto();
Jump sameProto = masm.branchPtr(Assembler::NotEqual,
Address(objReg, offsetof(JSObject, proto)),
ImmPtr(objProto));
masm.move(ImmPtr(objProto), objReg);
Jump extendedObject = masm.branchTest32(Assembler::NonZero,
Address(objReg, offsetof(JSObject, flags)),
Imm32(JSObject::INDEXED));
// Test for indexed properties in Array.prototype. We test each shape
// along the proto chain. This affords us two optimizations:
// 1) Loading the prototype can be avoided because the shape would change;
// instead we can bake in their identities.
// 2) We only have to test the shape, rather than INDEXED.
for (JSObject *pobj = obj->getProto(); pobj; pobj = pobj->getProto()) {
if (!pobj->isNative())
return disable(cx, "non-native array prototype");
masm.move(ImmPtr(pobj), objReg);
Jump j = masm.guardShape(objReg, pobj);
if (!fails.append(j))
return error(cx);
}
// Restore |obj|.
masm.rematPayload(StateRemat::FromInt32(objRemat), objReg);
@ -2438,9 +2436,8 @@ SetElementIC::attachHoleStub(JSContext *cx, JSObject *obj, int32 keyval)
return disable(cx, "code memory is out of range");
// Patch all guards.
buffer.link(extendedArray, slowPathStart);
buffer.link(sameProto, slowPathStart);
buffer.link(extendedObject, slowPathStart);
for (size_t i = 0; i < fails.length(); i++)
buffer.link(fails[i], slowPathStart);
buffer.link(done, fastPathRejoin);
CodeLocationLabel cs = buffer.finalize();

View File

@ -146,7 +146,7 @@ TrampolineCompiler::generateForceReturnFast(Assembler &masm)
#else
// In case of no fast call, when we change the return address,
// we need to make sure add esp by 8.
masm.addPtr(Imm32(8), Registers::StackPointer);
masm.addPtr(Imm32(16), Registers::StackPointer);
#endif
return generateForceReturn(masm);
}

View File

@ -1,4 +1,4 @@
/ -*- Mode: C++/ tab-width: 4/ indent-tabs-mode: nil/ c-basic-offset: 4 -*-
/ -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
/ ***** BEGIN LICENSE BLOCK *****
/ Version: MPL 1.1/GPL 2.0/LGPL 2.1
/
@ -92,8 +92,8 @@ JaegerTrampolineReturn:
.type JaegerThrowpoline, @function
JaegerThrowpoline:
/* For Sun Studio there is no fast call. */
/* We add the stack by 8 before. */
addl $0x8, %esp
/* We add the stack by 16 before. */
addl $0x10, %esp
/* Align the stack to 16 bytes. */
pushl %esp
pushl (%esp)
@ -127,8 +127,8 @@ InjectJaegerReturn:
movl 0x1C(%ebx), %ecx /* fp->rval_ type */
movl 0x14(%ebx), %eax /* fp->ncode_ */
/* For Sun Studio there is no fast call. */
/* We add the stack by 8 before. */
addl $0x8, %esp
/* We add the stack by 16 before. */
addl $0x10, %esp
/* Restore frame regs. */
movl 0x1C(%esp), %ebx /* f.fp */
jmp *%eax

View File

@ -1 +1 @@
1f90e61950c44193ea5a1800c06d7dba8240cfd9
4effe362e918583ec7b98b08da24f02c0833d306

View File

@ -1173,20 +1173,24 @@ namespace nanojit
Register rf = findRegFor(iffalse, allow & ~rmask(rr));
if (ins->isop(LIR_cmovd)) {
// See Nativei386.cpp:asm_cmov() for an explanation of the subtleties here.
NIns* target = _nIns;
asm_nongp_copy(rr, rf);
asm_branch(false, cond, target);
asm_branch_helper(false, cond, target);
// If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
if (rr != rt)
asm_nongp_copy(rr, rt);
freeResourcesOf(ins);
if (!iftrue->isInReg()) {
NanoAssert(rt == rr);
findSpecificRegForUnallocated(iftrue, rr);
}
asm_cmp(cond);
return;
}
@ -1194,8 +1198,8 @@ namespace nanojit
Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
// WARNING: We cannot generate any code that affects the condition
// codes between the MRcc generation here and the asm_cmp() call
// below. See asm_cmp() for more details.
// codes between the MRcc generation here and the asm_cmpi() call
// below. See asm_cmpi() for more details.
LOpcode condop = cond->opcode();
if (ins->isop(LIR_cmovi)) {
switch (condop) {
@ -1234,30 +1238,36 @@ namespace nanojit
findSpecificRegForUnallocated(iftrue, rr);
}
asm_cmp(cond);
asm_cmpi(cond);
}
NIns* Assembler::asm_branch(bool onFalse, LIns *cond, NIns *target) {
NanoAssert(cond->isCmp());
LOpcode condop = cond->opcode();
NIns* Assembler::asm_branch(bool onFalse, LIns* cond, NIns* target) {
NIns* patch = asm_branch_helper(onFalse, cond, target);
asm_cmp(cond);
return patch;
}
NIns* Assembler::asm_branch_helper(bool onFalse, LIns *cond, NIns *target) {
if (target && !isTargetWithinS32(target)) {
// conditional jumps beyond 32bit range, so invert the branch/compare
// and emit an unconditional jump to the target
// A conditional jump beyond 32-bit range, so invert the
// branch/compare and emit an unconditional jump to the target:
// j(inverted) B1
// jmp target
// B1:
NIns* shortTarget = _nIns;
JMP(target);
target = shortTarget;
onFalse = !onFalse;
}
if (isCmpDOpcode(condop))
return asm_branchd(onFalse, cond, target);
return isCmpDOpcode(cond->opcode())
? asm_branchd_helper(onFalse, cond, target)
: asm_branchi_helper(onFalse, cond, target);
}
NIns* Assembler::asm_branchi_helper(bool onFalse, LIns *cond, NIns *target) {
// We must ensure there's room for the instruction before calculating
// the offset. And the offset determines the opcode (8bit or 32bit).
LOpcode condop = cond->opcode();
if (target && isTargetWithinS8(target)) {
if (onFalse) {
switch (condop) {
@ -1315,9 +1325,7 @@ namespace nanojit
}
}
}
NIns *patch = _nIns; // address of instruction to patch
asm_cmp(cond);
return patch;
return _nIns; // address of instruction to patch
}
NIns* Assembler::asm_branch_ov(LOpcode, NIns* target) {
@ -1334,13 +1342,17 @@ namespace nanojit
return _nIns;
}
void Assembler::asm_cmp(LIns *cond) {
isCmpDOpcode(cond->opcode()) ? asm_cmpd(cond) : asm_cmpi(cond);
}
// WARNING: this function cannot generate code that will affect the
// condition codes prior to the generation of the test/cmp. See
// Nativei386.cpp:asm_cmp() for details.
void Assembler::asm_cmp(LIns *cond) {
// Nativei386.cpp:asm_cmpi() for details.
void Assembler::asm_cmpi(LIns *cond) {
LIns *b = cond->oprnd2();
if (isImm32(b)) {
asm_cmp_imm(cond);
asm_cmpi_imm(cond);
return;
}
LIns *a = cond->oprnd1();
@ -1361,7 +1373,7 @@ namespace nanojit
}
}
void Assembler::asm_cmp_imm(LIns *cond) {
void Assembler::asm_cmpi_imm(LIns *cond) {
LOpcode condop = cond->opcode();
LIns *a = cond->oprnd1();
LIns *b = cond->oprnd2();
@ -1399,11 +1411,9 @@ namespace nanojit
// LIR_jt jae ja swap+jae swap+ja jp over je
// LIR_jf jb jbe swap+jb swap+jbe jne+jp
NIns* Assembler::asm_branchd(bool onFalse, LIns *cond, NIns *target) {
NIns* Assembler::asm_branchd_helper(bool onFalse, LIns *cond, NIns *target) {
LOpcode condop = cond->opcode();
NIns *patch;
LIns *a = cond->oprnd1();
LIns *b = cond->oprnd2();
if (condop == LIR_eqd) {
if (onFalse) {
// branch if unordered or !=
@ -1422,34 +1432,23 @@ namespace nanojit
}
}
else {
if (condop == LIR_ltd) {
condop = LIR_gtd;
LIns *t = a; a = b; b = t;
} else if (condop == LIR_led) {
condop = LIR_ged;
LIns *t = a; a = b; b = t;
}
if (condop == LIR_gtd) {
if (onFalse)
JBE(8, target);
else
JA(8, target);
} else { // LIR_ged
if (onFalse)
JB(8, target);
else
JAE(8, target);
// LIR_ltd and LIR_gtd are handled by the same case because
// asm_cmpd() converts LIR_ltd(a,b) to LIR_gtd(b,a). Likewise for
// LIR_led/LIR_ged.
switch (condop) {
case LIR_ltd:
case LIR_gtd: if (onFalse) JBE(8, target); else JA(8, target); break;
case LIR_led:
case LIR_ged: if (onFalse) JB(8, target); else JAE(8, target); break;
default: NanoAssert(0); break;
}
patch = _nIns;
}
asm_cmpd(a, b);
return patch;
}
void Assembler::asm_condd(LIns *ins) {
LOpcode op = ins->opcode();
LIns *a = ins->oprnd1();
LIns *b = ins->oprnd2();
if (op == LIR_eqd) {
// result = ZF & !PF, must do logic on flags
// r = al|bl|cl|dl, can only use rh without rex prefix
@ -1460,30 +1459,40 @@ namespace nanojit
X86_SETNP(r); // setnp rh rh = !PF
X86_SETE(r); // sete rl rl = ZF
} else {
if (op == LIR_ltd) {
op = LIR_gtd;
LIns *t = a; a = b; b = t;
} else if (op == LIR_led) {
op = LIR_ged;
LIns *t = a; a = b; b = t;
}
// LIR_ltd and LIR_gtd are handled by the same case because
// asm_cmpd() converts LIR_ltd(a,b) to LIR_gtd(b,a). Likewise for
// LIR_led/LIR_ged.
Register r = prepareResultReg(ins, GpRegs); // x64 can use any GPR as setcc target
MOVZX8(r, r);
if (op == LIR_gtd)
SETA(r);
else
SETAE(r);
switch (op) {
case LIR_ltd:
case LIR_gtd: SETA(r); break;
case LIR_led:
case LIR_ged: SETAE(r); break;
default: NanoAssert(0); break;
}
}
freeResourcesOf(ins);
asm_cmpd(a, b);
asm_cmpd(ins);
}
// WARNING: This function cannot generate any code that will affect the
// condition codes prior to the generation of the ucomisd. See asm_cmp()
// condition codes prior to the generation of the ucomisd. See asm_cmpi()
// for more details.
void Assembler::asm_cmpd(LIns *a, LIns *b) {
void Assembler::asm_cmpd(LIns *cond) {
LOpcode opcode = cond->opcode();
LIns* a = cond->oprnd1();
LIns* b = cond->oprnd2();
// First, we convert (a < b) into (b > a), and (a <= b) into (b >= a).
if (opcode == LIR_ltd) {
opcode = LIR_gtd;
LIns* t = a; a = b; b = t;
} else if (opcode == LIR_led) {
opcode = LIR_ged;
LIns* t = a; a = b; b = t;
}
Register ra, rb;
findRegFor2(FpRegs, a, ra, FpRegs, b, rb);
UCOMISD(ra, rb);
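asm_cmpd() above canonicalizes (a < b) into (b > a) and (a <= b) into (b >= a), so the branch and setcc emitters only ever need the unsigned-above forms (JA/JAE, SETA/SETAE) after the ucomisd. A minimal sketch of that operand-swapping step in isolation (the enum and function names are invented for the example):

#include <utility>

enum class FCmp { Lt, Le, Gt, Ge };

struct FCmpIns {
    FCmp op;
    int lhs, rhs;  // stand-ins for the two LIR operands
};

// Rewrite a<b as b>a and a<=b as b>=a, so code generation only has to
// handle Gt (JA/SETA) and Ge (JAE/SETAE) after the unordered compare.
FCmpIns canonicalize(FCmpIns ins)
{
    if (ins.op == FCmp::Lt) {
        ins.op = FCmp::Gt;
        std::swap(ins.lhs, ins.rhs);
    } else if (ins.op == FCmp::Le) {
        ins.op = FCmp::Ge;
        std::swap(ins.lhs, ins.rhs);
    }
    return ins;
}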
@ -1518,7 +1527,7 @@ namespace nanojit
}
// WARNING: the code generated by this function must not affect the
// condition codes. See asm_cmp() for details.
// condition codes. See asm_cmpi() for details.
void Assembler::asm_restore(LIns *ins, Register r) {
if (ins->isop(LIR_allocp)) {
int d = arDisp(ins);
@ -1587,7 +1596,7 @@ namespace nanojit
}
freeResourcesOf(ins);
asm_cmp(ins);
asm_cmpi(ins);
}
void Assembler::asm_ret(LIns *ins) {

View File

@ -423,9 +423,12 @@ namespace nanojit
void endLoadRegs(LIns *ins);\
void dis(NIns *p, int bytes);\
void asm_cmp(LIns*);\
void asm_cmp_imm(LIns*);\
void asm_cmpd(LIns*, LIns*);\
NIns* asm_branchd(bool, LIns*, NIns*);\
void asm_cmpi(LIns*);\
void asm_cmpi_imm(LIns*);\
void asm_cmpd(LIns*);\
NIns* asm_branch_helper(bool, LIns*, NIns*);\
NIns* asm_branchi_helper(bool, LIns*, NIns*);\
NIns* asm_branchd_helper(bool, LIns*, NIns*);\
void asm_div(LIns *ins);\
void asm_div_mod(LIns *ins);\
int max_stk_used;\

View File

@ -854,8 +854,6 @@ namespace nanojit
inline void Assembler::FLD1() { count_fpu(); FPUc(0xd9e8); asm_output("fld1"); fpu_push(); }
inline void Assembler::FLDZ() { count_fpu(); FPUc(0xd9ee); asm_output("fldz"); fpu_push(); }
inline void Assembler::FFREE(R r) { count_fpu(); FPU(0xddc0, r); asm_output("ffree %s",gpn(r)); }
inline void Assembler::FST32(bool p, I32 d, R b){ count_stq(); FPUm(0xd902|(p?1:0), d, b); asm_output("fst%s32 %d(%s)", (p?"p":""), d, gpn(b)); if (p) fpu_pop(); }
inline void Assembler::FSTQ(bool p, I32 d, R b) { count_stq(); FPUm(0xdd02|(p?1:0), d, b); asm_output("fst%sq %d(%s)", (p?"p":""), d, gpn(b)); if (p) fpu_pop(); }
@ -894,8 +892,6 @@ namespace nanojit
inline void Assembler::FMULdm( const double* dm) { count_ldq(); FPUdm(0xdc01, dm); asm_output("fmul (%p)", (void*)dm); }
inline void Assembler::FDIVRdm(const double* dm) { count_ldq(); FPUdm(0xdc07, dm); asm_output("fdivr (%p)", (void*)dm); }
inline void Assembler::FINCSTP() { count_fpu(); FPUc(0xd9f7); asm_output("fincstp"); fpu_pop(); }
inline void Assembler::FCOMP() { count_fpu(); FPUc(0xD8D9); asm_output("fcomp"); fpu_pop();}
inline void Assembler::FCOMPP() { count_fpu(); FPUc(0xDED9); asm_output("fcompp"); fpu_pop();fpu_pop();}
inline void Assembler::FLDr(R r) { count_ldq(); FPU(0xd9c0, r); asm_output("fld %s", gpn(r)); fpu_push(); }
@ -1208,7 +1204,7 @@ namespace nanojit
}
// WARNING: the code generated by this function must not affect the
// condition codes. See asm_cmp().
// condition codes. See asm_cmpi().
void Assembler::asm_restore(LIns* ins, Register r)
{
NanoAssert(ins->getReg() == r);
@ -1521,19 +1517,18 @@ namespace nanojit
}
}
NIns* Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
NIns* Assembler::asm_branch_helper(bool branchOnFalse, LIns* cond, NIns* targ)
{
LOpcode condop = cond->opcode();
NanoAssert(cond->isCmp());
// Handle float conditions separately.
if (isCmpDOpcode(condop)) {
return asm_branchd(branchOnFalse, cond, targ);
}
return isCmpDOpcode(cond->opcode())
? asm_branchd_helper(branchOnFalse, cond, targ)
: asm_branchi_helper(branchOnFalse, cond, targ);
}
NIns* Assembler::asm_branchi_helper(bool branchOnFalse, LIns* cond, NIns* targ)
{
if (branchOnFalse) {
// op == LIR_xf/LIR_jf
switch (condop) {
switch (cond->opcode()) {
case LIR_eqi: JNE(targ); break;
case LIR_lti: JNL(targ); break;
case LIR_lei: JNLE(targ); break;
@ -1547,7 +1542,7 @@ namespace nanojit
}
} else {
// op == LIR_xt/LIR_jt
switch (condop) {
switch (cond->opcode()) {
case LIR_eqi: JE(targ); break;
case LIR_lti: JL(targ); break;
case LIR_lei: JLE(targ); break;
@ -1560,7 +1555,12 @@ namespace nanojit
default: NanoAssert(0); break;
}
}
NIns* at = _nIns;
return _nIns;
}
NIns* Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
{
NIns* at = asm_branch_helper(branchOnFalse, cond, targ);
asm_cmp(cond);
return at;
}
@ -1584,6 +1584,11 @@ namespace nanojit
JMP_indexed(indexreg, 2, table);
}
void Assembler::asm_cmp(LIns *cond)
{
isCmpDOpcode(cond->opcode()) ? asm_cmpd(cond) : asm_cmpi(cond);
}
// This generates a 'test' or 'cmp' instruction for a condition, which
// causes the condition codes to be set appropriately. It's used with
// conditional branches, conditional moves, and when generating
@ -1623,7 +1628,7 @@ namespace nanojit
// asm_restore(), that means that asm_restore() cannot generate code which
// affects the condition codes.
//
void Assembler::asm_cmp(LIns *cond)
void Assembler::asm_cmpi(LIns *cond)
{
LIns* lhs = cond->oprnd1();
LIns* rhs = cond->oprnd2();
@ -1734,7 +1739,7 @@ namespace nanojit
freeResourcesOf(ins);
asm_cmp(ins);
asm_cmpi(ins);
}
// Two example cases for "ins = add lhs, rhs". '*' lines are those
@ -2051,11 +2056,10 @@ namespace nanojit
(ins->isop(LIR_cmovd) && iftrue->isD() && iffalse->isD()));
if (!_config.i386_sse2 && ins->isop(LIR_cmovd)) {
// See the SSE2 case below for an explanation of the subtleties here.
debug_only( Register rr = ) prepareResultReg(ins, x87Regs);
NanoAssert(FST0 == rr);
NanoAssert(!iftrue->isInReg() || iftrue->getReg() == FST0);
NanoAssert(!iffalse->isInReg());
NanoAssert(!iftrue->isInReg() && !iffalse->isInReg());
NIns* target = _nIns;
@ -2065,52 +2069,73 @@ namespace nanojit
int df = findMemFor(iffalse);
FLDQ(df, FP);
}
FSTP(FST0); // pop the stack
asm_branch_helper(false, condval, target);
FINCSTP();
It's not sufficient to merely decrement the FP stack pointer; we have to
also free FST0, otherwise the load above fails.
FFREE(FST0);
asm_branch(false, condval, target);
NanoAssert(ins->getReg() == rr);
freeResourcesOf(ins);
if (!iftrue->isInReg())
findSpecificRegForUnallocated(iftrue, FST0);
asm_cmp(condval);
return;
}
RegisterMask allow = ins->isD() ? XmmRegs : GpRegs;
Register rr = prepareResultReg(ins, allow);
Register rf = findRegFor(iffalse, allow & ~rmask(rr));
if (ins->isop(LIR_cmovd)) {
// The obvious way to handle this is as follows:
//
// mov rr, rt # only needed if rt is live afterwards
// do comparison
// jt end
// mov rr, rf
// end:
//
// The problem with this is that doing the comparison can cause
// registers to be evicted, possibly including 'rr', which holds
// 'ins'. And that screws things up. So instead we do this:
//
// do comparison
// mov rr, rt # only needed if rt is live afterwards
// jt end
// mov rr, rf
// end:
//
// Putting the 'mov' between the comparison and the jump is ok
// because move instructions don't modify the condition codes.
//
NIns* target = _nIns;
asm_nongp_copy(rr, rf);
asm_branch(false, condval, target);
asm_branch_helper(false, condval, target);
// If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
if (rr != rt)
asm_nongp_copy(rr, rt);
NanoAssert(ins->getReg() == rr);
freeResourcesOf(ins);
if (!iftrue->isInReg()) {
NanoAssert(rt == rr);
findSpecificRegForUnallocated(iftrue, rr);
}
asm_cmp(condval);
return;
}
// If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
NanoAssert(ins->isop(LIR_cmovi));
// WARNING: We cannot generate any code that affects the condition
// codes between the MRcc generation here and the asm_cmp() call
// below. See asm_cmp() for more details.
// codes between the MRcc generation here and the asm_cmpi() call
// below. See asm_cmpi() for more details.
switch (condval->opcode()) {
// Note that these are all opposites...
case LIR_eqi: MRNE(rr, rf); break;
@ -2128,6 +2153,7 @@ namespace nanojit
if (rr != rt)
MR(rr, rt);
NanoAssert(ins->getReg() == rr);
freeResourcesOf(ins);
if (!iftrue->isInReg()) {
NanoAssert(rt == rr);
@ -2614,7 +2640,7 @@ namespace nanojit
}
}
NIns* Assembler::asm_branchd(bool branchOnFalse, LIns *cond, NIns *targ)
NIns* Assembler::asm_branchd_helper(bool branchOnFalse, LIns* cond, NIns *targ)
{
NIns* at = 0;
LOpcode opcode = cond->opcode();
@ -2673,14 +2699,13 @@ namespace nanojit
if (!at)
at = _nIns;
asm_cmpd(cond);
return at;
}
// WARNING: This function cannot generate any code that will affect the
// condition codes prior to the generation of the
// ucomisd/fcompp/fcmop/fcom. See asm_cmp() for more details.
// ucomisd/fcompp/fcmop/fcom. See asm_cmpi() for more details.
void Assembler::asm_cmpd(LIns *cond)
{
LOpcode condop = cond->opcode();
@ -2699,14 +2724,13 @@ namespace nanojit
LIns* t = lhs; lhs = rhs; rhs = t;
}
// LIR_eqd, if lhs == rhs:
// ucomisd ZPC outcome (SETNP/JNP succeeds if P==0)
// ------- --- -------
// UNORDERED 111 SETNP/JNP fails
// EQUAL 100 SETNP/JNP succeeds
//
// LIR_eqd, if lsh != rhs;
// LIR_eqd, if lhs != rhs;
// ucomisd ZPC outcome (SETP/JP succeeds if P==0,
// SETE/JE succeeds if Z==0)
// ------- --- -------
@ -2810,13 +2834,10 @@ namespace nanojit
} else {
TEST_AH(mask);
FNSTSW_AX(); // requires rEAX to be free
if (rhs->isImmD())
{
if (rhs->isImmD()) {
const uint64_t* p = findImmDFromPool(rhs->immDasQ());
FCOMdm(pop, (const double*)p);
}
else
{
} else {
int d = findMemFor(rhs);
FCOM(pop, d, FP);
}

View File

@ -199,9 +199,12 @@ namespace nanojit
void asm_farg(LIns*, int32_t& stkd);\
void asm_arg(ArgType ty, LIns* p, Register r, int32_t& stkd);\
void asm_pusharg(LIns*);\
void asm_cmpd(LIns *cond);\
NIns* asm_branchd(bool, LIns*, NIns*);\
void asm_cmp(LIns *cond); \
void asm_cmpi(LIns *cond); \
void asm_cmpd(LIns *cond);\
NIns* asm_branch_helper(bool, LIns* cond, NIns*);\
NIns* asm_branchi_helper(bool, LIns* cond, NIns*);\
NIns* asm_branchd_helper(bool, LIns* cond, NIns*);\
void asm_div_mod(LIns *cond); \
void asm_load(int d, Register r); \
void asm_immd(Register r, uint64_t q, double d, bool canClobberCCs); \
@ -429,7 +432,6 @@ namespace nanojit
void FCHS(); \
void FLD1(); \
void FLDZ(); \
void FFREE(Register r); \
void FST32(bool p, int32_t d, Register b); \
void FSTQ(bool p, int32_t d, Register b); \
void FSTPQ(int32_t d, Register b); \
@ -451,7 +453,6 @@ namespace nanojit
void FSUBRdm(const double* dm); \
void FMULdm( const double* dm); \
void FDIVRdm(const double* dm); \
void FINCSTP(); \
void FSTP(Register r) { \
count_fpu(); \
FPU(0xddd8, r); \

View File

@ -583,19 +583,6 @@ PRMJ_FormatTime(char *buf, int buflen, const char *fmt, PRMJTime *prtm)
int oldReportMode;
#endif
/* Zero out the tm struct. Linux, SunOS 4 struct tm has extra members int
* tm_gmtoff, char *tm_zone; when tm_zone is garbage, strftime gets
* confused and dumps core. NSPR20 prtime.c attempts to fill these in by
* calling mktime on the partially filled struct, but this doesn't seem to
* work as well; the result string has "can't get timezone" for ECMA-valid
* years. Might still make sense to use this, but find the range of years
* for which valid tz information exists, and map (per ECMA hint) from the
* given year into that range.
* N.B. This hasn't been tested with anything that actually _uses_
* tm_gmtoff; zero might be the wrong thing to set it to if you really need
* to format a time. This fix is for jsdate.c, which only uses
* JS_FormatTime to get a string representing the time zone. */
memset(&a, 0, sizeof(struct tm));
a.tm_sec = prtm->tm_sec;
@ -605,11 +592,33 @@ PRMJ_FormatTime(char *buf, int buflen, const char *fmt, PRMJTime *prtm)
a.tm_mon = prtm->tm_mon;
a.tm_wday = prtm->tm_wday;
/*
* On systems where |struct tm| has members tm_gmtoff and tm_zone, we
* must fill in those values, or else strftime will return wrong results
* (e.g., bug 511726, bug 554338).
*/
#if defined(HAVE_LOCALTIME_R) && defined(HAVE_TM_ZONE_TM_GMTOFF)
{
/*
* Fill out |td| to the time represented by |prtm|, leaving the
* timezone fields zeroed out. localtime_r will then fill in the
* timezone fields for that local time according to the system's
* timezone parameters.
*/
struct tm td;
time_t bogus = 0;
localtime_r(&bogus, &td);
memset(&td, 0, sizeof(td));
td.tm_sec = prtm->tm_sec;
td.tm_min = prtm->tm_min;
td.tm_hour = prtm->tm_hour;
td.tm_mday = prtm->tm_mday;
td.tm_mon = prtm->tm_mon;
td.tm_wday = prtm->tm_wday;
td.tm_year = prtm->tm_year - 1900;
td.tm_yday = prtm->tm_yday;
td.tm_isdst = prtm->tm_isdst;
time_t t = mktime(&td);
localtime_r(&t, &td);
a.tm_gmtoff = td.tm_gmtoff;
a.tm_zone = td.tm_zone;
}
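The new code above derives tm_gmtoff and tm_zone by round-tripping the broken-down time through mktime() and localtime_r(). A standalone sketch of the same technique (tm_gmtoff and tm_zone are BSD/glibc extensions to struct tm, so this only builds where HAVE_TM_ZONE_TM_GMTOFF would be defined; the sample date is arbitrary):

#include <cstdio>
#include <cstring>
#include <ctime>

int main()
{
    // Broken-down local time with the timezone fields still unknown.
    struct tm td;
    std::memset(&td, 0, sizeof(td));
    td.tm_year = 2010 - 1900;
    td.tm_mon = 11;       // December
    td.tm_mday = 6;
    td.tm_hour = 16;
    td.tm_isdst = -1;     // let mktime determine DST for this sketch

    // mktime() interprets td as local time; localtime_r() then fills in the
    // timezone-dependent fields (tm_gmtoff, tm_zone) for that instant.
    time_t t = mktime(&td);
    localtime_r(&t, &td);

    std::printf("UTC offset: %ld seconds, zone: %s\n",
                (long)td.tm_gmtoff, td.tm_zone);
    return 0;
}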

View File

@ -2088,10 +2088,22 @@ Disassemble(JSContext *cx, uintN argc, jsval *vp)
argv++, argc--;
}
for (uintN i = 0; i < argc; i++) {
if (!DisassembleValue(cx, argv[i], lines, recursive))
return false;
if (argc == 0) {
/* Without arguments, disassemble the current script. */
if (JSStackFrame *frame = JS_GetScriptedCaller(cx, NULL)) {
JSScript *script = JS_GetFrameScript(cx, frame);
if (!js_Disassemble(cx, script, lines, stdout))
return false;
SrcNotes(cx, script);
TryNotes(cx, script);
}
} else {
for (uintN i = 0; i < argc; i++) {
if (!DisassembleValue(cx, argv[i], lines, recursive))
return false;
}
}
JS_SET_RVAL(cx, vp, JSVAL_VOID);
return true;
}

View File

@ -0,0 +1,35 @@
/*
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/licenses/publicdomain/
*/
var BUGNUMBER = 615070;
var summary = "Line terminator after backslash is invalid in regexp literals";
print(BUGNUMBER + ": " + summary);
/**************
* BEGIN TEST *
**************/
var regexps = ["/\\\u000A/", "/\\\u000D/", "/\\\u2028/", "/\\\u2029/",
"/ab\\\n/", "/ab\\\r/", "/ab\\\u2028/", "/ab\\\u2029/",
"/ab[c\\\n]/", "/a[bc\\", "/\\"];
for(var i=0; i<regexps.length; i++) {
var src = regexps[i];
try {
x = eval(src).source;
} catch(e) {
assertEq(e.constructor, SyntaxError);
continue;
}
assertEq(0, 1);
}
/**************/
if (typeof reportCompare === "function")
reportCompare(true, true);
print("All tests passed!");

View File

@ -1,3 +1,4 @@
url-prefix ../../jsreftest.html?test=ecma_5/RegExp/
script 7.8.5-01.js
script 15.10.5-01.js
script 15.10.7.5-01.js

View File

@ -73,7 +73,7 @@ try
}
catch(ex)
{
expect = 'InternalError: script stack space quota is exhausted';
expect = 'InternalError: allocation size overflow';
actual = ex + '';
print(actual);
}

View File

@ -0,0 +1,31 @@
// -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// Any copyright is dedicated to the Public Domain.
// http://creativecommons.org/licenses/publicdomain/
function assertThrows(f) {
var ok = false;
try {
f();
} catch (exc) {
ok = true;
}
if (!ok)
throw new TypeError("Assertion failed: " + f + " did not throw as expected");
}
// Don't allow forging bogus Date objects.
var buf = serialize(new Date(NaN));
var a = [1/0, -1/0,
Number.MIN_VALUE, -Number.MIN_VALUE,
Math.PI, 1286523948674.5,
Number.MAX_VALUE, -Number.MAX_VALUE,
8.64e15 + 1, -(8.64e15 + 1)];
for (var i = 0; i < a.length; i++) {
var n = a[i];
var nbuf = serialize(n);
for (var j = 0; j < 8; j++)
buf[j + 8] = nbuf[j];
assertThrows(function () { deserialize(buf); });
}
reportCompare(0, 0);

View File

@ -0,0 +1,63 @@
// -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// Any copyright is dedicated to the Public Domain.
// http://creativecommons.org/licenses/publicdomain/
var a = [new Boolean(true),
new Boolean(false),
new Number(0),
new Number(-0),
new Number(Math.PI),
new Number(0x7fffffff),
new Number(-0x7fffffff),
new Number(0x80000000),
new Number(-0x80000000),
new Number(0xffffffff),
new Number(-0xffffffff),
new Number(0x100000000),
new Number(-0x100000000),
new Number(Number.MIN_VALUE),
new Number(-Number.MIN_VALUE),
new Number(Number.MAX_VALUE),
new Number(-Number.MAX_VALUE),
new Number(1/0),
new Number(-1/0),
new Number(0/0),
new String(""),
new String("\0123\u4567"),
new Date(0),
new Date(-0),
new Date(0x7fffffff),
new Date(-0x7fffffff),
new Date(0x80000000),
new Date(-0x80000000),
new Date(0xffffffff),
new Date(-0xffffffff),
new Date(0x100000000),
new Date(-0x100000000),
new Date(1286523948674),
new Date(8.64e15), // hard-coded in ES5 spec, hard-coded here
new Date(-8.64e15),
new Date(NaN)];
function primitive(a) {
return a instanceof Date ? +a : a.constructor(a);
}
for (var i = 0; i < a.length; i++) {
var x = a[i];
var expectedSource = x.toSource();
var expectedPrimitive = primitive(x);
var expectedProto = x.__proto__;
var expectedString = Object.prototype.toString.call(x);
x.expando = 1;
x.__proto__ = {};
var y = deserialize(serialize(x));
assertEq(y.toSource(), expectedSource);
assertEq(primitive(y), expectedPrimitive);
assertEq(y.__proto__, expectedProto);
assertEq(Object.prototype.toString.call(y), expectedString);
assertEq("expando" in y, false);
}
reportCompare(0, 0);

View File

@ -15,6 +15,7 @@ function testRegExp(b) {
assertEq(a.ignoreCase, b.ignoreCase);
assertEq(a.multiline, b.multiline);
assertEq(a.sticky, b.sticky);
assertEq("expando" in a, false);
}
testRegExp(RegExp(""));

View File

@ -18,7 +18,9 @@ script destructure-accessor.js
script censor-strict-caller.js
skip-if(!xulRuntime.shell) script clone-simple.js
skip-if(!xulRuntime.shell) script clone-regexp.js
skip-if(!xulRuntime.shell) script clone-leaf-object.js
skip-if(!xulRuntime.shell) script clone-object.js
skip-if(!xulRuntime.shell) script clone-typed-array.js
skip-if(!xulRuntime.shell) script clone-errors.js
skip-if(!xulRuntime.shell) script clone-forge.js
script set-property-non-extensible.js

View File

@ -11,7 +11,8 @@ var src =
' *\n' +
'} catch(e) {}\n' +
'default xml namespace = x\n' +
'for (let b in [0, 0]) <x/>\n';
'for (let b in [0, 0]) <x/>\n' +
'0\n';
evalcx(src, box);

View File

@ -325,6 +325,9 @@ if __name__ == '__main__':
cmd = test_list[0].get_command(TestTask.js_cmd_prefix)
if OPTIONS.show_cmd:
print subprocess.list2cmdline(cmd)
manifest_dir = os.path.dirname(OPTIONS.manifest)
if manifest_dir not in ('', '.'):
os.chdir(os.path.dirname(OPTIONS.manifest))
call(cmd)
sys.exit()

View File

@ -98,7 +98,10 @@ def parse(filename, xul_tester, reldir = ''):
for line in f:
sline = comment_re.sub('', line)
parts = sline.split()
if parts[0] == 'include':
if len(parts) == 0:
# line is empty or just a comment, skip
pass
elif parts[0] == 'include':
include_file = parts[1]
include_reldir = os.path.join(reldir, os.path.dirname(include_file))
ans += parse(os.path.join(dir, include_file), xul_tester, include_reldir)

View File

@ -83,6 +83,7 @@ class XPCNativeScriptableSharedMap;
class XPCWrappedNativeProtoMap;
class XPCNativeWrapperMap;
class WrappedNative2WrapperMap;
class JSObject2JSObjectMap;
class nsXPCComponents;
class nsXPCComponents_Interfaces;

View File

@ -240,6 +240,12 @@ ContextCallback(JSContext *cx, uintN operation)
return JS_TRUE;
}
xpc::CompartmentPrivate::~CompartmentPrivate()
{
if (waiverWrapperMap)
delete waiverWrapperMap;
}
static JSBool
CompartmentCallback(JSContext *cx, JSCompartment *compartment, uintN op)
{
@ -545,6 +551,27 @@ DoDeferredRelease(nsTArray<T> &array)
}
}
static JSDHashOperator
SweepWaiverWrappers(JSDHashTable *table, JSDHashEntryHdr *hdr,
uint32 number, void *arg)
{
JSObject *key = ((JSObject2JSObjectMap::Entry *)hdr)->key;
JSObject *value = ((JSObject2JSObjectMap::Entry *)hdr)->value;
if(IsAboutToBeFinalized(key) || IsAboutToBeFinalized(value))
return JS_DHASH_REMOVE;
return JS_DHASH_NEXT;
}
static PLDHashOperator
SweepCompartment(nsCStringHashKey& aKey, JSCompartment *compartment, void *aClosure)
{
xpc::CompartmentPrivate *priv = (xpc::CompartmentPrivate *)
JS_GetCompartmentPrivate((JSContext *)aClosure, compartment);
if (priv->waiverWrapperMap)
priv->waiverWrapperMap->Enumerate(SweepWaiverWrappers, nsnull);
return PL_DHASH_NEXT;
}
// static
JSBool XPCJSRuntime::GCCallback(JSContext *cx, JSGCStatus status)
{
@ -597,9 +624,14 @@ JSBool XPCJSRuntime::GCCallback(JSContext *cx, JSGCStatus status)
Enumerate(WrappedJSDyingJSObjectFinder, &data);
}
// Find dying scopes...
// Find dying scopes.
XPCWrappedNativeScope::FinishedMarkPhaseOfGC(cx, self);
// Sweep compartments.
self->GetCompartmentMap().EnumerateRead(
(XPCCompartmentMap::EnumReadFunction)
SweepCompartment, cx);
self->mDoingFinalization = JS_TRUE;
break;
}

View File

@ -763,3 +763,18 @@ WrappedNative2WrapperMap::AddLink(JSObject* wrappedObject, Link* oldLink)
}
/***************************************************************************/
// implement JSObject2JSObjectMap...
struct JSDHashTableOps
JSObject2JSObjectMap::sOps = {
JS_DHashAllocTable,
JS_DHashFreeTable,
JS_DHashVoidPtrKeyStub,
JS_DHashMatchEntryStub,
JS_DHashMoveEntryStub,
JS_DHashClearEntryStub,
JS_DHashFinalizeStub,
nsnull
};
/***************************************************************************/

View File

@ -710,4 +710,80 @@ private:
JSDHashTable *mTable;
};
class JSObject2JSObjectMap
{
static struct JSDHashTableOps sOps;
public:
struct Entry : public JSDHashEntryHdr
{
JSObject* key;
JSObject* value;
};
static JSObject2JSObjectMap* newMap(int size)
{
JSObject2JSObjectMap* map = new JSObject2JSObjectMap(size);
if(map && map->mTable)
return map;
delete map;
return nsnull;
}
inline JSObject* Find(JSObject* key)
{
NS_PRECONDITION(key, "bad param");
Entry* entry = (Entry*)
JS_DHashTableOperate(mTable, key, JS_DHASH_LOOKUP);
if(JS_DHASH_ENTRY_IS_FREE(entry))
return nsnull;
return entry->value;
}
// Note: If the entry already exists, return the old value.
inline JSObject* Add(JSObject *key, JSObject *value)
{
NS_PRECONDITION(key,"bad param");
Entry* entry = (Entry*)
JS_DHashTableOperate(mTable, key, JS_DHASH_ADD);
if(!entry)
return nsnull;
if(entry->key)
return entry->value;
entry->key = key;
entry->value = value;
return value;
}
inline void Remove(JSObject* key)
{
NS_PRECONDITION(key,"bad param");
JS_DHashTableOperate(mTable, key, JS_DHASH_REMOVE);
}
inline uint32 Count() {return mTable->entryCount;}
inline uint32 Enumerate(JSDHashEnumerator f, void *arg)
{
return JS_DHashTableEnumerate(mTable, f, arg);
}
~JSObject2JSObjectMap()
{
if(mTable)
JS_DHashTableDestroy(mTable);
}
private:
JSObject2JSObjectMap(int size)
{
mTable = JS_NewDHashTable(&sOps, nsnull, sizeof(Entry), size);
}
JSObject2JSObjectMap(); // no implementation
private:
JSDHashTable *mTable;
};
#endif /* xpcmaps_h___ */
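JSObject2JSObjectMap above is a thin JSDHashTable wrapper keyed and valued by object pointers, where Add() is really "insert if absent, otherwise return what is already there". The same contract expressed against a standard container, as a rough behavioral sketch (ObjectPtr is just a placeholder type, not the XPConnect API):

#include <cstddef>
#include <unordered_map>

using ObjectPtr = void*;  // placeholder for JSObject*

class ObjectToObjectMap {
public:
    // Returns the mapped value, or nullptr if the key is absent.
    ObjectPtr Find(ObjectPtr key) const {
        auto it = map_.find(key);
        return it == map_.end() ? nullptr : it->second;
    }

    // Inserts key->value; if the key already exists, the old value wins
    // and is returned (mirroring JSObject2JSObjectMap::Add).
    ObjectPtr Add(ObjectPtr key, ObjectPtr value) {
        auto result = map_.emplace(key, value);
        return result.first->second;
    }

    void Remove(ObjectPtr key) { map_.erase(key); }
    std::size_t Count() const { return map_.size(); }

private:
    std::unordered_map<ObjectPtr, ObjectPtr> map_;
};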

View File

@ -240,6 +240,7 @@ void DEBUG_CheckWrapperThreadSafety(const XPCWrappedNative* wrapper);
#define XPC_NATIVE_JSCLASS_MAP_SIZE 32
#define XPC_THIS_TRANSLATOR_MAP_SIZE 8
#define XPC_NATIVE_WRAPPER_MAP_SIZE 16
#define XPC_WRAPPER_MAP_SIZE 16
/***************************************************************************/
// data declarations...
@ -323,7 +324,7 @@ typedef nsDataHashtableMT<nsISupportsHashKey, JSCompartment *> XPCMTCompartmentM
typedef nsDataHashtable<xpc::PtrAndPrincipalHashKey, JSCompartment *> XPCCompartmentMap;
/***************************************************************************/
// useful macros...
// Useful macros...
#define XPC_STRING_GETTER_BODY(dest, src) \
NS_ENSURE_ARG_POINTER(dest); \
@ -4484,22 +4485,28 @@ struct CompartmentPrivate
: key(key),
ptr(nsnull),
wantXrays(wantXrays),
cycleCollectionEnabled(cycleCollectionEnabled)
cycleCollectionEnabled(cycleCollectionEnabled),
waiverWrapperMap(nsnull)
{
}
CompartmentPrivate(nsISupports *ptr, bool wantXrays, bool cycleCollectionEnabled)
: key(nsnull),
ptr(ptr),
wantXrays(wantXrays),
cycleCollectionEnabled(cycleCollectionEnabled)
cycleCollectionEnabled(cycleCollectionEnabled),
waiverWrapperMap(nsnull)
{
}
~CompartmentPrivate();
// NB: key and ptr are mutually exclusive.
nsAutoPtr<PtrAndPrincipalHashKey> key;
nsCOMPtr<nsISupports> ptr;
bool wantXrays;
bool cycleCollectionEnabled;
JSObject2JSObjectMap *waiverWrapperMap;
};
inline bool

View File

@ -58,6 +58,7 @@ _CHROME_FILES = \
test_bug517163.xul \
test_bug571849.xul \
test_bug601803.xul \
test_bug610390.xul \
$(NULL)
# Disabled until this test gets updated to test the new proxy based

View File

@ -0,0 +1,34 @@
<?xml version="1.0"?>
<?xml-stylesheet href="chrome://global/skin" type="text/css"?>
<?xml-stylesheet href="chrome://mochikit/content/tests/SimpleTest/test.css"
type="text/css"?>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=610390
-->
<window title="Mozilla Bug 610390"
xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<script type="application/javascript"
src="chrome://mochikit/content/MochiKit/packed.js"></script>
<script type="application/javascript"
src="chrome://mochikit/content/tests/SimpleTest/SimpleTest.js"></script>
<!-- test results are displayed in the html:body -->
<body xmlns="http://www.w3.org/1999/xhtml">
<iframe type="content"
src="data:text/html,&lt;script&gt;var x=3&lt;/script&gt;"
onload="go()"
id="ifr">
</iframe>
</body>
<!-- test code goes here -->
<script type="application/javascript"><![CDATA[
SimpleTest.waitForExplicitFinish();
function go() {
var w = $('ifr').contentWindow;
is(w.wrappedJSObject, w.wrappedJSObject, "wrappedJSObject identity not maintained");
SimpleTest.finish();
}
]]></script>
</window>

View File

@ -48,6 +48,7 @@
#include "XPCWrapper.h"
#include "xpcprivate.h"
#include "xpcmaps.h"
namespace xpc {
@ -307,10 +308,33 @@ WrapperFactory::WaiveXrayAndWrap(JSContext *cx, jsval *vp)
obj = GetCurrentOuter(cx, obj);
{
js::SwitchToCompartment sc(cx, obj->compartment());
obj = JSWrapper::New(cx, obj, NULL, obj->getGlobal(), &WaiveXrayWrapperWrapper);
if (!obj)
return false;
// See if we already have a waiver wrapper for this object.
CompartmentPrivate *priv =
(CompartmentPrivate *)JS_GetCompartmentPrivate(cx, obj->compartment());
JSObject *wobj = nsnull;
if (priv && priv->waiverWrapperMap)
wobj = priv->waiverWrapperMap->Find(obj);
// No wrapper yet, make one.
if (!wobj) {
js::SwitchToCompartment sc(cx, obj->compartment());
wobj = JSWrapper::New(cx, obj, NULL, obj->getGlobal(), &WaiveXrayWrapperWrapper);
if (!wobj)
return false;
// Add the new wrapper so we find it next time.
if (priv) {
if (!priv->waiverWrapperMap) {
priv->waiverWrapperMap = JSObject2JSObjectMap::newMap(XPC_WRAPPER_MAP_SIZE);
if (!priv->waiverWrapperMap)
return false;
}
if (!priv->waiverWrapperMap->Add(obj, wobj))
return false;
}
}
obj = wobj;
}
*vp = OBJECT_TO_JSVAL(obj);
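The hunk above memoizes waiver wrappers per compartment: look the object up in a lazily allocated map, create and record a wrapper only on a miss, so the same object always yields the same wrapper (the identity the new chrome test checks). A compartment-agnostic sketch of that pattern (names are illustrative, not the XPConnect API; the real wrappers are GC-managed rather than heap-owned as here):

#include <memory>
#include <unordered_map>

struct Object {};
struct Wrapper { Object *wrapped; };

struct CompartmentData {
    // Allocated on first use, like priv->waiverWrapperMap.
    std::unique_ptr<std::unordered_map<Object*, Wrapper*>> waiverMap;
};

Wrapper *getOrCreateWaiver(CompartmentData &priv, Object *obj)
{
    if (priv.waiverMap) {
        auto it = priv.waiverMap->find(obj);
        if (it != priv.waiverMap->end())
            return it->second;           // reuse the existing wrapper
    }
    Wrapper *w = new Wrapper{obj};       // stand-in for JSWrapper::New
    if (!priv.waiverMap)
        priv.waiverMap.reset(new std::unordered_map<Object*, Wrapper*>);
    (*priv.waiverMap)[obj] = w;          // remember it for next time
    return w;
}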