Bug 825714: Refactor JS allocation routines to enable allocation on parallel threads, and move various ion fields into per-thread-data. r=billm

In more detail:
- Add JSObject::parExtendDenseArray(), which permits parallel code to allocate
  a dense array using its own Allocator.
- Add an allocation path in the GC for loading a fresh arena as needed
  (ArenaLists::parallelAllocate()).
- Ensure that if a GC is triggered during parallel execution, the parallel
  section is aborted and the GC request is deferred until parallel execution
  terminates.
- Update ForkJoin/ThreadPool so that they create their own Allocators (see the
  sketch below), along with other miscellaneous API changes.
- Move some of the ion-related fields (e.g., |ionTop|) into PerThreadData.
- Remove out-of-date malloc tracking fields.
Author: Nicholas D. Matsakis, 2013-01-24 21:12:44 -08:00
parent b6c051330b
commit 29d4b19e90
46 changed files with 934 additions and 422 deletions
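
To give a feel for the intended usage pattern, here is a rough sketch (not part
of the patch) of how parallel worker code is expected to allocate through its
thread-local Allocator rather than through a JSContext. The kernel function
FillWithOnes and the slice->allocator field are assumptions made for
illustration; parExtendDenseElements(), ForkJoinSlice::InParallelSection(), and
ArenaLists::parallelAllocate() are taken from the diffs below.

    // Sketch only: the Allocator-based allocation pattern enabled by this
    // patch for parallel (ForkJoin) workers. Not part of the commit.
    #include "jscompartment.h"   // js::Allocator
    #include "jsobjinlines.h"    // JSObject::parExtendDenseElements
    #include "vm/ForkJoin.h"     // js::ForkJoinSlice

    static bool
    FillWithOnes(js::ForkJoinSlice *slice, JSObject *array, uint32_t count)
    {
        // Runs on a worker thread inside a parallel section; GC is deferred
        // until the section ends, so a failed allocation simply bails out.
        JS_ASSERT(js::ForkJoinSlice::InParallelSection());

        // Each worker owns its own Allocator (assumed reachable here as
        // slice->allocator). The fast path allocates from thread-local free
        // lists without locking; a fresh arena is taken under the GC lock
        // only when those lists are empty (ArenaLists::parallelAllocate()).
        js::Allocator *alloc = slice->allocator;

        js::Value one = js::Int32Value(1);
        for (uint32_t i = 0; i < count; i++) {
            if (array->parExtendDenseElements(alloc, &one, 1) != JSObject::ED_OK)
                return false;   // no error report; abort the parallel section
        }
        return true;
    }

When the parallel section ends, each worker's arenas are folded back into the
compartment's main allocator via JSCompartment::adoptWorkerAllocator(), so
objects allocated this way remain live for sequential code.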


@ -509,7 +509,7 @@ gc::StartVerifyPreBarriers(JSRuntime *rt)
for (CompartmentsIter c(rt); !c.done(); c.next()) {
PurgeJITCaches(c);
c->setNeedsBarrier(true, JSCompartment::UpdateIon);
c->arenas.purge();
c->allocator.arenas.purge();
}
return;


@ -81,7 +81,7 @@ GetBailedJSScript(JSContext *cx)
// Just after the frame conversion, we can safely interpret the ionTop as JS
// frame because it targets the bailed JS frame converted to an exit frame.
IonJSFrameLayout *frame = reinterpret_cast<IonJSFrameLayout*>(cx->runtime->ionTop);
IonJSFrameLayout *frame = reinterpret_cast<IonJSFrameLayout*>(cx->mainThread().ionTop);
switch (GetCalleeTokenTag(frame->calleeToken())) {
case CalleeToken_Function: {
JSFunction *fun = CalleeTokenToFunction(frame->calleeToken());
@ -360,7 +360,7 @@ ion::Bailout(BailoutStack *sp)
AutoAssertNoGC nogc;
JSContext *cx = GetIonContext()->cx;
// We don't have an exit frame.
cx->runtime->ionTop = NULL;
cx->mainThread().ionTop = NULL;
IonActivationIterator ionActivations(cx);
IonBailoutIterator iter(ionActivations, sp);
IonActivation *activation = ionActivations.activation();
@ -386,7 +386,7 @@ ion::InvalidationBailout(InvalidationBailoutStack *sp, size_t *frameSizeOut)
JSContext *cx = GetIonContext()->cx;
// We don't have an exit frame.
cx->runtime->ionTop = NULL;
cx->mainThread().ionTop = NULL;
IonActivationIterator ionActivations(cx);
IonBailoutIterator iter(ionActivations, sp);
IonActivation *activation = ionActivations.activation();
@ -458,7 +458,7 @@ uint32_t
ion::ReflowTypeInfo(uint32_t bailoutResult)
{
JSContext *cx = GetIonContext()->cx;
IonActivation *activation = cx->runtime->ionActivation;
IonActivation *activation = cx->mainThread().ionActivation;
IonSpew(IonSpew_Bailouts, "reflowing type info");
@ -584,7 +584,7 @@ uint32_t
ion::ThunkToInterpreter(Value *vp)
{
JSContext *cx = GetIonContext()->cx;
IonActivation *activation = cx->runtime->ionActivation;
IonActivation *activation = cx->mainThread().ionActivation;
BailoutClosure *br = activation->takeBailout();
InterpMode resumeMode = JSINTERP_BAILOUT;


@ -1488,7 +1488,7 @@ CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed *lir)
// Since Ion frames exist on the C stack, the stack limit may be
// dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
uintptr_t *limitAddr = &rt->ionStackLimit;
uintptr_t *limitAddr = &rt->mainThread.ionStackLimit;
masm.loadPtr(AbsoluteAddress(limitAddr), limitReg);
CheckOverRecursedFailure *ool = new CheckOverRecursedFailure(lir);


@ -18,9 +18,11 @@
#include "RangeAnalysis.h"
#include "LinearScan.h"
#include "jscompartment.h"
#include "jsworkers.h"
#include "vm/ThreadPool.h"
#include "vm/ForkJoin.h"
#include "IonCompartment.h"
#include "CodeGenerator.h"
#include "jsworkers.h"
#include "BacktrackingAllocator.h"
#include "StupidAllocator.h"
#include "UnreachableCodeElimination.h"
@ -119,6 +121,10 @@ ion::InitializeIon()
PRStatus status = PR_NewThreadPrivateIndex(&IonTLSIndex, NULL);
if (status != PR_SUCCESS)
return false;
if (!ForkJoinSlice::Initialize())
return false;
IonTLSInitialized = true;
}
#endif
@ -220,6 +226,8 @@ IonCompartment::initialize(JSContext *cx)
void
ion::FinishOffThreadBuilder(IonBuilder *builder)
{
JS_ASSERT(builder->info().executionMode() == SequentialExecution);
// Clean up if compilation did not succeed.
if (builder->script()->isIonCompilingOffThread()) {
types::TypeCompartment &types = builder->script()->compartment()->types;
@ -292,30 +300,30 @@ IonCompartment::getVMWrapper(const VMFunction &f)
IonActivation::IonActivation(JSContext *cx, StackFrame *fp)
: cx_(cx),
compartment_(cx->compartment),
prev_(cx->runtime->ionActivation),
prev_(cx->mainThread().ionActivation),
entryfp_(fp),
bailout_(NULL),
prevIonTop_(cx->runtime->ionTop),
prevIonJSContext_(cx->runtime->ionJSContext),
prevIonTop_(cx->mainThread().ionTop),
prevIonJSContext_(cx->mainThread().ionJSContext),
prevpc_(NULL)
{
if (fp)
fp->setRunningInIon();
cx->runtime->ionJSContext = cx;
cx->runtime->ionActivation = this;
cx->runtime->ionStackLimit = cx->runtime->nativeStackLimit;
cx->mainThread().ionJSContext = cx;
cx->mainThread().ionActivation = this;
cx->mainThread().ionStackLimit = cx->mainThread().nativeStackLimit;
}
IonActivation::~IonActivation()
{
JS_ASSERT(cx_->runtime->ionActivation == this);
JS_ASSERT(cx_->mainThread().ionActivation == this);
JS_ASSERT(!bailout_);
if (entryfp_)
entryfp_->clearRunningInIon();
cx_->runtime->ionActivation = prev();
cx_->runtime->ionTop = prevIonTop_;
cx_->runtime->ionJSContext = prevIonJSContext_;
cx_->mainThread().ionActivation = prev();
cx_->mainThread().ionTop = prevIonTop_;
cx_->mainThread().ionJSContext = prevIonJSContext_;
}
IonCode *


@ -85,7 +85,7 @@ inline UnrootedScript
GetTopIonJSScript(JSContext *cx, const SafepointIndex **safepointIndexOut, void **returnAddrOut)
{
AutoAssertNoGC nogc;
IonFrameIterator iter(cx->runtime->ionTop);
IonFrameIterator iter(cx->mainThread().ionTop);
JS_ASSERT(iter.type() == IonFrame_Exit);
++iter;


@ -309,9 +309,9 @@ ion::HandleException(ResumeFromException *rfe)
// Immediately remove any bailout frame guard that might be left over from
// an error in between ConvertFrames and ThunkToInterpreter.
js_delete(cx->runtime->ionActivation->maybeTakeBailout());
js_delete(cx->mainThread().ionActivation->maybeTakeBailout());
IonFrameIterator iter(cx->runtime->ionTop);
IonFrameIterator iter(cx->mainThread().ionTop);
while (!iter.isEntry()) {
if (iter.isScripted()) {
// Search each inlined frame for live iterator objects, and close
@ -359,15 +359,15 @@ IonActivationIterator::settle()
}
IonActivationIterator::IonActivationIterator(JSContext *cx)
: top_(cx->runtime->ionTop),
activation_(cx->runtime->ionActivation)
: top_(cx->mainThread().ionTop),
activation_(cx->mainThread().ionActivation)
{
settle();
}
IonActivationIterator::IonActivationIterator(JSRuntime *rt)
: top_(rt->ionTop),
activation_(rt->ionActivation)
: top_(rt->mainThread.ionTop),
activation_(rt->mainThread.ionActivation)
{
settle();
}
@ -675,7 +675,7 @@ ion::GetPcScript(JSContext *cx, JSScript **scriptRes, jsbytecode **pcRes)
JSRuntime *rt = cx->runtime;
// Recover the return address.
IonFrameIterator it(rt->ionTop);
IonFrameIterator it(rt->mainThread.ionTop);
uint8_t *retAddr = it.returnAddress();
uint32_t hash = PcScriptCache::Hash(retAddr);
JS_ASSERT(retAddr != NULL);


@ -10,6 +10,7 @@
#include "IonMacroAssembler.h"
#include "gc/Root.h"
#include "Bailouts.h"
#include "vm/ForkJoin.h"
using namespace js;
using namespace js::ion;
@ -321,7 +322,7 @@ MacroAssembler::newGCThing(const Register &result,
// If a FreeSpan is replaced, its members are updated in the freeLists table,
// which the code below always re-reads.
gc::FreeSpan *list = const_cast<gc::FreeSpan *>
(compartment->arenas.getFreeList(allocKind));
(compartment->allocator.arenas.getFreeList(allocKind));
loadPtr(AbsoluteAddress(&list->first), result);
branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(&list->last), result, fail);


@ -159,11 +159,11 @@ class MacroAssembler : public MacroAssemblerSpecific
void loadJSContext(const Register &dest) {
movePtr(ImmWord(GetIonContext()->compartment->rt), dest);
loadPtr(Address(dest, offsetof(JSRuntime, ionJSContext)), dest);
loadPtr(Address(dest, offsetof(JSRuntime, mainThread.ionJSContext)), dest);
}
void loadIonActivation(const Register &dest) {
movePtr(ImmWord(GetIonContext()->compartment->rt), dest);
loadPtr(Address(dest, offsetof(JSRuntime, ionActivation)), dest);
loadPtr(Address(dest, offsetof(JSRuntime, mainThread.ionActivation)), dest);
}
template<typename T>


@ -2630,7 +2630,7 @@ MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, Register base, Register index,
void
MacroAssemblerARMCompat::linkExitFrame() {
uint8_t *dest = ((uint8_t*)GetIonContext()->compartment->rt) + offsetof(JSRuntime, ionTop);
uint8_t *dest = ((uint8_t*)GetIonContext()->compartment->rt) + offsetof(JSRuntime, mainThread.ionTop);
movePtr(ImmWord(dest), ScratchRegister);
ma_str(StackPointer, Operand(ScratchRegister, 0));
}


@ -14,6 +14,8 @@
#include "ion/Bailouts.h"
#include "ion/VMFunctions.h"
#include "jscntxtinlines.h"
using namespace js;
using namespace js::ion;


@ -889,7 +889,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
// ThreadData::ionTop.
void linkExitFrame() {
mov(ImmWord(GetIonContext()->compartment->rt), ScratchReg);
mov(StackPointer, Operand(ScratchReg, offsetof(JSRuntime, ionTop)));
mov(StackPointer, Operand(ScratchReg, offsetof(JSRuntime, mainThread.ionTop)));
}
void callWithExitFrame(IonCode *target, Register dynStack) {


@ -758,7 +758,7 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
// ThreadData::ionTop.
void linkExitFrame() {
JSCompartment *compartment = GetIonContext()->compartment;
movl(StackPointer, Operand(&compartment->rt->ionTop));
movl(StackPointer, Operand(&compartment->rt->mainThread.ionTop));
}
void callWithExitFrame(IonCode *target, Register dynStack) {


@ -155,7 +155,7 @@ void edge(unsigned src_index, unsigned dest_index)
void run()
{
finder = new ComponentFinder<TestNode>(rt->nativeStackLimit);
finder = new ComponentFinder<TestNode>(rt->mainThread.nativeStackLimit);
for (unsigned i = 0; i < vertex_count; ++i)
finder->addNode(&Vertex[i]);
resultsList = finder->getResultsList();
@ -246,7 +246,7 @@ BEGIN_TEST(testFindSCCsStackLimit)
for (unsigned i = initial; i < (max - 10); ++i)
vertices[i].edge = &vertices[i + 1];
ComponentFinder<TestNode2> finder(rt->nativeStackLimit);
ComponentFinder<TestNode2> finder(rt->mainThread.nativeStackLimit);
for (unsigned i = 0; i < max; ++i)
finder.addNode(&vertices[i]);


@ -720,12 +720,17 @@ JS_FRIEND_API(bool) JS::NeedRelaxedRootChecks() { return false; }
static const JSSecurityCallbacks NullSecurityCallbacks = { };
PerThreadData::PerThreadData(JSRuntime *runtime)
: runtime_(runtime),
js::PerThreadData::PerThreadData(JSRuntime *runtime)
: PerThreadDataFriendFields(),
runtime_(runtime),
#ifdef DEBUG
gcRelaxRootChecks(false),
gcAssertNoGCDepth(0),
#endif
ionTop(NULL),
ionJSContext(NULL),
ionStackLimit(0),
ionActivation(NULL),
suppressGC(0)
{}
@ -878,10 +883,6 @@ JSRuntime::JSRuntime(JSUseHelperThreads useHelperThreads)
noGCOrAllocationCheck(0),
#endif
jitHardening(false),
ionTop(NULL),
ionJSContext(NULL),
ionStackLimit(0),
ionActivation(NULL),
ionPcScriptCache(NULL),
threadPool(this),
ctypesActivityCallback(NULL),
@ -933,7 +934,7 @@ JSRuntime::init(uint32_t maxbytes)
}
atomsCompartment->isSystemCompartment = true;
atomsCompartment->setGCLastBytes(8192, 8192, GC_NORMAL);
atomsCompartment->setGCLastBytes(8192, GC_NORMAL);
if (!InitAtoms(this))
return false;
@ -1055,9 +1056,9 @@ JSRuntime::clearOwnerThread()
js::TlsPerThreadData.set(NULL);
nativeStackBase = 0;
#if JS_STACK_GROWTH_DIRECTION > 0
nativeStackLimit = UINTPTR_MAX;
mainThread.nativeStackLimit = UINTPTR_MAX;
#else
nativeStackLimit = 0;
mainThread.nativeStackLimit = 0;
#endif
}
@ -2263,18 +2264,6 @@ JS_ComputeThis(JSContext *cx, jsval *vp)
return call.thisv();
}
JS_PUBLIC_API(void)
JS_MallocInCompartment(JSCompartment *comp, size_t nbytes)
{
comp->mallocInCompartment(nbytes);
}
JS_PUBLIC_API(void)
JS_FreeInCompartment(JSCompartment *comp, size_t nbytes)
{
comp->freeInCompartment(nbytes);
}
JS_PUBLIC_API(void *)
JS_malloc(JSContext *cx, size_t nbytes)
{
@ -2312,7 +2301,7 @@ JS_GetDefaultFreeOp(JSRuntime *rt)
JS_PUBLIC_API(void)
JS_updateMallocCounter(JSContext *cx, size_t nbytes)
{
return cx->runtime->updateMallocCounter(cx, nbytes);
return cx->runtime->updateMallocCounter(cx->compartment, nbytes);
}
JS_PUBLIC_API(char *)
@ -3100,17 +3089,17 @@ JS_SetNativeStackQuota(JSRuntime *rt, size_t stackSize)
#if JS_STACK_GROWTH_DIRECTION > 0
if (stackSize == 0) {
rt->nativeStackLimit = UINTPTR_MAX;
rt->mainThread.nativeStackLimit = UINTPTR_MAX;
} else {
JS_ASSERT(rt->nativeStackBase <= size_t(-1) - stackSize);
rt->nativeStackLimit = rt->nativeStackBase + stackSize - 1;
rt->mainThread.nativeStackLimit = rt->nativeStackBase + stackSize - 1;
}
#else
if (stackSize == 0) {
rt->nativeStackLimit = 0;
rt->mainThread.nativeStackLimit = 0;
} else {
JS_ASSERT(rt->nativeStackBase >= stackSize);
rt->nativeStackLimit = rt->nativeStackBase - (stackSize - 1);
rt->mainThread.nativeStackLimit = rt->nativeStackBase - (stackSize - 1);
}
#endif
}


@ -3538,12 +3538,6 @@ JS_THIS(JSContext *cx, jsval *vp)
*/
#define JS_THIS_VALUE(cx,vp) ((vp)[1])
extern JS_PUBLIC_API(void)
JS_MallocInCompartment(JSCompartment *comp, size_t nbytes);
extern JS_PUBLIC_API(void)
JS_FreeInCompartment(JSCompartment *comp, size_t nbytes);
extern JS_PUBLIC_API(void *)
JS_malloc(JSContext *cx, size_t nbytes);


@ -1415,6 +1415,8 @@ js::array_sort(JSContext *cx, unsigned argc, Value *vp)
result = vec.begin() + n;
}
} else {
/* array.sort() cannot currently be used from parallel code */
JS_ASSERT(!ForkJoinSlice::InParallelSection());
FastInvokeGuard fig(cx, fval);
if (!MergeSort(vec.begin(), n, vec.begin() + n,
SortComparatorFunction(cx, fval, fig))) {
@ -2203,6 +2205,7 @@ array_map(JSContext *cx, unsigned argc, Value *vp)
/* Step 8. */
RootedValue kValue(cx);
JS_ASSERT(!ForkJoinSlice::InParallelSection());
FastInvokeGuard fig(cx, ObjectValue(*callable));
InvokeArgsGuard &ag = fig.args();
while (k < len) {
@ -2283,6 +2286,7 @@ array_filter(JSContext *cx, unsigned argc, Value *vp)
uint32_t to = 0;
/* Step 9. */
JS_ASSERT(!ForkJoinSlice::InParallelSection());
FastInvokeGuard fig(cx, ObjectValue(*callable));
InvokeArgsGuard &ag = fig.args();
RootedValue kValue(cx);


@ -173,7 +173,7 @@ JSRuntime::triggerOperationCallback()
* into a weird state where interrupt is stuck at 0 but ionStackLimit is
* MAXADDR.
*/
ionStackLimit = -1;
mainThread.ionStackLimit = -1;
/*
* Use JS_ATOMIC_SET in the hope that it ensures the write will become
@ -467,7 +467,7 @@ ReportError(JSContext *cx, const char *message, JSErrorReport *reportp,
* the reporter triggers an over-recursion.
*/
int stackDummy;
if (!JS_CHECK_STACK_SIZE(cx->runtime->nativeStackLimit, &stackDummy))
if (!JS_CHECK_STACK_SIZE(cx->mainThread().nativeStackLimit, &stackDummy))
return;
if (cx->errorReporter)
@ -1343,7 +1343,7 @@ JSRuntime::setGCMaxMallocBytes(size_t value)
}
void
JSRuntime::updateMallocCounter(JSContext *cx, size_t nbytes)
JSRuntime::updateMallocCounter(JSCompartment *comp, size_t nbytes)
{
/* We tolerate any thread races when updating gcMallocBytes. */
ptrdiff_t oldCount = gcMallocBytes;
@ -1351,8 +1351,8 @@ JSRuntime::updateMallocCounter(JSContext *cx, size_t nbytes)
gcMallocBytes = newCount;
if (JS_UNLIKELY(newCount <= 0 && oldCount > 0))
onTooMuchMalloc();
else if (cx && cx->compartment)
cx->compartment->updateMallocCounter(nbytes);
else if (comp)
comp->updateMallocCounter(nbytes);
}
JS_FRIEND_API(void)


@ -467,6 +467,15 @@ class PerThreadData : public js::PerThreadDataFriendFields
int gcAssertNoGCDepth;
#endif
// If Ion code is on the stack, and has called into C++, this will be
// aligned to an Ion exit frame.
uint8_t *ionTop;
JSContext *ionJSContext;
uintptr_t ionStackLimit;
// This points to the most recent Ion activation running on the thread.
js::ion::IonActivation *ionActivation;
/*
* When this flag is non-zero, any attempt to GC will be skipped. It is used
* to suppress GC when reporting an OOM (see js_ReportOutOfMemory) and in
@ -1061,19 +1070,10 @@ struct JSRuntime : js::RuntimeFriendFields
bool jitHardening;
// If Ion code is on the stack, and has called into C++, this will be
// aligned to an Ion exit frame.
uint8_t *ionTop;
JSContext *ionJSContext;
uintptr_t ionStackLimit;
void resetIonStackLimit() {
ionStackLimit = nativeStackLimit;
mainThread.ionStackLimit = mainThread.nativeStackLimit;
}
// This points to the most recent Ion activation running on the thread.
js::ion::IonActivation *ionActivation;
// Cache for ion::GetPcScript().
js::ion::PcScriptCache *ionPcScriptCache;
@ -1121,66 +1121,44 @@ struct JSRuntime : js::RuntimeFriendFields
* Call the system malloc while checking for GC memory pressure and
* reporting OOM error when cx is not null. We will not GC from here.
*/
void* malloc_(size_t bytes, JSContext *cx = NULL) {
updateMallocCounter(cx, bytes);
void *p = js_malloc(bytes);
return JS_LIKELY(!!p) ? p : onOutOfMemory(NULL, bytes, cx);
}
inline void* malloc_(size_t bytes, JSCompartment *comp = NULL, JSContext *cx = NULL);
/*
* Call the system calloc while checking for GC memory pressure and
* reporting OOM error when cx is not null. We will not GC from here.
*/
void* calloc_(size_t bytes, JSContext *cx = NULL) {
updateMallocCounter(cx, bytes);
void *p = js_calloc(bytes);
return JS_LIKELY(!!p) ? p : onOutOfMemory(reinterpret_cast<void *>(1), bytes, cx);
}
inline void* calloc_(size_t bytes, JSCompartment *comp = NULL, JSContext *cx = NULL);
void* realloc_(void* p, size_t oldBytes, size_t newBytes, JSContext *cx = NULL) {
JS_ASSERT(oldBytes < newBytes);
updateMallocCounter(cx, newBytes - oldBytes);
void *p2 = js_realloc(p, newBytes);
return JS_LIKELY(!!p2) ? p2 : onOutOfMemory(p, newBytes, cx);
}
inline void* realloc_(void* p, size_t oldBytes, size_t newBytes, JSCompartment *comp = NULL, JSContext *cx = NULL);
void* realloc_(void* p, size_t bytes, JSContext *cx = NULL) {
/*
* For compatibility we do not account for realloc that increases
* previously allocated memory.
*/
if (!p)
updateMallocCounter(cx, bytes);
void *p2 = js_realloc(p, bytes);
return JS_LIKELY(!!p2) ? p2 : onOutOfMemory(p, bytes, cx);
inline void* realloc_(void* p, size_t bytes, JSCompartment *comp = NULL, JSContext *cx = NULL);
template <class T>
T *pod_malloc(JSCompartment *comp = NULL, JSContext *cx = NULL) {
return (T *)malloc_(sizeof(T), comp, cx);
}
template <class T>
T *pod_malloc(JSContext *cx = NULL) {
return (T *)malloc_(sizeof(T), cx);
T *pod_calloc(JSCompartment *comp = NULL, JSContext *cx = NULL) {
return (T *)calloc_(sizeof(T), comp, cx);
}
template <class T>
T *pod_calloc(JSContext *cx = NULL) {
return (T *)calloc_(sizeof(T), cx);
}
template <class T>
T *pod_malloc(size_t numElems, JSContext *cx = NULL) {
T *pod_malloc(size_t numElems, JSCompartment *comp = NULL, JSContext *cx = NULL) {
if (numElems & js::tl::MulOverflowMask<sizeof(T)>::result) {
js_ReportAllocationOverflow(cx);
return NULL;
}
return (T *)malloc_(numElems * sizeof(T), cx);
return (T *)malloc_(numElems * sizeof(T), comp, cx);
}
template <class T>
T *pod_calloc(size_t numElems, JSContext *cx = NULL) {
T *pod_calloc(size_t numElems, JSCompartment *comp = NULL, JSContext *cx = NULL) {
if (numElems & js::tl::MulOverflowMask<sizeof(T)>::result) {
js_ReportAllocationOverflow(cx);
return NULL;
}
return (T *)calloc_(numElems * sizeof(T), cx);
return (T *)calloc_(numElems * sizeof(T), comp, cx);
}
JS_DECLARE_NEW_METHODS(new_, malloc_, JS_ALWAYS_INLINE)
@ -1197,7 +1175,7 @@ struct JSRuntime : js::RuntimeFriendFields
* The function must be called outside the GC lock and in case of OOM error
* the caller must ensure that no deadlock possible during OOM reporting.
*/
void updateMallocCounter(JSContext *cx, size_t nbytes);
void updateMallocCounter(JSCompartment *comp, size_t nbytes);
bool isTooMuchMalloc() const {
return gcMallocBytes <= 0;
@ -1399,6 +1377,8 @@ struct JSContext : js::ContextFriendFields,
JSContext *thisDuringConstruction() { return this; }
~JSContext();
js::PerThreadData& mainThread() { return runtime->mainThread; }
private:
/* See JSContext::findVersion. */
JSVersion defaultVersion; /* script compilation version */
@ -1670,36 +1650,36 @@ struct JSContext : js::ContextFriendFields,
void leaveGenerator(JSGenerator *gen);
inline void* malloc_(size_t bytes) {
return runtime->malloc_(bytes, this);
return runtime->malloc_(bytes, compartment, this);
}
inline void* calloc_(size_t bytes) {
return runtime->calloc_(bytes, this);
return runtime->calloc_(bytes, compartment, this);
}
inline void* realloc_(void* p, size_t bytes) {
return runtime->realloc_(p, bytes, this);
return runtime->realloc_(p, bytes, compartment, this);
}
inline void* realloc_(void* p, size_t oldBytes, size_t newBytes) {
return runtime->realloc_(p, oldBytes, newBytes, this);
return runtime->realloc_(p, oldBytes, newBytes, compartment, this);
}
template <class T> T *pod_malloc() {
return runtime->pod_malloc<T>(this);
return runtime->pod_malloc<T>(compartment, this);
}
template <class T> T *pod_calloc() {
return runtime->pod_calloc<T>(this);
return runtime->pod_calloc<T>(compartment, this);
}
template <class T> T *pod_malloc(size_t numElems) {
return runtime->pod_malloc<T>(numElems, this);
return runtime->pod_malloc<T>(numElems, compartment, this);
}
template <class T>
T *pod_calloc(size_t numElems) {
return runtime->pod_calloc<T>(numElems, this);
return runtime->pod_calloc<T>(numElems, compartment, this);
}
JS_DECLARE_NEW_METHODS(new_, malloc_, JS_ALWAYS_INLINE)


@ -22,6 +22,44 @@
#include "jsgcinlines.h"
void*
JSRuntime::malloc_(size_t bytes, JSCompartment *comp, JSContext *cx) {
JS_ASSERT_IF(cx != NULL, cx->compartment == comp);
updateMallocCounter(comp, bytes);
void *p = js_malloc(bytes);
return JS_LIKELY(!!p) ? p : onOutOfMemory(NULL, bytes, cx);
}
void*
JSRuntime::calloc_(size_t bytes, JSCompartment *comp, JSContext *cx) {
JS_ASSERT_IF(cx != NULL, cx->compartment == comp);
updateMallocCounter(comp, bytes);
void *p = js_calloc(bytes);
return JS_LIKELY(!!p) ? p : onOutOfMemory(reinterpret_cast<void *>(1), bytes, cx);
}
void*
JSRuntime::realloc_(void* p, size_t oldBytes, size_t newBytes, JSCompartment *comp, JSContext *cx) {
JS_ASSERT_IF(cx != NULL, cx->compartment == comp);
JS_ASSERT(oldBytes < newBytes);
updateMallocCounter(comp, newBytes - oldBytes);
void *p2 = js_realloc(p, newBytes);
return JS_LIKELY(!!p2) ? p2 : onOutOfMemory(p, newBytes, cx);
}
void*
JSRuntime::realloc_(void* p, size_t bytes, JSCompartment *comp, JSContext *cx) {
JS_ASSERT_IF(cx != NULL, cx->compartment == comp);
/*
* For compatibility we do not account for realloc that increases
* previously allocated memory.
*/
if (!p)
updateMallocCounter(comp, bytes);
void *p2 = js_realloc(p, bytes);
return JS_LIKELY(!!p2) ? p2 : onOutOfMemory(p, bytes, cx);
}
namespace js {
inline void


@ -27,6 +27,7 @@
#include "methodjit/MonoIC.h"
#include "methodjit/Retcon.h"
#include "vm/Debugger.h"
#include "vm/ForkJoin.h"
#include "yarr/BumpPointerAllocator.h"
#include "jsgcinlines.h"
@ -51,6 +52,7 @@ JSCompartment::JSCompartment(JSRuntime *rt)
principals(NULL),
global_(NULL),
enterCompartmentDepth(0),
allocator(this),
#ifdef JSGC_GENERATIONAL
gcNursery(),
gcStoreBuffer(&gcNursery),
@ -74,8 +76,6 @@ JSCompartment::JSCompartment(JSRuntime *rt)
lastAnimationTime(0),
regExps(rt),
propertyTree(thisForCtor()),
gcMallocAndFreeBytes(0),
gcTriggerMallocAndFreeBytes(0),
gcIncomingGrayPointers(NULL),
gcLiveArrayBuffers(NULL),
gcWeakMapList(NULL),
@ -581,7 +581,7 @@ JSCompartment::markTypes(JSTracer *trc)
}
for (size_t thingKind = FINALIZE_OBJECT0; thingKind < FINALIZE_OBJECT_LIMIT; thingKind++) {
ArenaHeader *aheader = arenas.getFirstArena(static_cast<AllocKind>(thingKind));
ArenaHeader *aheader = allocator.arenas.getFirstArena(static_cast<AllocKind>(thingKind));
if (aheader)
rt->gcMarker.pushArenaList(aheader);
}
@ -1034,3 +1034,9 @@ JSCompartment::sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf, size_t *compa
*regexpCompartment = regExps.sizeOfExcludingThis(mallocSizeOf);
*debuggeesSet = debuggees.sizeOfExcludingThis(mallocSizeOf);
}
void
JSCompartment::adoptWorkerAllocator(Allocator *workerAllocator)
{
allocator.arenas.adoptArenas(rt, &workerAllocator->arenas);
}


@ -121,6 +121,41 @@ class AutoDebugModeGC;
class DebugScopes;
}
namespace js {
/*
* Encapsulates the data needed to perform allocation. Typically
* there is precisely one of these per compartment
* (|compartment.allocator|). However, in parallel execution mode,
* there will be one per worker thread. In general, if a piece of
* code must perform allocation and should work safely in either
* parallel or sequential mode, you should make it take an
* |Allocator*| rather than a |JSContext*|.
*/
class Allocator
{
JSCompartment*const compartment;
public:
explicit Allocator(JSCompartment *compartment);
js::gc::ArenaLists arenas;
inline void *parallelNewGCThing(gc::AllocKind thingKind, size_t thingSize);
inline void* malloc_(size_t bytes);
inline void* calloc_(size_t bytes);
inline void* realloc_(void* p, size_t bytes);
inline void* realloc_(void* p, size_t oldBytes, size_t newBytes);
template <class T> inline T *pod_malloc();
template <class T> inline T *pod_calloc();
template <class T> inline T *pod_malloc(size_t numElems);
template <class T> inline T *pod_calloc(size_t numElems);
JS_DECLARE_NEW_METHODS(new_, malloc_, JS_ALWAYS_INLINE)
};
}
struct JSCompartment : private JS::shadow::Compartment, public js::gc::GraphNodeBase<JSCompartment>
{
JSRuntime *rt;
@ -157,7 +192,14 @@ struct JSCompartment : private JS::shadow::Compartment, public js::gc::GraphNode
}
public:
js::gc::ArenaLists arenas;
js::Allocator allocator;
/*
* Moves all data from the allocator |workerAllocator|, which was
* in use by a parallel worker, into the compartment's main
* allocator. This is used at the end of a parallel section.
*/
void adoptWorkerAllocator(js::Allocator *workerAllocator);
#ifdef JSGC_GENERATIONAL
js::gc::Nursery gcNursery;
@ -359,15 +401,6 @@ struct JSCompartment : private JS::shadow::Compartment, public js::gc::GraphNode
js::CallsiteCloneTable callsiteClones;
void sweepCallsiteClones();
/*
* Keeps track of the total number of malloc bytes connected to a
* compartment's GC things. This counter should be used in preference to
* gcMallocBytes. These counters affect collection in the same way as
* gcBytes and gcTriggerBytes.
*/
size_t gcMallocAndFreeBytes;
size_t gcTriggerMallocAndFreeBytes;
/* During GC, stores the index of this compartment in rt->compartments. */
unsigned gcIndex;
@ -444,12 +477,16 @@ struct JSCompartment : private JS::shadow::Compartment, public js::gc::GraphNode
void findOutgoingEdges(js::gc::ComponentFinder<JSCompartment> &finder);
void setGCLastBytes(size_t lastBytes, size_t lastMallocBytes, js::JSGCInvocationKind gckind);
void setGCLastBytes(size_t lastBytes, js::JSGCInvocationKind gckind);
void reduceGCTriggerBytes(size_t amount);
void resetGCMallocBytes();
void setGCMaxMallocBytes(size_t value);
void updateMallocCounter(size_t nbytes) {
/*
* Note: this code may be run from worker threads. We
* tolerate any thread races when updating gcMallocBytes.
*/
ptrdiff_t oldCount = gcMallocBytes;
ptrdiff_t newCount = oldCount - ptrdiff_t(nbytes);
gcMallocBytes = newCount;
@ -463,15 +500,6 @@ struct JSCompartment : private JS::shadow::Compartment, public js::gc::GraphNode
void onTooMuchMalloc();
void mallocInCompartment(size_t nbytes) {
gcMallocAndFreeBytes += nbytes;
}
void freeInCompartment(size_t nbytes) {
JS_ASSERT(gcMallocAndFreeBytes >= nbytes);
gcMallocAndFreeBytes -= nbytes;
}
js::DtoaCache dtoaCache;
/* Random number generator state, used by jsmath.cpp. */


@ -27,4 +27,62 @@ js::AutoCompartment::~AutoCompartment()
cx_->leaveCompartment(origin_);
}
inline void *
js::Allocator::malloc_(size_t bytes)
{
return compartment->rt->malloc_(bytes, compartment);
}
inline void *
js::Allocator::calloc_(size_t bytes)
{
return compartment->rt->calloc_(bytes, compartment);
}
inline void *
js::Allocator::realloc_(void *p, size_t bytes)
{
return compartment->rt->realloc_(p, bytes, compartment);
}
inline void *
js::Allocator::realloc_(void* p, size_t oldBytes, size_t newBytes)
{
return compartment->rt->realloc_(p, oldBytes, newBytes, compartment);
}
template <class T>
inline T *
js::Allocator::pod_malloc()
{
return compartment->rt->pod_malloc<T>(compartment);
}
template <class T>
inline T *
js::Allocator::pod_calloc()
{
return compartment->rt->pod_calloc<T>(compartment);
}
template <class T>
inline T *
js::Allocator::pod_malloc(size_t numElems)
{
return compartment->rt->pod_malloc<T>(numElems, compartment);
}
template <class T>
inline T *
js::Allocator::pod_calloc(size_t numElems)
{
return compartment->rt->pod_calloc<T>(numElems, compartment);
}
inline void *
js::Allocator::parallelNewGCThing(gc::AllocKind thingKind, size_t thingSize)
{
return arenas.parallelAllocate(compartment, thingKind, thingSize);
}
#endif /* jscompartment_inlines_h___ */


@ -23,9 +23,11 @@ using namespace js;
using namespace JS;
// Required by PerThreadDataFriendFields::getMainThread()
JS_STATIC_ASSERT(offsetof(JSRuntime, mainThread) == sizeof(RuntimeFriendFields));
JS_STATIC_ASSERT(offsetof(JSRuntime, mainThread) ==
PerThreadDataFriendFields::RuntimeMainThreadOffset);
PerThreadDataFriendFields::PerThreadDataFriendFields()
: nativeStackLimit(0)
{
#if defined(DEBUG) && defined(JS_GC_ZEAL) && defined(JSGC_ROOT_ANALYSIS) && !defined(JS_THREADSAFE)
skipGCRooters = NULL;


@ -569,7 +569,7 @@ IsObjectInContextCompartment(RawObject obj, const JSContext *cx);
inline uintptr_t
GetNativeStackLimit(const JSRuntime *rt)
{
return RuntimeFriendFields::get(rt)->nativeStackLimit;
return PerThreadDataFriendFields::getMainThread(rt)->nativeStackLimit;
}
/*


@ -82,6 +82,7 @@
#include "methodjit/MethodJIT.h"
#include "vm/Debugger.h"
#include "vm/String.h"
#include "vm/ForkJoin.h"
#include "ion/IonCode.h"
#ifdef JS_ION
# include "ion/IonMacroAssembler.h"
@ -273,7 +274,7 @@ ArenaHeader::checkSynchronizedWithFreeList() const
FreeSpan firstSpan = FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
if (firstSpan.isEmpty())
return;
const FreeSpan *list = compartment->arenas.getFreeList(getAllocKind());
const FreeSpan *list = compartment->allocator.arenas.getFreeList(getAllocKind());
if (list->isEmpty() || firstSpan.arenaAddress() != list->arenaAddress())
return;
@ -1099,7 +1100,7 @@ ComputeTriggerBytes(JSCompartment *comp, size_t lastBytes, size_t maxBytes, JSGC
}
void
JSCompartment::setGCLastBytes(size_t lastBytes, size_t lastMallocBytes, JSGCInvocationKind gckind)
JSCompartment::setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind)
{
/*
* The heap growth factor depends on the heap size after a GC and the GC frequency.
@ -1137,7 +1138,6 @@ JSCompartment::setGCLastBytes(size_t lastBytes, size_t lastMallocBytes, JSGCInvo
}
}
gcTriggerBytes = ComputeTriggerBytes(this, lastBytes, rt->gcMaxBytes, gckind);
gcTriggerMallocAndFreeBytes = ComputeTriggerBytes(this, lastMallocBytes, SIZE_MAX, gckind);
}
void
@ -1150,6 +1150,10 @@ JSCompartment::reduceGCTriggerBytes(size_t amount)
gcTriggerBytes -= amount;
}
Allocator::Allocator(JSCompartment *compartment)
: compartment(compartment)
{}
inline void
ArenaLists::prepareForIncrementalGC(JSRuntime *rt)
{
@ -1170,9 +1174,36 @@ PushArenaAllocatedDuringSweep(JSRuntime *runtime, ArenaHeader *arena)
runtime->gcArenasAllocatedDuringSweep = arena;
}
void *
ArenaLists::parallelAllocate(JSCompartment *comp, AllocKind thingKind, size_t thingSize)
{
/*
* During parallel Rivertrail sections, no GC is permitted. If no
* existing arena can satisfy the allocation, then a new one is
* allocated. If that fails, then we return NULL which will cause
* the parallel section to abort.
*/
void *t = allocateFromFreeList(thingKind, thingSize);
if (t)
return t;
return allocateFromArena(comp, thingKind);
}
inline void *
ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
{
/*
* Parallel JS Note:
*
* This function can be called from parallel threads all of which
* are associated with the same compartment. In that case, each
* thread will have a distinct ArenaLists. Therefore, whenever we
* fall through to PickChunk() we must be sure that we are holding
* a lock.
*/
Chunk *chunk = NULL;
ArenaList *al = &arenaLists[thingKind];
@ -1350,7 +1381,7 @@ ArenaLists::backgroundFinalize(FreeOp *fop, ArenaHeader *listHead, bool onBackgr
* the head list as we emptied the list before the background finalization
* and the allocation adds new arenas before the cursor.
*/
ArenaLists *lists = &comp->arenas;
ArenaLists *lists = &comp->allocator.arenas;
ArenaList *al = &lists->arenaLists[thingKind];
AutoLockGC lock(fop->runtime());
@ -1439,21 +1470,40 @@ ArenaLists::queueIonCodeForSweep(FreeOp *fop)
finalizeNow(fop, FINALIZE_IONCODE);
}
static void
RunLastDitchGC(JSContext *cx, gcreason::Reason reason)
static void*
RunLastDitchGC(JSContext *cx, JSCompartment *comp, AllocKind thingKind)
{
/*
* In parallel sections, we do not attempt to refill the free list
* and hence do not encounter last ditch GC.
*/
JS_ASSERT(!ForkJoinSlice::InParallelSection());
PrepareCompartmentForGC(comp);
JSRuntime *rt = cx->runtime;
/* The last ditch GC preserves all atoms. */
AutoKeepAtoms keep(rt);
GC(rt, GC_NORMAL, reason);
GC(rt, GC_NORMAL, gcreason::LAST_DITCH);
/*
* The JSGC_END callback can legitimately allocate new GC
* things and populate the free list. If that happens, just
* return that list head.
*/
size_t thingSize = Arena::thingSize(thingKind);
if (void *thing = comp->allocator.arenas.allocateFromFreeList(thingKind, thingSize))
return thing;
return NULL;
}
template <AllowGC allowGC>
/* static */ void *
ArenaLists::refillFreeList(JSContext *cx, AllocKind thingKind)
{
JS_ASSERT(cx->compartment->arenas.freeLists[thingKind].isEmpty());
JS_ASSERT(cx->compartment->allocator.arenas.freeLists[thingKind].isEmpty());
JSCompartment *comp = cx->compartment;
JSRuntime *rt = comp->rt;
@ -1464,16 +1514,7 @@ ArenaLists::refillFreeList(JSContext *cx, AllocKind thingKind)
allowGC;
for (;;) {
if (JS_UNLIKELY(runGC)) {
PrepareCompartmentForGC(comp);
RunLastDitchGC(cx, gcreason::LAST_DITCH);
/*
* The JSGC_END callback can legitimately allocate new GC
* things and populate the free list. If that happens, just
* return that list head.
*/
size_t thingSize = Arena::thingSize(thingKind);
if (void *thing = comp->arenas.allocateFromFreeList(thingKind, thingSize))
if (void *thing = RunLastDitchGC(cx, comp, thingKind))
return thing;
}
@ -1486,7 +1527,7 @@ ArenaLists::refillFreeList(JSContext *cx, AllocKind thingKind)
* always try to allocate twice.
*/
for (bool secondAttempt = false; ; secondAttempt = true) {
void *thing = comp->arenas.allocateFromArena(comp, thingKind);
void *thing = comp->allocator.arenas.allocateFromArena(comp, thingKind);
if (JS_LIKELY(!!thing))
return thing;
if (secondAttempt)
@ -1911,6 +1952,12 @@ TriggerOperationCallback(JSRuntime *rt, gcreason::Reason reason)
void
js::TriggerGC(JSRuntime *rt, gcreason::Reason reason)
{
/* Wait till end of parallel section to trigger GC. */
if (ForkJoinSlice *slice = ForkJoinSlice::Current()) {
slice->requestGC(reason);
return;
}
rt->assertValidThread();
if (rt->isHeapBusy())
@ -1923,6 +1970,12 @@ js::TriggerGC(JSRuntime *rt, gcreason::Reason reason)
void
js::TriggerCompartmentGC(JSCompartment *comp, gcreason::Reason reason)
{
/* Wait till end of parallel section to trigger GC. */
if (ForkJoinSlice *slice = ForkJoinSlice::Current()) {
slice->requestCompartmentGC(comp, reason);
return;
}
JSRuntime *rt = comp->rt;
rt->assertValidThread();
@ -1973,12 +2026,6 @@ js::MaybeGC(JSContext *cx)
return;
}
if (comp->gcMallocAndFreeBytes > comp->gcTriggerMallocAndFreeBytes) {
PrepareCompartmentForGC(comp);
GCSlice(rt, GC_NORMAL, gcreason::MAYBEGC);
return;
}
#ifndef JS_MORE_DETERMINISTIC
/*
* Access to the counters and, on 32 bit, setting gcNextFullGCTime below
@ -2140,7 +2187,7 @@ SweepBackgroundThings(JSRuntime* rt, bool onBackgroundThread)
for (JSCompartment *comp = rt->gcSweepingCompartments; comp; comp = comp->gcNextGraphNode) {
for (int index = 0 ; index < BackgroundPhaseLength[phase] ; ++index) {
AllocKind kind = BackgroundPhases[phase][index];
ArenaHeader *arenas = comp->arenas.arenaListsToSweep[kind];
ArenaHeader *arenas = comp->allocator.arenas.arenaListsToSweep[kind];
if (arenas)
ArenaLists::backgroundFinalize(&fop, arenas, onBackgroundThread);
}
@ -2157,8 +2204,8 @@ AssertBackgroundSweepingFinished(JSRuntime *rt)
JS_ASSERT(!rt->gcSweepingCompartments);
for (CompartmentsIter c(rt); !c.done(); c.next()) {
for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i) {
JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
JS_ASSERT(c->arenas.doneBackgroundFinalize(AllocKind(i)));
JS_ASSERT(!c->allocator.arenas.arenaListsToSweep[i]);
JS_ASSERT(c->allocator.arenas.doneBackgroundFinalize(AllocKind(i)));
}
}
}
@ -2492,9 +2539,9 @@ SweepCompartments(FreeOp *fop, bool lastGC)
JSCompartment *compartment = *read++;
if (!compartment->hold && compartment->wasGCStarted() &&
(compartment->arenas.arenaListsAreEmpty() || lastGC))
(compartment->allocator.arenas.arenaListsAreEmpty() || lastGC))
{
compartment->arenas.checkEmptyFreeLists();
compartment->allocator.arenas.checkEmptyFreeLists();
if (callback)
callback(fop, compartment);
if (compartment->principals)
@ -2624,7 +2671,7 @@ BeginMarkPhase(JSRuntime *rt)
/* Assert that compartment state is as we expect */
JS_ASSERT(!c->isCollecting());
for (unsigned i = 0; i < FINALIZE_LIMIT; ++i)
JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
JS_ASSERT(!c->allocator.arenas.arenaListsToSweep[i]);
JS_ASSERT(!c->gcLiveArrayBuffers);
/* Set up which compartments will be collected. */
@ -2668,7 +2715,7 @@ BeginMarkPhase(JSRuntime *rt)
*/
if (rt->gcIsIncremental) {
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.purge();
c->allocator.arenas.purge();
}
rt->gcMarker.start();
@ -2709,7 +2756,7 @@ BeginMarkPhase(JSRuntime *rt)
for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
/* Unmark everything in the compartments being collected. */
c->arenas.unmarkAll();
c->allocator.arenas.unmarkAll();
/* Reset weak map list for the compartments being collected. */
WeakMapBase::resetCompartmentWeakMapList(c);
@ -3169,7 +3216,7 @@ JSCompartment::findOutgoingEdges(ComponentFinder<JSCompartment> &finder)
static void
FindCompartmentGroups(JSRuntime *rt)
{
ComponentFinder<JSCompartment> finder(rt->nativeStackLimit);
ComponentFinder<JSCompartment> finder(rt->mainThread.nativeStackLimit);
if (!rt->gcIsIncremental)
finder.useOneComponent();
@ -3502,7 +3549,7 @@ BeginSweepingCompartmentGroup(JSRuntime *rt)
c->setGCState(JSCompartment::Sweep);
/* Purge the ArenaLists before sweeping. */
c->arenas.purge();
c->allocator.arenas.purge();
if (c == rt->atomsCompartment)
sweepingAtoms = true;
@ -3553,24 +3600,24 @@ BeginSweepingCompartmentGroup(JSRuntime *rt)
*/
for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
gcstats::AutoSCC scc(rt->gcStats, rt->gcCompartmentGroupIndex);
c->arenas.queueObjectsForSweep(&fop);
c->allocator.arenas.queueObjectsForSweep(&fop);
}
for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
gcstats::AutoSCC scc(rt->gcStats, rt->gcCompartmentGroupIndex);
c->arenas.queueStringsForSweep(&fop);
c->allocator.arenas.queueStringsForSweep(&fop);
}
for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
gcstats::AutoSCC scc(rt->gcStats, rt->gcCompartmentGroupIndex);
c->arenas.queueScriptsForSweep(&fop);
c->allocator.arenas.queueScriptsForSweep(&fop);
}
for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
gcstats::AutoSCC scc(rt->gcStats, rt->gcCompartmentGroupIndex);
c->arenas.queueShapesForSweep(&fop);
c->allocator.arenas.queueShapesForSweep(&fop);
}
#ifdef JS_ION
for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
gcstats::AutoSCC scc(rt->gcStats, rt->gcCompartmentGroupIndex);
c->arenas.queueIonCodeForSweep(&fop);
c->allocator.arenas.queueIonCodeForSweep(&fop);
}
#endif
@ -3678,7 +3725,7 @@ SweepPhase(JSRuntime *rt, SliceBudget &sliceBudget)
while (rt->gcSweepKindIndex < FinalizePhaseLength[rt->gcSweepPhase]) {
AllocKind kind = FinalizePhases[rt->gcSweepPhase][rt->gcSweepKindIndex];
if (!c->arenas.foregroundFinalize(&fop, kind, sliceBudget))
if (!c->allocator.arenas.foregroundFinalize(&fop, kind, sliceBudget))
return false; /* Yield to the mutator. */
++rt->gcSweepKindIndex;
@ -3730,7 +3777,7 @@ EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool lastGC)
if (rt->gcFoundBlackGrayEdges) {
for (CompartmentsIter c(rt); !c.done(); c.next()) {
if (!c->isCollecting())
c->arenas.unmarkAll();
c->allocator.arenas.unmarkAll();
}
}
@ -3805,7 +3852,7 @@ EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool lastGC)
}
for (CompartmentsIter c(rt); !c.done(); c.next()) {
c->setGCLastBytes(c->gcBytes, c->gcMallocAndFreeBytes, gckind);
c->setGCLastBytes(c->gcBytes, gckind);
if (c->isCollecting()) {
JS_ASSERT(c->isGCFinished());
c->setGCState(JSCompartment::NoGC);
@ -3826,7 +3873,7 @@ EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool lastGC)
for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i) {
JS_ASSERT_IF(!IsBackgroundFinalized(AllocKind(i)) ||
!rt->gcSweepOnBackgroundThread,
!c->arenas.arenaListsToSweep[i]);
!c->allocator.arenas.arenaListsToSweep[i]);
}
#endif
}
@ -3895,13 +3942,13 @@ AutoCopyFreeListToArenas::AutoCopyFreeListToArenas(JSRuntime *rt)
: runtime(rt)
{
for (CompartmentsIter c(rt); !c.done(); c.next())
c->arenas.copyFreeListsToArenas();
c->allocator.arenas.copyFreeListsToArenas();
}
AutoCopyFreeListToArenas::~AutoCopyFreeListToArenas()
{
for (CompartmentsIter c(runtime); !c.done(); c.next())
c->arenas.clearFreeListsInArenas();
c->allocator.arenas.clearFreeListsInArenas();
}
static void
@ -3967,7 +4014,7 @@ ResetIncrementalGC(JSRuntime *rt, const char *reason)
JS_ASSERT(!c->needsBarrier());
JS_ASSERT(!c->gcLiveArrayBuffers);
for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i)
JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
JS_ASSERT(!c->allocator.arenas.arenaListsToSweep[i]);
}
#endif
}
@ -4014,7 +4061,7 @@ AutoGCSlice::~AutoGCSlice()
for (CompartmentsIter c(runtime); !c.done(); c.next()) {
if (c->isGCMarking()) {
c->setNeedsBarrier(true, JSCompartment::UpdateIon);
c->arenas.prepareForIncrementalGC(runtime);
c->allocator.arenas.prepareForIncrementalGC(runtime);
} else {
c->setNeedsBarrier(false, JSCompartment::UpdateIon);
}
@ -4306,6 +4353,9 @@ static void
Collect(JSRuntime *rt, bool incremental, int64_t budget,
JSGCInvocationKind gckind, gcreason::Reason reason)
{
/* GC shouldn't be running in parallel execution mode */
JS_ASSERT(!ForkJoinSlice::InParallelSection());
JS_AbortIfWrongThread(rt);
#if JS_TRACE_LOGGING
@ -4505,7 +4555,7 @@ gc::NewCompartment(JSContext *cx, JSPrincipals *principals)
// Set up the principals.
JS_SetCompartmentPrincipals(compartment, principals);
compartment->setGCLastBytes(8192, 8192, GC_NORMAL);
compartment->setGCLastBytes(8192, GC_NORMAL);
/*
* Before reporting the OOM condition, |lock| needs to be cleaned up,
@ -4745,6 +4795,65 @@ js::PurgeJITCaches(JSCompartment *c)
#endif
}
void
ArenaLists::adoptArenas(JSRuntime *rt, ArenaLists *fromArenaLists)
{
// The other parallel threads have all completed now, and GC
// should be inactive, but still take the lock as a kind of read
// fence.
AutoLockGC lock(rt);
fromArenaLists->purge();
for (size_t thingKind = 0; thingKind != FINALIZE_LIMIT; thingKind++) {
#ifdef JS_THREADSAFE
// When we enter a parallel section, we join the background
// thread, and we do not run GC while in the parallel section,
// so no finalizer should be active!
volatile uintptr_t *bfs = &backgroundFinalizeState[thingKind];
switch (*bfs) {
case BFS_DONE:
break;
case BFS_JUST_FINISHED:
// No allocations between end of last sweep and now.
// Transferring over arenas is a kind of allocation.
*bfs = BFS_DONE;
break;
default:
JS_ASSERT(!"Background finalization in progress, but it should not be.");
break;
}
#endif /* JS_THREADSAFE */
ArenaList *fromList = &fromArenaLists->arenaLists[thingKind];
ArenaList *toList = &arenaLists[thingKind];
while (fromList->head != NULL) {
ArenaHeader *fromHeader = fromList->head;
fromList->head = fromHeader->next;
fromHeader->next = NULL;
toList->insert(fromHeader);
}
}
}
bool
ArenaLists::containsArena(JSRuntime *rt, ArenaHeader *needle)
{
AutoLockGC lock(rt);
size_t allocKind = needle->getAllocKind();
for (ArenaHeader *aheader = arenaLists[allocKind].head;
aheader != NULL;
aheader = aheader->next)
{
if (aheader == needle)
return true;
}
return false;
}
AutoMaybeTouchDeadCompartments::AutoMaybeTouchDeadCompartments(JSContext *cx)
: runtime(cx->runtime),
markCount(runtime->gcObjectsMarkedInDeadCompartments),


@ -414,6 +414,18 @@ struct ArenaLists {
template <AllowGC allowGC>
static void *refillFreeList(JSContext *cx, AllocKind thingKind);
/*
* Moves all arenas from |fromArenaLists| into |this|. In
* parallel blocks, we temporarily create one ArenaLists per
* parallel thread. When the parallel block ends, we move
* whatever allocations may have been performed back into the
* compartment's main arena list using this function.
*/
void adoptArenas(JSRuntime *runtime, ArenaLists *fromArenaLists);
/* True if the ArenaHeader in question is found in this ArenaLists */
bool containsArena(JSRuntime *runtime, ArenaHeader *arenaHeader);
void checkEmptyFreeLists() {
#ifdef DEBUG
for (size_t i = 0; i < mozilla::ArrayLength(freeLists); ++i)
@ -434,6 +446,14 @@ struct ArenaLists {
bool foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget);
static void backgroundFinalize(FreeOp *fop, ArenaHeader *listHead, bool onBackgroundThread);
/*
* Invoked from IonMonkey-compiled parallel worker threads to
* perform an allocation. In this case, |this| will be
* thread-local, but the compartment |comp| is shared between all
* threads.
*/
void *parallelAllocate(JSCompartment *comp, AllocKind thingKind, size_t thingSize);
private:
inline void finalizeNow(FreeOp *fop, AllocKind thingKind);
inline void queueForForegroundSweep(FreeOp *fop, AllocKind thingKind);


@ -233,8 +233,8 @@ class ArenaIter
}
void init(JSCompartment *comp, AllocKind kind) {
aheader = comp->arenas.getFirstArena(kind);
remainingHeader = comp->arenas.getFirstArenaToSweep(kind);
aheader = comp->allocator.arenas.getFirstArena(kind);
remainingHeader = comp->allocator.arenas.getFirstArenaToSweep(kind);
if (!aheader) {
aheader = remainingHeader;
remainingHeader = NULL;
@ -273,7 +273,7 @@ class CellIterImpl
}
void initSpan(JSCompartment *comp, AllocKind kind) {
JS_ASSERT(comp->arenas.isSynchronizedFreeList(kind));
JS_ASSERT(comp->allocator.arenas.isSynchronizedFreeList(kind));
firstThingOffset = Arena::firstThingOffset(kind);
thingSize = Arena::thingSize(kind);
firstSpan.initAsEmpty();
@ -356,7 +356,7 @@ class CellIter : public CellIterImpl
#endif
public:
CellIter(JSCompartment *comp, AllocKind kind)
: lists(&comp->arenas),
: lists(&comp->allocator.arenas),
kind(kind)
{
/*
@ -503,7 +503,7 @@ NewGCThing(JSContext *cx, js::gc::AllocKind kind, size_t thingSize)
MaybeCheckStackRoots(cx, /* relax = */ false);
JSCompartment *comp = cx->compartment;
T *t = static_cast<T *>(comp->arenas.allocateFromFreeList(kind, thingSize));
T *t = static_cast<T *>(comp->allocator.arenas.allocateFromFreeList(kind, thingSize));
if (!t)
t = static_cast<T *>(js::gc::ArenaLists::refillFreeList<allowGC>(cx, kind));


@ -19,6 +19,7 @@
#include "jsstr.h"
#include "methodjit/MethodJIT.h"
#include "vm/ForkJoin.h"
#include "jsatominlines.h"
#include "jsfuninlines.h"
@ -1135,6 +1136,7 @@ class FastInvokeGuard
, useIon_(ion::IsEnabled(cx))
#endif
{
JS_ASSERT(!ForkJoinSlice::InParallelSection());
initFunction(fval);
}


@ -45,8 +45,8 @@
#include "jsinferinlines.h"
#include "jsobjinlines.h"
#include "builtin/ParallelArray-inl.h"
#include "builtin/Iterator-inl.h"
#include "builtin/ParallelArray-inl.h"
#include "vm/Stack-inl.h"
#include "vm/String-inl.h"


@ -62,6 +62,7 @@
#include "jsobjinlines.h"
#include "jsscopeinlines.h"
#include "jsscriptinlines.h"
#include "jscompartmentinlines.h"
#include "vm/BooleanObject-inl.h"
#include "vm/NumberObject-inl.h"
@ -2522,6 +2523,29 @@ JSObject::willBeSparseElements(unsigned requiredCapacity, unsigned newElementsHi
bool
JSObject::growElements(JSContext *cx, unsigned newcap)
{
size_t oldSize = Probes::objectResizeActive() ? computedSizeOfThisSlotsElements() : 0;
if (!growElements(&cx->compartment->allocator, newcap)) {
JS_ReportOutOfMemory(cx);
return false;
}
if (Probes::objectResizeActive())
Probes::resizeObject(cx, this, oldSize, computedSizeOfThisSlotsElements());
return true;
}
bool
JSObject::growElements(js::Allocator *alloc, unsigned newcap)
{
/*
* This version of |growElements()|, which takes a
* |js::Allocator*| as opposed to a |JSContext*|, is intended to
* run during either sequential or parallel execution. As per
* convention, since it does not take a JSContext*, it does not
* report an error on out of memory but simply returns false.
*/
JS_ASSERT(isExtensible());
/*
@ -2537,8 +2561,6 @@ JSObject::growElements(JSContext *cx, unsigned newcap)
uint32_t oldcap = getDenseCapacity();
JS_ASSERT(oldcap <= newcap);
size_t oldSize = Probes::objectResizeActive() ? computedSizeOfThisSlotsElements() : 0;
uint32_t nextsize = (oldcap <= CAPACITY_DOUBLING_MAX)
? oldcap * 2
: oldcap + (oldcap >> 3);
@ -2551,7 +2573,6 @@ JSObject::growElements(JSContext *cx, unsigned newcap)
/* Don't let nelements get close to wrapping around uint32_t. */
if (actualCapacity >= NELEMENTS_LIMIT || actualCapacity < oldcap || actualCapacity < newcap) {
JS_ReportOutOfMemory(cx);
return false;
}
@ -2562,12 +2583,12 @@ JSObject::growElements(JSContext *cx, unsigned newcap)
if (hasDynamicElements()) {
uint32_t oldAllocated = oldcap + ObjectElements::VALUES_PER_HEADER;
newheader = (ObjectElements *)
cx->realloc_(getElementsHeader(), oldAllocated * sizeof(Value),
newAllocated * sizeof(Value));
alloc->realloc_(getElementsHeader(), oldAllocated * sizeof(Value),
newAllocated * sizeof(Value));
if (!newheader)
return false; /* Leave elements as its old size. */
} else {
newheader = (ObjectElements *) cx->malloc_(newAllocated * sizeof(Value));
newheader = (ObjectElements *) alloc->malloc_(newAllocated * sizeof(Value));
if (!newheader)
return false; /* Ditto. */
js_memcpy(newheader, getElementsHeader(),
@ -2579,9 +2600,6 @@ JSObject::growElements(JSContext *cx, unsigned newcap)
Debug_SetSlotRangeToCrashOnTouch(elements + initlen, actualCapacity - initlen);
if (Probes::objectResizeActive())
Probes::resizeObject(cx, this, oldSize, computedSizeOfThisSlotsElements());
return true;
}


@ -580,6 +580,7 @@ class JSObject : public js::ObjectImpl
inline bool ensureElements(JSContext *cx, unsigned cap);
bool growElements(JSContext *cx, unsigned cap);
bool growElements(js::Allocator *alloc, unsigned cap);
void shrinkElements(JSContext *cx, unsigned cap);
inline void setDynamicElements(js::ObjectElements *header);
@ -612,6 +613,10 @@ class JSObject : public js::ObjectImpl
*/
enum EnsureDenseResult { ED_OK, ED_FAILED, ED_SPARSE };
inline EnsureDenseResult ensureDenseElements(JSContext *cx, unsigned index, unsigned extra);
inline EnsureDenseResult parExtendDenseElements(js::Allocator *alloc, js::Value *v,
uint32_t extra);
template<typename CONTEXT>
inline EnsureDenseResult extendDenseElements(CONTEXT *cx, unsigned requiredCapacity, unsigned extra);
/* Convert a single dense element to a sparse property. */
static bool sparsifyDenseElement(JSContext *cx, js::HandleObject obj, unsigned index);


@ -587,6 +587,80 @@ JSObject::ensureDenseInitializedLength(JSContext *cx, uint32_t index, uint32_t e
}
}
template<typename CONTEXT>
JSObject::EnsureDenseResult
JSObject::extendDenseElements(CONTEXT *cx, unsigned requiredCapacity, unsigned extra)
{
/*
* Don't grow elements for non-extensible objects or watched objects. Dense
* elements can be added/written with no extensible or watchpoint checks as
* long as there is capacity for them.
*/
if (!isExtensible() || watched()) {
JS_ASSERT(getDenseCapacity() == 0);
return ED_SPARSE;
}
/*
* Don't grow elements for objects which already have sparse indexes.
* This avoids needing to count non-hole elements in willBeSparseElements
* every time a new index is added.
*/
if (isIndexed())
return ED_SPARSE;
/*
* We use the extra argument also as a hint about number of non-hole
* elements to be inserted.
*/
if (requiredCapacity > MIN_SPARSE_INDEX &&
willBeSparseElements(requiredCapacity, extra)) {
return ED_SPARSE;
}
if (!growElements(cx, requiredCapacity))
return ED_FAILED;
return ED_OK;
}
inline JSObject::EnsureDenseResult
JSObject::parExtendDenseElements(js::Allocator *alloc, js::Value *v, uint32_t extra)
{
JS_ASSERT(isNative());
js::ObjectElements *header = getElementsHeader();
unsigned initializedLength = header->initializedLength;
unsigned requiredCapacity = initializedLength + extra;
if (requiredCapacity < initializedLength)
return ED_SPARSE; /* Overflow. */
if (requiredCapacity > header->capacity) {
EnsureDenseResult edr = extendDenseElements(alloc, requiredCapacity, extra);
if (edr != ED_OK)
return edr;
}
// The header may have been reallocated by extendDenseElements(),
// so re-read it:
header = getElementsHeader();
js::HeapSlot *sp = elements + initializedLength;
if (v) {
for (uint32_t i = 0; i < extra; i++)
sp[i].init(compartment(), this, js::HeapSlot::Element,
initializedLength+i, v[i]);
} else {
for (uint32_t i = 0; i < extra; i++)
sp[i].init(compartment(), this, js::HeapSlot::Element,
initializedLength+i, js::MagicValue(JS_ELEMENTS_HOLE));
}
header->initializedLength = requiredCapacity;
if (header->length < requiredCapacity)
header->length = requiredCapacity;
return ED_OK;
}
inline JSObject::EnsureDenseResult
JSObject::ensureDenseElements(JSContext *cx, unsigned index, unsigned extra)
{
@ -618,35 +692,9 @@ JSObject::ensureDenseElements(JSContext *cx, unsigned index, unsigned extra)
}
}
/*
* Don't grow elements for non-extensible objects or watched objects. Dense
* elements can be added/written with no extensible or watchpoint checks as
* long as there is capacity for them.
*/
if (!isExtensible() || watched()) {
JS_ASSERT(currentCapacity == 0);
return ED_SPARSE;
}
/*
* Don't grow elements for objects which already have sparse indexes.
* This avoids needing to count non-hole elements in willBeSparseElements
* every time a new index is added.
*/
if (isIndexed())
return ED_SPARSE;
/*
* We use the extra argument also as a hint about number of non-hole
* elements to be inserted.
*/
if (requiredCapacity > MIN_SPARSE_INDEX &&
willBeSparseElements(requiredCapacity, extra)) {
return ED_SPARSE;
}
if (!growElements(cx, requiredCapacity))
return ED_FAILED;
EnsureDenseResult edr = extendDenseElements(cx, requiredCapacity, extra);
if (edr != ED_OK)
return edr;
ensureDenseInitializedLength(cx, index, extra);
return ED_OK;


@ -213,6 +213,8 @@ typedef JSBool (*JSInitCallback)(void);
namespace js {
class Allocator;
template <typename T>
class Rooted;
@ -302,12 +304,8 @@ struct RuntimeFriendFields {
*/
volatile int32_t interrupt;
/* Limit pointer for checking native stack consumption. */
uintptr_t nativeStackLimit;
RuntimeFriendFields()
: interrupt(0),
nativeStackLimit(0) { }
: interrupt(0) { }
static const RuntimeFriendFields *get(const JSRuntime *rt) {
return reinterpret_cast<const RuntimeFriendFields *>(rt);
@ -318,6 +316,23 @@ class PerThreadData;
struct PerThreadDataFriendFields
{
private:
// Note: this type only exists to permit us to derive the offset of
// the perThread data within the real JSRuntime* type in a portable
// way.
struct RuntimeDummy : RuntimeFriendFields
{
struct PerThreadDummy {
void *field1;
uintptr_t field2;
#ifdef DEBUG
uint64_t field3;
#endif
} mainThread;
};
public:
PerThreadDataFriendFields();
#if defined(DEBUG) && defined(JS_GC_ZEAL) && defined(JSGC_ROOT_ANALYSIS) && !defined(JS_THREADSAFE)
@ -332,15 +347,27 @@ struct PerThreadDataFriendFields
SkipRoot *skipGCRooters;
#endif
static PerThreadDataFriendFields *get(js::PerThreadData *pt) {
/* Limit pointer for checking native stack consumption. */
uintptr_t nativeStackLimit;
static const size_t RuntimeMainThreadOffset = offsetof(RuntimeDummy, mainThread);
static inline PerThreadDataFriendFields *get(js::PerThreadData *pt) {
return reinterpret_cast<PerThreadDataFriendFields *>(pt);
}
static PerThreadDataFriendFields *getMainThread(JSRuntime *rt) {
static inline PerThreadDataFriendFields *getMainThread(JSRuntime *rt) {
// mainThread must always appear directly after |RuntimeFriendFields|.
// Tested by a JS_STATIC_ASSERT in |jsfriendapi.cpp|
return reinterpret_cast<PerThreadDataFriendFields *>(
reinterpret_cast<char*>(rt) + sizeof(RuntimeFriendFields));
reinterpret_cast<char*>(rt) + RuntimeMainThreadOffset);
}
static inline const PerThreadDataFriendFields *getMainThread(const JSRuntime *rt) {
// mainThread must always appear directly after |RuntimeFriendFields|.
// Tested by a JS_STATIC_ASSERT in |jsfriendapi.cpp|
return reinterpret_cast<const PerThreadDataFriendFields *>(
reinterpret_cast<const char*>(rt) + RuntimeMainThreadOffset);
}
};
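The |RuntimeDummy| type above exists only so that |offsetof| can recover where |mainThread| sits inside the real runtime without pulling in its full definition. A self-contained sketch of the same trick, using made-up types rather than SpiderMonkey's:

    #include <cstddef>
    #include <cstdint>

    struct FriendFields { volatile int32_t interrupt; };

    // Mirror just enough of the real layout: the friend fields followed by
    // the per-thread block. As long as the mirrored members match the real
    // ones in order and type, offsetof() on the dummy equals the offset of
    // the per-thread data inside the real runtime.
    struct RuntimeDummy : FriendFields {
        struct PerThreadDummy { void *field1; uintptr_t field2; } mainThread;
    };

    static const size_t MainThreadOffset = offsetof(RuntimeDummy, mainThread);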
View File
@ -1311,7 +1311,7 @@ ParseNodeToXML(Parser *parser, ParseNode *pn,
JSXMLClass xml_class;
int stackDummy;
if (!JS_CHECK_STACK_SIZE(cx->runtime->nativeStackLimit, &stackDummy)) {
if (!JS_CHECK_STACK_SIZE(cx->mainThread().nativeStackLimit, &stackDummy)) {
parser->reportError(pn, JSMSG_OVER_RECURSED);
return NULL;
}
View File
@ -1364,7 +1364,7 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::MIPSRegiste
* span is not empty is handled.
*/
gc::FreeSpan *list = const_cast<gc::FreeSpan *>
(cx->compartment->arenas.getFreeList(allocKind));
(cx->compartment->allocator.arenas.getFreeList(allocKind));
loadPtr(&list->first, result);
Jump jump = branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(&list->last), result);
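This is the standard free-span bump check: load |first|, take the slow path if it has reached |last|, otherwise the cell at |first| is handed out and |first| advances by the thing size. A rough C++ rendering of the logic the jitted code implements (simplified; the real FreeSpan carries more state):

    struct FreeSpan { uintptr_t first; uintptr_t last; };

    // Returns a cell address, or 0 to signal "take the slow allocation path".
    inline uintptr_t TryBumpAllocate(FreeSpan *span, size_t thingSize) {
        uintptr_t thing = span->first;
        if (span->last <= thing)        // span exhausted
            return 0;                   // refill from the arena lists instead
        span->first = thing + thingSize;
        return thing;
    }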
View File
@ -622,7 +622,7 @@ class CallCompiler : public BaseCompiler
masm.store32(t0, Address(JSFrameReg, StackFrame::offsetOfFlags()));
/* Store the entry fp and calling pc into the IonActivation. */
masm.loadPtr(&cx->runtime->ionActivation, t0);
masm.loadPtr(&cx->mainThread().ionActivation, t0);
masm.storePtr(JSFrameReg, Address(t0, ion::IonActivation::offsetOfEntryFp()));
masm.storePtr(t1, Address(t0, ion::IonActivation::offsetOfPrevPc()));
@ -795,7 +795,7 @@ class CallCompiler : public BaseCompiler
/* Unset IonActivation::entryfp. */
t0 = regs.takeAnyReg().reg();
masm.loadPtr(&cx->runtime->ionActivation, t0);
masm.loadPtr(&cx->mainThread().ionActivation, t0);
masm.storePtr(ImmPtr(NULL), Address(t0, ion::IonActivation::offsetOfEntryFp()));
masm.storePtr(ImmPtr(NULL), Address(t0, ion::IonActivation::offsetOfPrevPc()));
View File
@ -3544,7 +3544,7 @@ RelaxRootChecks(JSContext *cx, unsigned argc, jsval *vp)
}
#ifdef DEBUG
cx->runtime->mainThread.gcRelaxRootChecks = true;
cx->mainThread().gcRelaxRootChecks = true;
#endif
return true;
View File
@ -9,6 +9,8 @@
#include "js/CharacterEncoding.h"
#include "jscntxtinlines.h"
using namespace JS;
Latin1CharsZ
View File
@ -115,6 +115,7 @@
macro(objectNull, objectNull, "[object Null]") \
macro(objectUndefined, objectUndefined, "[object Undefined]") \
macro(of, of, "of") \
macro(offset, offset, "offset") \
macro(parseFloat, parseFloat, "parseFloat") \
macro(parseInt, parseInt, "parseInt") \
macro(propertyIsEnumerable, propertyIsEnumerable, "propertyIsEnumerable") \
View File
@ -1,37 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sw=4 et tw=78:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ForkJoin_inl_h__
#define ForkJoin_inl_h__
namespace js {
inline ForkJoinSlice *
ForkJoinSlice::current()
{
#ifdef JS_THREADSAFE_ION
return (ForkJoinSlice*) PR_GetThreadPrivate(ThreadPrivateIndex);
#else
return NULL;
#endif
}
// True if this thread is currently executing a parallel operation across
// multiple threads.
static inline bool
InParallelSection()
{
#ifdef JS_THREADSAFE
return ForkJoinSlice::current() != NULL;
#else
return false;
#endif
}
} // namespace js
#endif // ForkJoin_inl_h__
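The removed inline (now |ForkJoinSlice::Current()| in ForkJoin.h, below) is a thin wrapper over NSPR thread-private data. A standalone sketch of that pattern with hypothetical names, assuming only the NSPR TLS calls used above:

    #include "prthread.h"

    static unsigned SliceTLSIndex;   // hypothetical process-wide TLS slot

    bool InitSliceTLS() {
        // NULL destructor: slices are stack-allocated and cleared explicitly.
        return PR_NewThreadPrivateIndex(&SliceTLSIndex, NULL) == PR_SUCCESS;
    }

    void EnterSlice(void *slice) { PR_SetThreadPrivate(SliceTLSIndex, slice); }
    void LeaveSlice()            { PR_SetThreadPrivate(SliceTLSIndex, NULL); }
    void *CurrentSlice()         { return PR_GetThreadPrivate(SliceTLSIndex); }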
View File
@ -10,8 +10,9 @@
#include "vm/ForkJoin.h"
#include "vm/Monitor.h"
#include "gc/Marking.h"
#include "vm/ForkJoin-inl.h"
#include "jsinferinlines.h"
#ifdef JS_THREADSAFE
# include "prthread.h"
@ -29,7 +30,7 @@ class js::ForkJoinShared : public TaskExecutor, public Monitor
JSContext *const cx_; // Current context
ThreadPool *const threadPool_; // The thread pool.
ForkJoinOp &op_; // User-defined operations to be perf. in par.
const size_t numThreads_; // Total number of threads.
const uint32_t numSlices_; // Total number of slices.
PRCondVar *rendezvousEnd_; // Cond. var used to signal end of rendezvous.
/////////////////////////////////////////////////////////////////////////
@ -37,16 +38,19 @@ class js::ForkJoinShared : public TaskExecutor, public Monitor
//
// Each worker thread gets an Allocator to use when allocating.
Vector<gc::ArenaLists *, 16> arenaListss_;
Vector<Allocator *, 16> allocators_;
/////////////////////////////////////////////////////////////////////////
// Locked Fields
//
// Only to be accessed while holding the lock.
size_t uncompleted_; // Number of uncompleted worker threads.
size_t blocked_; // Number of threads that have joined the rendezvous.
size_t rendezvousIndex_; // Number of rendezvous attempts
uint32_t uncompleted_; // Number of uncompleted worker threads
uint32_t blocked_; // Number of threads that have joined rendezvous
uint32_t rendezvousIndex_; // Number of rendezvous attempts
bool gcRequested_; // True if a worker requested a GC
gcreason::Reason gcReason_; // Reason given to request GC
JSCompartment *gcCompartment_; // Compartment for GC, or NULL for full
/////////////////////////////////////////////////////////////////////////
// Asynchronous Flags
@ -68,11 +72,11 @@ class js::ForkJoinShared : public TaskExecutor, public Monitor
volatile bool rendezvous_;
// Invoked only from the main thread:
void executeFromMainThread(uintptr_t stackLimit);
void executeFromMainThread();
// Executes slice #threadId of the work, either from a worker or
// the main thread.
void executePortion(PerThreadData *perThread, size_t threadId, uintptr_t stackLimit);
void executePortion(PerThreadData *perThread, uint32_t threadId);
// Rendezvous protocol:
//
@ -97,8 +101,8 @@ class js::ForkJoinShared : public TaskExecutor, public Monitor
ForkJoinShared(JSContext *cx,
ThreadPool *threadPool,
ForkJoinOp &op,
size_t numThreads,
size_t uncompleted);
uint32_t numSlices,
uint32_t uncompleted);
~ForkJoinShared();
bool init();
@ -106,12 +110,13 @@ class js::ForkJoinShared : public TaskExecutor, public Monitor
ParallelResult execute();
// Invoked from parallel worker threads:
virtual void executeFromWorker(size_t threadId, uintptr_t stackLimit);
virtual void executeFromWorker(uint32_t threadId, uintptr_t stackLimit);
// Moves all the per-thread arenas into the main compartment. This can
// only safely be invoked on the main thread, either during a rendezvous
// or after the workers have completed.
void transferArenasToCompartment();
// Moves all the per-thread arenas into the main compartment and
// processes any pending requests for a GC. This can only safely
// be invoked on the main thread, either during a rendezvous or
// after the workers have completed.
void transferArenasToCompartmentAndProcessGCRequests();
// Invoked during processing by worker threads to "check in".
bool check(ForkJoinSlice &threadCx);
@ -119,6 +124,13 @@ class js::ForkJoinShared : public TaskExecutor, public Monitor
// See comment on |ForkJoinSlice::setFatal()| in forkjoin.h
bool setFatal();
// Requests a GC, either full or specific to a compartment.
void requestGC(gcreason::Reason reason);
void requestCompartmentGC(JSCompartment *compartment, gcreason::Reason reason);
// Requests that computation abort.
void setAbortFlag();
JSRuntime *runtime() { return cx_->runtime; }
};
@ -160,19 +172,22 @@ class js::AutoSetForkJoinSlice
ForkJoinShared::ForkJoinShared(JSContext *cx,
ThreadPool *threadPool,
ForkJoinOp &op,
size_t numThreads,
size_t uncompleted)
: cx_(cx),
threadPool_(threadPool),
op_(op),
numThreads_(numThreads),
arenaListss_(cx),
uncompleted_(uncompleted),
blocked_(0),
rendezvousIndex_(0),
abort_(false),
fatal_(false),
rendezvous_(false)
uint32_t numSlices,
uint32_t uncompleted)
: cx_(cx),
threadPool_(threadPool),
op_(op),
numSlices_(numSlices),
allocators_(cx),
uncompleted_(uncompleted),
blocked_(0),
rendezvousIndex_(0),
gcRequested_(false),
gcReason_(gcreason::NUM_REASONS),
gcCompartment_(NULL),
abort_(false),
fatal_(false),
rendezvous_(false)
{ }
bool
@ -182,10 +197,10 @@ ForkJoinShared::init()
// parallel code.
//
// Note: you might think (as I did, initially) that we could use
// compartment ArenaLists for the main thread. This is not true,
// compartment |Allocator| for the main thread. This is not true,
// because when executing parallel code we sometimes check what
// arena list an object is in to decide if it is writable. If we
// used the compartment ArenaLists for the main thread, then the
// used the compartment |Allocator| for the main thread, then the
// main thread would be permitted to write to any object it wants.
if (!Monitor::init())
@ -195,13 +210,13 @@ ForkJoinShared::init()
if (!rendezvousEnd_)
return false;
for (unsigned i = 0; i < numThreads_; i++) {
gc::ArenaLists *arenaLists = cx_->new_<gc::ArenaLists>();
if (!arenaLists)
for (unsigned i = 0; i < numSlices_; i++) {
Allocator *allocator = cx_->runtime->new_<Allocator>(cx_->compartment);
if (!allocator)
return false;
if (!arenaListss_.append(arenaLists)) {
delete arenaLists;
if (!allocators_.append(allocator)) {
js_delete(allocator);
return false;
}
}
@ -213,31 +228,35 @@ ForkJoinShared::~ForkJoinShared()
{
PR_DestroyCondVar(rendezvousEnd_);
while (arenaListss_.length() > 0)
delete arenaListss_.popCopy();
while (allocators_.length() > 0)
js_delete(allocators_.popCopy());
}
ParallelResult
ForkJoinShared::execute()
{
AutoLockMonitor lock(*this);
// Give the task set a chance to prepare for parallel workload.
if (!op_.pre(numThreads_))
// Sometimes a GC request occurs *just before* we enter into the
// parallel section. Rather than enter into the parallel section
// and then abort, we just check here and abort early.
if (cx_->runtime->interrupt)
return TP_RETRY_SEQUENTIALLY;
AutoLockMonitor lock(*this);
// Notify workers to start and execute one portion on this thread.
{
AutoUnlockMonitor unlock(*this);
if (!threadPool_->submitAll(cx_, this))
return TP_FATAL;
executeFromMainThread(cx_->runtime->ionStackLimit);
executeFromMainThread();
}
// Wait for workers to complete.
while (uncompleted_ > 0)
lock.wait();
transferArenasToCompartmentAndProcessGCRequests();
// Check if any of the workers failed.
if (abort_) {
if (fatal_)
@ -246,37 +265,36 @@ ForkJoinShared::execute()
return TP_RETRY_SEQUENTIALLY;
}
transferArenasToCompartment();
// Give task set a chance to cleanup after parallel execution.
if (!op_.post(numThreads_))
return TP_RETRY_SEQUENTIALLY;
// Everything went swimmingly. Give yourself a pat on the back.
return TP_SUCCESS;
}
void
ForkJoinShared::transferArenasToCompartment()
ForkJoinShared::transferArenasToCompartmentAndProcessGCRequests()
{
#if 0
// XXX: This code will become relevant once other bugs are merged down.
JSRuntime *rt = cx_->runtime;
JSCompartment *comp = cx_->compartment;
for (unsigned i = 0; i < numThreads_; i++)
comp->arenas.adoptArenas(rt, arenaListss_[i]);
#endif
for (unsigned i = 0; i < numSlices_; i++)
comp->adoptWorkerAllocator(allocators_[i]);
if (gcRequested_) {
if (!gcCompartment_)
TriggerGC(cx_->runtime, gcReason_);
else
TriggerCompartmentGC(gcCompartment_, gcReason_);
gcRequested_ = false;
gcCompartment_ = NULL;
}
}
void
ForkJoinShared::executeFromWorker(size_t workerId, uintptr_t stackLimit)
ForkJoinShared::executeFromWorker(uint32_t workerId, uintptr_t stackLimit)
{
JS_ASSERT(workerId < numThreads_ - 1);
JS_ASSERT(workerId < numSlices_ - 1);
PerThreadData thisThread(cx_->runtime);
TlsPerThreadData.set(&thisThread);
executePortion(&thisThread, workerId, stackLimit);
thisThread.ionStackLimit = stackLimit;
executePortion(&thisThread, workerId);
TlsPerThreadData.set(NULL);
AutoLockMonitor lock(*this);
@ -290,23 +308,21 @@ ForkJoinShared::executeFromWorker(size_t workerId, uintptr_t stackLimit)
}
void
ForkJoinShared::executeFromMainThread(uintptr_t stackLimit)
ForkJoinShared::executeFromMainThread()
{
executePortion(&cx_->runtime->mainThread, numThreads_ - 1, stackLimit);
executePortion(&cx_->runtime->mainThread, numSlices_ - 1);
}
void
ForkJoinShared::executePortion(PerThreadData *perThread,
size_t threadId,
uintptr_t stackLimit)
uint32_t threadId)
{
gc::ArenaLists *arenaLists = arenaListss_[threadId];
ForkJoinSlice slice(perThread, threadId, numThreads_,
stackLimit, arenaLists, this);
Allocator *allocator = allocators_[threadId];
ForkJoinSlice slice(perThread, threadId, numSlices_, allocator, this);
AutoSetForkJoinSlice autoContext(&slice);
if (!op_.parallel(slice))
abort_ = true;
setAbortFlag();
}
bool
@ -314,7 +330,7 @@ ForkJoinShared::setFatal()
{
// Might as well set the abort flag to true, as it will make propagation
// faster.
abort_ = true;
setAbortFlag();
fatal_ = true;
return false;
}
@ -326,12 +342,21 @@ ForkJoinShared::check(ForkJoinSlice &slice)
return false;
if (slice.isMainThread()) {
JS_ASSERT(!cx_->runtime->gcIsNeeded);
if (cx_->runtime->interrupt) {
// The GC Needed flag should not be set during parallel
// execution. Instead, one of the requestGC() or
// requestCompartmentGC() methods should be invoked.
JS_ASSERT(!cx_->runtime->gcIsNeeded);
// If interrupt is requested, bring worker threads to a halt,
// service the interrupt, then let them start back up again.
AutoRendezvous autoRendezvous(slice);
if (!js_HandleExecutionInterrupt(cx_))
return setFatal();
// AutoRendezvous autoRendezvous(slice);
// if (!js_HandleExecutionInterrupt(cx_))
// return setFatal();
setAbortFlag();
return false;
}
} else if (rendezvous_) {
joinRendezvous(slice);
@ -392,7 +417,7 @@ ForkJoinShared::joinRendezvous(ForkJoinSlice &slice)
JS_ASSERT(rendezvous_);
AutoLockMonitor lock(*this);
const size_t index = rendezvousIndex_;
const uint32_t index = rendezvousIndex_;
blocked_ += 1;
// If we're the last to arrive, let the main thread know about it.
@ -421,6 +446,42 @@ ForkJoinShared::endRendezvous(ForkJoinSlice &slice)
PR_NotifyAllCondVar(rendezvousEnd_);
}
void
ForkJoinShared::setAbortFlag()
{
abort_ = true;
}
void
ForkJoinShared::requestGC(gcreason::Reason reason)
{
AutoLockMonitor lock(*this);
gcCompartment_ = NULL;
gcReason_ = reason;
gcRequested_ = true;
}
void
ForkJoinShared::requestCompartmentGC(JSCompartment *compartment,
gcreason::Reason reason)
{
AutoLockMonitor lock(*this);
if (gcRequested_ && gcCompartment_ != compartment) {
// If a full GC has been requested, or a GC for another compartment,
// issue a request for a full GC.
gcCompartment_ = NULL;
gcReason_ = reason;
gcRequested_ = true;
} else {
// Otherwise, just GC this compartment.
gcCompartment_ = compartment;
gcReason_ = reason;
gcRequested_ = true;
}
}
#endif // JS_THREADSAFE
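Call sites that would normally trigger a GC are expected to route through these requests while on a parallel thread; the request is then honored by |transferArenasToCompartmentAndProcessGCRequests()| once the section ends. A hedged sketch of such a call site (illustrative only; |MaybeTriggerGC| is not part of this patch):

    // Hypothetical call site: defer the GC if we are on a parallel thread.
    static void MaybeTriggerGC(JSRuntime *rt, js::gcreason::Reason reason) {
        if (js::ForkJoinSlice::InParallelSection())
            js::ForkJoinSlice::Current()->requestGC(reason);  // deferred to section end
        else
            js::TriggerGC(rt, reason);                        // immediate, main thread only
    }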
/////////////////////////////////////////////////////////////////////////////
@ -428,14 +489,13 @@ ForkJoinShared::endRendezvous(ForkJoinSlice &slice)
//
ForkJoinSlice::ForkJoinSlice(PerThreadData *perThreadData,
size_t sliceId, size_t numSlices,
uintptr_t stackLimit, gc::ArenaLists *arenaLists,
ForkJoinShared *shared)
uint32_t sliceId, uint32_t numSlices,
Allocator *allocator, ForkJoinShared *shared)
: perThreadData(perThreadData),
sliceId(sliceId),
numSlices(numSlices),
ionStackLimit(stackLimit),
arenaLists(arenaLists),
allocator(allocator),
abortedScript(NULL),
shared(shared)
{ }
@ -490,20 +550,100 @@ ForkJoinSlice::Initialize()
#endif
}
void
ForkJoinSlice::requestGC(gcreason::Reason reason)
{
#ifdef JS_THREADSAFE
shared->requestGC(reason);
triggerAbort();
#endif
}
void
ForkJoinSlice::requestCompartmentGC(JSCompartment *compartment,
gcreason::Reason reason)
{
#ifdef JS_THREADSAFE
shared->requestCompartmentGC(compartment, reason);
triggerAbort();
#endif
}
#ifdef JS_THREADSAFE
void
ForkJoinSlice::triggerAbort()
{
shared->setAbortFlag();
// Set ionStackLimit to -1 so that on the next entry to a function,
// the thread will trigger the over-recursed check. If the thread
// is in a loop, then it will be calling ForkJoinSlice::check(),
// in which case it will notice the shared abort_ flag.
//
// In principle, we probably ought to set the ionStackLimits for
// the other threads too, but right now the various slice objects
// are not on a central list, so that's not possible.
perThreadData->ionStackLimit = -1;
}
#endif
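Writing |-1| (the maximum uintptr_t) into the limit guarantees the next stack check fails, because the check compares the current stack address against the limit. A standalone sketch of such a guard, assuming a downward-growing stack as on common platforms:

    #include <cstdint>

    // Simplified over-recursion guard: OK while the stack pointer stays
    // above |limit|; a limit of uintptr_t(-1) can never be satisfied.
    inline bool CheckStackLimit(uintptr_t limit) {
        int dummy;
        return reinterpret_cast<uintptr_t>(&dummy) > limit;
    }

    inline void PoisonStackLimit(uintptr_t *limit) {
        *limit = uintptr_t(-1);   // every subsequent CheckStackLimit() now fails
    }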
/////////////////////////////////////////////////////////////////////////////
namespace js {
class AutoEnterParallelSection
{
private:
JSContext *cx_;
uint8_t *prevIonTop_;
public:
AutoEnterParallelSection(JSContext *cx)
: cx_(cx),
prevIonTop_(cx->mainThread().ionTop)
{
// Note: we do not allow GC during parallel sections.
// Moreover, we do not wish to worry about making
// write barriers thread-safe. Therefore, we guarantee
// that there is no incremental GC in progress.
if (IsIncrementalGCInProgress(cx->runtime)) {
PrepareForIncrementalGC(cx->runtime);
FinishIncrementalGC(cx->runtime, gcreason::API);
}
cx->runtime->gcHelperThread.waitBackgroundSweepEnd();
}
~AutoEnterParallelSection() {
cx_->runtime->mainThread.ionTop = prevIonTop_;
}
};
} /* namespace js */
uint32_t
js::ForkJoinSlices(JSContext *cx)
{
#ifndef JS_THREADSAFE
return 1;
#else
// Parallel workers plus this main thread.
return cx->runtime->threadPool.numWorkers() + 1;
#endif
}
ParallelResult
js::ExecuteForkJoinOp(JSContext *cx, ForkJoinOp &op)
{
#ifdef JS_THREADSAFE
// Recursive use of the ThreadPool is not supported.
JS_ASSERT(!InParallelSection());
JS_ASSERT(!ForkJoinSlice::InParallelSection());
AutoEnterParallelSection enter(cx);
ThreadPool *threadPool = &cx->runtime->threadPool;
// Parallel workers plus this main thread.
size_t numThreads = threadPool->numWorkers() + 1;
uint32_t numSlices = ForkJoinSlices(cx);
ForkJoinShared shared(cx, threadPool, op, numThreads, numThreads - 1);
ForkJoinShared shared(cx, threadPool, op, numSlices, numSlices - 1);
if (!shared.init())
return TP_RETRY_SEQUENTIALLY;
View File
@ -26,28 +26,19 @@
// MyForkJoinOp op;
// ExecuteForkJoinOp(cx, op);
//
// |ExecuteForkJoinOp()| will fire up the workers in the runtime's thread
// pool, have them execute the callbacks defined in the |ForkJoinOp| class,
// and then return once all the workers have completed.
//
// There are three callbacks defined in |ForkJoinOp|. The first, |pre()|, is
// invoked before the parallel section begins. It informs you how many slices
// your problem will be divided into (effectively, how many worker threads
// there will be). This is often useful for allocating an array for the
// workers to store their result or something like that.
//
// Next, you will receive |N| calls to the |parallel()| callback, where |N| is
// the number of slices that were specified in |pre()|. Each callback will be
// supplied with a |ForkJoinSlice| instance providing some context.
// |ExecuteForkJoinOp()| will fire up the workers in the runtime's
// thread pool, have them execute the callback |parallel()| defined in
// the |ForkJoinOp| class, and then return once all the workers have
// completed. You will receive |N| calls to the |parallel()|
// callback, where |N| is the value returned by |ForkJoinSlice()|.
// Each callback will be supplied with a |ForkJoinSlice| instance
// providing some context.
//
// Typically there will be one call to |parallel()| from each worker thread,
// but that is not something you should rely upon---if we implement
// work-stealing, for example, then it could be that a single worker thread
// winds up handling multiple slices.
//
// Finally, after the operation is complete the |post()| callback is invoked,
// giving you a chance to collect the various results.
//
// Operation callback:
//
// During parallel execution, you should periodically invoke |slice.check()|,
@ -69,6 +60,35 @@
// parallel code encountered an unexpected path that cannot safely be executed
// in parallel (writes to shared state, say).
//
// Garbage collection and allocation:
//
// Code which executes on these parallel threads must be very careful
// with respect to garbage collection and allocation. Currently, we
// do not permit GC to occur when executing in parallel. Furthermore,
// the typical allocation paths are UNSAFE in parallel code because
// they access shared state (the compartment's arena lists and so
// forth) without any synchronization.
//
// To deal with this, the forkjoin code creates a distinct |Allocator|
// object for each slice. You can access the appropriate object via
// the |ForkJoinSlice| object that is provided to the callbacks. Once
// the execution is complete, all the objects found in these distinct
// |Allocator|s are merged back into the main compartment lists and
// things proceed normally.
//
// In Ion-generated code, we will do allocation through the
// |Allocator| found in |ForkJoinSlice| (which is obtained via TLS).
// Also, no write barriers are emitted. Conceptually, we should never
// need a write barrier because we only permit writes to objects that
// are newly allocated, and such objects are always black (to use
// incremental GC terminology). However, to be safe, we also block
// upon entering a parallel section to ensure that any concurrent
// marking or incremental GC has completed.
//
// In the future, it should be possible to lift the restriction that
// we must block until incremental GC has completed and also to permit GC
// during parallel execution. But we're not there yet.
//
// Current Limitations:
//
// - The API does not support recursive or nested use. That is, the
@ -88,6 +108,11 @@ enum ParallelResult { TP_SUCCESS, TP_RETRY_SEQUENTIALLY, TP_FATAL };
struct ForkJoinOp;
// Returns the number of slices that a fork-join op will have when
// executed.
uint32_t
ForkJoinSlices(JSContext *cx);
// Executes the given |TaskSet| in parallel using the runtime's |ThreadPool|,
// returning upon completion. In general, if there are |N| workers in the
// threadpool, the problem will be divided into |N+1| slices, as the main
@ -98,7 +123,19 @@ class PerThreadData;
class ForkJoinShared;
class AutoRendezvous;
class AutoSetForkJoinSlice;
namespace gc { struct ArenaLists; }
#ifdef DEBUG
struct IonTraceData
{
uint32_t bblock;
uint32_t lir;
uint32_t execModeInt;
const char *lirOpName;
const char *mirOpName;
JSScript *script;
jsbytecode *pc;
};
#endif
struct ForkJoinSlice
{
@ -107,22 +144,26 @@ struct ForkJoinSlice
PerThreadData *perThreadData;
// Which slice should you process? Ranges from 0 to |numSlices|.
const size_t sliceId;
const uint32_t sliceId;
// How many slices are there in total?
const size_t numSlices;
const uint32_t numSlices;
// Top of the stack. This should move into |perThreadData|.
uintptr_t ionStackLimit;
// Arenas to use when allocating on this thread. See
// Allocator to use when allocating on this thread. See
// |ion::ParFunctions::ParNewGCThing()|. This should move into
// |perThreadData|.
gc::ArenaLists *const arenaLists;
Allocator *const allocator;
ForkJoinSlice(PerThreadData *perThreadData, size_t sliceId, size_t numSlices,
uintptr_t stackLimit, gc::ArenaLists *arenaLists,
ForkJoinShared *shared);
// If we took a parallel bailout, the script that bailed out is stored here.
JSScript *abortedScript;
// Records the last instruction to execute on this thread.
#ifdef DEBUG
IonTraceData traceData;
#endif
ForkJoinSlice(PerThreadData *perThreadData, uint32_t sliceId, uint32_t numSlices,
Allocator *arenaLists, ForkJoinShared *shared);
// True if this is the main thread, false if it is one of the parallel workers.
bool isMainThread();
@ -136,6 +177,17 @@ struct ForkJoinSlice
// For convenience, *always* returns false.
bool setFatal();
// When the code would normally trigger a GC, we don't trigger it
// immediately but instead record that request here. This will
// cause |ExecuteForkJoinOp()| to invoke |TriggerGC()| or
// |TriggerCompartmentGC()| as appropriate once the parallel
// section is complete. This is done because those routines do
// various preparations that are not thread-safe, and because the
// full set of arenas is not available until the end of the
// parallel section.
void requestGC(gcreason::Reason reason);
void requestCompartmentGC(JSCompartment *compartment, gcreason::Reason reason);
// During the parallel phase, this method should be invoked periodically,
// for example on every backedge, similar to the interrupt check. If it
// returns false, then the parallel phase has been aborted and so you
@ -146,7 +198,10 @@ struct ForkJoinSlice
// Be wary, the runtime is shared between all threads!
JSRuntime *runtime();
static inline ForkJoinSlice *current();
// Check the current state of parallel execution.
static inline ForkJoinSlice *Current();
static inline bool InParallelSection();
static bool Initialize();
private:
@ -158,6 +213,13 @@ struct ForkJoinSlice
static unsigned ThreadPrivateIndex;
#endif
#ifdef JS_THREADSAFE
// Sets the abort flag and adjusts ionStackLimit so as to cause
// the overrun check to fail. This should lead to the operation
// as a whole aborting.
void triggerAbort();
#endif
ForkJoinShared *const shared;
};
@ -166,26 +228,29 @@ struct ForkJoinSlice
struct ForkJoinOp
{
public:
// Invoked before parallel phase begins; informs the task set how many
// slices there will be and gives it a chance to initialize per-slice data
// structures.
//
// Returns true on success, false to halt parallel execution.
virtual bool pre(size_t numSlices) = 0;
// Invoked from each parallel thread to process one slice. The
// |ForkJoinSlice| which is supplied will also be available using TLS.
//
// Returns true on success, false to halt parallel execution.
virtual bool parallel(ForkJoinSlice &slice) = 0;
// Invoked after parallel phase ends if execution was successful
// (not aborted)
//
// Returns true on success, false to halt parallel execution.
virtual bool post(size_t numSlices) = 0;
};
} // namespace js
/* static */ inline js::ForkJoinSlice *
js::ForkJoinSlice::Current()
{
#ifdef JS_THREADSAFE
return (ForkJoinSlice*) PR_GetThreadPrivate(ThreadPrivateIndex);
#else
return NULL;
#endif
}
/* static */ inline bool
js::ForkJoinSlice::InParallelSection()
{
return Current() != NULL;
}
#endif // ForkJoin_h__
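Putting the trimmed-down interface together: an op now implements only |parallel()|, can size any shared output with |ForkJoinSlices()| before starting, and allocates GC things only through |slice.allocator|. A hedged sketch (|SumOp| and its fields are hypothetical, not from this patch):

    struct SumOp : public js::ForkJoinOp {
        js::Vector<double, 8> partials;   // one partial result per slice

        SumOp(JSContext *cx) : partials(cx) {}

        virtual bool parallel(js::ForkJoinSlice &slice) {
            double sum = 0;
            // ... process this slice's chunk, calling slice.check() on back edges ...
            partials[slice.sliceId] = sum;
            return true;                  // returning false aborts the section
        }
    };

    // Caller sketch (sequential fallback elided):
    //   SumOp op(cx);
    //   if (!op.partials.resize(js::ForkJoinSlices(cx)))
    //       return false;
    //   js::ParallelResult r = js::ExecuteForkJoinOp(cx, op);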
View File
@ -16,6 +16,7 @@
#include "ion/Bailouts.h"
#endif
#include "Stack.h"
#include "ForkJoin.h"
#include "jsgcinlines.h"
#include "jsobjinlines.h"
View File
@ -358,7 +358,7 @@ JSExternalString::new_(JSContext *cx, const jschar *chars, size_t length,
if (!str)
return NULL;
str->init(chars, length, fin);
cx->runtime->updateMallocCounter(cx, (length + 1) * sizeof(jschar));
cx->runtime->updateMallocCounter(cx->compartment, (length + 1) * sizeof(jschar));
return str;
}
View File
@ -118,8 +118,9 @@ ThreadPoolWorker::run()
{
// This is hokey in the extreme. To compute the stack limit,
// subtract the size of the stack from the address of a local
// variable and give a 2k buffer. Is there a better way?
uintptr_t stackLimitOffset = WORKER_THREAD_STACK_SIZE - 2*1024;
// variable and give a 10k buffer. Is there a better way?
// (Note: 2k proved to be fine on Mac, but too little on Linux)
uintptr_t stackLimitOffset = WORKER_THREAD_STACK_SIZE - 10*1024;
uintptr_t stackLimit = (((uintptr_t)&stackLimitOffset) +
stackLimitOffset * JS_STACK_GROWTH_DIRECTION);
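Concretely: take the address of a local near the top of the worker's stack, then move almost the whole stack size in the direction of growth; that address becomes the limit the over-recursion check compares against. A standalone sketch assuming a downward-growing stack (constants are hypothetical):

    #include <cstdint>
    #include <cstddef>

    static const size_t kStackSize    = 1 * 1024 * 1024;  // worker stack size
    static const size_t kSafetyBuffer = 10 * 1024;        // headroom for the check itself

    // Call near the top of the worker thread's entry function.
    inline uintptr_t ComputeStackLimit() {
        int anchor;                                        // lives near the stack top
        uintptr_t top = reinterpret_cast<uintptr_t>(&anchor);
        return top - (kStackSize - kSafetyBuffer);         // lowest address we tolerate
    }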
View File
@ -28,12 +28,12 @@ namespace js {
class ThreadPoolWorker;
typedef void (*TaskFun)(void *userdata, size_t workerId, uintptr_t stackLimit);
typedef void (*TaskFun)(void *userdata, uint32_t workerId, uintptr_t stackLimit);
class TaskExecutor
{
public:
virtual void executeFromWorker(size_t workerId, uintptr_t stackLimit) = 0;
virtual void executeFromWorker(uint32_t workerId, uintptr_t stackLimit) = 0;
};
// ThreadPool used for parallel JavaScript execution as well as
@ -77,7 +77,7 @@ class ThreadPool
size_t numWorkers_;
// Next worker for |submitOne()|. Atomically modified.
size_t nextId_;
uint32_t nextId_;
bool lazyStartWorkers(JSContext *cx);
void terminateWorkers();