Backed out changeset 5f449dffdff5 (regressed tp4 private bytes, and we are not sure what the hell that means).

Andreas Gal 2009-09-17 15:40:37 -07:00
parent 4b2f3150ab
commit 38b0ee0297
4 changed files with 555 additions and 46 deletions

js/src/jscntxt.h

@@ -394,11 +394,8 @@ struct JSRuntime {
uint32 protoHazardShape;
/* Garbage collector state, used by jsgc.c. */
jsuword gcBase;
jsuword gcCursor;
jsuword gcLimit;
JSGCChunkInfo *gcChunkList;
JSGCArenaList gcArenaList[GC_NUM_FREELISTS];
JSGCArenaInfo *emptyArenas;
JSGCDoubleArenaList gcDoubleArenaList;
JSDHashTable gcRootsHash;
JSDHashTable *gcLocksHash;
@@ -416,11 +413,6 @@ struct JSRuntime {
volatile JSBool gcIsNeeded;
volatile JSBool gcFlushCodeCaches;
inline bool IsGCThing(void *thing) {
JS_ASSERT((jsuword(thing) & JSVAL_TAGMASK) == 0);
return gcBase <= jsuword(thing) && jsuword(thing) < gcLimit;
}
/*
* NB: do not pack another flag here by claiming gcPadding unless the new
* flag is written only by the GC thread. Atomic updates to packed bytes

js/src/jsgc.cpp

@@ -105,6 +105,46 @@ extern "C" {
}
#endif
/*
* Include the headers for mmap unless we have posix_memalign and do not
* insist on mmap.
*/
#if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
# if defined(XP_WIN)
# ifndef JS_GC_USE_MMAP
# define JS_GC_USE_MMAP 1
# endif
# include <windows.h>
# elif defined(__SYMBIAN32__)
// Symbian's OpenC has mmap (and #defines _POSIX_MAPPED_FILES), but
// doesn't implement MAP_ANON. If we have MOZ_MEMORY, then we can use
// posix_memalign; we've defined HAS_POSIX_MEMALIGN above. Otherwise,
// we overallocate.
# else
# if defined(XP_UNIX) || defined(XP_BEOS)
# include <unistd.h>
# endif
# if _POSIX_MAPPED_FILES > 0
# ifndef JS_GC_USE_MMAP
# define JS_GC_USE_MMAP 1
# endif
# include <sys/mman.h>
/* On Mac OS X MAP_ANONYMOUS is not defined. */
# if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
# endif
# if !defined(MAP_ANONYMOUS)
# define MAP_ANONYMOUS 0
# endif
# else
# if JS_GC_USE_MMAP
# error "JS_GC_USE_MMAP is set when mmap is not available"
# endif
# endif
# endif
#endif
/*
* Check JSTempValueUnion has the size of jsval and void * so we can
* reinterpret jsval as void* GC-thing pointer and use JSTVU_SINGLE for
@@ -193,12 +233,67 @@ JS_STATIC_ASSERT(JSVAL_NULL == 0);
* just one bit of flags per double to denote if it was marked during the
* marking phase of the GC. The locking is implemented via a hash table. Thus
* for doubles the flag area becomes a bitmap.
*
* JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
* When it is true, a platform-dependent function like mmap is used to get
* memory aligned on CPU page boundaries. If the macro is false or undefined,
* posix_memalign is used when available. Otherwise the code uses malloc to
* over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
* approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
* see NewGCChunk/DestroyGCChunk below.
*
* The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
* minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk
* cannot be a compile-time constant as the system page size is not known until
* runtime.
*/
#if JS_GC_USE_MMAP
static uint32 js_gcArenasPerChunk = 0;
static JSBool js_gcUseMmap = JS_FALSE;
#elif HAS_POSIX_MEMALIGN
# define js_gcArenasPerChunk 1
#else
# define js_gcArenasPerChunk 7
#endif
#if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
# define CHUNKED_ARENA_ALLOCATION 0
#else
# define CHUNKED_ARENA_ALLOCATION 1
#endif
#define GC_ARENA_SHIFT 12
#define GC_ARENA_MASK ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
#define GC_ARENA_SIZE JS_BIT(GC_ARENA_SHIFT)
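
A quick check on the overhead figure above: with GC_ARENA_SHIFT == 12 an arena is 4096 bytes, and the malloc fallback in NewGCChunk below requests (7 + 1) << 12 = 32768 bytes to carve out 7 aligned arenas, so the slack is one arena per chunk, i.e. 1/7 of the usable space.
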
/*
* JS_GC_ARENA_PAD defines the number of bytes to pad JSGCArenaInfo structure.
* It is used to improve allocation efficiency when using posix_memalign. If
* malloc's implementation uses internal headers, then calling
*
* posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
*
* in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
* due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
* calls
*
* posix_memalign(&p, GC_ARENA_SIZE,
* GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
*
* When JS_GC_ARENA_PAD is equal to or greater than the number of words in the
* system header, the system can pack all allocations together without holes.
*
* With HAS_POSIX_MEMALIGN we want at least a 2-word pad unless posix_memalign
* comes from jemalloc, which does not use any headers/trailers.
*/
#ifndef JS_GC_ARENA_PAD
# if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
# define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
# else
# define JS_GC_ARENA_PAD 0
# endif
#endif
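
Whether the holes described above actually appear depends on the malloc implementation; a minimal POSIX sketch to observe the spacing, assuming for illustration that the allocator's bookkeeping fits in a 16-byte pad:

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *a, *b, *c, *d;

    /* Full arena-sized requests: a header-using malloc cannot pack these. */
    if (posix_memalign(&a, 4096, 4096) || posix_memalign(&b, 4096, 4096))
        return 1;

    /* Padded requests: the assumed 16-byte header fits into the slack. */
    if (posix_memalign(&c, 4096, 4096 - 16) ||
        posix_memalign(&d, 4096, 4096 - 16))
        return 1;

    printf("gap without pad: %ld\n", (long) ((char *) b - (char *) a));
    printf("gap with pad:    %ld\n", (long) ((char *) d - (char *) c));
    return 0;
}
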
struct JSGCArenaInfo {
/*
* Allocation list for the arena or NULL if the arena holds double values.
@@ -213,13 +308,39 @@ struct JSGCArenaInfo {
*/
JSGCArenaInfo *prev;
#if !CHUNKED_ARENA_ALLOCATION
jsuword prevUntracedPage;
#else
/*
* A link field for the list of arenas with marked but not yet traced
* things. The field is encoded as arena's page to share the space with
* firstArena and arenaIndex fields.
*/
jsuword prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;
/*
* When firstArena is false, the index of the arena in the chunk. When
* firstArena is true, the index of a free arena holding JSGCChunkInfo or
* NO_FREE_ARENAS if there are no free arenas in the chunk.
*
* GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
* access either of the indexes.
*/
jsuword arenaIndex : GC_ARENA_SHIFT - 1;
/* Flag indicating if the arena is the first in the chunk. */
jsuword firstArena : 1;
#endif
union {
jsuword untracedThings; /* bitset for fast search of marked
but not yet traced things */
JSBool hasMarkedDoubles; /* the arena has marked doubles */
} u;
#if JS_GC_ARENA_PAD != 0
uint8 pad[JS_GC_ARENA_PAD];
#endif
};
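
The three bit-fields above are sized to share one machine word: (JS_BITS_PER_WORD - GC_ARENA_SHIFT) + (GC_ARENA_SHIFT - 1) + 1 == JS_BITS_PER_WORD. A standalone sketch of the same packing (unsigned long stands in for jsuword; the exact layout is ABI-dependent):

#include <limits.h>
#include <stdio.h>

#define GC_ARENA_SHIFT 12
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

/* Same shape as the tail of JSGCArenaInfo: widths chosen so the three
 * fields sum to exactly one word. */
struct ArenaBits {
    unsigned long prevUntracedPage : BITS_PER_WORD - GC_ARENA_SHIFT;
    unsigned long arenaIndex : GC_ARENA_SHIFT - 1;
    unsigned long firstArena : 1;
};

int main(void)
{
    /* On common ABIs this prints the size of a single word. */
    printf("sizeof(struct ArenaBits) = %zu\n", sizeof(struct ArenaBits));
    return 0;
}
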
/*
@@ -260,6 +381,59 @@ JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));
(JS_ASSERT((index) < js_gcArenasPerChunk), \
ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
#if CHUNKED_ARENA_ALLOCATION
/*
* Definitions for allocating arenas in chunks.
*
* All chunks that have at least one free arena are put on the doubly-linked
* list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
* the head of the chunk's free arena list together with the link fields for
* gcChunkList.
*
* The structure is stored in one of the chunk's free arenas; GET_CHUNK_INFO_INDEX gives
* the index of this arena. When all arenas in the chunk are used, it is
* removed from the list and the index is set to NO_FREE_ARENAS indicating
* that the chunk is not on gcChunkList and has no JSGCChunkInfo available.
*/
struct JSGCChunkInfo {
JSGCChunkInfo **prevp;
JSGCChunkInfo *next;
JSGCArenaInfo *lastFreeArena;
uint32 numFreeArenas;
};
#define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1)
#ifdef js_gcArenasPerChunk
JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
js_gcArenasPerChunk <= NO_FREE_ARENAS);
#endif
#define GET_ARENA_CHUNK(arena, index) \
(JS_ASSERT(GET_ARENA_INDEX(arena) == index), \
ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))
#define GET_ARENA_INDEX(arena) \
((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)
#define GET_CHUNK_INFO_INDEX(chunk) \
((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)
#define SET_CHUNK_INFO_INDEX(chunk, index) \
(JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \
(void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))
#define GET_CHUNK_INFO(chunk, infoIndex) \
(JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \
JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \
(JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))
#define CHUNK_INFO_TO_INDEX(ci) \
GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))
#endif
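
The macros above are plain address arithmetic over an arena-aligned chunk: arena i starts at chunk + (i << GC_ARENA_SHIFT), and the chunk base is recovered by subtracting the stored index back out. A standalone sketch of that round trip (the base address is fabricated for illustration):

#include <stdint.h>
#include <stdio.h>

#define GC_ARENA_SHIFT 12

int main(void)
{
    uintptr_t chunk = (uintptr_t) 0x40000000u;  /* hypothetical aligned base */
    unsigned arenaIndex = 5;

    /* GET_ARENA_INFO direction: index -> arena start. */
    uintptr_t arenaStart = chunk + ((uintptr_t) arenaIndex << GC_ARENA_SHIFT);

    /* GET_ARENA_CHUNK direction: arena start + stored index -> chunk base. */
    uintptr_t recovered = arenaStart - ((uintptr_t) arenaIndex << GC_ARENA_SHIFT);

    printf("arena %u at %#lx, chunk recovered at %#lx\n",
           arenaIndex, (unsigned long) arenaStart, (unsigned long) recovered);
    return 0;
}
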
/*
* Macros for GC-thing operations.
*/
@@ -648,27 +822,268 @@ ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
#define METER_UPDATE_MAX(maxLval, rval) \
METER_IF((maxLval) < (rval), (maxLval) = (rval))
#if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN
/*
* For chunks allocated via over-sized malloc, get a pointer to store the gap
* between the malloc's result and the first arena in the chunk.
*/
static uint32 *
GetMallocedChunkGapPtr(jsuword chunk)
{
JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
/* Use the memory after the chunk, see NewGCChunk for details. */
return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
}
#endif
static jsuword
NewGCChunk(void)
{
void *p;
#if JS_GC_USE_MMAP
if (js_gcUseMmap) {
# if defined(XP_WIN)
p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
return (jsuword) p;
# else
p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
return (p == MAP_FAILED) ? 0 : (jsuword) p;
# endif
}
#endif
#if HAS_POSIX_MEMALIGN
if (0 != posix_memalign(&p, GC_ARENA_SIZE,
GC_ARENA_SIZE * js_gcArenasPerChunk -
JS_GC_ARENA_PAD)) {
return 0;
}
return (jsuword) p;
#else
/*
* Implement chunk allocation using oversized malloc if mmap and
* posix_memalign are not available.
*
* Since malloc allocates pointers aligned on the word boundary, to get
* js_gcArenasPerChunk aligned arenas, we need to malloc only
*
* ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
*
* bytes. But since we store the gap between the malloced pointer and the
* first arena in the chunk after the chunk, we need to ask for
*
* ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
*
* bytes to ensure that we always have room to store the gap.
*/
p = js_malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
if (!p)
return 0;
{
jsuword chunk;
chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
*GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
return chunk;
}
#endif
}
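
The over-allocation trick in the malloc branch can be tried in isolation; a minimal sketch assuming 4 KiB arenas and the 7-arena chunk size used above:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ARENA_SIZE 4096u
#define ARENAS_PER_CHUNK 7u

int main(void)
{
    /* Ask for one extra arena so an aligned chunk always fits. */
    char *p = malloc((ARENAS_PER_CHUNK + 1) * ARENA_SIZE);
    if (!p)
        return 1;

    /* Round up to the next arena boundary. */
    uintptr_t chunk = ((uintptr_t) p + ARENA_SIZE - 1) &
                      ~(uintptr_t) (ARENA_SIZE - 1);

    /* Stash the gap just past the last arena; because malloc results are
     * word-aligned, these four bytes always fall inside the allocation. */
    uint32_t *gapp = (uint32_t *) (chunk + ARENAS_PER_CHUNK * ARENA_SIZE);
    *gapp = (uint32_t) (chunk - (uintptr_t) p);

    printf("malloc %p -> chunk %#lx (gap %u bytes)\n",
           (void *) p, (unsigned long) chunk, *gapp);

    /* DestroyGCChunk equivalent: recover the malloced pointer and free it. */
    free((char *) chunk - *gapp);
    return 0;
}
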
static void
DestroyGCChunk(jsuword chunk)
{
JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
#if JS_GC_USE_MMAP
if (js_gcUseMmap) {
# if defined(XP_WIN)
VirtualFree((void *) chunk, 0, MEM_RELEASE);
# elif defined(SOLARIS)
munmap((char *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
# else
munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
# endif
return;
}
#endif
#if HAS_POSIX_MEMALIGN
js_free((void *) chunk);
#else
/* See comments in NewGCChunk. */
JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
js_free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
#endif
}
#if CHUNKED_ARENA_ALLOCATION
static void
AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
{
ci->prevp = &rt->gcChunkList;
ci->next = rt->gcChunkList;
if (rt->gcChunkList) {
JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
rt->gcChunkList->prevp = &ci->next;
}
rt->gcChunkList = ci;
}
static void
RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
{
*ci->prevp = ci->next;
if (ci->next) {
JS_ASSERT(ci->next->prevp == &ci->next);
ci->next->prevp = ci->prevp;
}
}
#endif
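
AddChunkToList and RemoveChunkFromList rely on the pointer-to-previous-link idiom: prevp always points at the field that points at the node (the list head or the previous node's next), so unlinking needs no special case for the head. A self-contained sketch of the same idiom (names are illustrative, not from the patch):

#include <assert.h>
#include <stddef.h>

struct Node {
    struct Node **prevp;   /* points at the link that points at us */
    struct Node *next;
};

static void add(struct Node **head, struct Node *n)
{
    n->prevp = head;
    n->next = *head;
    if (*head)
        (*head)->prevp = &n->next;
    *head = n;
}

static void remove_node(struct Node *n)
{
    *n->prevp = n->next;   /* works for head and interior nodes alike */
    if (n->next)
        n->next->prevp = n->prevp;
}

int main(void)
{
    struct Node *head = NULL, a = {0}, b = {0};
    add(&head, &a);
    add(&head, &b);
    remove_node(&b);       /* unlink the current head, no special case */
    assert(head == &a && a.next == NULL);
    return 0;
}
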
static JSGCArenaInfo *
NewGCArena(JSRuntime *rt)
{
jsuword chunk;
JSGCArenaInfo *a;
if (rt->emptyArenas) {
a = rt->emptyArenas;
rt->emptyArenas = a->prev;
} else {
jsuword arena = rt->gcCursor;
if (arena + GC_ARENA_SIZE > rt->gcLimit)
if (rt->gcBytes >= rt->gcMaxBytes)
return NULL;
#if CHUNKED_ARENA_ALLOCATION
if (js_gcArenasPerChunk == 1) {
#endif
chunk = NewGCChunk();
if (chunk == 0)
return NULL;
rt->gcCursor = arena + GC_ARENA_SIZE;
rt->gcBytes += GC_ARENA_SIZE;
a = ARENA_START_TO_INFO(arena);
a = ARENA_START_TO_INFO(chunk);
#if CHUNKED_ARENA_ALLOCATION
} else {
JSGCChunkInfo *ci;
uint32 i;
JSGCArenaInfo *aprev;
ci = rt->gcChunkList;
if (!ci) {
chunk = NewGCChunk();
if (chunk == 0)
return NULL;
JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
a = GET_ARENA_INFO(chunk, 0);
a->firstArena = JS_TRUE;
a->arenaIndex = 0;
aprev = NULL;
i = 0;
do {
a->prev = aprev;
aprev = a;
++i;
a = GET_ARENA_INFO(chunk, i);
a->firstArena = JS_FALSE;
a->arenaIndex = i;
} while (i != js_gcArenasPerChunk - 1);
ci = GET_CHUNK_INFO(chunk, 0);
ci->lastFreeArena = aprev;
ci->numFreeArenas = js_gcArenasPerChunk - 1;
AddChunkToList(rt, ci);
} else {
JS_ASSERT(ci->prevp == &rt->gcChunkList);
a = ci->lastFreeArena;
aprev = a->prev;
if (!aprev) {
JS_ASSERT(ci->numFreeArenas == 1);
JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
RemoveChunkFromList(rt, ci);
chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
} else {
JS_ASSERT(ci->numFreeArenas >= 2);
JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
ci->lastFreeArena = aprev;
ci->numFreeArenas--;
}
}
}
#endif
rt->gcBytes += GC_ARENA_SIZE;
a->prevUntracedPage = 0;
memset(&a->u, 0, sizeof(a->u));
return a;
}
static void
DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last)
{
JSGCArenaInfo *a;
while (last) {
a = last;
last = last->prev;
METER(rt->gcStats.afree++);
JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
rt->gcBytes -= GC_ARENA_SIZE;
#if CHUNKED_ARENA_ALLOCATION
if (js_gcArenasPerChunk == 1) {
#endif
DestroyGCChunk(ARENA_INFO_TO_START(a));
#if CHUNKED_ARENA_ALLOCATION
} else {
uint32 arenaIndex;
jsuword chunk;
uint32 chunkInfoIndex;
JSGCChunkInfo *ci;
# ifdef DEBUG
jsuword firstArena;
firstArena = a->firstArena;
arenaIndex = a->arenaIndex;
memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
GC_ARENA_SIZE - JS_GC_ARENA_PAD);
a->firstArena = firstArena;
a->arenaIndex = arenaIndex;
# endif
arenaIndex = GET_ARENA_INDEX(a);
chunk = GET_ARENA_CHUNK(a, arenaIndex);
chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
if (chunkInfoIndex == NO_FREE_ARENAS) {
chunkInfoIndex = arenaIndex;
SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
a->prev = NULL;
ci->lastFreeArena = a;
ci->numFreeArenas = 1;
AddChunkToList(rt, ci);
} else {
JS_ASSERT(chunkInfoIndex != arenaIndex);
ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
JS_ASSERT(ci->numFreeArenas != 0);
JS_ASSERT(ci->lastFreeArena);
JS_ASSERT(a != ci->lastFreeArena);
if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
RemoveChunkFromList(rt, ci);
DestroyGCChunk(chunk);
} else {
++ci->numFreeArenas;
a->prev = ci->lastFreeArena;
ci->lastFreeArena = a;
}
}
}
#endif
}
}
static void
InitGCArenaLists(JSRuntime *rt)
{
@@ -695,17 +1110,17 @@ FinishGCArenaLists(JSRuntime *rt)
for (i = 0; i < GC_NUM_FREELISTS; i++) {
arenaList = &rt->gcArenaList[i];
DestroyGCArenas(rt, arenaList->last);
arenaList->last = NULL;
arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
arenaList->freeList = NULL;
}
DestroyGCArenas(rt, rt->gcDoubleArenaList.first);
rt->gcDoubleArenaList.first = NULL;
rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
if (rt->gcBase)
free((void *)rt->gcBase);
rt->gcCursor = rt->gcBase = rt->gcLimit = 0;
rt->gcBytes = 0;
JS_ASSERT(rt->gcChunkList == 0);
}
/*
@@ -824,27 +1239,60 @@ typedef struct JSGCRootHashEntry {
/* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
#define GC_ROOTS_SIZE 256
#if JS_BYTES_PER_WORD == 4 /* 32 bit */
#define GC_MAXBYTES_LIMIT (2 << 30)
#else /* 64 bit */
#define GC_MAXBYTES_LIMIT (2 << 40)
#endif
#if CHUNKED_ARENA_ALLOCATION
/*
* For a CPU with extremely large pages using them for GC things wastes
* too much memory.
*/
# define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)
JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);
#endif
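
With the default GC_ARENA_SHIFT of 12 this limit is JS_BIT(18 - 12) = 64 arenas, so CPU pages of up to 64 * 4096 = 256 KiB still qualify for paged allocation; js_InitGC below falls back to the non-mmap 7-arena chunks for anything larger.
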
JSBool
js_InitGC(JSRuntime *rt, size_t maxbytes)
js_InitGC(JSRuntime *rt, uint32 maxbytes)
{
if (maxbytes > GC_MAXBYTES_LIMIT)
maxbytes = GC_MAXBYTES_LIMIT;
/* Overallocate so we can make sure our heap is aligned. */
size_t bytes = maxbytes + GC_ARENA_SIZE - 1;
void *base = malloc(bytes);
if (!base)
return false;
rt->gcBase = jsuword(base);
rt->gcLimit = rt->gcBase + bytes;
rt->gcCursor = jsuword((rt->gcBase + GC_ARENA_MASK) & ~GC_ARENA_MASK);
#ifdef __APPLE__
memset((void *)rt->gcBase, 0, bytes);
#endif
#if JS_GC_USE_MMAP
if (js_gcArenasPerChunk == 0) {
size_t cpuPageSize, arenasPerPage;
# if defined(XP_WIN)
SYSTEM_INFO si;
GetSystemInfo(&si);
cpuPageSize = si.dwPageSize;
# elif defined(XP_UNIX) || defined(XP_BEOS)
cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
# else
# error "Not implemented"
# endif
/* cpuPageSize is a power of 2. */
JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
#ifdef DEBUG
if (arenasPerPage == 0) {
fprintf(stderr,
"JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
"paged allocation for the garbage collector. Please report this.\n",
(unsigned) cpuPageSize);
}
#endif
if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
/*
* Use at least 4 GC arenas per paged allocation chunk to minimize
* the overhead of mmap/VirtualAlloc.
*/
js_gcUseMmap = JS_TRUE;
js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
} else {
js_gcUseMmap = JS_FALSE;
js_gcArenasPerChunk = 7;
}
}
JS_ASSERT(1 <= js_gcArenasPerChunk &&
js_gcArenasPerChunk <= NO_FREE_ARENAS);
#endif
InitGCArenaLists(rt);
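
The runtime sizing logic above can be exercised on its own. A minimal POSIX sketch (GC_ARENA_SHIFT and the 4-arena minimum are taken from the patch; Windows would query GetSystemInfo instead):

#include <stdio.h>
#include <unistd.h>

#define GC_ARENA_SHIFT 12

int main(void)
{
    size_t pageSize = (size_t) sysconf(_SC_PAGESIZE);
    size_t arenasPerPage = pageSize >> GC_ARENA_SHIFT;

    /* Mirror the patch: at least 4 arenas per chunk to amortize mmap calls. */
    size_t arenasPerChunk = arenasPerPage > 4 ? arenasPerPage : 4;
    printf("page %zu bytes -> %zu arenas per chunk\n", pageSize, arenasPerChunk);
    return 0;
}
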
@@ -1008,6 +1456,7 @@ js_DumpGCStats(JSRuntime *rt, FILE *fp)
#endif
fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
@@ -1566,8 +2015,6 @@ testReservedObjects:
if (gcLocked)
JS_UNLOCK_GC(rt);
#endif
JS_ASSERT(rt->IsGCThing(thing));
return (T*)thing;
fail:
@@ -1809,6 +2256,45 @@ js_ReserveObjects(JSContext *cx, size_t nobjects)
}
#endif
JSBool
js_AddAsGCBytes(JSContext *cx, size_t sz)
{
JSRuntime *rt;
rt = cx->runtime;
if (rt->gcBytes >= rt->gcMaxBytes ||
sz > (size_t) (rt->gcMaxBytes - rt->gcBytes) ||
IsGCThresholdReached(rt)) {
if (JS_ON_TRACE(cx)) {
/*
* If we can't leave the trace, signal OOM condition, otherwise
* exit from trace and proceed with GC.
*/
if (!js_CanLeaveTrace(cx)) {
JS_UNLOCK_GC(rt);
return JS_FALSE;
}
js_LeaveTrace(cx);
}
js_GC(cx, GC_LAST_DITCH);
if (rt->gcBytes >= rt->gcMaxBytes ||
sz > (size_t) (rt->gcMaxBytes - rt->gcBytes)) {
JS_UNLOCK_GC(rt);
JS_ReportOutOfMemory(cx);
return JS_FALSE;
}
}
rt->gcBytes += (uint32) sz;
return JS_TRUE;
}
void
js_RemoveAsGCBytes(JSRuntime *rt, size_t sz)
{
JS_ASSERT((size_t) rt->gcBytes >= sz);
rt->gcBytes -= (uint32) sz;
}
/*
* Shallow GC-things can be locked just by setting the GCF_LOCK bit, because
* they have no descendants to mark during the GC. Currently the optimization
@@ -2803,7 +3289,7 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
uintN i, type;
JSTracer trc;
uint32 thingSize, indexLimit;
JSGCArenaInfo *a, **ap;
JSGCArenaInfo *a, **ap, *emptyArenas;
uint8 flags, *flagp;
JSGCThing *thing, *freeList;
JSGCArenaList *arenaList;
@@ -3097,6 +3583,7 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
* of the file guarantees that JSString and jsdouble instances are
* allocated from a different list.
*/
emptyArenas = NULL;
for (i = 0; i < GC_NUM_FREELISTS; i++) {
arenaList = &rt->gcArenaList[i == 0
? GC_FREELIST_INDEX(sizeof(JSObject))
@@ -3170,8 +3657,8 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
if (a == arenaList->last)
arenaList->lastCount = indexLimit;
*ap = a->prev;
a->prev = rt->emptyArenas;
rt->emptyArenas = a;
a->prev = emptyArenas;
emptyArenas = a;
METER(nkilledarenas++);
} else {
arenaList->freeList = freeList;
@@ -3198,8 +3685,8 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
if (!a->u.hasMarkedDoubles) {
/* No marked double values in the arena. */
*ap = a->prev;
a->prev = rt->emptyArenas;
rt->emptyArenas = a;
a->prev = emptyArenas;
emptyArenas = a;
METER(nkilledarenas++);
} else {
ap = &a->prev;
@@ -3233,6 +3720,12 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
*/
js_SweepScriptFilenames(rt);
/*
* Destroy arenas after we finished the sweeping so finalizers can safely
* use js_IsAboutToBeFinalized().
*/
DestroyGCArenas(rt, emptyArenas);
#ifdef JS_THREADSAFE
cx->submitDeallocatorTask();
#endif

js/src/jsgc.h

@@ -112,7 +112,7 @@ js_GetGCStringRuntime(JSString *str);
#endif
extern JSBool
js_InitGC(JSRuntime *rt, size_t maxbytes);
js_InitGC(JSRuntime *rt, uint32 maxbytes);
extern void
js_FinishGC(JSRuntime *rt);
@@ -286,6 +286,7 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind);
typedef struct JSGCArenaInfo JSGCArenaInfo;
typedef struct JSGCArenaList JSGCArenaList;
typedef struct JSGCChunkInfo JSGCChunkInfo;
struct JSGCArenaList {
JSGCArenaInfo *last; /* last allocated GC arena */
@@ -325,6 +326,22 @@ struct JSWeakRoots {
#define JS_CLEAR_WEAK_ROOTS(wr) (memset((wr), 0, sizeof(JSWeakRoots)))
/*
* Increase runtime->gcBytes by sz bytes to account for an allocation outside
* the GC that will be freed only after the GC is run. The function may run
* the last-ditch GC to ensure that gcBytes does not exceed gcMaxBytes, and
* fails if even that cannot keep the total under the limit.
*
* This function requires that runtime->gcLock is held on entry. On successful
* return the lock is still held and on failure it will be released with
* the error reported.
*/
extern JSBool
js_AddAsGCBytes(JSContext *cx, size_t sz);
extern void
js_RemoveAsGCBytes(JSRuntime* rt, size_t sz);
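
A hedged sketch of a caller honoring that contract (node, nbytes and the runtime list are illustrative names, not from the patch; the real caller is js_Enumerate in jsobj.cpp below):

JS_LOCK_GC(rt);
if (!js_AddAsGCBytes(cx, nbytes)) {
    /* js_AddAsGCBytes already released the GC lock and reported the error. */
    cx->free(node);
    return JS_FALSE;
}
node->next = rt->someList;              /* hypothetical bookkeeping */
rt->someList = node;
JS_UNLOCK_GC(rt);

When the node is later destroyed, the accounting is undone with js_RemoveAsGCBytes(rt, nbytes).
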
#ifdef JS_THREADSAFE
class JSFreePointerListTask : public JSBackgroundTask {
void *head;
@@ -396,6 +413,7 @@ typedef struct JSGCStats {
#endif
uint32 maxlevel; /* maximum GC nesting (indirect recursion) level */
uint32 poke; /* number of potentially useful GC calls */
uint32 afree; /* thing arenas freed so far */
uint32 stackseg; /* total extraordinary stack segments scanned */
uint32 segslots; /* total stack segment jsval slots scanned */
uint32 nclose; /* number of objects with close hooks */

js/src/jsobj.cpp

@@ -5117,6 +5117,11 @@ js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
JS_ASSERT(ne->cursor == (jsword) length);
if (allocated != 0) {
JS_LOCK_GC(cx->runtime);
if (!js_AddAsGCBytes(cx, allocated)) {
/* js_AddAsGCBytes releases the GC lock on failures. */
cx->free(ne);
return JS_FALSE;
}
ne->next = cx->runtime->nativeEnumerators;
cx->runtime->nativeEnumerators = ne;
JS_ASSERT(((jsuword) ne & (jsuword) 1) == (jsuword) 0);
@@ -5211,6 +5216,7 @@ js_TraceNativeEnumerators(JSTracer *trc)
js_TraceId(trc, *cursor);
} while (++cursor != end);
} else if (doGC) {
js_RemoveAsGCBytes(rt, NativeEnumeratorSize(ne->length));
*nep = ne->next;
trc->context->free(ne);
continue;