Bug 840242 - Use the runtime page size to control arena decommit; r=luke

--HG--
extra : rebase_source : 575a7485a5c6ac51f5c0cadc91616302326ce770
This commit is contained in:
Terrence Cole 2013-02-11 13:59:10 -08:00
parent 0d3de8ab37
commit af471f2f39
10 changed files with 117 additions and 115 deletions

View File

@@ -13,24 +13,7 @@
namespace js {
namespace gc {
/*
* Page size must be static to support our arena pointer optimizations, so we
* are forced to support each platform with non-4096 pages as a special case.
* Note: The freelist supports a maximum arena shift of 15.
* Note: Do not use JS_CPU_SPARC here, this header is used outside JS.
*/
#if (defined(SOLARIS) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \
(defined(__sparc) || defined(__sparcv9) || defined(__ia64))
const size_t PageShift = 13;
const size_t ArenaShift = PageShift;
#elif defined(__powerpc64__)
const size_t PageShift = 16;
const size_t ArenaShift = 12;
#else
const size_t PageShift = 12;
const size_t ArenaShift = PageShift;
#endif
const size_t PageSize = size_t(1) << PageShift;
const size_t ArenaSize = size_t(1) << ArenaShift;
const size_t ArenaMask = ArenaSize - 1;

View File

@@ -6,46 +6,42 @@
#include "gc/Memory.h"
#include "jscntxt.h"
#include "js/HeapAPI.h"
using namespace js;
using namespace js::gc;
/* Unused memory decommiting requires the arena size match the page size. */
static bool
DecommitEnabled()
DecommitEnabled(JSRuntime *rt)
{
return PageSize == ArenaSize;
return rt->gcSystemPageSize == ArenaSize;
}
#if defined(XP_WIN)
#include "jswin.h"
#include <psapi.h>
static size_t AllocationGranularity = 0;
void
gc::InitMemorySubsystem()
gc::InitMemorySubsystem(JSRuntime *rt)
{
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
if (sysinfo.dwPageSize != PageSize) {
fprintf(stderr,"SpiderMonkey compiled with incorrect page size; please update js/public/HeapAPI.h.\n");
MOZ_CRASH();
}
AllocationGranularity = sysinfo.dwAllocationGranularity;
rt->gcSystemPageSize = sysinfo.dwPageSize;
rt->gcSystemAllocGranularity = sysinfo.dwAllocationGranularity;
}
void *
gc::MapAlignedPages(size_t size, size_t alignment)
gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
{
JS_ASSERT(size >= alignment);
JS_ASSERT(size % alignment == 0);
JS_ASSERT(size % PageSize == 0);
JS_ASSERT(alignment % AllocationGranularity == 0);
JS_ASSERT(size % rt->gcSystemPageSize == 0);
JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
/* Special case: If we want allocation alignment, no further work is needed. */
if (alignment == AllocationGranularity) {
if (alignment == rt->gcSystemAllocGranularity) {
return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}
@@ -69,7 +65,7 @@ gc::MapAlignedPages(size_t size, size_t alignment)
if (!p)
return NULL;
void *chunkStart = (void *)(uintptr_t(p) + (alignment - (uintptr_t(p) % alignment)));
UnmapPages(p, size * 2);
UnmapPages(rt, p, size * 2);
p = VirtualAlloc(chunkStart, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
/* Failure here indicates a race with another thread, so try again. */
@@ -80,26 +76,26 @@ gc::MapAlignedPages(size_t size, size_t alignment)
}
void
gc::UnmapPages(void *p, size_t size)
gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
{
JS_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}
bool
gc::MarkPagesUnused(void *p, size_t size)
gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
{
if (!DecommitEnabled())
return false;
if (!DecommitEnabled(rt))
return true;
JS_ASSERT(uintptr_t(p) % PageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
LPVOID p2 = VirtualAlloc(p, size, MEM_RESET, PAGE_READWRITE);
return p2 == p;
}
bool
gc::MarkPagesInUse(void *p, size_t size)
gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
{
JS_ASSERT(uintptr_t(p) % PageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
return true;
}
@@ -121,12 +117,13 @@ gc::GetPageFaultCount()
#define OS2_MAX_RECURSIONS 16
void
gc::InitMemorySubsystem()
gc::InitMemorySubsystem(JSRuntime *rt)
{
rt->gcSystemPageSize = rt->gcSystemAllocGranularity = ArenaSize;
}
void
gc::UnmapPages(void *addr, size_t size)
gc::UnmapPages(JSRuntime *rt, void *addr, size_t size)
{
if (!DosFreeMem(addr))
return;
@@ -147,7 +144,7 @@ gc::UnmapPages(void *addr, size_t size)
}
static void *
gc::MapAlignedPagesRecursively(size_t size, size_t alignment, int& recursions)
gc::MapAlignedPagesRecursively(JSRuntime *rt, size_t size, size_t alignment, int& recursions)
{
if (++recursions >= OS2_MAX_RECURSIONS)
return NULL;
@@ -173,7 +170,7 @@ gc::MapAlignedPagesRecursively(size_t size, size_t alignment, int& recursions)
unsigned long rc = DosQueryMem(&(static_cast<char*>(tmp))[size],
&cb, &flags);
if (!rc && (flags & PAG_FREE) && cb >= filler) {
UnmapPages(tmp, 0);
UnmapPages(rt, tmp, 0);
if (DosAllocMem(&tmp, filler,
OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
JS_ALWAYS_TRUE(DosAllocMem(&tmp, filler,
@@ -181,19 +178,19 @@ gc::MapAlignedPagesRecursively(size_t size, size_t alignment, int& recursions)
}
}
void *p = MapAlignedPagesRecursively(size, alignment, recursions);
UnmapPages(tmp, 0);
void *p = MapAlignedPagesRecursively(rt, size, alignment, recursions);
UnmapPages(rt, tmp, 0);
return p;
}
void *
gc::MapAlignedPages(size_t size, size_t alignment)
gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
{
JS_ASSERT(size >= alignment);
JS_ASSERT(size % alignment == 0);
JS_ASSERT(size % PageSize == 0);
JS_ASSERT(alignment % PageSize == 0);
JS_ASSERT(size % rt->gcSystemPageSize == 0);
JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
int recursions = -1;
@@ -202,7 +199,7 @@ gc::MapAlignedPages(size_t size, size_t alignment)
* of the right size by recursively allocating blocks of unaligned
* free memory until only an aligned allocation is possible.
*/
void *p = MapAlignedPagesRecursively(size, alignment, recursions);
void *p = MapAlignedPagesRecursively(rt, size, alignment, recursions);
if (p)
return p;
@@ -224,16 +221,16 @@ gc::MapAlignedPages(size_t size, size_t alignment)
}
bool
gc::MarkPagesUnused(void *p, size_t size)
gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
{
JS_ASSERT(uintptr_t(p) % PageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
return true;
}
bool
gc::MarkPagesInUse(void *p, size_t size)
gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
{
JS_ASSERT(uintptr_t(p) % PageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
return true;
}
@@ -253,17 +250,18 @@ gc::GetPageFaultCount()
#endif
void
gc::InitMemorySubsystem()
gc::InitMemorySubsystem(JSRuntime *rt)
{
rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
}
void *
gc::MapAlignedPages(size_t size, size_t alignment)
gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
{
JS_ASSERT(size >= alignment);
JS_ASSERT(size % alignment == 0);
JS_ASSERT(size % PageSize == 0);
JS_ASSERT(alignment % PageSize == 0);
JS_ASSERT(size % rt->gcSystemPageSize == 0);
JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
@@ -275,22 +273,22 @@ gc::MapAlignedPages(size_t size, size_t alignment)
}
void
gc::UnmapPages(void *p, size_t size)
gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
{
JS_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
}
bool
gc::MarkPagesUnused(void *p, size_t size)
gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
{
JS_ASSERT(uintptr_t(p) % PageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
return true;
}
bool
gc::MarkPagesInUse(void *p, size_t size)
gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
{
JS_ASSERT(uintptr_t(p) % PageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
return true;
}
@@ -307,27 +305,24 @@ gc::GetPageFaultCount()
#include <unistd.h>
void
gc::InitMemorySubsystem()
gc::InitMemorySubsystem(JSRuntime *rt)
{
if (size_t(sysconf(_SC_PAGESIZE)) != PageSize) {
fprintf(stderr,"SpiderMonkey compiled with incorrect page size; please update js/public/HeapAPI.h.\n");
MOZ_CRASH();
}
rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
}
void *
gc::MapAlignedPages(size_t size, size_t alignment)
gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
{
JS_ASSERT(size >= alignment);
JS_ASSERT(size % alignment == 0);
JS_ASSERT(size % PageSize == 0);
JS_ASSERT(alignment % PageSize == 0);
JS_ASSERT(size % rt->gcSystemPageSize == 0);
JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANON;
/* Special case: If we want page alignment, no further work is needed. */
if (alignment == PageSize) {
if (alignment == rt->gcSystemAllocGranularity) {
return mmap(NULL, size, prot, flags, -1, 0);
}
@@ -353,26 +348,26 @@ gc::MapAlignedPages(size_t size, size_t alignment)
}
void
gc::UnmapPages(void *p, size_t size)
gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
{
JS_ALWAYS_TRUE(0 == munmap(p, size));
}
bool
gc::MarkPagesUnused(void *p, size_t size)
gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
{
if (!DecommitEnabled())
if (!DecommitEnabled(rt))
return false;
JS_ASSERT(uintptr_t(p) % PageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
int result = madvise(p, size, MADV_DONTNEED);
return result != -1;
}
bool
gc::MarkPagesInUse(void *p, size_t size)
gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
{
JS_ASSERT(uintptr_t(p) % PageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
return true;
}

View File

@@ -15,23 +15,30 @@ namespace gc {
// Sanity check that our compiled configuration matches the currently running
// instance and initialize any runtime data needed for allocation.
void InitMemorySubsystem();
void
InitMemorySubsystem(JSRuntime *rt);
// Allocate or deallocate pages from the system with the given alignment.
void *MapAlignedPages(size_t size, size_t alignment);
void UnmapPages(void *p, size_t size);
void *
MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment);
void
UnmapPages(JSRuntime *rt, void *p, size_t size);
// Tell the OS that the given pages are not in use, so they should not
// be written to a paging file. This may be a no-op on some platforms.
bool MarkPagesUnused(void *p, size_t size);
bool
MarkPagesUnused(JSRuntime *rt, void *p, size_t size);
// Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
// and should be paged in and out normally. This may be a no-op on some
// platforms.
bool MarkPagesInUse(void *p, size_t size);
bool
MarkPagesInUse(JSRuntime *rt, void *p, size_t size);
// Returns #(hard faults) + #(soft faults)
size_t GetPageFaultCount();
size_t
GetPageFaultCount();
} // namespace gc
} // namespace js

View File

@@ -1405,7 +1405,7 @@ class MOZ_STACK_CLASS ModuleCompiler
void setSecondPassComplete() {
JS_ASSERT(currentPass_ == 2);
masm_.align(gc::PageSize);
masm_.align(AsmJSPageSize);
module_->setFunctionBytes(masm_.size());
currentPass_ = 3;
}
@@ -1478,7 +1478,7 @@ class MOZ_STACK_CLASS ModuleCompiler
// The code must be page aligned, so include extra space so that we can
// AlignBytes the allocation result below.
size_t allocedBytes = totalBytes + gc::PageSize;
size_t allocedBytes = totalBytes + AsmJSPageSize;
// Allocate the slab of memory.
JSC::ExecutableAllocator *execAlloc = cx_->compartment()->ionCompartment()->execAlloc();
@@ -1486,7 +1486,7 @@ class MOZ_STACK_CLASS ModuleCompiler
uint8_t *unalignedBytes = (uint8_t*)execAlloc->alloc(allocedBytes, &pool, JSC::ASMJS_CODE);
if (!unalignedBytes)
return false;
uint8_t *code = (uint8_t*)AlignBytes((uintptr_t)unalignedBytes, gc::PageSize);
uint8_t *code = (uint8_t*)AlignBytes((uintptr_t)unalignedBytes, AsmJSPageSize);
// The ExecutablePool owns the memory and must be released by the AsmJSModule.
module_->takeOwnership(pool, code, codeBytes, totalBytes);
@@ -6110,6 +6110,9 @@ js::CompileAsmJS(JSContext *cx, TokenStream &ts, ParseNode *fn, const CompileOpt
if (!JSC::MacroAssembler().supportsFloatingPoint())
return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by lack of floating point support");
if (cx->runtime()->gcSystemPageSize != AsmJSPageSize)
return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by non 4KiB system page size");
if (!cx->hasOption(JSOPTION_ASMJS))
return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by javascript.options.asmjs in about:config");

View File

@@ -89,6 +89,9 @@ class AsmJSActivation
void setResumePC(void *pc) { resumePC_ = pc; }
};
// The assumed page size; dynamically checked in CompileAsmJS.
const size_t AsmJSPageSize = 4096;
// The asm.js spec requires that the ArrayBuffer's byteLength be a multiple of 4096.
static const size_t AsmJSAllocationGranularity = 4096;

View File

@@ -598,12 +598,12 @@ class AsmJSModule
}
void setFunctionBytes(size_t functionBytes) {
JS_ASSERT(functionBytes % gc::PageSize == 0);
JS_ASSERT(functionBytes % AsmJSPageSize == 0);
functionBytes_ = functionBytes;
}
size_t functionBytes() const {
JS_ASSERT(functionBytes_);
JS_ASSERT(functionBytes_ % gc::PageSize == 0);
JS_ASSERT(functionBytes_ % AsmJSPageSize == 0);
return functionBytes_;
}
bool containsPC(void *pc) const {
@@ -656,7 +656,7 @@ class AsmJSModule
void takeOwnership(JSC::ExecutablePool *pool, uint8_t *code, size_t codeBytes, size_t totalBytes) {
JS_ASSERT(uintptr_t(code) % gc::PageSize == 0);
JS_ASSERT(uintptr_t(code) % AsmJSPageSize == 0);
codePool_ = pool;
code_ = code;
codeBytes_ = codeBytes;
@@ -664,7 +664,7 @@ class AsmJSModule
}
uint8_t *functionCode() const {
JS_ASSERT(code_);
JS_ASSERT(uintptr_t(code_) % gc::PageSize == 0);
JS_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0);
return code_;
}

View File

@@ -1140,8 +1140,6 @@ JS_NewRuntime(uint32_t maxbytes, JSUseHelperThreads useHelperThreads)
#undef MSG_DEF
#endif /* DEBUG */
InitMemorySubsystem();
if (!js::TlsPerThreadData.init())
return NULL;

View File

@@ -1138,6 +1138,15 @@ struct JSRuntime : public JS::shadow::Runtime,
/* Stack of thread-stack-allocated GC roots. */
js::AutoGCRooter *autoGCRooters;
/*
* The GC can only safely decommit memory when the page size of the
* running process matches the compiled arena size.
*/
size_t gcSystemPageSize;
/* The OS allocation granularity may not match the page size. */
size_t gcSystemAllocGranularity;
/* Strong references on scripts held for PCCount profiling API. */
js::ScriptAndCountsVector *scriptAndCountsVector;

View File

@@ -457,13 +457,15 @@ FinalizeArenas(FreeOp *fop,
}
static inline Chunk *
AllocChunk() {
return static_cast<Chunk *>(MapAlignedPages(ChunkSize, ChunkSize));
AllocChunk(JSRuntime *rt)
{
return static_cast<Chunk *>(MapAlignedPages(rt, ChunkSize, ChunkSize));
}
static inline void
FreeChunk(Chunk *p) {
UnmapPages(static_cast<void *>(p), ChunkSize);
FreeChunk(JSRuntime *rt, Chunk *p)
{
UnmapPages(rt, static_cast<void *>(p), ChunkSize);
}
inline bool
@@ -553,25 +555,25 @@ ChunkPool::expire(JSRuntime *rt, bool releaseAll)
}
static void
FreeChunkList(Chunk *chunkListHead)
FreeChunkList(JSRuntime *rt, Chunk *chunkListHead)
{
while (Chunk *chunk = chunkListHead) {
JS_ASSERT(!chunk->info.numArenasFreeCommitted);
chunkListHead = chunk->info.next;
FreeChunk(chunk);
FreeChunk(rt, chunk);
}
}
void
ChunkPool::expireAndFree(JSRuntime *rt, bool releaseAll)
{
FreeChunkList(expire(rt, releaseAll));
FreeChunkList(rt, expire(rt, releaseAll));
}
/* static */ Chunk *
Chunk::allocate(JSRuntime *rt)
{
Chunk *chunk = static_cast<Chunk *>(AllocChunk());
Chunk *chunk = AllocChunk(rt);
#ifdef JSGC_ROOT_ANALYSIS
// Our poison pointers are not guaranteed to be invalid on 64-bit
@@ -584,7 +586,7 @@ Chunk::allocate(JSRuntime *rt)
// were marked as uncommitted, but it's a little complicated to avoid
// clobbering pre-existing unrelated mappings.
while (IsPoisonedPtr(chunk))
chunk = static_cast<Chunk *>(AllocChunk());
chunk = AllocChunk(rt);
#endif
if (!chunk)
@@ -600,7 +602,7 @@ Chunk::release(JSRuntime *rt, Chunk *chunk)
{
JS_ASSERT(chunk);
chunk->prepareToBeFreed(rt);
FreeChunk(chunk);
FreeChunk(rt, chunk);
}
inline void
@@ -728,7 +730,7 @@ Chunk::fetchNextDecommittedArena()
decommittedArenas.unset(offset);
Arena *arena = &arenas[offset];
MarkPagesInUse(arena, ArenaSize);
MarkPagesInUse(info.runtime, arena, ArenaSize);
arena->aheader.setAsNotAllocated();
return &arena->aheader;
@@ -920,6 +922,8 @@ static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
JSBool
js_InitGC(JSRuntime *rt, uint32_t maxbytes)
{
InitMemorySubsystem(rt);
if (!rt->gcChunkSet.init(INITIAL_CHUNK_CAPACITY))
return false;
@@ -2047,7 +2051,7 @@ DecommitArenasFromAvailableList(JSRuntime *rt, Chunk **availableListHeadp)
Maybe<AutoUnlockGC> maybeUnlock;
if (!rt->isHeapBusy())
maybeUnlock.construct(rt);
ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
ok = MarkPagesUnused(rt, aheader->getArena(), ArenaSize);
}
if (ok) {
@@ -2077,7 +2081,7 @@ DecommitArenasFromAvailableList(JSRuntime *rt, Chunk **availableListHeadp)
JS_ASSERT(chunk->info.prevp);
}
if (rt->gcChunkAllocationSinceLastGC) {
if (rt->gcChunkAllocationSinceLastGC || !ok) {
/*
* The allocator thread has started to get new chunks. We should stop
* to avoid decommitting arenas in just allocated chunks.
@@ -2115,7 +2119,7 @@ ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink)
{
if (Chunk *toFree = rt->gcChunkPool.expire(rt, shouldShrink)) {
AutoUnlockGC unlock(rt);
FreeChunkList(toFree);
FreeChunkList(rt, toFree);
}
if (shouldShrink)

View File

@@ -356,9 +356,9 @@ ArrayBufferObject::uninlineData(JSContext *maybecx)
// | \ /
// obj->elements required to be page boundaries
//
JS_STATIC_ASSERT(sizeof(ObjectElements) < PageSize);
JS_STATIC_ASSERT(AsmJSAllocationGranularity == PageSize);
static const size_t AsmJSMappedSize = PageSize + AsmJSBufferProtectedSize;
JS_STATIC_ASSERT(sizeof(ObjectElements) < AsmJSPageSize);
JS_STATIC_ASSERT(AsmJSAllocationGranularity == AsmJSPageSize);
static const size_t AsmJSMappedSize = AsmJSPageSize + AsmJSBufferProtectedSize;
bool
ArrayBufferObject::prepareForAsmJS(JSContext *cx, Handle<ArrayBufferObject*> buffer)
@@ -381,19 +381,19 @@ ArrayBufferObject::prepareForAsmJS(JSContext *cx, Handle<ArrayBufferObject*> buf
// Enable access to the valid region.
JS_ASSERT(buffer->byteLength() % AsmJSAllocationGranularity == 0);
# ifdef XP_WIN
if (!VirtualAlloc(p, PageSize + buffer->byteLength(), MEM_COMMIT, PAGE_READWRITE)) {
if (!VirtualAlloc(p, AsmJSPageSize + buffer->byteLength(), MEM_COMMIT, PAGE_READWRITE)) {
VirtualFree(p, 0, MEM_RELEASE);
return false;
}
# else
if (mprotect(p, PageSize + buffer->byteLength(), PROT_READ | PROT_WRITE)) {
if (mprotect(p, AsmJSPageSize + buffer->byteLength(), PROT_READ | PROT_WRITE)) {
munmap(p, AsmJSMappedSize);
return false;
}
# endif
// Copy over the current contents of the typed array.
uint8_t *data = reinterpret_cast<uint8_t*>(p) + PageSize;
uint8_t *data = reinterpret_cast<uint8_t*>(p) + AsmJSPageSize;
memcpy(data, buffer->dataPointer(), buffer->byteLength());
// Swap the new elements into the ArrayBufferObject.
@@ -415,8 +415,8 @@ ArrayBufferObject::releaseAsmJSArrayBuffer(FreeOp *fop, JSObject *obj)
ArrayBufferObject &buffer = obj->as<ArrayBufferObject>();
JS_ASSERT(buffer.isAsmJSArrayBuffer());
uint8_t *p = buffer.dataPointer() - PageSize ;
JS_ASSERT(uintptr_t(p) % PageSize == 0);
uint8_t *p = buffer.dataPointer() - AsmJSPageSize ;
JS_ASSERT(uintptr_t(p) % AsmJSPageSize == 0);
# ifdef XP_WIN
VirtualFree(p, 0, MEM_RELEASE);
# else