Backed out changeset 39d2e19acdd5 (bug 988486) for bustage on a CLOSED TREE

This commit is contained in:
Carsten "Tomcat" Book 2014-04-30 15:39:40 +02:00
parent c2ae0f9899
commit e0e9729776
10 changed files with 122 additions and 126 deletions

View File

@@ -821,7 +821,15 @@ struct Chunk
static Chunk *allocate(JSRuntime *rt);
void decommitAllArenas(JSRuntime *rt);
void decommitAllArenas(JSRuntime *rt) {
decommittedArenas.clear(true);
MarkPagesUnused(rt, &arenas[0], ArenasPerChunk * ArenaSize);
info.freeArenasHead = nullptr;
info.lastDecommittedArenaOffset = 0;
info.numArenasFree = ArenasPerChunk;
info.numArenasFreeCommitted = 0;
}
/* Must be called with the GC lock taken. */
static inline void release(JSRuntime *rt, Chunk *chunk);

View File

@@ -12,34 +12,35 @@
using namespace js;
using namespace js::gc;
bool
SystemPageAllocator::decommitEnabled()
static bool
DecommitEnabled(JSRuntime *rt)
{
return pageSize == ArenaSize;
return rt->gcSystemPageSize == ArenaSize;
}
#if defined(XP_WIN)
#include "jswin.h"
#include <psapi.h>
SystemPageAllocator::SystemPageAllocator()
void
gc::InitMemorySubsystem(JSRuntime *rt)
{
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
pageSize = sysinfo.dwPageSize;
allocGranularity = sysinfo.dwAllocationGranularity;
rt->gcSystemPageSize = sysinfo.dwPageSize;
rt->gcSystemAllocGranularity = sysinfo.dwAllocationGranularity;
}
void *
SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
{
JS_ASSERT(size >= alignment);
JS_ASSERT(size % alignment == 0);
JS_ASSERT(size % pageSize == 0);
JS_ASSERT(alignment % allocGranularity == 0);
JS_ASSERT(size % rt->gcSystemPageSize == 0);
JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
/* Special case: If we want allocation alignment, no further work is needed. */
if (alignment == allocGranularity) {
if (alignment == rt->gcSystemAllocGranularity) {
return VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}
@@ -59,7 +60,7 @@ SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
* Since we're going to unmap the whole thing anyway, the first
* mapping doesn't have to commit pages.
*/
size_t reserveSize = size + alignment - pageSize;
size_t reserveSize = size + alignment - rt->gcSystemPageSize;
p = VirtualAlloc(nullptr, reserveSize, MEM_RESERVE, PAGE_READWRITE);
if (!p)
return nullptr;
@@ -75,31 +76,31 @@ SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
}
void
SystemPageAllocator::unmapPages(void *p, size_t size)
gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
{
JS_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}
bool
SystemPageAllocator::markPagesUnused(void *p, size_t size)
gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
{
if (!DecommitEnabled(rt))
return true;
JS_ASSERT(uintptr_t(p) % pageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
LPVOID p2 = VirtualAlloc(p, size, MEM_RESET, PAGE_READWRITE);
return p2 == p;
}
bool
SystemPageAllocator::markPagesInUse(void *p, size_t size)
gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
{
JS_ASSERT(uintptr_t(p) % pageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
return true;
}
size_t
SystemPageAllocator::GetPageFaultCount()
gc::GetPageFaultCount()
{
PROCESS_MEMORY_COUNTERS pmc;
if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
@@ -108,7 +109,7 @@ SystemPageAllocator::GetPageFaultCount()
}
void *
SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
gc::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
{
// TODO: Bug 988813 - Support memory mapped array buffer for Windows platform.
return nullptr;
@@ -116,7 +117,7 @@ SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length,
// Deallocate mapped memory for object.
void
SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
gc::DeallocateMappedContent(void *p, size_t length)
{
// TODO: Bug 988813 - Support memory mapped array buffer for Windows platform.
}
@@ -130,18 +131,19 @@ SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
# define MAP_NOSYNC 0
#endif
SystemPageAllocator::SystemPageAllocator()
void
gc::InitMemorySubsystem(JSRuntime *rt)
{
pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
}
void *
SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
{
JS_ASSERT(size >= alignment);
JS_ASSERT(size % alignment == 0);
JS_ASSERT(size % pageSize == 0);
JS_ASSERT(alignment % allocGranularity == 0);
JS_ASSERT(size % rt->gcSystemPageSize == 0);
JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
@@ -153,33 +155,33 @@ SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
}
void
SystemPageAllocator::unmapPages(void *p, size_t size)
gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
{
JS_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
}
bool
SystemPageAllocator::markPagesUnused(void *p, size_t size)
gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
{
JS_ASSERT(uintptr_t(p) % pageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
return true;
}
bool
SystemPageAllocator::markPagesInUse(void *p, size_t size)
gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
{
JS_ASSERT(uintptr_t(p) % pageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
return true;
}
size_t
SystemPageAllocator::GetPageFaultCount()
gc::GetPageFaultCount()
{
return 0;
}
void *
SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
gc::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
{
// Not implemented.
return nullptr;
@@ -187,7 +189,7 @@ SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length,
// Deallocate mapped memory for object.
void
SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
gc::DeallocateMappedContent(void *p, size_t length)
{
// Not implemented.
}
@@ -201,9 +203,10 @@ SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
#include <sys/types.h>
#include <unistd.h>
SystemPageAllocator::SystemPageAllocator()
void
gc::InitMemorySubsystem(JSRuntime *rt)
{
pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
}
static inline void *
@@ -241,18 +244,18 @@ MapMemory(size_t length, int prot, int flags, int fd, off_t offset)
}
void *
SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
{
JS_ASSERT(size >= alignment);
JS_ASSERT(size % alignment == 0);
JS_ASSERT(size % pageSize == 0);
JS_ASSERT(alignment % allocGranularity == 0);
JS_ASSERT(size % rt->gcSystemPageSize == 0);
JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANON;
/* Special case: If we want page alignment, no further work is needed. */
if (alignment == allocGranularity) {
if (alignment == rt->gcSystemAllocGranularity) {
void *region = MapMemory(size, prot, flags, -1, 0);
if (region == MAP_FAILED)
return nullptr;
@@ -281,31 +284,31 @@ SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
}
void
SystemPageAllocator::unmapPages(void *p, size_t size)
gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
{
JS_ALWAYS_TRUE(0 == munmap(p, size));
}
bool
SystemPageAllocator::markPagesUnused(void *p, size_t size)
gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
{
if (!decommitEnabled())
if (!DecommitEnabled(rt))
return false;
JS_ASSERT(uintptr_t(p) % pageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
int result = madvise(p, size, MADV_DONTNEED);
return result != -1;
}
bool
SystemPageAllocator::markPagesInUse(void *p, size_t size)
gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
{
JS_ASSERT(uintptr_t(p) % pageSize == 0);
JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
return true;
}
size_t
SystemPageAllocator::GetPageFaultCount()
gc::GetPageFaultCount()
{
struct rusage usage;
int err = getrusage(RUSAGE_SELF, &usage);
@@ -315,7 +318,7 @@ SystemPageAllocator::GetPageFaultCount()
}
void *
SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
gc::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
{
#define NEED_PAGE_ALIGNED 0
size_t pa_start; // Page aligned starting
@@ -364,7 +367,7 @@ SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length,
}
void
SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
gc::DeallocateMappedContent(void *p, size_t length)
{
void *pa_start; // Page aligned starting
size_t page_size = sysconf(_SC_PAGESIZE); // Page size

View File

@@ -14,49 +14,41 @@ struct JSRuntime;
namespace js {
namespace gc {
class SystemPageAllocator
{
public:
// Sanity check that our compiled configuration matches the currently
// running instance and initialize any runtime data needed for allocation.
SystemPageAllocator();
// Sanity check that our compiled configuration matches the currently running
// instance and initialize any runtime data needed for allocation.
void
InitMemorySubsystem(JSRuntime *rt);
size_t systemPageSize() { return pageSize; }
size_t systemAllocGranularity() { return allocGranularity; }
// Allocate or deallocate pages from the system with the given alignment.
void *
MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment);
// Allocate or deallocate pages from the system with the given alignment.
void *mapAlignedPages(size_t size, size_t alignment);
void unmapPages(void *p, size_t size);
void
UnmapPages(JSRuntime *rt, void *p, size_t size);
// Tell the OS that the given pages are not in use, so they should not be
// written to a paging file. This may be a no-op on some platforms.
bool markPagesUnused(void *p, size_t size);
// Tell the OS that the given pages are not in use, so they should not
// be written to a paging file. This may be a no-op on some platforms.
bool
MarkPagesUnused(JSRuntime *rt, void *p, size_t size);
// Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
// and should be paged in and out normally. This may be a no-op on some
// platforms.
bool markPagesInUse(void *p, size_t size);
// Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
// and should be paged in and out normally. This may be a no-op on some
// platforms.
bool
MarkPagesInUse(JSRuntime *rt, void *p, size_t size);
// Returns #(hard faults) + #(soft faults)
static size_t GetPageFaultCount();
// Returns #(hard faults) + #(soft faults)
size_t
GetPageFaultCount();
// Allocate memory mapped content.
// The offset must be aligned according to alignment requirement.
static void *AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
// Allocate memory mapped content.
// The offset must be aligned according to alignment requirement.
void *
AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
// Deallocate memory mapped content.
static void DeallocateMappedContent(void *p, size_t length);
private:
bool decommitEnabled();
// The GC can only safely decommit memory when the page size of the
// running process matches the compiled arena size.
size_t pageSize;
// The OS allocation granularity may not match the page size.
size_t allocGranularity;
};
// Deallocate memory mapped content.
void
DeallocateMappedContent(void *p, size_t length);
} // namespace gc
} // namespace js

View File

@@ -53,7 +53,7 @@ js::Nursery::init()
if (!hugeSlots.init())
return false;
void *heap = runtime()->pageAllocator.mapAlignedPages(NurserySize, Alignment);
void *heap = MapAlignedPages(runtime(), NurserySize, Alignment);
if (!heap)
return false;
@@ -79,23 +79,7 @@ js::Nursery::init()
js::Nursery::~Nursery()
{
if (start())
runtime()->pageAllocator.unmapPages((void *)start(), NurserySize);
}
void
js::Nursery::updateDecommittedRegion()
{
#ifndef JS_GC_ZEAL
if (numActiveChunks_ < NumNurseryChunks) {
// Bug 994054: madvise on MacOS is too slow to make this
// optimization worthwhile.
# ifndef XP_MACOSX
uintptr_t decommitStart = chunk(numActiveChunks_).start();
JS_ASSERT(decommitStart == AlignBytes(decommitStart, 1 << 20));
runtime()->pageAllocator.markPagesUnused((void *)decommitStart, heapEnd() - decommitStart);
# endif
}
#endif
UnmapPages(runtime(), (void *)start(), NurserySize);
}
void

View File

@@ -216,7 +216,19 @@ class Nursery
initChunk(chunkno);
}
void updateDecommittedRegion();
void updateDecommittedRegion() {
#ifndef JS_GC_ZEAL
if (numActiveChunks_ < NumNurseryChunks) {
// Bug 994054: madvise on MacOS is too slow to make this
// optimization worthwhile.
# ifndef XP_MACOSX
uintptr_t decommitStart = chunk(numActiveChunks_).start();
JS_ASSERT(decommitStart == AlignBytes(decommitStart, 1 << 20));
gc::MarkPagesUnused(runtime(), (void *)decommitStart, heapEnd() - decommitStart);
# endif
}
#endif
}
MOZ_ALWAYS_INLINE uintptr_t allocationEnd() const {
JS_ASSERT(numActiveChunks_ > 0);

View File

@@ -21,7 +21,6 @@
#include "vm/Runtime.h"
using namespace js;
using namespace js::gc;
using namespace js::gcstats;
using mozilla::PodArrayZero;
@@ -572,7 +571,7 @@ Statistics::beginSlice(int collectedCount, int zoneCount, int compartmentCount,
if (first)
beginGC();
SliceData data(reason, PRMJ_Now(), SystemPageAllocator::GetPageFaultCount());
SliceData data(reason, PRMJ_Now(), gc::GetPageFaultCount());
(void) slices.append(data); /* Ignore any OOMs here. */
if (JSAccumulateTelemetryDataCallback cb = runtime->telemetryCallback)
@@ -591,7 +590,7 @@ void
Statistics::endSlice()
{
slices.back().end = PRMJ_Now();
slices.back().endFaults = SystemPageAllocator::GetPageFaultCount();
slices.back().endFaults = gc::GetPageFaultCount();
if (JSAccumulateTelemetryDataCallback cb = runtime->telemetryCallback) {
(*cb)(JS_TELEMETRY_GC_SLICE_MS, t(slices.back().end - slices.back().start));

View File

@@ -289,7 +289,7 @@ struct ThreadSafeContext : ContextFriendFields,
void *runtimeAddressForJit() { return runtime_; }
void *stackLimitAddress(StackKind kind) { return &runtime_->mainThread.nativeStackLimit[kind]; }
void *stackLimitAddressForJitCode(StackKind kind);
size_t gcSystemPageSize() { return runtime_->pageAllocator.systemPageSize(); }
size_t gcSystemPageSize() { return runtime_->gcSystemPageSize; }
bool signalHandlersInstalled() const { return runtime_->signalHandlersInstalled(); }
bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; }

View File

@@ -628,13 +628,13 @@ FinalizeArenas(FreeOp *fop,
static inline Chunk *
AllocChunk(JSRuntime *rt)
{
return static_cast<Chunk *>(rt->pageAllocator.mapAlignedPages(ChunkSize, ChunkSize));
return static_cast<Chunk *>(MapAlignedPages(rt, ChunkSize, ChunkSize));
}
static inline void
FreeChunk(JSRuntime *rt, Chunk *p)
{
rt->pageAllocator.unmapPages(static_cast<void *>(p), ChunkSize);
UnmapPages(rt, static_cast<void *>(p), ChunkSize);
}
inline bool
@@ -777,17 +777,6 @@ Chunk::prepareToBeFreed(JSRuntime *rt)
#endif
}
void Chunk::decommitAllArenas(JSRuntime *rt)
{
decommittedArenas.clear(true);
rt->pageAllocator.markPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
info.freeArenasHead = nullptr;
info.lastDecommittedArenaOffset = 0;
info.numArenasFree = ArenasPerChunk;
info.numArenasFreeCommitted = 0;
}
void
Chunk::init(JSRuntime *rt)
{
@@ -888,7 +877,7 @@ Chunk::fetchNextDecommittedArena()
decommittedArenas.unset(offset);
Arena *arena = &arenas[offset];
info.trailer.runtime->pageAllocator.markPagesInUse(arena, ArenaSize);
MarkPagesInUse(info.trailer.runtime, arena, ArenaSize);
arena->aheader.setAsNotAllocated();
return &arena->aheader;
@@ -1100,6 +1089,8 @@ static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
bool
js_InitGC(JSRuntime *rt, uint32_t maxbytes)
{
InitMemorySubsystem(rt);
if (!rt->gcChunkSet.init(INITIAL_CHUNK_CAPACITY))
return false;
@@ -2065,7 +2056,7 @@ DecommitArenasFromAvailableList(JSRuntime *rt, Chunk **availableListHeadp)
Maybe<AutoUnlockGC> maybeUnlock;
if (!rt->isHeapBusy())
maybeUnlock.construct(rt);
ok = rt->pageAllocator.markPagesUnused(aheader->getArena(), ArenaSize);
ok = MarkPagesUnused(rt, aheader->getArena(), ArenaSize);
}
if (ok) {

View File

@@ -535,7 +535,7 @@ ArrayBufferObject::canNeuterAsmJSArrayBuffer(JSContext *cx, ArrayBufferObject &b
void *
ArrayBufferObject::createMappedContents(int fd, size_t offset, size_t length)
{
return SystemPageAllocator::AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT);
return AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT);
}
void
@@ -544,7 +544,7 @@ ArrayBufferObject::releaseMappedArray()
if(!isMappedArrayBuffer() || isNeutered())
return;
SystemPageAllocator::DeallocateMappedContent(dataPointer(), byteLength());
DeallocateMappedContent(dataPointer(), byteLength());
}
void
@@ -1107,7 +1107,7 @@ JS_CreateMappedArrayBufferContents(int fd, size_t offset, size_t length)
JS_PUBLIC_API(void)
JS_ReleaseMappedArrayBufferContents(void *contents, size_t length)
{
SystemPageAllocator::DeallocateMappedContent(contents, length);
DeallocateMappedContent(contents, length);
}
JS_FRIEND_API(bool)

View File

@@ -1297,7 +1297,14 @@ struct JSRuntime : public JS::shadow::Runtime,
ExtraTracerVector gcBlackRootTracers;
ExtraTracer gcGrayRootTracer;
js::gc::SystemPageAllocator pageAllocator;
/*
* The GC can only safely decommit memory when the page size of the
* running process matches the compiled arena size.
*/
size_t gcSystemPageSize;
/* The OS allocation granularity may not match the page size. */
size_t gcSystemAllocGranularity;
/* Strong references on scripts held for PCCount profiling API. */
js::ScriptAndCountsVector *scriptAndCountsVector;