Backed out changeset df60c0297aa2 (bug 1100652) for ggc orange

Wes Kocher 2014-11-18 17:27:54 -08:00
parent 13fe636623
commit f829edc179
2 changed files with 206 additions and 69 deletions


@@ -22,7 +22,7 @@ using mozilla::ReentrancyGuard;

 /*** Edges ***/

 void
-StoreBuffer::SlotsEdge::mark(JSTracer *trc) const
+StoreBuffer::SlotsEdge::mark(JSTracer *trc)
 {
     NativeObject *obj = object();
@ -48,7 +48,7 @@ StoreBuffer::SlotsEdge::mark(JSTracer *trc) const
}
void
StoreBuffer::WholeCellEdges::mark(JSTracer *trc) const
StoreBuffer::WholeCellEdges::mark(JSTracer *trc)
{
MOZ_ASSERT(edge->isTenured());
JSGCTraceKind kind = GetGCThingTraceKind(edge);
@@ -64,7 +64,7 @@ StoreBuffer::WholeCellEdges::mark(JSTracer *trc) const
 }

 void
-StoreBuffer::CellPtrEdge::mark(JSTracer *trc) const
+StoreBuffer::CellPtrEdge::mark(JSTracer *trc)
 {
     if (!*edge)
         return;
@@ -74,7 +74,7 @@ StoreBuffer::CellPtrEdge::mark(JSTracer *trc) const
 }

 void
-StoreBuffer::ValueEdge::mark(JSTracer *trc) const
+StoreBuffer::ValueEdge::mark(JSTracer *trc)
 {
     if (!deref())
         return;
@@ -84,14 +84,135 @@ StoreBuffer::ValueEdge::mark(JSTracer *trc) const

 /*** MonoTypeBuffer ***/

 template <typename T>
 void
+StoreBuffer::MonoTypeBuffer<T>::handleOverflow(StoreBuffer *owner)
+{
+    if (!owner->isAboutToOverflow()) {
+        /*
+         * Compact the buffer now, and if that fails to free enough space then
+         * trigger a minor collection.
+         */
+        compact(owner);
+        if (isLowOnSpace())
+            owner->setAboutToOverflow();
+    } else {
+        /*
+         * A minor GC has already been triggered, so there's no point
+         * compacting unless the buffer is totally full.
+         */
+        if (storage_->availableInCurrentChunk() < sizeof(T))
+            maybeCompact(owner);
+    }
+}
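
The overflow policy being restored here can be paraphrased with a small standalone model; this is an illustration only, with all names and sizes invented (in the real code, setAboutToOverflow() flags that a minor GC should run soon):

    #include <cstddef>

    // Toy model of MonoTypeBuffer<T>::handleOverflow (illustrative only).
    struct ToyBuffer {
        std::size_t available = 8192;    // bytes free in the current chunk
        bool aboutToOverflow = false;    // true => a minor GC is already pending

        void compact() { available += 4096; }  // pretend compaction frees space

        void handleOverflow() {
            if (!aboutToOverflow) {
                // First try to make room by compacting; only if that fails
                // to free enough space do we request a minor collection.
                compact();
                if (available < 2048)    // HighAvailableThreshold stand-in
                    aboutToOverflow = true;
            } else if (available < sizeof(void *)) {
                // A GC is already pending: compact only when totally full.
                compact();
            }
        }
    };
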
+template <typename T>
+void
+StoreBuffer::MonoTypeBuffer<T>::compactRemoveDuplicates(StoreBuffer *owner)
+{
+    typedef HashSet<T, typename T::Hasher, SystemAllocPolicy> DedupSet;
+
+    DedupSet duplicates;
+    if (!duplicates.init())
+        return; /* Failure to de-dup is acceptable. */
+
+    LifoAlloc::Enum insert(*storage_);
+    for (LifoAlloc::Enum e(*storage_); !e.empty(); e.popFront<T>()) {
+        T *edge = e.get<T>();
+        if (!duplicates.has(*edge)) {
+            insert.updateFront<T>(*edge);
+            insert.popFront<T>();
+
+            /* Failure to insert will leave the set with duplicates. Oh well. */
+            duplicates.put(*edge);
+        }
+    }
+    storage_->release(insert.mark());
+
+    duplicates.clear();
+}
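
compactRemoveDuplicates works in place with two cursors over the same LifoAlloc: the read cursor e visits every entry, the lagging write cursor insert overwrites the prefix with first occurrences, and release(insert.mark()) truncates the storage afterwards. (Note it tolerates a failed insertion into the dedup set, preferring a leftover duplicate to a crash.) The same idiom on a plain array, as a sketch with std::unordered_set standing in for the HashSet above (T must be hashable):

    #include <cstddef>
    #include <unordered_set>
    #include <vector>

    // Two-cursor in-place dedup keeping first occurrences; the final
    // resize() plays the role of storage_->release(insert.mark()).
    template <typename T>
    void compactRemoveDuplicates(std::vector<T> &buf)
    {
        std::unordered_set<T> seen;
        std::size_t write = 0;
        for (std::size_t read = 0; read < buf.size(); ++read) {
            if (seen.insert(buf[read]).second)   // first occurrence of entry
                buf[write++] = buf[read];
        }
        buf.resize(write);
    }
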
+template <typename T>
+void
+StoreBuffer::MonoTypeBuffer<T>::compact(StoreBuffer *owner)
+{
+    MOZ_ASSERT(storage_);
+    compactRemoveDuplicates(owner);
+    usedAtLastCompact_ = storage_->used();
+}
+
+template <typename T>
+void
+StoreBuffer::MonoTypeBuffer<T>::maybeCompact(StoreBuffer *owner)
+{
+    MOZ_ASSERT(storage_);
+    if (storage_->used() != usedAtLastCompact_)
+        compact(owner);
+}
+
+template <typename T>
+void
 StoreBuffer::MonoTypeBuffer<T>::mark(StoreBuffer *owner, JSTracer *trc)
 {
-    MOZ_ASSERT(stores_.initialized());
-    sinkStores(owner);
-    for (typename StoreSet::Range r = stores_.all(); !r.empty(); r.popFront())
-        r.front().mark(trc);
+    MOZ_ASSERT(owner->isEnabled());
+    ReentrancyGuard g(*owner);
+    if (!storage_)
+        return;
+
+    maybeCompact(owner);
+    for (LifoAlloc::Enum e(*storage_); !e.empty(); e.popFront<T>()) {
+        T *edge = e.get<T>();
+        edge->mark(trc);
+    }
 }

+/*** RelocatableMonoTypeBuffer ***/
+
+template <typename T>
+void
+StoreBuffer::RelocatableMonoTypeBuffer<T>::compactMoved(StoreBuffer *owner)
+{
+    LifoAlloc &storage = *this->storage_;
+    EdgeSet invalidated;
+    if (!invalidated.init())
+        CrashAtUnhandlableOOM("RelocatableMonoTypeBuffer::compactMoved: Failed to init table.");
+
+    /* Collect the set of entries which are currently invalid. */
+    for (LifoAlloc::Enum e(storage); !e.empty(); e.popFront<T>()) {
+        T *edge = e.get<T>();
+        if (edge->isTagged()) {
+            if (!invalidated.put(edge->untagged().edge))
+                CrashAtUnhandlableOOM("RelocatableMonoTypeBuffer::compactMoved: Failed to put removal.");
+        } else {
+            invalidated.remove(edge->untagged().edge);
+        }
+    }
+
+    /* Remove all entries which are in the invalidated set. */
+    LifoAlloc::Enum insert(storage);
+    for (LifoAlloc::Enum e(storage); !e.empty(); e.popFront<T>()) {
+        T *edge = e.get<T>();
+        if (!edge->isTagged() && !invalidated.has(edge->untagged().edge)) {
+            insert.updateFront<T>(*edge);
+            insert.popFront<T>();
+        }
+    }
+    storage.release(insert.mark());
+
+    invalidated.clear();
+
+#ifdef DEBUG
+    for (LifoAlloc::Enum e(storage); !e.empty(); e.popFront<T>())
+        MOZ_ASSERT(!e.get<T>()->isTagged());
+#endif
+}
+
+template <typename T>
+void
+StoreBuffer::RelocatableMonoTypeBuffer<T>::compact(StoreBuffer *owner)
+{
+    compactMoved(owner);
+    StoreBuffer::MonoTypeBuffer<T>::compact(owner);
+}
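
The relocatable variant exists because the mutator can move or delete these edges behind the GC's back, so removals must be cheap: unput (in the header diff below) simply appends a copy of the edge with its low pointer bit set, and compactMoved first builds the set of invalidated addresses from those tagged entries (a later untagged put re-validates the address), then filters out every invalidated entry. A minimal sketch of the low-bit tag itself, matching the tagged()/untagged() helpers in the header:

    #include <cassert>
    #include <cstdint>

    using std::uintptr_t;

    // Low-bit tagging on an aligned pointer: the tagged form of an edge
    // acts as a removal record for the same address.
    static uintptr_t tag(void *p)        { return uintptr_t(p) | 1; }
    static void *untag(uintptr_t bits)   { return (void *)(bits & ~uintptr_t(1)); }
    static bool isTagged(uintptr_t bits) { return bits & 1; }

    int main()
    {
        int slot = 0;                          // stands in for a heap location
        uintptr_t putRecord = uintptr_t(&slot);
        uintptr_t unputRecord = tag(&slot);    // what unput() would append

        // compactMoved-style cancellation: the tagged record names the
        // same address as the earlier put, so the pair can be dropped.
        assert(!isTagged(putRecord));
        assert(isTagged(unputRecord) && untag(unputRecord) == &slot);
        return 0;
    }
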
 /*** GenericBuffer ***/
@@ -252,5 +373,7 @@ template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::ValueEdge>;
 template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::CellPtrEdge>;
 template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::SlotsEdge>;
 template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::WholeCellEdges>;
+template struct StoreBuffer::RelocatableMonoTypeBuffer<StoreBuffer::ValueEdge>;
+template struct StoreBuffer::RelocatableMonoTypeBuffer<StoreBuffer::CellPtrEdge>;
 #endif /* JSGC_GENERATIONAL */
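
The two RelocatableMonoTypeBuffer instantiations restored just above are needed because the template's member functions are defined here in the .cpp rather than in the header; without an explicit instantiation for each edge type, the linker would find no definitions. The general pattern, as a self-contained illustration (names invented):

    // Header side (illustrative): member declared but not defined inline.
    template <typename T>
    struct Buffer {
        void mark();
    };

    // .cpp side (illustrative): out-of-line definition plus an explicit
    // instantiation, which forces the compiler to emit Buffer<int>::mark
    // so other translation units can link against it.
    template <typename T>
    void Buffer<T>::mark() { /* ... */ }

    template struct Buffer<int>;
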


@@ -80,6 +80,17 @@ class StoreBuffer
     /* The size at which a block is about to overflow. */
     static const size_t LowAvailableThreshold = (size_t)(LifoAllocBlockSize * 1.0 / 16.0);

+    /*
+     * If the space available in the store buffer hits the
+     * LowAvailableThreshold and gets compacted, but still doesn't have at
+     * least HighAvailableThreshold space available, then we will trigger a
+     * minor GC. HighAvailableThreshold should be set to provide enough space
+     * for the mutator to run for a while in between compactions. (If
+     * HighAvailableThreshold is too low, we will thrash and spend most of the
+     * time compacting. If it is too high, we will tenure things too early.)
+     */
+    static const size_t HighAvailableThreshold = (size_t)(LifoAllocBlockSize * 1.0 / 4.0);
+
     /*
      * This buffer holds only a single type of edge. Using this buffer is more
      * efficient than the generic buffer when many writes will be to the same
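
To make the two thresholds concrete: assuming for illustration that LifoAllocBlockSize is 8 KiB (its definition sits outside this hunk), they work out to 512 bytes and 2 KiB:

    #include <cstddef>

    // Illustration only: 8 KiB is an assumed value for LifoAllocBlockSize.
    constexpr std::size_t LifoAllocBlockSize     = 8 * 1024;
    constexpr std::size_t LowAvailableThreshold  = LifoAllocBlockSize / 16;
    constexpr std::size_t HighAvailableThreshold = LifoAllocBlockSize / 4;

    // Under 512 free bytes triggers overflow handling; if compaction still
    // leaves less than 2048 bytes free, a minor GC is requested.
    static_assert(LowAvailableThreshold == 512, "1/16 of an 8 KiB block");
    static_assert(HighAvailableThreshold == 2048, "1/4 of an 8 KiB block");
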
@@ -88,75 +99,89 @@ class StoreBuffer
     template<typename T>
     struct MonoTypeBuffer
     {
-        /* The canonical set of stores. */
-        typedef HashSet<T, typename T::Hasher, SystemAllocPolicy> StoreSet;
-        StoreSet stores_;
-
-        /*
-         * A small, fixed-size buffer in front of the canonical set to simplify
-         * insertion via jit code.
-         */
-        const static size_t NumBufferEntries = 4096 / sizeof(T);
-        T buffer_[NumBufferEntries];
-        T *insert_;
-
-        /* Maximum number of entries before we request a minor GC. */
-        const static size_t MaxEntries = 48 * 1024 / sizeof(T);
-
-        explicit MonoTypeBuffer() : insert_(buffer_) {}
-        ~MonoTypeBuffer() { stores_.finish(); }
+        LifoAlloc *storage_;
+        size_t usedAtLastCompact_;
+
+        explicit MonoTypeBuffer() : storage_(nullptr), usedAtLastCompact_(0) {}
+        ~MonoTypeBuffer() { js_delete(storage_); }

         bool init() {
-            if (!stores_.initialized() && !stores_.init())
-                return false;
+            if (!storage_)
+                storage_ = js_new<LifoAlloc>(LifoAllocBlockSize);
             clear();
-            return true;
+            return bool(storage_);
         }

         void clear() {
-            if (stores_.initialized())
-                stores_.clear();
+            if (!storage_)
+                return;
+
+            storage_->used() ? storage_->releaseAll() : storage_->freeAll();
+            usedAtLastCompact_ = 0;
         }

+        bool isAboutToOverflow() const {
+            return !storage_->isEmpty() && storage_->availableInCurrentChunk() < LowAvailableThreshold;
+        }
+
+        bool isLowOnSpace() const {
+            return !storage_->isEmpty() && storage_->availableInCurrentChunk() < HighAvailableThreshold;
+        }
+
+        void handleOverflow(StoreBuffer *owner);
+
+        /* Compaction algorithms. */
+        void compactRemoveDuplicates(StoreBuffer *owner);
+
+        /*
+         * Attempts to reduce the usage of the buffer by removing unnecessary
+         * entries.
+         */
+        virtual void compact(StoreBuffer *owner);
+
+        /* Compacts if any entries have been added since the last compaction. */
+        void maybeCompact(StoreBuffer *owner);
+
         /* Add one item to the buffer. */
         void put(StoreBuffer *owner, const T &t) {
-            MOZ_ASSERT(stores_.initialized());
-            *insert_++ = t;
-            if (MOZ_UNLIKELY(insert_ == buffer_ + NumBufferEntries))
-                sinkStores(owner);
-        }
-
-        /* Move any buffered stores to the canonical store set. */
-        void sinkStores(StoreBuffer *owner) {
-            MOZ_ASSERT(stores_.initialized());
-            for (T *p = buffer_; p < insert_; ++p) {
-                if (!stores_.put(*p))
-                    CrashAtUnhandlableOOM("Failed to allocate for MonoTypeBuffer::sinkStores.");
-            }
-            insert_ = buffer_;
-            if (MOZ_UNLIKELY(stores_.count() > MaxEntries))
-                owner->setAboutToOverflow();
-        }
-
-        /* Remove an item from the store buffer. */
-        void unput(StoreBuffer *owner, const T &v) {
-            sinkStores(owner);
-            stores_.remove(v);
+            MOZ_ASSERT(storage_);
+
+            T *tp = storage_->new_<T>(t);
+            if (!tp)
+                CrashAtUnhandlableOOM("Failed to allocate for MonoTypeBuffer::put.");
+
+            if (isAboutToOverflow())
+                handleOverflow(owner);
         }

         /* Mark the source of all edges in the store buffer. */
         void mark(StoreBuffer *owner, JSTracer *trc);

         size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
-            return stores_.sizeOfExcludingThis(mallocSizeOf);
+            return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
         }

       private:
         MonoTypeBuffer &operator=(const MonoTypeBuffer& other) MOZ_DELETE;
     };

+    /*
+     * Overrides the MonoTypeBuffer to support pointers that may be moved in
+     * memory outside of the GC's control.
+     */
+    template <typename T>
+    struct RelocatableMonoTypeBuffer : public MonoTypeBuffer<T>
+    {
+        /* Override compaction to filter out removed items. */
+        void compactMoved(StoreBuffer *owner);
+        virtual void compact(StoreBuffer *owner) MOZ_OVERRIDE;
+
+        /* Record a removal from the buffer. */
+        void unput(StoreBuffer *owner, const T &v) {
+            MonoTypeBuffer<T>::put(owner, v.tagged());
+        }
+    };
+
     struct GenericBuffer
     {
         LifoAlloc *storage_;
@@ -226,7 +251,6 @@ class StoreBuffer
     {
         Cell **edge;

-        CellPtrEdge() : edge(nullptr) {}
         explicit CellPtrEdge(Cell **v) : edge(v) {}
         bool operator==(const CellPtrEdge &other) const { return edge == other.edge; }
         bool operator!=(const CellPtrEdge &other) const { return edge != other.edge; }
@@ -236,7 +260,7 @@ class StoreBuffer
             return !nursery.isInside(edge);
         }

-        void mark(JSTracer *trc) const;
+        void mark(JSTracer *trc);

         CellPtrEdge tagged() const { return CellPtrEdge((Cell **)(uintptr_t(edge) | 1)); }
         CellPtrEdge untagged() const { return CellPtrEdge((Cell **)(uintptr_t(edge) & ~1)); }
@@ -249,7 +273,6 @@ class StoreBuffer
     {
         JS::Value *edge;

-        ValueEdge() : edge(nullptr) {}
         explicit ValueEdge(JS::Value *v) : edge(v) {}
         bool operator==(const ValueEdge &other) const { return edge == other.edge; }
         bool operator!=(const ValueEdge &other) const { return edge != other.edge; }
@@ -261,7 +284,7 @@ class StoreBuffer
             return !nursery.isInside(edge);
         }

-        void mark(JSTracer *trc) const;
+        void mark(JSTracer *trc);

         ValueEdge tagged() const { return ValueEdge((JS::Value *)(uintptr_t(edge) | 1)); }
         ValueEdge untagged() const { return ValueEdge((JS::Value *)(uintptr_t(edge) & ~1)); }
@@ -280,7 +303,6 @@ class StoreBuffer
         int32_t start_;
         int32_t count_;

-        SlotsEdge() : objectAndKind_(0), start_(0), count_(0) {}
         SlotsEdge(NativeObject *object, int kind, int32_t start, int32_t count)
           : objectAndKind_(uintptr_t(object) | kind), start_(start), count_(count)
         {
@@ -307,7 +329,7 @@ class StoreBuffer
             return !IsInsideNursery(JS::AsCell(reinterpret_cast<JSObject *>(object())));
         }

-        void mark(JSTracer *trc) const;
+        void mark(JSTracer *trc);

         typedef struct {
             typedef SlotsEdge Lookup;
@@ -320,7 +342,6 @@ class StoreBuffer
     {
         Cell *edge;

-        WholeCellEdges() : edge(nullptr) {}
         explicit WholeCellEdges(Cell *cell) : edge(cell) {
             MOZ_ASSERT(edge->isTenured());
         }
@@ -333,7 +354,7 @@ class StoreBuffer
         static bool supportsDeduplication() { return true; }
         void *deduplicationKey() const { return (void *)edge; }

-        void mark(JSTracer *trc) const;
+        void mark(JSTracer *trc);

         typedef PointerEdgeHasher<WholeCellEdges> Hasher;
     };
@@ -404,8 +425,8 @@ class StoreBuffer
     MonoTypeBuffer<CellPtrEdge> bufferCell;
     MonoTypeBuffer<SlotsEdge> bufferSlot;
     MonoTypeBuffer<WholeCellEdges> bufferWholeCell;
-    MonoTypeBuffer<ValueEdge> bufferRelocVal;
-    MonoTypeBuffer<CellPtrEdge> bufferRelocCell;
+    RelocatableMonoTypeBuffer<ValueEdge> bufferRelocVal;
+    RelocatableMonoTypeBuffer<CellPtrEdge> bufferRelocCell;
     GenericBuffer bufferGeneric;

     JSRuntime *runtime_;
@@ -484,13 +505,6 @@ class StoreBuffer
     /* For use by our owned buffers and for testing. */
     void setAboutToOverflow();

-    /* For jit access to the raw buffer. */
-    void oolSinkStoresForWholeCellBuffer() { bufferWholeCell.sinkStores(this); }
-    void *addressOfWholeCellBufferPointer() const { return (void *)&bufferWholeCell.insert_; }
-    void *addressOfWholeCellBufferEnd() const {
-        return (void *)(bufferWholeCell.buffer_ + bufferWholeCell.NumBufferEntries);
-    }
-
     void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::GCSizes *sizes);
 };