Bug 743107 - Add OrderedHashTable. Change implementation of Map and Set to retain insertion order. r=luke.

--HG--
extra : rebase_source : 8b8d67ccd26494f884ba0b5d35e2f44a34296ed6
Jason Orendorff 2012-07-03 16:33:38 -05:00
parent ed099a0de5
commit 823fe08a87
6 changed files with 762 additions and 72 deletions


@@ -15,9 +15,6 @@ namespace js {
class TempAllocPolicy;
/* Integral types for all hash functions. */
typedef uint32_t HashNumber;
/*****************************************************************************/
namespace detail {
@@ -288,7 +285,6 @@ class HashTable : private AllocPolicy
static const uint8_t sMinAlphaFrac = 64; /* (0x100 * .25) taken from jsdhash.h */
static const uint8_t sMaxAlphaFrac = 192; /* (0x100 * .75) taken from jsdhash.h */
static const uint8_t sInvMaxAlpha = 171; /* (ceil(0x100 / .75) >> 1) */
static const HashNumber sGoldenRatio = 0x9E3779B9U; /* taken from jsdhash.h */
static const HashNumber sFreeKey = Entry::sFreeKey;
static const HashNumber sRemovedKey = Entry::sRemovedKey;
static const HashNumber sCollisionBit = Entry::sCollisionBit;
@@ -308,10 +304,7 @@ class HashTable : private AllocPolicy
static HashNumber prepareHash(const Lookup& l)
{
HashNumber keyHash = HashPolicy::hash(l);
/* Improve keyHash distribution. */
keyHash *= sGoldenRatio;
HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
/* Avoid reserved hash codes. */
if (!isLiveHash(keyHash))


@@ -355,6 +355,26 @@ JS_FLOOR_LOG2W(size_t n)
return js_FloorLog2wImpl(n);
}
/*
* JS_ROTATE_LEFT32
*
* There is no rotate operation in the C Language so the construct (a << 4) |
* (a >> 28) is used instead. Most compilers convert this to a rotate
* instruction but some versions of MSVC don't without a little help. To get
* MSVC to generate a rotate instruction, we have to use the _rotl intrinsic
* and use a pragma to make _rotl inline.
*
* MSVC in VS2005 will do an inline rotate instruction on the above construct.
*/
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || \
defined(_M_X64))
#include <stdlib.h>
#pragma intrinsic(_rotl)
#define JS_ROTATE_LEFT32(a, bits) _rotl(a, bits)
#else
#define JS_ROTATE_LEFT32(a, bits) (((a) << (bits)) | ((a) >> (32 - (bits))))
#endif
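As a quick illustration (mine, not part of the patch), either expansion of the macro behaves as a plain 32-bit left rotate:

    // Sketch only: rotating 0x80000001 left by 4 moves the top nibble
    // around to the low bits, giving 0x00000018.
    uint32_t r = JS_ROTATE_LEFT32(0x80000001u, 4);
    JS_ASSERT(r == 0x00000018u);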
JS_END_EXTERN_C
#ifdef __cplusplus
@@ -902,6 +922,50 @@ RoundUpPow2(size_t x)
return size_t(1) << JS_CEILING_LOG2W(x);
}
/* Integral types for all hash functions. */
typedef uint32_t HashNumber;
namespace detail {
/*
* Given a raw hash code, h, return a number that can be used to select a hash
* bucket.
*
* This function aims to produce as uniform an output distribution as possible,
* especially in the most significant (leftmost) bits, even though the input
* distribution may be highly nonrandom, given the constraints that this must
* be deterministic and quick to compute.
*
* Since the leftmost bits of the result are best, the hash bucket index is
* computed by doing ScrambleHashCode(h) / (2^32/N) or the equivalent
* right-shift, not ScrambleHashCode(h) % N or the equivalent bit-mask.
*
* FIXME: OrderedHashTable uses a bit-mask; see bug 775896.
*/
inline HashNumber
ScrambleHashCode(HashNumber h)
{
/*
* Simply returning h would not cause any hash tables to produce wrong
* answers. But it can produce pathologically bad performance: The caller
* right-shifts the result, keeping only the highest bits. The high bits of
* hash codes are very often completely entropy-free. (So are the lowest
* bits.)
*
* So we use Fibonacci hashing, as described in Knuth, The Art of Computer
* Programming, 6.4. This mixes all the bits of the input hash code h.
*
* The value of goldenRatio is taken from the hex
* expansion of the golden ratio, which starts 1.9E3779B9....
* This value is especially good if values with consecutive hash codes
* are stored in a hash table; see Knuth for details.
*/
static const HashNumber goldenRatio = 0x9E3779B9U;
return h * goldenRatio;
}
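A minimal sketch (not from the patch) of how a caller can turn the scrambled code into a bucket index by right-shifting, as the comment above recommends; the name BucketFor and the kLog2Buckets parameter are illustrative only:

    // Assumes a table with (1 << kLog2Buckets) buckets, 0 < kLog2Buckets < 32.
    inline HashNumber
    BucketFor(HashNumber h, unsigned kLog2Buckets)
    {
        // Keep the leftmost (best-mixed) bits of the scrambled code.
        return ScrambleHashCode(h) >> (32 - kLog2Buckets);
    }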
} /* namespace detail */
} /* namespace js */
namespace JS {


@@ -5,8 +5,6 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/FloatingPoint.h"
#include "builtin/MapObject.h"
#include "jscntxt.h"
@@ -14,6 +12,7 @@
#include "jsobj.h"
#include "gc/Marking.h"
#include "js/Utility.h"
#include "vm/GlobalObject.h"
#include "vm/Stack.h"
@@ -21,26 +20,622 @@
using namespace js;
static JSObject *
InitClass(JSContext *cx, Handle<GlobalObject*> global, Class *clasp, JSProtoKey key, Native construct,
JSFunctionSpec *methods)
{
RootedObject proto(cx, global->createBlankPrototype(cx, clasp));
if (!proto)
return NULL;
proto->setPrivate(NULL);
/*** OrderedHashTable ****************************************************************************/
JSAtom *atom = cx->runtime->atomState.classAtoms[key];
RootedFunction ctor(cx, global->createConstructor(cx, construct, atom, 1));
if (!ctor ||
!LinkConstructorAndPrototype(cx, ctor, proto) ||
!DefinePropertiesAndBrand(cx, proto, NULL, methods) ||
!DefineConstructorAndPrototype(cx, global, key, ctor, proto))
/*
* Define two collection templates, js::OrderedHashMap and js::OrderedHashSet.
* They are like js::HashMap and js::HashSet except that:
*
* - Iterating over an Ordered hash table visits the entries in the order in
* which they were inserted. This means that unlike a HashMap, the behavior
* of an OrderedHashMap is deterministic (as long as the HashPolicy methods
* are effect-free and consistent); the hashing is a pure performance
* optimization.
*
* - Range objects over Ordered tables remain valid even when entries are
* added or removed or the table is resized. (However in the case of
* removing entries, note the warning on class Range below.)
*
* - The API is a little different, so it's not a drop-in replacement.
* In particular, the hash policy is a little different.
* Also, the Ordered templates lack the Ptr and AddPtr types.
*
* Hash policies
*
* See the comment about "Hash policy" in HashTable.h for general features that
* hash policy classes must provide. Hash policies for OrderedHashMaps and Sets
* must additionally provide a distinguished "empty" key value and the
* following static member functions:
* bool isEmpty(const Key &);
* void makeEmpty(Key *);
*/
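For concreteness, here is a sketch (not part of the patch) of a hash policy that meets these requirements for uint32_t keys; the name ExampleUint32Policy and the choice of UINT32_MAX as the reserved empty value are illustrative assumptions:

    struct ExampleUint32Policy
    {
        typedef uint32_t Lookup;
        static HashNumber hash(const Lookup &l) { return l; }
        static bool match(const uint32_t &k, const Lookup &l) { return k == l; }
        // The distinguished "empty" key required by the Ordered templates.
        static bool isEmpty(const uint32_t &k) { return k == UINT32_MAX; }
        static void makeEmpty(uint32_t *kp) { *kp = UINT32_MAX; }
    };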
namespace js {
namespace detail {
/*
* detail::OrderedHashTable is the underlying data structure used to implement both
* OrderedHashMap and OrderedHashSet. Programs should use one of those two
* templates rather than OrderedHashTable.
*/
template <class T, class Ops, class AllocPolicy>
class OrderedHashTable
{
public:
typedef typename Ops::KeyType Key;
typedef typename Ops::Lookup Lookup;
struct Data
{
T element;
Data *chain;
Data(const T &e, Data *c) : element(e), chain(c) {}
Data(MoveRef<T> e, Data *c) : element(e), chain(c) {}
};
class Range;
friend class Range;
private:
Data **hashTable; // power-of-2-sized hash table
Data *data; // data vector, an array of Data objects
// data[0:dataLength] are constructed
uint32_t dataLength; // number of constructed elements in data
uint32_t dataCapacity; // size of data, in elements
uint32_t liveCount; // dataLength less empty (removed) entries
uint32_t hashTableMask; // size of hashTable, in elements, minus one
Range *ranges; // list of all live Ranges on this table
AllocPolicy alloc;
public:
OrderedHashTable(AllocPolicy &ap)
: hashTable(NULL), data(NULL), dataLength(0), ranges(NULL), alloc(ap) {}
bool init() {
MOZ_ASSERT(!hashTable, "init must be called at most once");
uint32_t buckets = initialBuckets();
Data **tableAlloc = static_cast<Data **>(alloc.malloc_(buckets * sizeof(Data *)));
if (!tableAlloc)
return false;
for (uint32_t i = 0; i < buckets; i++)
tableAlloc[i] = NULL;
uint32_t capacity = uint32_t(buckets * fillFactor());
Data *dataAlloc = static_cast<Data *>(alloc.malloc_(capacity * sizeof(Data)));
if (!dataAlloc) {
alloc.free_(tableAlloc);
return false;
}
hashTable = tableAlloc;
data = dataAlloc;
dataLength = 0;
dataCapacity = capacity;
liveCount = 0;
hashTableMask = buckets - 1;
return true;
}
~OrderedHashTable() {
alloc.free_(hashTable);
freeData(data, dataLength);
}
/* Return the number of elements in the table. */
uint32_t count() const { return liveCount; }
/* True if any element matches l. */
bool has(const Lookup &l) const {
return lookup(l) != NULL;
}
/* Return a pointer to the element, if any, that matches l, else NULL. */
T *get(const Lookup &l) {
Data *e = lookup(l, prepareHash(l));
return e ? &e->element : NULL;
}
/* Return a pointer to the element, if any, that matches l, else NULL. */
const T *get(const Lookup &l) const {
return const_cast<OrderedHashTable *>(this)->get(l);
}
/*
* If the table already contains an entry that matches |element|,
* replace that entry with |element|. Otherwise add a new entry.
*
* On success, return true, whether there was already a matching element or
* not. On allocation failure, return false. If this returns false, it
* means the element was not added to the table.
*/
bool put(const T &element) {
HashNumber h = prepareHash(Ops::getKey(element));
if (Data *e = lookup(Ops::getKey(element), h)) {
e->element = element;
return true;
}
if (dataLength == dataCapacity) {
// If the hashTable is more than 1/4 deleted data, simply rehash in
// place to free up some space. Otherwise, grow the table.
uint32_t newMask = liveCount >= dataCapacity * 0.75
? (hashTableMask << 1) | 1
: hashTableMask;
if (!rehash(newMask))
return false;
}
h &= hashTableMask;
liveCount++;
Data *e = &data[dataLength++];
new (e) Data(element, hashTable[h]);
hashTable[h] = e;
return true;
}
/*
* If the table contains an element matching l, remove it and set *foundp
* to true. Otherwise set *foundp to false.
*
* Return true on success, false if we tried to shrink the table and hit an
* allocation failure. Even if this returns false, *foundp is set correctly
* and the matching element was removed. Shrinking is an optimization and
* it's OK for it to fail.
*/
bool remove(const Lookup &l, bool *foundp) {
// Note: This could be optimized so that removing the last entry,
// data[dataLength - 1], decrements dataLength. LIFO use cases would
// benefit.
// If a matching entry exists, empty it.
Data *e = lookup(l, prepareHash(l));
if (e == NULL) {
*foundp = false;
return true;
}
*foundp = true;
liveCount--;
Ops::makeEmpty(&e->element);
// Update active Ranges.
uint32_t pos = e - data;
for (Range *r = ranges; r; r = r->next)
r->onRemove(pos);
// If many entries have been removed, try to shrink the table.
if (hashTableMask > initialBuckets() && liveCount < dataLength * minDataFill()) {
if (!rehash(hashTableMask >> 1))
return false;
}
return true;
}
/*
* Ranges are used to iterate over OrderedHashTables.
*
* Suppose 'Map' is some instance of OrderedHashMap, and 'map' is a Map.
* Then you can walk all the key-value pairs like this:
*
* for (Map::Range r = map.all(); !r.empty(); r.popFront()) {
* Map::Entry &pair = r.front();
* ... do something with pair ...
* }
*
* Ranges remain valid for the lifetime of the OrderedHashTable, even if
* entries are added or removed or the table is resized.
*
* Warning: The behavior when the current front() entry is removed from the
* table is subtly different from js::HashTable<>::Enum::removeFront()!
* HashTable::Enum doesn't skip any entries when you removeFront() and then
* popFront(). OrderedHashTable::Range does! (This is useful for using a
* Range to implement JS Map.prototype.iterator.)
*
* The workaround is to call popFront() as soon as possible,
* before there's any possibility of modifying the table:
*
* for (Map::Range r = map.all(); !r.empty(); ) {
* Key key = r.front().key; // this won't modify map
* Value val = r.front().value; // this won't modify map
* r.popFront();
* // ...do things that might modify map...
* }
*/
class Range {
friend class OrderedHashTable;
OrderedHashTable &ht;
/* The index of front() within ht.data. */
uint32_t i;
/*
* The number of nonempty entries in ht.data to the left of front().
* This is used when the table is resized or compacted.
*/
uint32_t count;
/*
* Links in the doubly-linked list of active Ranges on ht.
*
* prevp points to the previous Range's .next field;
* or to ht.ranges if this is the first Range in the list.
* next points to the next Range;
* or NULL if this is the last Range in the list.
*
* Invariant: *prevp == this.
*/
Range **prevp;
Range *next;
/*
* Create a Range over all the entries in ht.
* (This is private on purpose. End users must use ht.all().)
*/
Range(OrderedHashTable &ht) : ht(ht), i(0), count(0), prevp(&ht.ranges), next(ht.ranges) {
*prevp = this;
if (next)
next->prevp = &next;
seek();
}
public:
Range(const Range &other)
: ht(other.ht), i(other.i), count(other.count), prevp(&ht.ranges), next(ht.ranges)
{
*prevp = this;
if (next)
next->prevp = &next;
}
~Range() {
*prevp = next;
if (next)
next->prevp = prevp;
}
private:
// Prohibit copy assignment.
Range &operator=(const Range &other) MOZ_DELETE;
void seek() {
while (i < ht.dataLength && Ops::isEmpty(Ops::getKey(ht.data[i].element)))
i++;
}
/*
* The hash table calls this when an entry is removed.
* j is the index of the removed entry.
*/
void onRemove(uint32_t j) {
if (j < i)
count--;
if (j == i)
seek();
}
/*
* The hash table calls this when the table is resized or compacted.
* Since |count| is the number of nonempty entries to the left of
* front(), discarding the empty entries will not affect count, and it
* will make i and count equal.
*/
void onCompact() {
i = count;
}
public:
bool empty() const { return i >= ht.dataLength; }
/*
* Return the first element in the range. This must not be called if
* this->empty().
*
* Warning: Removing an entry from the table also removes it from any
* live Ranges, and a Range can become empty that way, rendering
* front() invalid. If in doubt, check empty() before calling front().
*/
T &front() {
MOZ_ASSERT(!empty());
return ht.data[i].element;
}
/*
* Remove the first element from this range.
* This must not be called if this->empty().
*
* Warning: Removing an entry from the table also removes it from any
* live Ranges, and a Range can become empty that way, rendering
* popFront() invalid. If in doubt, check empty() before calling
* popFront().
*/
void popFront() {
MOZ_ASSERT(!empty());
MOZ_ASSERT(!Ops::isEmpty(Ops::getKey(ht.data[i].element)));
count++;
i++;
seek();
}
/*
* Change the key of the front entry.
*
* This calls Ops::hash on both the current key and the new key.
* Ops::hash on the current key must return the same hash code as
* when the entry was added to the table.
*/
void rekeyFront(const Key &k) {
Data &entry = ht.data[i];
HashNumber oldHash = prepareHash(Ops::getKey(entry.element)) & ht.hashTableMask;
HashNumber newHash = prepareHash(k) & ht.hashTableMask;
Ops::setKey(entry.element, k);
if (newHash != oldHash) {
// Remove this entry from its old hash chain. (If this crashes
// reading NULL, it would mean we did not find this entry on
// the hash chain where we expected it. That probably means the
// key's hash code changed since it was inserted, breaking the
// hash code invariant.)
Data **ep = &ht.hashTable[oldHash];
while (*ep != &entry)
ep = &(*ep)->chain;
*ep = entry.chain;
// Add it to the new hash chain. We could just insert it at the
// beginning of the chain. Instead, we do a bit of work to
// preserve the invariant that hash chains always go in reverse
// insertion order (descending memory order). No code currently
// depends on this invariant, so it's fine to kill it if
// needed.
ep = &ht.hashTable[newHash];
while (*ep && *ep > &entry)
ep = &(*ep)->chain;
entry.chain = *ep;
*ep = &entry;
}
}
/*
* Change the key of the front entry without calling Ops::hash on the
* entry's current key. The caller must ensure that k has the same hash
* code that the current key had when it was inserted.
*/
void rekeyFrontWithSameHashCode(const Key &k) {
#ifdef DEBUG
// Assert that k falls in the same hash bucket as the old key.
HashNumber h = Ops::hash(k) & ht.hashTableMask;
Data *e = ht.hashTable[h];
while (e && e != &ht.data[i])
e = e->chain;
JS_ASSERT(e == &ht.data[i]);
#endif
Ops::setKey(ht.data[i].element, k);
}
};
Range all() { return Range(*this); }
private:
/*
* The number of buckets in the hash table initially.
* This must be a power of two.
*/
static uint32_t initialBuckets() { return 2; }
/*
* The maximum load factor (mean number of entries per bucket).
* It is an invariant that
* dataCapacity == floor((hashTableMask + 1) * fillFactor()).
*
The fill factor should be between 2 and 4, and it should be chosen so that
the fill factor times sizeof(Data) is close to but <= a power of 2.
This fixed fill factor was chosen to make the size of the data
array, in bytes, close to a power of two when sizeof(T) is 16
(with an 8-byte chain pointer, sizeof(Data) is then 24, and 24 * 8/3 = 64).
*/
static double fillFactor() { return 8.0 / 3.0; }
/*
* The minimum permitted value of (liveCount / dataLength).
* If that ratio drops below this value, we shrink the table.
*/
static double minDataFill() { return 0.25; }
static HashNumber prepareHash(const Lookup &l) {
return ScrambleHashCode(Ops::hash(l));
}
void freeData(Data *data, uint32_t length) {
for (Data *p = data + length; p != data; )
(--p)->~Data();
alloc.free_(data);
}
Data *lookup(const Lookup &l, HashNumber h) {
for (Data *e = hashTable[h & hashTableMask]; e; e = e->chain) {
if (Ops::match(Ops::getKey(e->element), l))
return e;
}
return NULL;
}
return proto;
}
const Data *lookup(const Lookup &l) const {
return const_cast<OrderedHashTable *>(this)->lookup(l, prepareHash(l));
}
/* This is called after rehashing the table. */
void compacted() {
// If we had any empty entries, compacting may have moved live entries
// to the left within |data|. Notify all live Ranges of the change.
for (Range *r = ranges; r; r = r->next)
r->onCompact();
}
/* Compact the entries in |data| and rehash them. */
void rehashInPlace() {
for (uint32_t i = 0; i <= hashTableMask; i++)
hashTable[i] = NULL;
Data *wp = data, *end = data + dataLength;
for (Data *rp = data; rp != end; rp++) {
if (!Ops::isEmpty(Ops::getKey(rp->element))) {
HashNumber h = prepareHash(Ops::getKey(rp->element)) & hashTableMask;
if (rp != wp)
wp->element = Move(rp->element);
wp->chain = hashTable[h];
hashTable[h] = wp;
wp++;
}
}
MOZ_ASSERT(wp == data + liveCount);
while (wp != end)
(--end)->~Data();
dataLength = liveCount;
compacted();
}
/*
* Grow, shrink, or compact both |hashTable| and |data|.
*
* On success, this returns true, dataLength == liveCount, and there are no
* empty elements in data[0:dataLength]. On allocation failure, this
* leaves everything as it was and returns false.
*/
bool rehash(uint32_t newMask) {
// If the size of the table is not changing, rehash in place to avoid
// allocating memory.
if (newMask == hashTableMask) {
rehashInPlace();
return true;
}
Data **newHashTable = static_cast<Data **>(alloc.malloc_((newMask + 1) * sizeof(Data *)));
if (!newHashTable)
return false;
for (uint32_t i = 0; i <= newMask; i++)
newHashTable[i] = NULL;
uint32_t newCapacity = uint32_t((newMask + 1) * fillFactor());
Data *newData = static_cast<Data *>(alloc.malloc_(newCapacity * sizeof(Data)));
if (!newData) {
alloc.free_(newHashTable);
return false;
}
Data *wp = newData;
for (Data *p = data, *end = data + dataLength; p != end; p++) {
if (!Ops::isEmpty(Ops::getKey(p->element))) {
HashNumber h = prepareHash(Ops::getKey(p->element)) & newMask;
new (wp) Data(Move(p->element), newHashTable[h]);
newHashTable[h] = wp;
wp++;
}
}
MOZ_ASSERT(wp == newData + liveCount);
alloc.free_(hashTable);
freeData(data, dataLength);
hashTable = newHashTable;
data = newData;
dataLength = liveCount;
dataCapacity = newCapacity;
hashTableMask = newMask;
compacted();
return true;
}
// Not copyable.
OrderedHashTable &operator=(const OrderedHashTable &) MOZ_DELETE;
OrderedHashTable(const OrderedHashTable &) MOZ_DELETE;
};
} // namespace detail
template <class Key, class Value, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashMap
{
public:
class Entry
{
template <class, class, class> friend class detail::OrderedHashTable;
void operator=(const Entry &rhs) {
const_cast<Key &>(key) = rhs.key;
value = rhs.value;
}
void operator=(MoveRef<Entry> rhs) {
const_cast<Key &>(key) = Move(rhs->key);
value = Move(rhs->value);
}
public:
Entry() : key(), value() {}
Entry(const Key &k, const Value &v) : key(k), value(v) {}
Entry(MoveRef<Entry> rhs) : key(Move(rhs->key)), value(Move(rhs->value)) {}
const Key key;
Value value;
};
private:
struct MapOps : OrderedHashPolicy
{
typedef Key KeyType;
static void makeEmpty(Entry *e) {
OrderedHashPolicy::makeEmpty(const_cast<Key *>(&e->key));
}
static const Key &getKey(const Entry &e) { return e.key; }
static void setKey(Entry &e, const Key &k) { const_cast<Key &>(e.key) = k; }
};
typedef detail::OrderedHashTable<Entry, MapOps, AllocPolicy> Impl;
Impl impl;
public:
typedef typename Impl::Range Range;
OrderedHashMap(AllocPolicy ap = AllocPolicy()) : impl(ap) {}
bool init() { return impl.init(); }
uint32_t count() const { return impl.count(); }
bool has(const Key &key) const { return impl.has(key); }
Range all() { return impl.all(); }
const Entry *get(const Key &key) const { return impl.get(key); }
Entry *get(const Key &key) { return impl.get(key); }
bool put(const Key &key, const Value &value) { return impl.put(Entry(key, value)); }
bool remove(const Key &key, bool *foundp) { return impl.remove(key, foundp); }
};
template <class T, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashSet
{
private:
struct SetOps : OrderedHashPolicy
{
typedef const T KeyType;
static const T &getKey(const T &v) { return v; }
static void setKey(const T &e, const T &v) { const_cast<T &>(e) = v; }
};
typedef detail::OrderedHashTable<T, SetOps, AllocPolicy> Impl;
Impl impl;
public:
typedef typename Impl::Range Range;
OrderedHashSet(AllocPolicy ap = AllocPolicy()) : impl(ap) {}
bool init() { return impl.init(); }
uint32_t count() const { return impl.count(); }
bool has(const T &value) const { return impl.has(value); }
Range all() { return impl.all(); }
bool put(const T &value) { return impl.put(value); }
bool remove(const T &value, bool *foundp) { return impl.remove(value, foundp); }
};
} // namespace js
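A usage sketch (mine, not part of the patch) tying the pieces together; it assumes the ExampleUint32Policy shown earlier and a default-constructible allocation policy such as js::SystemAllocPolicy:

    typedef js::OrderedHashMap<uint32_t, int, ExampleUint32Policy,
                               js::SystemAllocPolicy> ExampleMap;

    static bool
    ExampleInsertionOrder()
    {
        ExampleMap map;
        if (!map.init())
            return false;
        if (!map.put(3, 30) || !map.put(1, 10) || !map.put(2, 20))
            return false;
        // Range visits entries in insertion order: keys 3, 1, 2,
        // regardless of how the keys hash.
        for (ExampleMap::Range r = map.all(); !r.empty(); r.popFront())
            JS_ASSERT(map.has(r.front().key));
        return true;
    }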
/*** HashableValue *******************************************************************************/
@@ -149,6 +744,27 @@ JSFunctionSpec MapObject::methods[] = {
JS_FS_END
};
static JSObject *
InitClass(JSContext *cx, Handle<GlobalObject*> global, Class *clasp, JSProtoKey key, Native construct,
JSFunctionSpec *methods)
{
Rooted<JSObject*> proto(cx, global->createBlankPrototype(cx, clasp));
if (!proto)
return NULL;
proto->setPrivate(NULL);
JSAtom *atom = cx->runtime->atomState.classAtoms[key];
Rooted<JSFunction*> ctor(cx, global->createConstructor(cx, construct, atom, 1));
if (!ctor ||
!LinkConstructorAndPrototype(cx, ctor, proto) ||
!DefinePropertiesAndBrand(cx, proto, NULL, methods) ||
!DefineConstructorAndPrototype(cx, global, key, ctor, proto))
{
return NULL;
}
return proto;
}
JSObject *
MapObject::initClass(JSContext *cx, JSObject *obj)
{
@@ -156,14 +772,41 @@ MapObject::initClass(JSContext *cx, JSObject *obj)
return InitClass(cx, global, &class_, JSProto_Map, construct, methods);
}
template <class Range>
static void
MarkKey(Range &r, const HashableValue &key, JSTracer *trc)
{
HashableValue newKey = key.mark(trc);
if (newKey.get() != key.get()) {
if (newKey.get().isString()) {
// GC moved a string. The key stored in the OrderedHashTable must
// be updated to point to the string's new location, but rekeyFront
// would not work because it would access the string's old
// location.
//
// So as a specially gruesome hack, overwrite the key in place.
// FIXME bug 769504.
r.rekeyFrontWithSameHashCode(newKey);
} else {
// GC moved an object. It must be rekeyed, and rekeying is safe
// because the old key's hash() method is still safe to call (it
// does not access the object's old location).
JS_ASSERT(newKey.get().isObject());
r.rekeyFront(newKey);
}
}
}
void
MapObject::mark(JSTracer *trc, JSObject *obj)
{
MapObject *mapobj = static_cast<MapObject *>(obj);
if (ValueMap *map = mapobj->getData()) {
for (ValueMap::Enum iter(*map); !iter.empty(); iter.popFront()) {
gc::MarkValue(trc, &iter.front().value, "value");
iter.rekeyFront(iter.front().key.mark(trc));
for (ValueMap::Range r = map->all(); !r.empty(); r.popFront()) {
MarkKey(r, r.front().key, trc);
gc::MarkValue(trc, &r.front().value, "value");
}
}
}
@@ -213,7 +856,8 @@ MapObject::construct(JSContext *cx, unsigned argc, Value *vp)
if (!pairobj->getElement(cx, 1, &val))
return false;
if (!map->put(hkey, val)) {
RelocatableValue rval(val);
if (!map->put(hkey, rval)) {
js_ReportOutOfMemory(cx);
return false;
}
@@ -270,8 +914,7 @@ MapObject::get_impl(JSContext *cx, CallArgs args)
ValueMap &map = extract(args);
ARG0_KEY(cx, args, key);
if (ValueMap::Ptr p = map.lookup(key))
if (ValueMap::Entry *p = map.get(key))
args.rval() = p->value;
else
args.rval().setUndefined();
@@ -292,7 +935,7 @@ MapObject::has_impl(JSContext *cx, CallArgs args)
ValueMap &map = extract(args);
ARG0_KEY(cx, args, key);
args.rval().setBoolean(map.lookup(key));
args.rval().setBoolean(map.has(key));
return true;
}
@@ -310,7 +953,8 @@ MapObject::set_impl(JSContext *cx, CallArgs args)
ValueMap &map = extract(args);
ARG0_KEY(cx, args, key);
if (!map.put(key, args.length() > 1 ? args[1] : UndefinedValue())) {
RelocatableValue rval(args.length() > 1 ? args[1] : UndefinedValue());
if (!map.put(key, rval)) {
js_ReportOutOfMemory(cx);
return false;
}
@@ -332,10 +976,9 @@ MapObject::delete_impl(JSContext *cx, CallArgs args)
ValueMap &map = extract(args);
ARG0_KEY(cx, args, key);
ValueMap::Ptr p = map.lookup(key);
bool found = p.found();
if (found)
map.remove(p);
bool found;
if (!map.remove(key, &found))
return false;
args.rval().setBoolean(found);
return true;
}
@@ -395,8 +1038,8 @@ SetObject::mark(JSTracer *trc, JSObject *obj)
{
SetObject *setobj = static_cast<SetObject *>(obj);
if (ValueSet *set = setobj->getData()) {
for (ValueSet::Enum iter(*set); !iter.empty(); iter.popFront())
iter.rekeyFront(iter.front().mark(trc));
for (ValueSet::Range r = set->all(); !r.empty(); r.popFront())
MarkKey(r, r.front(), trc);
}
}
@@ -523,10 +1166,9 @@ SetObject::delete_impl(JSContext *cx, CallArgs args)
ValueSet &set = extract(args);
ARG0_KEY(cx, args, key);
ValueSet::Ptr p = set.lookup(key);
bool found = p.found();
if (found)
set.remove(p);
bool found;
if (!set.remove(key, &found))
return false;
args.rval().setBoolean(found);
return true;
}


@@ -12,7 +12,7 @@
#include "jscntxt.h"
#include "jsobj.h"
#include "js/HashTable.h"
#include "mozilla/FloatingPoint.h"
namespace js {
@@ -32,6 +32,8 @@ class HashableValue {
typedef HashableValue Lookup;
static HashNumber hash(const Lookup &v) { return v.hash(); }
static bool match(const HashableValue &k, const Lookup &l) { return k.equals(l); }
static bool isEmpty(const HashableValue &v) { return v.value.isMagic(JS_HASH_KEY_EMPTY); }
static void makeEmpty(HashableValue *vp) { vp->value = MagicValue(JS_HASH_KEY_EMPTY); }
};
HashableValue() : value(UndefinedValue()) {}
@@ -40,6 +42,7 @@ class HashableValue {
HashNumber hash() const;
bool equals(const HashableValue &other) const;
HashableValue mark(JSTracer *trc) const;
Value get() const { return value.get(); }
class AutoRooter : private AutoGCRooter
{
@@ -61,13 +64,20 @@ class HashableValue {
};
};
typedef HashMap<HashableValue,
RelocatableValue,
HashableValue::Hasher,
RuntimeAllocPolicy> ValueMap;
typedef HashSet<HashableValue,
HashableValue::Hasher,
RuntimeAllocPolicy> ValueSet;
template <class Key, class Value, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashMap;
template <class T, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashSet;
typedef OrderedHashMap<HashableValue,
RelocatableValue,
HashableValue::Hasher,
RuntimeAllocPolicy> ValueMap;
typedef OrderedHashSet<HashableValue,
HashableValue::Hasher,
RuntimeAllocPolicy> ValueSet;
class MapObject : public JSObject {
public:


@@ -353,26 +353,6 @@ bool DecompressString(const unsigned char *inp, size_t inplen,
} /* namespace js */
#endif /* __cplusplus */
/*
* JS_ROTATE_LEFT32
*
* There is no rotate operation in the C Language so the construct (a << 4) |
* (a >> 28) is used instead. Most compilers convert this to a rotate
* instruction but some versions of MSVC don't without a little help. To get
* MSVC to generate a rotate instruction, we have to use the _rotl intrinsic
* and use a pragma to make _rotl inline.
*
* MSVC in VS2005 will do an inline rotate instruction on the above construct.
*/
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || \
defined(_M_X64))
#include <stdlib.h>
#pragma intrinsic(_rotl)
#define JS_ROTATE_LEFT32(a, bits) _rotl(a, bits)
#else
#define JS_ROTATE_LEFT32(a, bits) (((a) << (bits)) | ((a) >> (32 - (bits))))
#endif
/* Crash diagnostics */
#ifdef DEBUG
# define JS_CRASH_DIAGNOSTICS 1


@@ -218,6 +218,7 @@ typedef enum JSWhyMagic
JS_OVERWRITTEN_CALLEE, /* arguments.callee has been overwritten */
JS_FORWARD_TO_CALL_OBJECT, /* args object element stored in call object */
JS_BLOCK_NEEDS_CLONE, /* value of static block object slot */
JS_HASH_KEY_EMPTY, /* see class js::HashableValue */
JS_GENERIC_MAGIC /* for local use */
} JSWhyMagic;