Backed out changeset 35c79f50d85f

--HG--
extra : rebase_source : a84a5c5554e087f71771c2e3c180405dfdc750c3
Terrence Cole 2012-06-14 17:06:01 -07:00
parent 2abcb61003
commit 4ced15cb1a
2 changed files with 36 additions and 67 deletions

View File

@@ -62,7 +62,7 @@ class HashTableEntry {
         JS_ASSERT(isLive()); keyHash |= collisionBit;
     }
     void unsetCollision() { keyHash &= ~sCollisionBit; }
-    bool hasCollision() const { return keyHash & sCollisionBit; }
+    bool hasCollision() const { JS_ASSERT(isLive()); return keyHash & sCollisionBit; }
     bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) == hn; }
     HashNumber getKeyHash() const { JS_ASSERT(!hasCollision()); return keyHash; }
 };
@@ -185,7 +185,7 @@ class HashTable : private AllocPolicy
         friend class HashTable;
 
         HashTable &table;
-        bool rekeyed;
+        bool added;
         bool removed;
 
         /* Not copyable. */
@@ -194,7 +194,7 @@ class HashTable : private AllocPolicy
 
       public:
         template<class Map> explicit
-        Enum(Map &map) : Range(map.all()), table(map.impl), rekeyed(false), removed(false) {}
+        Enum(Map &map) : Range(map.all()), table(map.impl), added(false), removed(false) {}
 
         /*
          * Removes the |front()| element from the table, leaving |front()|
@@ -224,7 +224,7 @@ class HashTable : private AllocPolicy
             HashPolicy::setKey(t, const_cast<Key &>(k));
             table.remove(*this->cur);
             table.putNewInfallible(l, t);
-            rekeyed = true;
+            added = true;
             this->validEntry = false;
         }
 
@@ -234,11 +234,28 @@ class HashTable : private AllocPolicy
 
         /* Potentially rehashes the table. */
         ~Enum() {
-            if (rekeyed)
-                table.checkOverRemoved();
+            JS_ASSERT(!added);
             if (removed)
                 table.checkUnderloaded();
         }
+
+        /*
+         * Can be used to end the enumeration before the destructor. Unlike
+         * |~Enum()|, this can report OOM on resize, so must be called if
+         * |rekeyFront()| is used during enumeration.
+         */
+        bool endEnumeration() {
+            if (added) {
+                added = false;
+                if (table.checkOverloaded() == RehashFailed)
+                    return false;
+            }
+            if (removed) {
+                removed = false;
+                table.checkUnderloaded();
+            }
+            return true;
+        }
     };
 
   private:
@@ -264,7 +281,6 @@ class HashTable : private AllocPolicy
         uint32_t grows; /* table expansions */
         uint32_t shrinks; /* table contractions */
         uint32_t compresses; /* table compressions */
-        uint32_t rehashes; /* tombstone decontaminations */
     } stats;
 #  define METER(x) x
 #else
@@ -585,16 +601,6 @@ class HashTable : private AllocPolicy
         return changeTableSize(deltaLog2);
     }
 
-    /* Infallibly rehash the table if we are overloaded with removals. */
-    void checkOverRemoved()
-    {
-        if (overloaded()) {
-            METER(stats.rehashes++);
-            rehashTable();
-            JS_ASSERT(!overloaded());
-        }
-    }
-
     void remove(Entry &e)
     {
         JS_ASSERT(table);
@@ -619,52 +625,6 @@ class HashTable : private AllocPolicy
         }
     }
 
-    /*
-     * This is identical to changeTableSize(currentSize), but without requiring
-     * a second table. We do this by recycling the collision bits to tell us if
-     * the element is already inserted or still waiting to be inserted. Since
-     * already-inserted elements win any conflicts, we get the same table as we
-     * would have gotten through random insertion order.
-     */
-    void rehashTable()
-    {
-        removedCount = 0;
-        for (size_t i = 0; i < capacity(); ++i)
-            table[i].unsetCollision();
-
-        for (size_t i = 0; i < capacity();) {
-            Entry *src = &table[i];
-
-            if (!src->isLive() || src->hasCollision()) {
-                ++i;
-                continue;
-            }
-
-            HashNumber keyHash = src->getKeyHash();
-            HashNumber h1 = hash1(keyHash, hashShift);
-            DoubleHash dh = hash2(keyHash, hashShift);
-            Entry *tgt = &table[h1];
-            while (true) {
-                if (!tgt->hasCollision()) {
-                    Swap(src, tgt);
-                    tgt->setCollision();
-                    break;
-                }
-
-                h1 = applyDoubleHash(h1, dh);
-                tgt = &table[h1];
-            }
-        }
-
-        /*
-         * TODO: this algorithm leaves collision bits on *all* elements, even if
-         * they are on no collision path. We also have the option of setting the
-         * collision bits correctly on a subsequent pass or skipping the rehash
-         * unless we are totally filled with tombstones: benchmark to find out
-         * which approach is best.
-         */
-    }
-
   public:
     void clear()
     {
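
The rehashTable() removed above describes an in-place rehash that recycles the entry collision bit as an "already placed" marker. As a reading aid only, the following self-contained toy (plain C++, linear probing instead of double hashing, not taken from this patch) shows the same two-pass idea: clear the marks and drop tombstones, then settle each live, unplaced entry into the first probe slot that is not an already-placed live entry, letting placed entries win conflicts.

    // Illustrative analogue only, not code from this patch.
    #include <cstdio>
    #include <utility>

    const size_t Capacity = 8;

    enum State { Free, Live, Removed };

    struct Slot {
        int key;
        State state;
        bool placed;    // plays the role of the recycled collision bit
    };

    size_t hashOf(int key) { return size_t(key) % Capacity; }

    void rehashInPlace(Slot *table)
    {
        // Pass 1: clear marks and turn tombstones back into free slots.
        for (size_t i = 0; i < Capacity; ++i) {
            table[i].placed = false;
            if (table[i].state == Removed)
                table[i].state = Free;
        }

        // Pass 2: place each live, not-yet-placed entry. The displaced slot
        // contents land back at position i, so i only advances once slot i
        // holds nothing that still needs placing.
        for (size_t i = 0; i < Capacity; ) {
            Slot *src = &table[i];
            if (src->state != Live || src->placed) {
                ++i;
                continue;
            }
            size_t h = hashOf(src->key);
            while (table[h].state == Live && table[h].placed)
                h = (h + 1) % Capacity;
            std::swap(*src, table[h]);
            table[h].placed = true;
        }
    }

    int main()
    {
        // Keys 0 and 8 collide at slot 0; key 2 was pushed to slot 3 by a
        // since-removed entry whose tombstone sits at slot 2.
        Slot table[Capacity] = {};
        table[0] = { 0, Live, false };
        table[1] = { 8, Live, false };
        table[2] = { 0, Removed, false };
        table[3] = { 2, Live, false };

        rehashInPlace(table);

        for (size_t i = 0; i < Capacity; ++i)
            if (table[i].state == Live)
                std::printf("slot %zu: key %d\n", i, table[i].key);
        // Expected: key 0 at slot 0, key 8 at slot 1, key 2 back at slot 2.
        return 0;
    }

The result is the layout a fresh insertion pass would have produced, which is why the removed comment calls it identical to changeTableSize(currentSize) without a second table.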

View File

@@ -193,11 +193,13 @@ BEGIN_TEST(testHashRekeyManual)
         CHECK(AddLowKeys(&am, &bm, i));
         CHECK(MapsAreEqual(am, bm));
 
-        for (IntMap::Enum e(am); !e.empty(); e.popFront()) {
+        IntMap::Enum e(am);
+        for (; !e.empty(); e.popFront()) {
             uint32_t tmp = LowToHigh::rekey(e.front().key);
             if (tmp != e.front().key)
                 e.rekeyFront(tmp);
         }
+        CHECK(e.endEnumeration());
 
         CHECK(SlowRekey<LowToHigh>(&bm));
         CHECK(MapsAreEqual(am, bm));
@@ -215,11 +217,13 @@ BEGIN_TEST(testHashRekeyManual)
         CHECK(AddLowKeys(&as, &bs, i));
         CHECK(SetsAreEqual(as, bs));
 
-        for (IntSet::Enum e(as); !e.empty(); e.popFront()) {
+        IntSet::Enum e(as);
+        for (; !e.empty(); e.popFront()) {
             uint32_t tmp = LowToHigh::rekey(e.front());
             if (tmp != e.front())
                 e.rekeyFront(tmp);
         }
+        CHECK(e.endEnumeration());
 
         CHECK(SlowRekey<LowToHigh>(&bs));
         CHECK(SetsAreEqual(as, bs));
@@ -243,7 +247,8 @@ BEGIN_TEST(testHashRekeyManualRemoval)
         CHECK(AddLowKeys(&am, &bm, i));
         CHECK(MapsAreEqual(am, bm));
 
-        for (IntMap::Enum e(am); !e.empty(); e.popFront()) {
+        IntMap::Enum e(am);
+        for (; !e.empty(); e.popFront()) {
             if (LowToHighWithRemoval::shouldBeRemoved(e.front().key)) {
                 e.removeFront();
             } else {
@@ -252,6 +257,7 @@ BEGIN_TEST(testHashRekeyManualRemoval)
                     e.rekeyFront(tmp);
             }
         }
+        CHECK(e.endEnumeration());
 
         CHECK(SlowRekey<LowToHighWithRemoval>(&bm));
         CHECK(MapsAreEqual(am, bm));
@@ -269,7 +275,8 @@ BEGIN_TEST(testHashRekeyManualRemoval)
         CHECK(AddLowKeys(&as, &bs, i));
         CHECK(SetsAreEqual(as, bs));
 
-        for (IntSet::Enum e(as); !e.empty(); e.popFront()) {
+        IntSet::Enum e(as);
+        for (; !e.empty(); e.popFront()) {
             if (LowToHighWithRemoval::shouldBeRemoved(e.front())) {
                 e.removeFront();
             } else {
@@ -278,6 +285,7 @@ BEGIN_TEST(testHashRekeyManualRemoval)
                     e.rekeyFront(tmp);
             }
         }
+        CHECK(e.endEnumeration());
 
         CHECK(SlowRekey<LowToHighWithRemoval>(&bs));
         CHECK(SetsAreEqual(as, bs));
@@ -288,3 +296,4 @@ BEGIN_TEST(testHashRekeyManualRemoval)
     return true;
 }
 END_TEST(testHashRekeyManualRemoval)
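
As a usage summary, a minimal caller-side sketch of the contract this backout restores: if rekeyFront() is used during enumeration, endEnumeration() must be called before the Enum is destroyed, because the deferred resize can report OOM and the destructor cannot. The IntMap typedef mirrors the one used by these tests, and the doubling rekey rule is made up for illustration; this is a sketch against the in-tree js::HashMap API shown in the diff, not new code from the patch.

    // Sketch only; follows the test pattern above rather than defining new API.
    typedef js::HashMap<uint32_t, uint32_t, js::DefaultHasher<uint32_t>,
                        js::SystemAllocPolicy> IntMap;

    static bool
    DoubleAllKeys(IntMap &map)
    {
        IntMap::Enum e(map);
        for (; !e.empty(); e.popFront()) {
            uint32_t newKey = e.front().key * 2;    // hypothetical rekeying rule
            if (newKey != e.front().key)
                e.rekeyFront(newKey);
        }
        // Performs the table resize deferred by rekeyFront(); false means OOM.
        return e.endEnumeration();
    }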