Bug 815467 - Store pldhash's recursionLevel in a better place, so that debug and non-debug pldhashes take up the same amount of memory. r=dbaron.

--HG--
extra : rebase_source : c3c928f6706be4ef55620f05320050991a657a6c
This commit is contained in:
Nicholas Nethercote 2013-10-30 15:10:06 -07:00
parent 9a8761e80a
commit 9726ac23f3
2 changed files with 25 additions and 28 deletions

View File

@@ -29,16 +29,10 @@
/*
* The following DEBUG-only code is used to assert that calls to one of
* table->ops or to an enumerator do not cause re-entry into a call that
* can mutate the table. The recursion level is stored in additional
* space allocated at the end of the entry store to avoid changing
* PLDHashTable, which could cause issues when mixing DEBUG and
* non-DEBUG components.
* can mutate the table.
*/
#ifdef DEBUG
#define RECURSION_LEVEL(table_) (*(uint32_t*)(table_->entryStore + \
PL_DHASH_TABLE_SIZE(table_) * \
table_->entrySize))
/*
* Most callers that assert about the recursion level don't care about
* this magical value because they are asserting that mutation is
@@ -47,29 +41,27 @@
*
* Only PL_DHashTableFinish needs to allow this special value.
*/
#define IMMUTABLE_RECURSION_LEVEL ((uint32_t)-1)
#define IMMUTABLE_RECURSION_LEVEL ((uint16_t)-1)
#define RECURSION_LEVEL_SAFE_TO_FINISH(table_) \
(RECURSION_LEVEL(table_) == 0 || \
RECURSION_LEVEL(table_) == IMMUTABLE_RECURSION_LEVEL)
(table_->recursionLevel == 0 || \
table_->recursionLevel == IMMUTABLE_RECURSION_LEVEL)
#define ENTRY_STORE_EXTRA sizeof(uint32_t)
#define INCREMENT_RECURSION_LEVEL(table_) \
PR_BEGIN_MACRO \
if (RECURSION_LEVEL(table_) != IMMUTABLE_RECURSION_LEVEL) \
++RECURSION_LEVEL(table_); \
if (table_->recursionLevel != IMMUTABLE_RECURSION_LEVEL) \
++table_->recursionLevel; \
PR_END_MACRO
#define DECREMENT_RECURSION_LEVEL(table_) \
PR_BEGIN_MACRO \
if (RECURSION_LEVEL(table_) != IMMUTABLE_RECURSION_LEVEL) { \
MOZ_ASSERT(RECURSION_LEVEL(table_) > 0); \
--RECURSION_LEVEL(table_); \
if (table_->recursionLevel != IMMUTABLE_RECURSION_LEVEL) { \
MOZ_ASSERT(table_->recursionLevel > 0); \
--table_->recursionLevel; \
} \
PR_END_MACRO
#else
#define ENTRY_STORE_EXTRA 0
#define INCREMENT_RECURSION_LEVEL(table_) PR_BEGIN_MACRO PR_END_MACRO
#define DECREMENT_RECURSION_LEVEL(table_) PR_BEGIN_MACRO PR_END_MACRO
@@ -234,15 +226,14 @@ PL_DHashTableInit(PLDHashTable *table, const PLDHashTableOps *ops, void *data,
if (!SizeOfEntryStore(capacity, entrySize, &nbytes))
return false; // overflowed
table->entryStore = (char *) ops->allocTable(table,
nbytes + ENTRY_STORE_EXTRA);
table->entryStore = (char *) ops->allocTable(table, nbytes);
if (!table->entryStore)
return false;
memset(table->entryStore, 0, nbytes);
METER(memset(&table->stats, 0, sizeof table->stats));
#ifdef DEBUG
RECURSION_LEVEL(table) = 0;
table->recursionLevel = 0;
#endif
return true;
@@ -488,14 +479,13 @@ ChangeTable(PLDHashTable *table, int deltaLog2)
if (!SizeOfEntryStore(newCapacity, entrySize, &nbytes))
return false; // overflowed
newEntryStore = (char *) table->ops->allocTable(table,
nbytes + ENTRY_STORE_EXTRA);
newEntryStore = (char *) table->ops->allocTable(table, nbytes);
if (!newEntryStore)
return false;
/* We can't fail from here on, so update table parameters. */
#ifdef DEBUG
recursionLevel = RECURSION_LEVEL(table);
recursionLevel = table->recursionLevel;
#endif
table->hashShift = PL_DHASH_BITS - newLog2;
table->removedCount = 0;
@@ -507,7 +497,7 @@ ChangeTable(PLDHashTable *table, int deltaLog2)
table->entryStore = newEntryStore;
moveEntry = table->ops->moveEntry;
#ifdef DEBUG
RECURSION_LEVEL(table) = recursionLevel;
table->recursionLevel = recursionLevel;
#endif
/* Copy only live entries, leaving removed ones behind. */
@@ -536,7 +526,7 @@ PL_DHashTableOperate(PLDHashTable *table, const void *key, PLDHashOperator op)
uint32_t size;
int deltaLog2;
MOZ_ASSERT(op == PL_DHASH_LOOKUP || RECURSION_LEVEL(table) == 0);
MOZ_ASSERT(op == PL_DHASH_LOOKUP || table->recursionLevel == 0);
INCREMENT_RECURSION_LEVEL(table);
keyHash = table->ops->hashKey(table, key);
@@ -637,7 +627,7 @@ PL_DHashTableRawRemove(PLDHashTable *table, PLDHashEntryHdr *entry)
{
PLDHashNumber keyHash; /* load first in case clearEntry goofs it */
MOZ_ASSERT(RECURSION_LEVEL(table) != IMMUTABLE_RECURSION_LEVEL);
MOZ_ASSERT(table->recursionLevel != IMMUTABLE_RECURSION_LEVEL);
NS_ASSERTION(PL_DHASH_ENTRY_IS_LIVE(entry),
"PL_DHASH_ENTRY_IS_LIVE(entry)");
@@ -685,7 +675,7 @@ PL_DHashTableEnumerate(PLDHashTable *table, PLDHashEnumerator etor, void *arg)
entryAddr += entrySize;
}
MOZ_ASSERT(!didRemove || RECURSION_LEVEL(table) == 1);
MOZ_ASSERT(!didRemove || table->recursionLevel == 1);
/*
* Shrink or compress if a quarter or more of all entries are removed, or
@@ -764,7 +754,7 @@ PL_DHashTableSizeOfIncludingThis(const PLDHashTable *table,
void
PL_DHashMarkTableImmutable(PLDHashTable *table)
{
RECURSION_LEVEL(table) = IMMUTABLE_RECURSION_LEVEL;
table->recursionLevel = IMMUTABLE_RECURSION_LEVEL;
}
#endif

View File

@@ -186,6 +186,13 @@ struct PLDHashTable {
const PLDHashTableOps *ops; /* virtual operations, see below */
void *data; /* ops- and instance-specific data */
int16_t hashShift; /* multiplicative hash shift */
/*
* |recursionLevel| is only used in debug builds, but is present in opt
* builds to avoid binary compatibility problems when mixing DEBUG and
* non-DEBUG components. (Actually, even if it were removed,
* sizeof(PLDHashTable) wouldn't change, due to struct padding.)
*/
uint16_t recursionLevel; /* used to detect unsafe re-entry */
uint32_t entrySize; /* number of bytes in an entry */
uint32_t entryCount; /* number of entries in table */
uint32_t removedCount; /* removed entry sentinels in table */