Back out changesets 2fcef6b54be7, 2be07829fefc, 66dfe37b8532, df3fcd2be8fd, 0a436bce77a6 (bug 1050035) for causing intermittent crashes and assertion failures.

--HG--
extra : rebase_source : eb30be83c3143c6c203585a80a18f180025efaba
This commit is contained in:
Nicholas Nethercote 2015-02-10 14:39:49 -08:00
parent fd150e850d
commit 44179aabe3
47 changed files with 563 additions and 435 deletions

View File

@ -224,7 +224,7 @@ NS_GetContentList(nsINode* aRootNode,
// First we look in our hashtable. Then we create a content list if needed
if (gContentListHashTable.IsInitialized()) {
entry = static_cast<ContentListHashEntry *>
(PL_DHashTableAdd(&gContentListHashTable, &hashKey, fallible));
(PL_DHashTableAdd(&gContentListHashTable, &hashKey));
if (entry)
list = entry->mContentList;
}
@ -332,7 +332,8 @@ GetFuncStringContentList(nsINode* aRootNode,
nsFuncStringCacheKey hashKey(aRootNode, aFunc, aString);
entry = static_cast<FuncStringContentListHashEntry *>
(PL_DHashTableAdd(&gFuncStringContentListHashTable, &hashKey, fallible));
(PL_DHashTableAdd(&gFuncStringContentListHashTable,
&hashKey));
if (entry) {
list = entry->mContentList;
#ifdef DEBUG

View File

@ -3964,7 +3964,7 @@ nsContentUtils::GetListenerManagerForNode(nsINode *aNode)
EventListenerManagerMapEntry *entry =
static_cast<EventListenerManagerMapEntry *>
(PL_DHashTableAdd(&sEventListenerManagersHash, aNode, fallible));
(PL_DHashTableAdd(&sEventListenerManagersHash, aNode));
if (!entry) {
return nullptr;

View File

@ -1715,8 +1715,8 @@ nsDocument::~nsDocument()
// Kill the subdocument map, doing this will release its strong
// references, if any.
if (mSubDocuments) {
PL_DHashTableFinish(mSubDocuments);
delete mSubDocuments;
PL_DHashTableDestroy(mSubDocuments);
mSubDocuments = nullptr;
}
@ -2126,8 +2126,7 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(nsDocument)
}
if (tmp->mSubDocuments) {
PL_DHashTableFinish(tmp->mSubDocuments);
delete tmp->mSubDocuments;
PL_DHashTableDestroy(tmp->mSubDocuments);
tmp->mSubDocuments = nullptr;
}
@ -2332,8 +2331,8 @@ nsDocument::ResetToURI(nsIURI *aURI, nsILoadGroup *aLoadGroup,
// Delete references to sub-documents and kill the subdocument map,
// if any. It holds strong references
if (mSubDocuments) {
PL_DHashTableFinish(mSubDocuments);
delete mSubDocuments;
PL_DHashTableDestroy(mSubDocuments);
mSubDocuments = nullptr;
}
@ -4002,13 +4001,16 @@ nsDocument::SetSubDocumentFor(Element* aElement, nsIDocument* aSubDoc)
SubDocInitEntry
};
mSubDocuments = new PLDHashTable();
PL_DHashTableInit(mSubDocuments, &hash_table_ops, sizeof(SubDocMapEntry));
mSubDocuments = PL_NewDHashTable(&hash_table_ops, sizeof(SubDocMapEntry));
if (!mSubDocuments) {
return NS_ERROR_OUT_OF_MEMORY;
}
}
// Add a mapping to the hash table
SubDocMapEntry *entry = static_cast<SubDocMapEntry*>
(PL_DHashTableAdd(mSubDocuments, aElement, fallible));
SubDocMapEntry *entry =
static_cast<SubDocMapEntry*>
(PL_DHashTableAdd(mSubDocuments, aElement));
if (!entry) {
return NS_ERROR_OUT_OF_MEMORY;

View File

@ -230,7 +230,7 @@ nsPropertyTable::SetPropertyInternal(nsPropertyOwner aObject,
// value is destroyed
nsresult result = NS_OK;
PropertyListMapEntry *entry = static_cast<PropertyListMapEntry*>
(PL_DHashTableAdd(&propertyList->mObjectValueMap, aObject, fallible));
(PL_DHashTableAdd(&propertyList->mObjectValueMap, aObject));
if (!entry)
return NS_ERROR_OUT_OF_MEMORY;
// A nullptr entry->key is the sign that the entry has just been allocated

View File

@ -122,16 +122,19 @@ NS_IMPL_ISUPPORTS(
nsIMemoryReporter)
nsScriptNameSpaceManager::nsScriptNameSpaceManager()
: mIsInitialized(false)
{
MOZ_COUNT_CTOR(nsScriptNameSpaceManager);
}
nsScriptNameSpaceManager::~nsScriptNameSpaceManager()
{
UnregisterWeakMemoryReporter(this);
// Destroy the hash
PL_DHashTableFinish(&mGlobalNames);
PL_DHashTableFinish(&mNavigatorNames);
if (mIsInitialized) {
UnregisterWeakMemoryReporter(this);
// Destroy the hash
PL_DHashTableFinish(&mGlobalNames);
PL_DHashTableFinish(&mNavigatorNames);
}
MOZ_COUNT_DTOR(nsScriptNameSpaceManager);
}
@ -139,8 +142,9 @@ nsGlobalNameStruct *
nsScriptNameSpaceManager::AddToHash(PLDHashTable *aTable, const nsAString *aKey,
const char16_t **aClassName)
{
GlobalNameMapEntry *entry = static_cast<GlobalNameMapEntry *>
(PL_DHashTableAdd(aTable, aKey, fallible));
GlobalNameMapEntry *entry =
static_cast<GlobalNameMapEntry *>
(PL_DHashTableAdd(aTable, aKey));
if (!entry) {
return nullptr;
@ -322,13 +326,21 @@ nsScriptNameSpaceManager::Init()
GlobalNameHashInitEntry
};
PL_DHashTableInit(&mGlobalNames, &hash_table_ops,
sizeof(GlobalNameMapEntry),
GLOBALNAME_HASHTABLE_INITIAL_LENGTH);
mIsInitialized = PL_DHashTableInit(&mGlobalNames, &hash_table_ops,
sizeof(GlobalNameMapEntry),
fallible,
GLOBALNAME_HASHTABLE_INITIAL_LENGTH);
NS_ENSURE_TRUE(mIsInitialized, NS_ERROR_OUT_OF_MEMORY);
PL_DHashTableInit(&mNavigatorNames, &hash_table_ops,
sizeof(GlobalNameMapEntry),
GLOBALNAME_HASHTABLE_INITIAL_LENGTH);
mIsInitialized = PL_DHashTableInit(&mNavigatorNames, &hash_table_ops,
sizeof(GlobalNameMapEntry),
fallible,
GLOBALNAME_HASHTABLE_INITIAL_LENGTH);
if (!mIsInitialized) {
PL_DHashTableFinish(&mGlobalNames);
return NS_ERROR_OUT_OF_MEMORY;
}
RegisterWeakMemoryReporter(this);

View File

@ -240,6 +240,8 @@ private:
PLDHashTable mGlobalNames;
PLDHashTable mNavigatorNames;
bool mIsInitialized;
};
#endif /* nsScriptNameSpaceManager_h__ */

View File

@ -1872,7 +1872,7 @@ nsNPObjWrapper::GetNewOrUsed(NPP npp, JSContext *cx, NPObject *npobj)
}
NPObjWrapperHashEntry *entry = static_cast<NPObjWrapperHashEntry *>
(PL_DHashTableAdd(&sNPObjWrappers, npobj, fallible));
(PL_DHashTableAdd(&sNPObjWrappers, npobj));
if (!entry) {
// Out of memory
@ -2035,7 +2035,7 @@ LookupNPP(NPObject *npobj)
}
NPObjWrapperHashEntry *entry = static_cast<NPObjWrapperHashEntry *>
(PL_DHashTableAdd(&sNPObjWrappers, npobj, fallible));
(PL_DHashTableAdd(&sNPObjWrappers, npobj));
if (!entry) {
return nullptr;

View File

@ -216,8 +216,7 @@ XULDocument::~XULDocument()
// Destroy our broadcaster map.
if (mBroadcasterMap) {
PL_DHashTableFinish(mBroadcasterMap);
delete mBroadcasterMap;
PL_DHashTableDestroy(mBroadcasterMap);
}
delete mTemplateBuilderTable;
@ -769,8 +768,12 @@ XULDocument::AddBroadcastListenerFor(Element& aBroadcaster, Element& aListener,
};
if (! mBroadcasterMap) {
mBroadcasterMap = new PLDHashTable();
PL_DHashTableInit(mBroadcasterMap, &gOps, sizeof(BroadcasterMapEntry));
mBroadcasterMap = PL_NewDHashTable(&gOps, sizeof(BroadcasterMapEntry));
if (! mBroadcasterMap) {
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
return;
}
}
BroadcasterMapEntry* entry =
@ -778,8 +781,9 @@ XULDocument::AddBroadcastListenerFor(Element& aBroadcaster, Element& aListener,
(PL_DHashTableSearch(mBroadcasterMap, &aBroadcaster));
if (!entry) {
entry = static_cast<BroadcasterMapEntry*>
(PL_DHashTableAdd(mBroadcasterMap, &aBroadcaster, fallible));
entry =
static_cast<BroadcasterMapEntry*>
(PL_DHashTableAdd(mBroadcasterMap, &aBroadcaster));
if (! entry) {
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);

View File

@ -28,8 +28,7 @@ public:
if (!mMap.IsInitialized())
return NS_ERROR_NOT_INITIALIZED;
PLDHashEntryHdr* hdr =
PL_DHashTableAdd(&mMap, aElement, mozilla::fallible);
PLDHashEntryHdr* hdr = PL_DHashTableAdd(&mMap, aElement);
if (!hdr)
return NS_ERROR_OUT_OF_MEMORY;

View File

@ -37,8 +37,7 @@ public:
NS_ASSERTION(!PL_DHashTableSearch(&mTable, aContent),
"aContent already in map");
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(&mTable, aContent, fallible));
Entry* entry = static_cast<Entry*>(PL_DHashTableAdd(&mTable, aContent));
if (entry) {
entry->mContent = aContent;

View File

@ -230,8 +230,7 @@ nsCommandParams::GetOrMakeEntry(const char* aName, uint8_t entryType)
return foundEntry;
}
foundEntry = static_cast<HashEntry*>
(PL_DHashTableAdd(&mValuesHash, (void *)aName, fallible));
foundEntry = (HashEntry *)PL_DHashTableAdd(&mValuesHash, (void *)aName);
if (!foundEntry) {
return nullptr;
}

View File

@ -688,8 +688,9 @@ public:
}
uint32_t filesize = strtoul(beginning, nullptr, 10);
FNCMapEntry* mapEntry = static_cast<FNCMapEntry*>
(PL_DHashTableAdd(&mMap, filename.get(), fallible));
FNCMapEntry* mapEntry =
static_cast<FNCMapEntry*>
(PL_DHashTableAdd(&mMap, filename.get()));
if (mapEntry) {
mapEntry->mFilename.Assign(filename);
mapEntry->mTimestamp = timestamp;
@ -736,8 +737,9 @@ public:
if (!mMap.IsInitialized()) {
return;
}
FNCMapEntry* entry = static_cast<FNCMapEntry*>
(PL_DHashTableAdd(&mMap, aFileName.get(), fallible));
FNCMapEntry* entry =
static_cast<FNCMapEntry*>
(PL_DHashTableAdd(&mMap, aFileName.get()));
if (entry) {
entry->mFilename.Assign(aFileName);
entry->mTimestamp = aTimestamp;

View File

@ -1536,8 +1536,9 @@ void XPCJSRuntime::DestroyJSContextStack()
void XPCJSRuntime::SystemIsBeingShutDown()
{
mDetachedWrappedNativeProtoMap->
Enumerate(DetachedWrappedNativeProtoShutdownMarker, nullptr);
if (mDetachedWrappedNativeProtoMap)
mDetachedWrappedNativeProtoMap->
Enumerate(DetachedWrappedNativeProtoShutdownMarker, nullptr);
}
#define JS_OPTIONS_DOT_STR "javascript.options."
@ -1619,33 +1620,51 @@ XPCJSRuntime::~XPCJSRuntime()
JS_SetRuntimePrivate(Runtime(), nullptr);
// clean up and destroy maps...
mWrappedJSMap->ShutdownMarker();
delete mWrappedJSMap;
mWrappedJSMap = nullptr;
if (mWrappedJSMap) {
mWrappedJSMap->ShutdownMarker();
delete mWrappedJSMap;
mWrappedJSMap = nullptr;
}
delete mWrappedJSClassMap;
mWrappedJSClassMap = nullptr;
if (mWrappedJSClassMap) {
delete mWrappedJSClassMap;
mWrappedJSClassMap = nullptr;
}
delete mIID2NativeInterfaceMap;
mIID2NativeInterfaceMap = nullptr;
if (mIID2NativeInterfaceMap) {
delete mIID2NativeInterfaceMap;
mIID2NativeInterfaceMap = nullptr;
}
delete mClassInfo2NativeSetMap;
mClassInfo2NativeSetMap = nullptr;
if (mClassInfo2NativeSetMap) {
delete mClassInfo2NativeSetMap;
mClassInfo2NativeSetMap = nullptr;
}
delete mNativeSetMap;
mNativeSetMap = nullptr;
if (mNativeSetMap) {
delete mNativeSetMap;
mNativeSetMap = nullptr;
}
delete mThisTranslatorMap;
mThisTranslatorMap = nullptr;
if (mThisTranslatorMap) {
delete mThisTranslatorMap;
mThisTranslatorMap = nullptr;
}
delete mNativeScriptableSharedMap;
mNativeScriptableSharedMap = nullptr;
if (mNativeScriptableSharedMap) {
delete mNativeScriptableSharedMap;
mNativeScriptableSharedMap = nullptr;
}
delete mDyingWrappedNativeProtoMap;
mDyingWrappedNativeProtoMap = nullptr;
if (mDyingWrappedNativeProtoMap) {
delete mDyingWrappedNativeProtoMap;
mDyingWrappedNativeProtoMap = nullptr;
}
delete mDetachedWrappedNativeProtoMap;
mDetachedWrappedNativeProtoMap = nullptr;
if (mDetachedWrappedNativeProtoMap) {
delete mDetachedWrappedNativeProtoMap;
mDetachedWrappedNativeProtoMap = nullptr;
}
#ifdef MOZ_ENABLE_PROFILER_SPS
// Tell the profiler that the runtime is gone
@ -3495,38 +3514,42 @@ XPCJSRuntime::DebugDump(int16_t depth)
}
XPC_LOG_ALWAYS(("mWrappedJSClassMap @ %x with %d wrapperclasses(s)", \
mWrappedJSClassMap, mWrappedJSClassMap->Count()));
mWrappedJSClassMap, mWrappedJSClassMap ? \
mWrappedJSClassMap->Count() : 0));
// iterate wrappersclasses...
if (depth && mWrappedJSClassMap->Count()) {
if (depth && mWrappedJSClassMap && mWrappedJSClassMap->Count()) {
XPC_LOG_INDENT();
mWrappedJSClassMap->Enumerate(WrappedJSClassMapDumpEnumerator, &depth);
XPC_LOG_OUTDENT();
}
XPC_LOG_ALWAYS(("mWrappedJSMap @ %x with %d wrappers(s)", \
mWrappedJSMap, mWrappedJSMap->Count()));
mWrappedJSMap, mWrappedJSMap ? \
mWrappedJSMap->Count() : 0));
// iterate wrappers...
if (depth && mWrappedJSMap->Count()) {
if (depth && mWrappedJSMap && mWrappedJSMap->Count()) {
XPC_LOG_INDENT();
mWrappedJSMap->Dump(depth);
XPC_LOG_OUTDENT();
}
XPC_LOG_ALWAYS(("mIID2NativeInterfaceMap @ %x with %d interface(s)", \
mIID2NativeInterfaceMap,
mIID2NativeInterfaceMap->Count()));
mIID2NativeInterfaceMap, mIID2NativeInterfaceMap ? \
mIID2NativeInterfaceMap->Count() : 0));
XPC_LOG_ALWAYS(("mClassInfo2NativeSetMap @ %x with %d sets(s)", \
mClassInfo2NativeSetMap, \
mClassInfo2NativeSetMap->Count()));
mClassInfo2NativeSetMap, mClassInfo2NativeSetMap ? \
mClassInfo2NativeSetMap->Count() : 0));
XPC_LOG_ALWAYS(("mThisTranslatorMap @ %x with %d translator(s)", \
mThisTranslatorMap, mThisTranslatorMap->Count()));
mThisTranslatorMap, mThisTranslatorMap ? \
mThisTranslatorMap->Count() : 0));
XPC_LOG_ALWAYS(("mNativeSetMap @ %x with %d sets(s)", \
mNativeSetMap, mNativeSetMap->Count()));
mNativeSetMap, mNativeSetMap ? \
mNativeSetMap->Count() : 0));
// iterate sets...
if (depth && mNativeSetMap->Count()) {
if (depth && mNativeSetMap && mNativeSetMap->Count()) {
XPC_LOG_INDENT();
mNativeSetMap->Enumerate(NativeSetDumpEnumerator, &depth);
XPC_LOG_OUTDENT();

View File

@ -169,19 +169,26 @@ JSObject2WrappedJSMap::SizeOfWrappedJS(mozilla::MallocSizeOf mallocSizeOf) const
Native2WrappedNativeMap*
Native2WrappedNativeMap::newMap(int length)
{
return new Native2WrappedNativeMap(length);
Native2WrappedNativeMap* map = new Native2WrappedNativeMap(length);
if (map && map->mTable)
return map;
// Allocation of the map or the creation of its hash table has
// failed. This will cause a nullptr deref later when we attempt
// to use the map, so we abort immediately to provide a more
// useful crash stack.
NS_RUNTIMEABORT("Ran out of memory.");
return nullptr;
}
Native2WrappedNativeMap::Native2WrappedNativeMap(int length)
{
mTable = new PLDHashTable();
PL_DHashTableInit(mTable, PL_DHashGetStubOps(), sizeof(Entry), length);
mTable = PL_NewDHashTable(PL_DHashGetStubOps(), sizeof(Entry), length);
}
Native2WrappedNativeMap::~Native2WrappedNativeMap()
{
PL_DHashTableFinish(mTable);
delete mTable;
if (mTable)
PL_DHashTableDestroy(mTable);
}
size_t
@ -189,7 +196,7 @@ Native2WrappedNativeMap::SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf)
{
size_t n = 0;
n += mallocSizeOf(this);
n += PL_DHashTableSizeOfIncludingThis(mTable, SizeOfEntryExcludingThis, mallocSizeOf);
n += mTable ? PL_DHashTableSizeOfIncludingThis(mTable, SizeOfEntryExcludingThis, mallocSizeOf) : 0;
return n;
}
@ -215,19 +222,22 @@ const struct PLDHashTableOps IID2WrappedJSClassMap::Entry::sOps =
IID2WrappedJSClassMap*
IID2WrappedJSClassMap::newMap(int length)
{
return new IID2WrappedJSClassMap(length);
IID2WrappedJSClassMap* map = new IID2WrappedJSClassMap(length);
if (map && map->mTable)
return map;
delete map;
return nullptr;
}
IID2WrappedJSClassMap::IID2WrappedJSClassMap(int length)
{
mTable = new PLDHashTable();
PL_DHashTableInit(mTable, &Entry::sOps, sizeof(Entry), length);
mTable = PL_NewDHashTable(&Entry::sOps, sizeof(Entry), length);
}
IID2WrappedJSClassMap::~IID2WrappedJSClassMap()
{
PL_DHashTableFinish(mTable);
delete mTable;
if (mTable)
PL_DHashTableDestroy(mTable);
}
@ -246,19 +256,22 @@ const struct PLDHashTableOps IID2NativeInterfaceMap::Entry::sOps =
IID2NativeInterfaceMap*
IID2NativeInterfaceMap::newMap(int length)
{
return new IID2NativeInterfaceMap(length);
IID2NativeInterfaceMap* map = new IID2NativeInterfaceMap(length);
if (map && map->mTable)
return map;
delete map;
return nullptr;
}
IID2NativeInterfaceMap::IID2NativeInterfaceMap(int length)
{
mTable = new PLDHashTable();
PL_DHashTableInit(mTable, &Entry::sOps, sizeof(Entry), length);
mTable = PL_NewDHashTable(&Entry::sOps, sizeof(Entry), length);
}
IID2NativeInterfaceMap::~IID2NativeInterfaceMap()
{
PL_DHashTableFinish(mTable);
delete mTable;
if (mTable)
PL_DHashTableDestroy(mTable);
}
size_t
@ -266,7 +279,7 @@ IID2NativeInterfaceMap::SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf)
{
size_t n = 0;
n += mallocSizeOf(this);
n += PL_DHashTableSizeOfIncludingThis(mTable, SizeOfEntryExcludingThis, mallocSizeOf);
n += mTable ? PL_DHashTableSizeOfIncludingThis(mTable, SizeOfEntryExcludingThis, mallocSizeOf) : 0;
return n;
}
@ -285,19 +298,22 @@ IID2NativeInterfaceMap::SizeOfEntryExcludingThis(PLDHashEntryHdr *hdr,
ClassInfo2NativeSetMap*
ClassInfo2NativeSetMap::newMap(int length)
{
return new ClassInfo2NativeSetMap(length);
ClassInfo2NativeSetMap* map = new ClassInfo2NativeSetMap(length);
if (map && map->mTable)
return map;
delete map;
return nullptr;
}
ClassInfo2NativeSetMap::ClassInfo2NativeSetMap(int length)
{
mTable = new PLDHashTable();
PL_DHashTableInit(mTable, PL_DHashGetStubOps(), sizeof(Entry), length);
mTable = PL_NewDHashTable(PL_DHashGetStubOps(), sizeof(Entry), length);
}
ClassInfo2NativeSetMap::~ClassInfo2NativeSetMap()
{
PL_DHashTableFinish(mTable);
delete mTable;
if (mTable)
PL_DHashTableDestroy(mTable);
}
size_t
@ -306,7 +322,7 @@ ClassInfo2NativeSetMap::ShallowSizeOfIncludingThis(mozilla::MallocSizeOf mallocS
size_t n = 0;
n += mallocSizeOf(this);
// The second arg is nullptr because this is a "shallow" measurement of the map.
n += PL_DHashTableSizeOfIncludingThis(mTable, nullptr, mallocSizeOf);
n += mTable ? PL_DHashTableSizeOfIncludingThis(mTable, nullptr, mallocSizeOf) : 0;
return n;
}
@ -317,19 +333,26 @@ ClassInfo2NativeSetMap::ShallowSizeOfIncludingThis(mozilla::MallocSizeOf mallocS
ClassInfo2WrappedNativeProtoMap*
ClassInfo2WrappedNativeProtoMap::newMap(int length)
{
return new ClassInfo2WrappedNativeProtoMap(length);
ClassInfo2WrappedNativeProtoMap* map = new ClassInfo2WrappedNativeProtoMap(length);
if (map && map->mTable)
return map;
// Allocation of the map or the creation of its hash table has
// failed. This will cause a nullptr deref later when we attempt
// to use the map, so we abort immediately to provide a more
// useful crash stack.
NS_RUNTIMEABORT("Ran out of memory.");
return nullptr;
}
ClassInfo2WrappedNativeProtoMap::ClassInfo2WrappedNativeProtoMap(int length)
{
mTable = new PLDHashTable();
PL_DHashTableInit(mTable, PL_DHashGetStubOps(), sizeof(Entry), length);
mTable = PL_NewDHashTable(PL_DHashGetStubOps(), sizeof(Entry), length);
}
ClassInfo2WrappedNativeProtoMap::~ClassInfo2WrappedNativeProtoMap()
{
PL_DHashTableFinish(mTable);
delete mTable;
if (mTable)
PL_DHashTableDestroy(mTable);
}
size_t
@ -337,7 +360,7 @@ ClassInfo2WrappedNativeProtoMap::SizeOfIncludingThis(mozilla::MallocSizeOf mallo
{
size_t n = 0;
n += mallocSizeOf(this);
n += PL_DHashTableSizeOfIncludingThis(mTable, SizeOfEntryExcludingThis, mallocSizeOf);
n += mTable ? PL_DHashTableSizeOfIncludingThis(mTable, SizeOfEntryExcludingThis, mallocSizeOf) : 0;
return n;
}
@ -435,19 +458,22 @@ const struct PLDHashTableOps NativeSetMap::Entry::sOps =
NativeSetMap*
NativeSetMap::newMap(int length)
{
return new NativeSetMap(length);
NativeSetMap* map = new NativeSetMap(length);
if (map && map->mTable)
return map;
delete map;
return nullptr;
}
NativeSetMap::NativeSetMap(int length)
{
mTable = new PLDHashTable();
PL_DHashTableInit(mTable, &Entry::sOps, sizeof(Entry), length);
mTable = PL_NewDHashTable(&Entry::sOps, sizeof(Entry), length);
}
NativeSetMap::~NativeSetMap()
{
PL_DHashTableFinish(mTable);
delete mTable;
if (mTable)
PL_DHashTableDestroy(mTable);
}
size_t
@ -455,7 +481,7 @@ NativeSetMap::SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf)
{
size_t n = 0;
n += mallocSizeOf(this);
n += PL_DHashTableSizeOfIncludingThis(mTable, SizeOfEntryExcludingThis, mallocSizeOf);
n += mTable ? PL_DHashTableSizeOfIncludingThis(mTable, SizeOfEntryExcludingThis, mallocSizeOf) : 0;
return n;
}
@ -496,19 +522,22 @@ const struct PLDHashTableOps IID2ThisTranslatorMap::Entry::sOps =
IID2ThisTranslatorMap*
IID2ThisTranslatorMap::newMap(int length)
{
return new IID2ThisTranslatorMap(length);
IID2ThisTranslatorMap* map = new IID2ThisTranslatorMap(length);
if (map && map->mTable)
return map;
delete map;
return nullptr;
}
IID2ThisTranslatorMap::IID2ThisTranslatorMap(int length)
{
mTable = new PLDHashTable();
PL_DHashTableInit(mTable, &Entry::sOps, sizeof(Entry), length);
mTable = PL_NewDHashTable(&Entry::sOps, sizeof(Entry), length);
}
IID2ThisTranslatorMap::~IID2ThisTranslatorMap()
{
PL_DHashTableFinish(mTable);
delete mTable;
if (mTable)
PL_DHashTableDestroy(mTable);
}
/***************************************************************************/
@ -569,19 +598,23 @@ const struct PLDHashTableOps XPCNativeScriptableSharedMap::Entry::sOps =
XPCNativeScriptableSharedMap*
XPCNativeScriptableSharedMap::newMap(int length)
{
return new XPCNativeScriptableSharedMap(length);
XPCNativeScriptableSharedMap* map =
new XPCNativeScriptableSharedMap(length);
if (map && map->mTable)
return map;
delete map;
return nullptr;
}
XPCNativeScriptableSharedMap::XPCNativeScriptableSharedMap(int length)
{
mTable = new PLDHashTable();
PL_DHashTableInit(mTable, &Entry::sOps, sizeof(Entry), length);
mTable = PL_NewDHashTable(&Entry::sOps, sizeof(Entry), length);
}
XPCNativeScriptableSharedMap::~XPCNativeScriptableSharedMap()
{
PL_DHashTableFinish(mTable);
delete mTable;
if (mTable)
PL_DHashTableDestroy(mTable);
}
bool
@ -593,8 +626,7 @@ XPCNativeScriptableSharedMap::GetNewOrUsed(uint32_t flags,
NS_PRECONDITION(si,"bad param");
XPCNativeScriptableShared key(flags, name);
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(mTable, &key, fallible));
Entry* entry = (Entry*) PL_DHashTableAdd(mTable, &key);
if (!entry)
return false;
@ -618,20 +650,23 @@ XPCNativeScriptableSharedMap::GetNewOrUsed(uint32_t flags,
XPCWrappedNativeProtoMap*
XPCWrappedNativeProtoMap::newMap(int length)
{
return new XPCWrappedNativeProtoMap(length);
XPCWrappedNativeProtoMap* map = new XPCWrappedNativeProtoMap(length);
if (map && map->mTable)
return map;
delete map;
return nullptr;
}
XPCWrappedNativeProtoMap::XPCWrappedNativeProtoMap(int length)
{
mTable = new PLDHashTable();
PL_DHashTableInit(mTable, PL_DHashGetStubOps(), sizeof(PLDHashEntryStub),
length);
mTable = PL_NewDHashTable(PL_DHashGetStubOps(),
sizeof(PLDHashEntryStub), length);
}
XPCWrappedNativeProtoMap::~XPCWrappedNativeProtoMap()
{
PL_DHashTableFinish(mTable);
delete mTable;
if (mTable)
PL_DHashTableDestroy(mTable);
}
/***************************************************************************/

View File

@ -32,7 +32,7 @@ class JSObject2WrappedJSMap
public:
static JSObject2WrappedJSMap* newMap(int length) {
JSObject2WrappedJSMap* map = new JSObject2WrappedJSMap();
if (map->mTable.init(length))
if (map && map->mTable.init(length))
return map;
delete map;
return nullptr;
@ -120,8 +120,7 @@ public:
NS_PRECONDITION(wrapper,"bad param");
nsISupports* obj = wrapper->GetIdentityObject();
MOZ_ASSERT(!Find(obj), "wrapper already in new scope!");
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(mTable, obj, mozilla::fallible));
Entry* entry = (Entry*) PL_DHashTableAdd(mTable, obj);
if (!entry)
return nullptr;
if (entry->key)
@ -186,8 +185,7 @@ public:
{
NS_PRECONDITION(clazz,"bad param");
const nsIID* iid = &clazz->GetIID();
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(mTable, iid, mozilla::fallible));
Entry* entry = (Entry*) PL_DHashTableAdd(mTable, iid);
if (!entry)
return nullptr;
if (entry->key)
@ -240,8 +238,7 @@ public:
{
NS_PRECONDITION(iface,"bad param");
const nsIID* iid = iface->GetIID();
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(mTable, iid, mozilla::fallible));
Entry* entry = (Entry*) PL_DHashTableAdd(mTable, iid);
if (!entry)
return nullptr;
if (entry->key)
@ -296,8 +293,7 @@ public:
inline XPCNativeSet* Add(nsIClassInfo* info, XPCNativeSet* set)
{
NS_PRECONDITION(info,"bad param");
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(mTable, info, mozilla::fallible));
Entry* entry = (Entry*) PL_DHashTableAdd(mTable, info);
if (!entry)
return nullptr;
if (entry->key)
@ -353,8 +349,7 @@ public:
inline XPCWrappedNativeProto* Add(nsIClassInfo* info, XPCWrappedNativeProto* proto)
{
NS_PRECONDITION(info,"bad param");
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(mTable, info, mozilla::fallible));
Entry* entry = (Entry*) PL_DHashTableAdd(mTable, info);
if (!entry)
return nullptr;
if (entry->key)
@ -416,8 +411,7 @@ public:
{
NS_PRECONDITION(key,"bad param");
NS_PRECONDITION(set,"bad param");
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(mTable, key, mozilla::fallible));
Entry* entry = (Entry*) PL_DHashTableAdd(mTable, key);
if (!entry)
return nullptr;
if (entry->key_value)
@ -489,8 +483,8 @@ public:
inline nsIXPCFunctionThisTranslator* Add(REFNSIID iid,
nsIXPCFunctionThisTranslator* obj)
{
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(mTable, &iid, mozilla::fallible));
Entry* entry = (Entry*) PL_DHashTableAdd(mTable, &iid);
if (!entry)
return nullptr;
entry->value = obj;
@ -561,8 +555,8 @@ public:
inline XPCWrappedNativeProto* Add(XPCWrappedNativeProto* proto)
{
NS_PRECONDITION(proto,"bad param");
PLDHashEntryStub* entry = static_cast<PLDHashEntryStub*>
(PL_DHashTableAdd(mTable, proto, mozilla::fallible));
PLDHashEntryStub* entry = (PLDHashEntryStub*)
PL_DHashTableAdd(mTable, proto);
if (!entry)
return nullptr;
if (entry->key)

View File

@ -175,9 +175,8 @@ nsFrameManager::RegisterPlaceholderFrame(nsPlaceholderFrame* aPlaceholderFrame)
PL_DHashTableInit(&mPlaceholderMap, &PlaceholderMapOps,
sizeof(PlaceholderMapEntry));
}
PlaceholderMapEntry *entry = static_cast<PlaceholderMapEntry*>
(PL_DHashTableAdd(&mPlaceholderMap,
aPlaceholderFrame->GetOutOfFlowFrame(), fallible));
PlaceholderMapEntry *entry = static_cast<PlaceholderMapEntry*>(PL_DHashTableAdd(&mPlaceholderMap,
aPlaceholderFrame->GetOutOfFlowFrame()));
if (!entry)
return NS_ERROR_OUT_OF_MEMORY;

View File

@ -573,7 +573,7 @@ void RuleHash::AppendRuleToTable(PLDHashTable* aTable, const void* aKey,
{
// Get a new or existing entry.
RuleHashTableEntry *entry = static_cast<RuleHashTableEntry*>
(PL_DHashTableAdd(aTable, aKey, fallible));
(PL_DHashTableAdd(aTable, aKey));
if (!entry)
return;
entry->mRules.AppendElement(RuleValue(aRuleInfo, mRuleCount++, mQuirksMode));
@ -585,7 +585,7 @@ AppendRuleToTagTable(PLDHashTable* aTable, nsIAtom* aKey,
{
// Get a new or existing entry
RuleHashTagTableEntry *entry = static_cast<RuleHashTagTableEntry*>
(PL_DHashTableAdd(aTable, aKey, fallible));
(PL_DHashTableAdd(aTable, aKey));
if (!entry)
return;
@ -1041,7 +1041,7 @@ RuleCascadeData::AttributeListFor(nsIAtom* aAttribute)
{
AtomSelectorEntry *entry =
static_cast<AtomSelectorEntry*>
(PL_DHashTableAdd(&mAttributeSelectors, aAttribute, fallible));
(PL_DHashTableAdd(&mAttributeSelectors, aAttribute));
if (!entry)
return nullptr;
return &entry->mSelectors;
@ -3134,8 +3134,9 @@ AddSelector(RuleCascadeData* aCascade,
if (negation == aSelectorInTopLevel) {
for (nsAtomList* curID = negation->mIDList; curID;
curID = curID->mNext) {
AtomSelectorEntry *entry = static_cast<AtomSelectorEntry*>
(PL_DHashTableAdd(&aCascade->mIdSelectors, curID->mAtom, fallible));
AtomSelectorEntry *entry =
static_cast<AtomSelectorEntry*>(PL_DHashTableAdd(&aCascade->mIdSelectors,
curID->mAtom));
if (entry) {
entry->mSelectors.AppendElement(aSelectorInTopLevel);
}
@ -3148,9 +3149,9 @@ AddSelector(RuleCascadeData* aCascade,
if (negation == aSelectorInTopLevel) {
for (nsAtomList* curClass = negation->mClassList; curClass;
curClass = curClass->mNext) {
AtomSelectorEntry *entry = static_cast<AtomSelectorEntry*>
(PL_DHashTableAdd(&aCascade->mClassSelectors, curClass->mAtom,
fallible));
AtomSelectorEntry *entry =
static_cast<AtomSelectorEntry*>(PL_DHashTableAdd(&aCascade->mClassSelectors,
curClass->mAtom));
if (entry) {
entry->mSelectors.AppendElement(aSelectorInTopLevel);
}
@ -3409,8 +3410,7 @@ CascadeRuleEnumFunc(css::Rule* aRule, void* aData)
sel; sel = sel->mNext) {
int32_t weight = sel->mWeight;
RuleByWeightEntry *entry = static_cast<RuleByWeightEntry*>(
PL_DHashTableAdd(&data->mRulesByWeight, NS_INT32_TO_PTR(weight),
fallible));
PL_DHashTableAdd(&data->mRulesByWeight, NS_INT32_TO_PTR(weight)));
if (!entry)
return false;
entry->data.mWeight = weight;

View File

@ -483,9 +483,8 @@ nsHTMLStyleSheet::UniqueMappedAttributes(nsMappedAttributes* aMapped)
PL_DHashTableInit(&mMappedAttrTable, &MappedAttrTable_Ops,
sizeof(MappedAttrTableEntry));
}
MappedAttrTableEntry *entry =
static_cast<MappedAttrTableEntry*>
(PL_DHashTableAdd(&mMappedAttrTable, aMapped, fallible));
MappedAttrTableEntry *entry = static_cast<MappedAttrTableEntry*>
(PL_DHashTableAdd(&mMappedAttrTable, aMapped));
if (!entry)
return nullptr;
if (!entry->mAttributes) {
@ -519,7 +518,7 @@ nsHTMLStyleSheet::LangRuleFor(const nsString& aLanguage)
sizeof(LangRuleTableEntry));
}
LangRuleTableEntry *entry = static_cast<LangRuleTableEntry*>
(PL_DHashTableAdd(&mLangRuleTable, &aLanguage, fallible));
(PL_DHashTableAdd(&mLangRuleTable, &aLanguage));
if (!entry) {
NS_ASSERTION(false, "out of memory");
return nullptr;

View File

@ -1420,8 +1420,7 @@ nsRuleNode::DestroyInternal(nsRuleNode ***aDestroyQueueTail)
PL_DHashTableEnumerate(children, EnqueueRuleNodeChildren,
&destroyQueueTail);
*destroyQueueTail = nullptr; // ensure null-termination
PL_DHashTableFinish(children);
delete children;
PL_DHashTableDestroy(children);
} else if (HaveChildren()) {
*destroyQueueTail = ChildrenList();
do {
@ -1534,7 +1533,7 @@ nsRuleNode::Transition(nsIStyleRule* aRule, uint8_t aLevel,
if (ChildrenAreHashed()) {
ChildrenHashEntry *entry = static_cast<ChildrenHashEntry*>
(PL_DHashTableAdd(ChildrenHash(), &key, fallible));
(PL_DHashTableAdd(ChildrenHash(), &key));
if (!entry) {
NS_WARNING("out of memory");
return this;
@ -1604,13 +1603,15 @@ nsRuleNode::ConvertChildrenToHash(int32_t aNumKids)
{
NS_ASSERTION(!ChildrenAreHashed() && HaveChildren(),
"must have a non-empty list of children");
PLDHashTable *hash = new PLDHashTable();
PL_DHashTableInit(hash, &ChildrenHashOps, sizeof(ChildrenHashEntry),
aNumKids);
PLDHashTable *hash = PL_NewDHashTable(&ChildrenHashOps,
sizeof(ChildrenHashEntry),
aNumKids);
if (!hash)
return;
for (nsRuleNode* curr = ChildrenList(); curr; curr = curr->mNextSibling) {
// This will never fail because of the initial size we gave the table.
ChildrenHashEntry *entry = static_cast<ChildrenHashEntry*>(
PL_DHashTableAdd(hash, curr->mRule, fallible));
PL_DHashTableAdd(hash, curr->mRule));
NS_ASSERTION(!entry->mRuleNode, "duplicate entries in list");
entry->mRuleNode = curr;
}
@ -9231,8 +9232,7 @@ nsRuleNode::SweepChildren(nsTArray<nsRuleNode*>& aSweepQueue)
PL_DHashTableEnumerate(children, SweepHashEntry, &survivorsWithChildren);
childrenDestroyed = oldChildCount - children->EntryCount();
if (childrenDestroyed == oldChildCount) {
PL_DHashTableFinish(children);
delete children;
PL_DHashTableDestroy(children);
mChildren.asVoid = nullptr;
}
} else {

View File

@ -75,8 +75,7 @@ SpanningCellSorter::AddCell(int32_t aColSpan, int32_t aRow, int32_t aCol)
sizeof(HashTableEntry));
}
HashTableEntry *entry = static_cast<HashTableEntry*>
(PL_DHashTableAdd(&mHashTable, NS_INT32_TO_PTR(aColSpan),
fallible));
(PL_DHashTableAdd(&mHashTable, NS_INT32_TO_PTR(aColSpan)));
NS_ENSURE_TRUE(entry, false);
NS_ASSERTION(entry->mColSpan == 0 || entry->mColSpan == aColSpan,

View File

@ -523,7 +523,8 @@ Preferences::Init()
{
nsresult rv;
PREF_Init();
rv = PREF_Init();
NS_ENSURE_SUCCESS(rv, rv);
rv = pref_InitInitialObjects();
NS_ENSURE_SUCCESS(rv, rv);
@ -643,7 +644,8 @@ Preferences::ResetPrefs()
NotifyServiceObservers(NS_PREFSERVICE_RESET_TOPIC_ID);
PREF_CleanupPrefs();
PREF_Init();
nsresult rv = PREF_Init();
NS_ENSURE_SUCCESS(rv, rv);
return pref_InitInitialObjects();
}

View File

@ -143,14 +143,19 @@ static nsresult pref_HashPref(const char *key, PrefValue value, PrefType type, u
#define PREF_HASHTABLE_INITIAL_LENGTH 1024
void PREF_Init()
nsresult PREF_Init()
{
if (!gHashTable.IsInitialized()) {
PL_DHashTableInit(&gHashTable, &pref_HashTableOps,
sizeof(PrefHashEntry), PREF_HASHTABLE_INITIAL_LENGTH);
if (!PL_DHashTableInit(&gHashTable, &pref_HashTableOps,
sizeof(PrefHashEntry), fallible,
PREF_HASHTABLE_INITIAL_LENGTH)) {
return NS_ERROR_OUT_OF_MEMORY;
}
PL_INIT_ARENA_POOL(&gPrefNameArena, "PrefNameArena",
PREFNAME_ARENA_SIZE);
}
return NS_OK;
}
/* Frees the callback list. */
@ -735,8 +740,7 @@ nsresult pref_HashPref(const char *key, PrefValue value, PrefType type, uint32_t
if (!gHashTable.IsInitialized())
return NS_ERROR_OUT_OF_MEMORY;
PrefHashEntry* pref = static_cast<PrefHashEntry*>
(PL_DHashTableAdd(&gHashTable, key, fallible));
PrefHashEntry* pref = static_cast<PrefHashEntry*>(PL_DHashTableAdd(&gHashTable, key));
if (!pref)
return NS_ERROR_OUT_OF_MEMORY;

View File

@ -42,7 +42,7 @@ struct PrefHashEntry : PLDHashEntryHdr
// the preference hashtable.
// </font>
*/
void PREF_Init();
nsresult PREF_Init();
/*
// Cleanup should be called at program exit to free the

View File

@ -511,8 +511,9 @@ nsLoadGroup::AddRequest(nsIRequest *request, nsISupports* ctxt)
// Add the request to the list of active requests...
//
RequestMapEntry *entry = static_cast<RequestMapEntry *>
(PL_DHashTableAdd(&mRequests, request, fallible));
RequestMapEntry *entry =
static_cast<RequestMapEntry *>
(PL_DHashTableAdd(&mRequests, request));
if (!entry) {
return NS_ERROR_OUT_OF_MEMORY;

View File

@ -385,6 +385,7 @@ nsCacheEntryHashTable::ops =
nsCacheEntryHashTable::nsCacheEntryHashTable()
: initialized(false)
{
MOZ_COUNT_CTOR(nsCacheEntryHashTable);
}
@ -393,21 +394,30 @@ nsCacheEntryHashTable::nsCacheEntryHashTable()
nsCacheEntryHashTable::~nsCacheEntryHashTable()
{
MOZ_COUNT_DTOR(nsCacheEntryHashTable);
Shutdown();
if (initialized)
Shutdown();
}
void
nsresult
nsCacheEntryHashTable::Init()
{
PL_DHashTableInit(&table, &ops, sizeof(nsCacheEntryHashTableEntry), 256);
nsresult rv = NS_OK;
initialized = PL_DHashTableInit(&table, &ops,
sizeof(nsCacheEntryHashTableEntry),
fallible, 256);
if (!initialized) rv = NS_ERROR_OUT_OF_MEMORY;
return rv;
}
void
nsCacheEntryHashTable::Shutdown()
{
if (table.IsInitialized()) {
if (initialized) {
PL_DHashTableFinish(&table);
initialized = false;
}
}
@ -415,9 +425,8 @@ nsCacheEntryHashTable::Shutdown()
nsCacheEntry *
nsCacheEntryHashTable::GetEntry( const nsCString * key)
{
NS_ASSERTION(table.IsInitialized(),
"nsCacheEntryHashTable not initialized");
if (!table.IsInitialized()) return nullptr;
NS_ASSERTION(initialized, "nsCacheEntryHashTable not initialized");
if (!initialized) return nullptr;
PLDHashEntryHdr *hashEntry = PL_DHashTableSearch(&table, key);
return hashEntry ? ((nsCacheEntryHashTableEntry *)hashEntry)->cacheEntry
@ -430,12 +439,11 @@ nsCacheEntryHashTable::AddEntry( nsCacheEntry *cacheEntry)
{
PLDHashEntryHdr *hashEntry;
NS_ASSERTION(table.IsInitialized(),
"nsCacheEntryHashTable not initialized");
if (!table.IsInitialized()) return NS_ERROR_NOT_INITIALIZED;
NS_ASSERTION(initialized, "nsCacheEntryHashTable not initialized");
if (!initialized) return NS_ERROR_NOT_INITIALIZED;
if (!cacheEntry) return NS_ERROR_NULL_POINTER;
hashEntry = PL_DHashTableAdd(&table, &(cacheEntry->mKey), fallible);
hashEntry = PL_DHashTableAdd(&table, &(cacheEntry->mKey));
#ifndef DEBUG_dougt
NS_ASSERTION(((nsCacheEntryHashTableEntry *)hashEntry)->cacheEntry == 0,
"### nsCacheEntryHashTable::AddEntry - entry already used");
@ -449,11 +457,10 @@ nsCacheEntryHashTable::AddEntry( nsCacheEntry *cacheEntry)
void
nsCacheEntryHashTable::RemoveEntry( nsCacheEntry *cacheEntry)
{
NS_ASSERTION(table.IsInitialized(),
"nsCacheEntryHashTable not initialized");
NS_ASSERTION(initialized, "nsCacheEntryHashTable not initialized");
NS_ASSERTION(cacheEntry, "### cacheEntry == nullptr");
if (!table.IsInitialized()) return; // NS_ERROR_NOT_INITIALIZED
if (!initialized) return; // NS_ERROR_NOT_INITIALIZED
#if DEBUG
// XXX debug code to make sure we have the entry we're trying to remove
@ -467,9 +474,8 @@ nsCacheEntryHashTable::RemoveEntry( nsCacheEntry *cacheEntry)
void
nsCacheEntryHashTable::VisitEntries( PLDHashEnumerator etor, void *arg)
{
NS_ASSERTION(table.IsInitialized(),
"nsCacheEntryHashTable not initialized");
if (!table.IsInitialized()) return; // NS_ERROR_NOT_INITIALIZED
NS_ASSERTION(initialized, "nsCacheEntryHashTable not initialized");
if (!initialized) return; // NS_ERROR_NOT_INITIALIZED
PL_DHashTableEnumerate(&table, etor, arg);
}

View File

@ -267,7 +267,7 @@ public:
nsCacheEntryHashTable();
~nsCacheEntryHashTable();
void Init();
nsresult Init();
void Shutdown();
nsCacheEntry *GetEntry( const nsCString * key);
@ -306,6 +306,7 @@ private:
// member variables
static const PLDHashTableOps ops;
PLDHashTable table;
bool initialized;
};
#endif // _nsCacheEntry_h_

View File

@ -1155,7 +1155,8 @@ nsCacheService::Init()
}
// initialize hashtable for active cache entries
mActiveEntries.Init();
rv = mActiveEntries.Init();
if (NS_FAILED(rv)) return rv;
// create profile/preference observer
if (!mObserver) {

View File

@ -236,8 +236,7 @@ nsDiskCacheBindery::AddBinding(nsDiskCacheBinding * binding)
HashTableEntry * hashEntry;
hashEntry = (HashTableEntry *)
PL_DHashTableAdd(&table,
(void *)(uintptr_t) binding->mRecord.HashNumber(),
fallible);
(void *)(uintptr_t) binding->mRecord.HashNumber());
if (!hashEntry) return NS_ERROR_OUT_OF_MEMORY;
if (hashEntry->mBinding == nullptr) {

View File

@ -55,9 +55,9 @@ nsMemoryCacheDevice::Init()
{
if (mInitialized) return NS_ERROR_ALREADY_INITIALIZED;
mMemCacheEntries.Init();
mInitialized = true;
return NS_OK;
nsresult rv = mMemCacheEntries.Init();
mInitialized = NS_SUCCEEDED(rv);
return rv;
}

View File

@ -764,7 +764,7 @@ nsHostResolver::ResolveHost(const char *host,
nsHostKey key = { host, flags, af };
nsHostDBEnt *he = static_cast<nsHostDBEnt *>
(PL_DHashTableAdd(&mDB, &key, fallible));
(PL_DHashTableAdd(&mDB, &key));
// if the record is null, the hash table OOM'd.
if (!he) {

View File

@ -105,8 +105,10 @@ nsHttp::CreateAtomTable()
// The initial length for this table is a value greater than the number of
// known atoms (NUM_HTTP_ATOMS) because we expect to encounter a few random
// headers right off the bat.
PL_DHashTableInit(&sAtomTable, &ops, sizeof(PLDHashEntryStub),
NUM_HTTP_ATOMS + 10);
if (!PL_DHashTableInit(&sAtomTable, &ops, sizeof(PLDHashEntryStub),
fallible, NUM_HTTP_ATOMS + 10)) {
return NS_ERROR_OUT_OF_MEMORY;
}
// fill the table with our known atoms
const char *const atoms[] = {
@ -118,7 +120,7 @@ nsHttp::CreateAtomTable()
for (int i = 0; atoms[i]; ++i) {
PLDHashEntryStub *stub = reinterpret_cast<PLDHashEntryStub *>
(PL_DHashTableAdd(&sAtomTable, atoms[i], fallible));
(PL_DHashTableAdd(&sAtomTable, atoms[i]));
if (!stub)
return NS_ERROR_OUT_OF_MEMORY;
@ -166,7 +168,7 @@ nsHttp::ResolveAtom(const char *str)
MutexAutoLock lock(*sLock);
PLDHashEntryStub *stub = reinterpret_cast<PLDHashEntryStub *>
(PL_DHashTableAdd(&sAtomTable, str, fallible));
(PL_DHashTableAdd(&sAtomTable, str));
if (!stub)
return atom; // out of memory

View File

@ -83,10 +83,17 @@ nsresult
nsHTMLEntities::AddRefTable(void)
{
if (!gTableRefCnt) {
PL_DHashTableInit(&gEntityToUnicode, &EntityToUnicodeOps,
sizeof(EntityNodeEntry), NS_HTML_ENTITY_COUNT);
PL_DHashTableInit(&gUnicodeToEntity, &UnicodeToEntityOps,
sizeof(EntityNodeEntry), NS_HTML_ENTITY_COUNT);
if (!PL_DHashTableInit(&gEntityToUnicode, &EntityToUnicodeOps,
sizeof(EntityNodeEntry),
fallible, NS_HTML_ENTITY_COUNT)) {
return NS_ERROR_OUT_OF_MEMORY;
}
if (!PL_DHashTableInit(&gUnicodeToEntity, &UnicodeToEntityOps,
sizeof(EntityNodeEntry),
fallible, NS_HTML_ENTITY_COUNT)) {
PL_DHashTableFinish(&gEntityToUnicode);
return NS_ERROR_OUT_OF_MEMORY;
}
for (const EntityNode *node = gEntityArray,
*node_end = ArrayEnd(gEntityArray);
node < node_end; ++node) {
@ -94,7 +101,7 @@ nsHTMLEntities::AddRefTable(void)
// add to Entity->Unicode table
EntityNodeEntry* entry =
static_cast<EntityNodeEntry*>
(PL_DHashTableAdd(&gEntityToUnicode, node->mStr, fallible));
(PL_DHashTableAdd(&gEntityToUnicode, node->mStr));
NS_ASSERTION(entry, "Error adding an entry");
// Prefer earlier entries when we have duplication.
if (!entry->node)
@ -103,8 +110,7 @@ nsHTMLEntities::AddRefTable(void)
// add to Unicode->Entity table
entry = static_cast<EntityNodeEntry*>
(PL_DHashTableAdd(&gUnicodeToEntity,
NS_INT32_TO_PTR(node->mUnicode),
fallible));
NS_INT32_TO_PTR(node->mUnicode)));
NS_ASSERTION(entry, "Error adding an entry");
// Prefer earlier entries when we have duplication.
if (!entry->node)

View File

@ -162,9 +162,8 @@ Assertion::Assertion(nsIRDFResource* aSource)
NS_ADDREF(mSource);
u.hash.mPropertyHash = new PLDHashTable();
PL_DHashTableInit(u.hash.mPropertyHash, PL_DHashGetStubOps(),
sizeof(Entry));
u.hash.mPropertyHash =
PL_NewDHashTable(PL_DHashGetStubOps(), sizeof(Entry));
}
Assertion::Assertion(nsIRDFResource* aSource,
@ -195,8 +194,7 @@ Assertion::~Assertion()
if (mHashEntry && u.hash.mPropertyHash) {
PL_DHashTableEnumerate(u.hash.mPropertyHash, DeletePropertyHashEntry,
nullptr);
PL_DHashTableFinish(u.hash.mPropertyHash);
delete u.hash.mPropertyHash;
PL_DHashTableDestroy(u.hash.mPropertyHash);
u.hash.mPropertyHash = nullptr;
}
@ -334,8 +332,7 @@ public:
void
SetForwardArcs(nsIRDFResource* u, Assertion* as) {
if (as) {
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(&mForwardArcs, u, mozilla::fallible));
Entry* entry = static_cast<Entry*>(PL_DHashTableAdd(&mForwardArcs, u));
if (entry) {
entry->mNode = u;
entry->mAssertions = as;
@ -349,8 +346,7 @@ public:
void
SetReverseArcs(nsIRDFNode* v, Assertion* as) {
if (as) {
Entry* entry = static_cast<Entry*>
(PL_DHashTableAdd(&mReverseArcs, v, mozilla::fallible));
Entry* entry = static_cast<Entry*>(PL_DHashTableAdd(&mReverseArcs, v));
if (entry) {
entry->mNode = v;
entry->mAssertions = as;
@ -1188,8 +1184,7 @@ InMemoryDataSource::LockedAssert(nsIRDFResource* aSource,
}
else
{
hdr = PL_DHashTableAdd(next->u.hash.mPropertyHash, aProperty,
mozilla::fallible);
hdr = PL_DHashTableAdd(next->u.hash.mPropertyHash, aProperty);
if (hdr)
{
Entry* entry = static_cast<Entry*>(hdr);
@ -1300,9 +1295,8 @@ InMemoryDataSource::LockedUnassert(nsIRDFResource* aSource,
PL_DHashTableRawRemove(root->u.hash.mPropertyHash, hdr);
if (next && next->mNext) {
PLDHashEntryHdr* hdr =
PL_DHashTableAdd(root->u.hash.mPropertyHash, aProperty,
mozilla::fallible);
PLDHashEntryHdr* hdr = PL_DHashTableAdd(root->u.hash.mPropertyHash,
aProperty);
if (hdr) {
Entry* entry = static_cast<Entry*>(hdr);
entry->mNode = aProperty;
@ -1746,8 +1740,7 @@ InMemoryDataSource::EnsureFastContainment(nsIRDFResource* aSource)
val->mNext = first;
}
else {
PLDHashEntryHdr* hdr = PL_DHashTableAdd(table, prop,
mozilla::fallible);
PLDHashEntryHdr* hdr = PL_DHashTableAdd(table, prop);
if (hdr) {
Entry* entry = static_cast<Entry*>(hdr);
entry->mNode = prop;

View File

@ -1162,7 +1162,7 @@ RDFServiceImpl::RegisterResource(nsIRDFResource* aResource, bool aReplace)
aResource, (const char*) uri));
}
else {
hdr = PL_DHashTableAdd(&mResources, uri, fallible);
hdr = PL_DHashTableAdd(&mResources, uri);
if (! hdr)
return NS_ERROR_OUT_OF_MEMORY;
@ -1399,7 +1399,7 @@ RDFServiceImpl::RegisterLiteral(nsIRDFLiteral* aLiteral)
NS_ASSERTION(!PL_DHashTableSearch(&mLiterals, value),
"literal already registered");
PLDHashEntryHdr *hdr = PL_DHashTableAdd(&mLiterals, value, fallible);
PLDHashEntryHdr *hdr = PL_DHashTableAdd(&mLiterals, value);
if (! hdr)
return NS_ERROR_OUT_OF_MEMORY;
@ -1451,7 +1451,7 @@ RDFServiceImpl::RegisterInt(nsIRDFInt* aInt)
NS_ASSERTION(!PL_DHashTableSearch(&mInts, &value),
"int already registered");
PLDHashEntryHdr *hdr = PL_DHashTableAdd(&mInts, &value, fallible);
PLDHashEntryHdr *hdr = PL_DHashTableAdd(&mInts, &value);
if (! hdr)
return NS_ERROR_OUT_OF_MEMORY;
@ -1503,7 +1503,7 @@ RDFServiceImpl::RegisterDate(nsIRDFDate* aDate)
NS_ASSERTION(!PL_DHashTableSearch(&mDates, &value),
"date already registered");
PLDHashEntryHdr *hdr = PL_DHashTableAdd(&mDates, &value, fallible);
PLDHashEntryHdr *hdr = PL_DHashTableAdd(&mDates, &value);
if (! hdr)
return NS_ERROR_OUT_OF_MEMORY;
@ -1550,7 +1550,7 @@ RDFServiceImpl::RegisterBlob(BlobImpl *aBlob)
NS_ASSERTION(!PL_DHashTableSearch(&mBlobs, &aBlob->mData),
"blob already registered");
PLDHashEntryHdr *hdr = PL_DHashTableAdd(&mBlobs, &aBlob->mData, fallible);
PLDHashEntryHdr *hdr = PL_DHashTableAdd(&mBlobs, &aBlob->mData);
if (! hdr)
return NS_ERROR_OUT_OF_MEMORY;

View File

@ -862,8 +862,8 @@ nsSecureBrowserUIImpl::OnStateChange(nsIWebProgress* aWebProgress,
// means, there has already been data transfered.
ReentrantMonitorAutoEnter lock(mReentrantMonitor);
PL_DHashTableAdd(&mTransferringRequests, aRequest, fallible);
PL_DHashTableAdd(&mTransferringRequests, aRequest);
return NS_OK;
}

View File

@ -178,11 +178,14 @@ void nsCertTree::ClearCompareHash()
}
}
void nsCertTree::InitCompareHash()
nsresult nsCertTree::InitCompareHash()
{
ClearCompareHash();
PL_DHashTableInit(&mCompareCache, &gMapOps,
sizeof(CompareCacheHashEntryPtr), 64);
if (!PL_DHashTableInit(&mCompareCache, &gMapOps,
sizeof(CompareCacheHashEntryPtr), fallible, 64)) {
return NS_ERROR_OUT_OF_MEMORY;
}
return NS_OK;
}
nsCertTree::~nsCertTree()
@ -201,8 +204,9 @@ CompareCacheHashEntry *
nsCertTree::getCacheEntry(void *cache, void *aCert)
{
PLDHashTable &aCompareCache = *reinterpret_cast<PLDHashTable*>(cache);
CompareCacheHashEntryPtr *entryPtr = static_cast<CompareCacheHashEntryPtr*>
(PL_DHashTableAdd(&aCompareCache, aCert, fallible));
CompareCacheHashEntryPtr *entryPtr =
static_cast<CompareCacheHashEntryPtr*>
(PL_DHashTableAdd(&aCompareCache, aCert));
return entryPtr ? entryPtr->entry : nullptr;
}
@ -659,11 +663,11 @@ nsCertTree::LoadCertsFromCache(nsINSSCertCache *aCache, uint32_t aType)
mTreeArray = nullptr;
mNumRows = 0;
}
InitCompareHash();
nsresult rv = InitCompareHash();
if (NS_FAILED(rv)) return rv;
nsresult rv =
GetCertsByTypeFromCache(aCache, aType, GetCompareFuncFromCertType(aType),
&mCompareCache);
rv = GetCertsByTypeFromCache(aCache, aType,
GetCompareFuncFromCertType(aType), &mCompareCache);
if (NS_FAILED(rv)) return rv;
return UpdateUIContents();
}
@ -677,10 +681,11 @@ nsCertTree::LoadCerts(uint32_t aType)
mTreeArray = nullptr;
mNumRows = 0;
}
InitCompareHash();
nsresult rv = InitCompareHash();
if (NS_FAILED(rv)) return rv;
nsresult rv =
GetCertsByType(aType, GetCompareFuncFromCertType(aType), &mCompareCache);
rv = GetCertsByType(aType,
GetCompareFuncFromCertType(aType), &mCompareCache);
if (NS_FAILED(rv)) return rv;
return UpdateUIContents();
}

View File

@ -90,7 +90,7 @@ public:
protected:
virtual ~nsCertTree();
void InitCompareHash();
nsresult InitCompareHash();
void ClearCompareHash();
void RemoveCacheEntry(void *key);

View File

@ -70,7 +70,7 @@ void nsNSSShutDownList::remember(nsNSSShutDownObject *o)
PR_ASSERT(o);
MutexAutoLock lock(singleton->mListLock);
PL_DHashTableAdd(&singleton->mObjects, o, fallible);
PL_DHashTableAdd(&singleton->mObjects, o);
}
void nsNSSShutDownList::forget(nsNSSShutDownObject *o)
@ -90,7 +90,7 @@ void nsNSSShutDownList::remember(nsOnPK11LogoutCancelObject *o)
PR_ASSERT(o);
MutexAutoLock lock(singleton->mListLock);
PL_DHashTableAdd(&singleton->mPK11LogoutCancelObjects, o, fallible);
PL_DHashTableAdd(&singleton->mPK11LogoutCancelObjects, o);
}
void nsNSSShutDownList::forget(nsOnPK11LogoutCancelObject *o)

View File

@ -1340,7 +1340,7 @@ nsDocLoader::RefreshAttempted(nsIWebProgress* aWebProgress,
nsresult nsDocLoader::AddRequestInfo(nsIRequest *aRequest)
{
if (!PL_DHashTableAdd(&mRequestInfoHash, aRequest, mozilla::fallible)) {
if (!PL_DHashTableAdd(&mRequestInfoHash, aRequest)) {
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -900,8 +900,13 @@ PtrToNodeEntry*
CCGraph::AddNodeToMap(void* aPtr)
{
JS::AutoSuppressGCAnalysis suppress;
return static_cast<PtrToNodeEntry*>
(PL_DHashTableAdd(&mPtrToNodeMap, aPtr)); // infallible add
PtrToNodeEntry* e =
static_cast<PtrToNodeEntry*>(PL_DHashTableAdd(&mPtrToNodeMap, aPtr));
if (!e) {
// Caller should track OOMs
return nullptr;
}
return e;
}
void
@ -2032,6 +2037,7 @@ private:
nsCString mNextEdgeName;
nsCOMPtr<nsICycleCollectorListener> mListener;
bool mMergeZones;
bool mRanOutOfMemory;
nsAutoPtr<NodePool::Enumerator> mCurrNode;
public:
@ -2146,6 +2152,7 @@ CCGraphBuilder::CCGraphBuilder(CCGraph& aGraph,
, mJSZoneParticipant(nullptr)
, mListener(aListener)
, mMergeZones(aMergeZones)
, mRanOutOfMemory(false)
{
if (aJSRuntime) {
mJSParticipant = aJSRuntime->GCThingParticipant();
@ -2179,6 +2186,11 @@ PtrInfo*
CCGraphBuilder::AddNode(void* aPtr, nsCycleCollectionParticipant* aParticipant)
{
PtrToNodeEntry* e = mGraph.AddNodeToMap(aPtr);
if (!e) {
mRanOutOfMemory = true;
return nullptr;
}
PtrInfo* result;
if (!e->mNode) {
// New entry.
@ -2259,6 +2271,11 @@ CCGraphBuilder::BuildGraph(SliceBudget& aBudget)
SetLastChild();
}
if (mRanOutOfMemory) {
MOZ_ASSERT(false, "Ran out of memory while building cycle collector graph");
CC_TELEMETRY(_OOM, true);
}
mCurrNode = nullptr;
return true;

View File

@ -556,8 +556,12 @@ GetAtomHashEntry(const char* aString, uint32_t aLength, uint32_t* aHashOut)
MOZ_ASSERT(NS_IsMainThread(), "wrong thread");
EnsureTableExists();
AtomTableKey key(aString, aLength, aHashOut);
// This is an infallible add.
return static_cast<AtomTableEntry*>(PL_DHashTableAdd(&gAtomTable, &key));
AtomTableEntry* e = static_cast<AtomTableEntry*>(
PL_DHashTableAdd(&gAtomTable, &key));
if (!e) {
NS_ABORT_OOM(gAtomTable.EntryCount() * gAtomTable.EntrySize());
}
return e;
}
static inline AtomTableEntry*
@ -566,8 +570,12 @@ GetAtomHashEntry(const char16_t* aString, uint32_t aLength, uint32_t* aHashOut)
MOZ_ASSERT(NS_IsMainThread(), "wrong thread");
EnsureTableExists();
AtomTableKey key(aString, aLength, aHashOut);
// This is an infallible add.
return static_cast<AtomTableEntry*>(PL_DHashTableAdd(&gAtomTable, &key));
AtomTableEntry* e = static_cast<AtomTableEntry*>(
PL_DHashTableAdd(&gAtomTable, &key));
if (!e) {
NS_ABORT_OOM(gAtomTable.EntryCount() * gAtomTable.EntrySize());
}
return e;
}
class CheckStaticAtomSizes

View File

@ -528,7 +528,7 @@ nsPersistentProperties::SetStringProperty(const nsACString& aKey,
{
const nsAFlatCString& flatKey = PromiseFlatCString(aKey);
PropertyTableEntry* entry = static_cast<PropertyTableEntry*>(
PL_DHashTableAdd(&mTable, flatKey.get(), mozilla::fallible));
PL_DHashTableAdd(&mTable, flatKey.get()));
if (entry->mKey) {
aOldValue = entry->mValue;

View File

@ -138,8 +138,11 @@ nsStaticCaseInsensitiveNameTable::Init(const char* const aNames[],
return false;
}
PL_DHashTableInit(&mNameTable, &nametable_CaseInsensitiveHashTableOps,
sizeof(NameTableEntry), aLength);
if (!PL_DHashTableInit(&mNameTable, &nametable_CaseInsensitiveHashTableOps,
sizeof(NameTableEntry), fallible,
aLength)) {
return false;
}
for (int32_t index = 0; index < aLength; ++index) {
const char* raw = aNames[index];
@ -161,8 +164,8 @@ nsStaticCaseInsensitiveNameTable::Init(const char* const aNames[],
NameTableKey key(strPtr);
NameTableEntry* entry = static_cast<NameTableEntry*>
(PL_DHashTableAdd(&mNameTable, &key, fallible));
NameTableEntry* entry =
static_cast<NameTableEntry*>(PL_DHashTableAdd(&mNameTable, &key));
if (!entry) {
continue;
}

View File

@ -149,21 +149,19 @@ public:
*/
EntryType* PutEntry(KeyType aKey)
{
NS_ASSERTION(mTable.IsInitialized(),
"nsTHashtable was not initialized properly.");
return static_cast<EntryType*> // infallible add
(PL_DHashTableAdd(&mTable, EntryType::KeyToPointer(aKey)));
EntryType* e = PutEntry(aKey, mozilla::fallible);
if (!e) {
NS_ABORT_OOM(mTable.EntrySize() * mTable.EntryCount());
}
return e;
}
EntryType* PutEntry(KeyType aKey, const fallible_t&) NS_WARN_UNUSED_RESULT
{
EntryType* PutEntry(KeyType aKey, const fallible_t&) NS_WARN_UNUSED_RESULT {
NS_ASSERTION(mTable.IsInitialized(),
"nsTHashtable was not initialized properly.");
return static_cast<EntryType*>
(PL_DHashTableAdd(&mTable, EntryType::KeyToPointer(aKey),
mozilla::fallible));
return static_cast<EntryType*>(PL_DHashTableAdd(
&mTable, EntryType::KeyToPointer(aKey)));
}
/**

View File

@ -167,6 +167,29 @@ SizeOfEntryStore(uint32_t aCapacity, uint32_t aEntrySize, uint32_t* aNbytes)
return uint64_t(*aNbytes) == nbytes64; // returns false on overflow
}
PLDHashTable*
PL_NewDHashTable(const PLDHashTableOps* aOps, uint32_t aEntrySize,
uint32_t aLength)
{
PLDHashTable* table = (PLDHashTable*)malloc(sizeof(*table));
if (!table) {
return nullptr;
}
if (!PL_DHashTableInit(table, aOps, aEntrySize, fallible, aLength)) {
free(table);
return nullptr;
}
return table;
}
void
PL_DHashTableDestroy(PLDHashTable* aTable)
{
PL_DHashTableFinish(aTable);
free(aTable);
}
/*
* Compute max and min load numbers (entry counts). We have a secondary max
* that allows us to overload a table reasonably if it cannot be grown further
@ -196,12 +219,12 @@ MinCapacity(uint32_t aLength)
return (aLength * 4 + (3 - 1)) / 3; // == ceil(aLength * 4 / 3)
}
MOZ_ALWAYS_INLINE void
MOZ_ALWAYS_INLINE bool
PLDHashTable::Init(const PLDHashTableOps* aOps,
uint32_t aEntrySize, uint32_t aLength)
uint32_t aEntrySize, const fallible_t&, uint32_t aLength)
{
if (aLength > PL_DHASH_MAX_INITIAL_LENGTH) {
MOZ_CRASH("Initial length is too large");
return false;
}
// Compute the smallest capacity allowing |aLength| elements to be inserted
@ -215,30 +238,54 @@ PLDHashTable::Init(const PLDHashTableOps* aOps,
capacity = 1u << log2;
MOZ_ASSERT(capacity <= PL_DHASH_MAX_CAPACITY);
mOps = aOps;
mHashShift = PL_DHASH_BITS - log2;
mEntrySize = aEntrySize;
mEntryCount = mRemovedCount = 0;
mGeneration = 0;
uint32_t nbytes;
if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
MOZ_CRASH("Initial entry store size is too large");
return false; // overflowed
}
mEntryStore = nullptr;
mEntryStore = (char*)malloc(nbytes);
if (!mEntryStore) {
return false;
}
memset(mEntryStore, 0, nbytes);
METER(memset(&mStats, 0, sizeof(mStats)));
// Set this only once we reach a point where we know we can't fail.
mOps = aOps;
#ifdef DEBUG
mRecursionLevel = 0;
#endif
return true;
}
bool
PL_DHashTableInit(PLDHashTable* aTable, const PLDHashTableOps* aOps,
uint32_t aEntrySize,
const fallible_t& aFallible, uint32_t aLength)
{
return aTable->Init(aOps, aEntrySize, aFallible, aLength);
}
void
PL_DHashTableInit(PLDHashTable* aTable, const PLDHashTableOps* aOps,
uint32_t aEntrySize, uint32_t aLength)
{
aTable->Init(aOps, aEntrySize, aLength);
if (!PL_DHashTableInit(aTable, aOps, aEntrySize, fallible, aLength)) {
if (aLength > PL_DHASH_MAX_INITIAL_LENGTH) {
MOZ_CRASH(); // the asked-for length was too big
}
uint32_t capacity = MinCapacity(aLength), nbytes;
if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
MOZ_CRASH(); // the required mEntryStore size was too big
}
NS_ABORT_OOM(nbytes); // allocation failed
}
}
/*
@ -301,7 +348,6 @@ PLDHashTable::Finish()
/* Free entry storage last. */
free(mEntryStore);
mEntryStore = nullptr;
}
void
@ -319,7 +365,6 @@ template <PLDHashTable::SearchReason Reason>
PLDHashEntryHdr* PL_DHASH_FASTCALL
PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash)
{
MOZ_ASSERT(mEntryStore);
METER(mStats.mSearches++);
NS_ASSERTION(!(aKeyHash & COLLISION_FLAG),
"!(aKeyHash & COLLISION_FLAG)");
@ -400,7 +445,6 @@ PLDHashEntryHdr* PL_DHASH_FASTCALL
PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash)
{
METER(mStats.mSearches++);
MOZ_ASSERT(mEntryStore);
NS_ASSERTION(!(aKeyHash & COLLISION_FLAG),
"!(aKeyHash & COLLISION_FLAG)");
@ -442,8 +486,6 @@ PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash)
bool
PLDHashTable::ChangeTable(int aDeltaLog2)
{
MOZ_ASSERT(mEntryStore);
/* Look, but don't touch, until we succeed in getting new entry store. */
int oldLog2 = PL_DHASH_BITS - mHashShift;
int newLog2 = oldLog2 + aDeltaLog2;
@ -519,9 +561,8 @@ PLDHashTable::Search(const void* aKey)
METER(mStats.mSearches++);
PLDHashEntryHdr* entry =
mEntryStore ? SearchTable<ForSearchOrRemove>(aKey, ComputeKeyHash(aKey))
: nullptr;
PLDHashNumber keyHash = ComputeKeyHash(aKey);
PLDHashEntryHdr* entry = SearchTable<ForSearchOrRemove>(aKey, keyHash);
DECREMENT_RECURSION_LEVEL(this);
@ -529,33 +570,20 @@ PLDHashTable::Search(const void* aKey)
}
MOZ_ALWAYS_INLINE PLDHashEntryHdr*
PLDHashTable::Add(const void* aKey, const mozilla::fallible_t&)
PLDHashTable::Add(const void* aKey)
{
PLDHashNumber keyHash;
PLDHashEntryHdr* entry;
uint32_t capacity;
MOZ_ASSERT(mRecursionLevel == 0);
INCREMENT_RECURSION_LEVEL(this);
// Allocate the entry storage if it hasn't already been allocated.
if (!mEntryStore) {
uint32_t nbytes;
if (!SizeOfEntryStore(CapacityFromHashShift(), mEntrySize, &nbytes) ||
!(mEntryStore = (char*)malloc(nbytes))) {
METER(mStats.mAddFailures++);
entry = nullptr;
goto exit;
}
memset(mEntryStore, 0, nbytes);
}
/*
* If alpha is >= .75, grow or compress the table. If aKey is already
* in the table, we may grow once more than necessary, but only if we
* are on the edge of being overloaded.
*/
capacity = Capacity();
uint32_t capacity = Capacity();
if (mEntryCount + mRemovedCount >= MaxLoad(capacity)) {
/* Compress if a quarter or more of all entries are removed. */
int deltaLog2;
@ -585,7 +613,7 @@ PLDHashTable::Add(const void* aKey, const mozilla::fallible_t&)
* then skip it while growing the table and re-add it after.
*/
keyHash = ComputeKeyHash(aKey);
entry = mEntryStore ? SearchTable<ForAdd>(aKey, keyHash) : nullptr;
entry = SearchTable<ForAdd>(aKey, keyHash);
if (!ENTRY_IS_LIVE(entry)) {
/* Initialize the entry, indicating that it's no longer free. */
METER(mStats.mAddMisses++);
@ -618,9 +646,8 @@ PLDHashTable::Remove(const void* aKey)
MOZ_ASSERT(mRecursionLevel == 0);
INCREMENT_RECURSION_LEVEL(this);
PLDHashEntryHdr* entry =
mEntryStore ? SearchTable<ForSearchOrRemove>(aKey, ComputeKeyHash(aKey))
: nullptr;
PLDHashNumber keyHash = ComputeKeyHash(aKey);
PLDHashEntryHdr* entry = SearchTable<ForSearchOrRemove>(aKey, keyHash);
if (entry) {
/* Clear this entry and mark it as "removed". */
METER(mStats.mRemoveHits++);
@ -647,24 +674,10 @@ PL_DHashTableSearch(PLDHashTable* aTable, const void* aKey)
return aTable->Search(aKey);
}
PLDHashEntryHdr* PL_DHASH_FASTCALL
PL_DHashTableAdd(PLDHashTable* aTable, const void* aKey,
const fallible_t& aFallible)
{
return aTable->Add(aKey, aFallible);
}
PLDHashEntryHdr* PL_DHASH_FASTCALL
PL_DHashTableAdd(PLDHashTable* aTable, const void* aKey)
{
PLDHashEntryHdr* entry = PL_DHashTableAdd(aTable, aKey, fallible);
if (!entry) {
// There are two ways the Add could fail: (a) a entry storage reallocation
// failed, or (b) mOps->initEntry failed. The number we're reporting here
// is the one for case (a), which is the more likely of the two.
NS_ABORT_OOM(aTable->EntrySize() * aTable->EntryCount());
}
return entry;
return aTable->Add(aKey);
}
void PL_DHASH_FASTCALL
@ -723,7 +736,6 @@ PLDHashTable::Enumerate(PLDHashEnumerator aEtor, void* aArg)
}
}
MOZ_ASSERT_IF(capacity > 0, mEntryStore);
for (uint32_t e = 0; e < capacity; ++e) {
PLDHashEntryHdr* entry = (PLDHashEntryHdr*)entryAddr;
if (ENTRY_IS_LIVE(entry)) {
@ -803,10 +815,6 @@ PLDHashTable::SizeOfExcludingThis(
PLDHashSizeOfEntryExcludingThisFun aSizeOfEntryExcludingThis,
MallocSizeOf aMallocSizeOf, void* aArg /* = nullptr */) const
{
if (!mEntryStore) {
return 0;
}
size_t n = 0;
n += aMallocSizeOf(mEntryStore);
if (aSizeOfEntryExcludingThis) {
@ -919,7 +927,6 @@ PLDHashEntryHdr* PLDHashTable::Iterator::NextEntry()
// checks pass, then this method will only iterate through the full capacity
// once. If they fail, then this loop may end up returning the early entries
// more than once.
MOZ_ASSERT_IF(capacity > 0, mTable->mEntryStore);
for (uint32_t e = 0; e < capacity; ++e) {
PLDHashEntryHdr* entry = (PLDHashEntryHdr*)mEntryAddr;
@ -973,7 +980,6 @@ PLDHashTable::DumpMeter(PLDHashEnumerator aDump, FILE* aFp)
hash2 = 0;
sqsum = 0;
MOZ_ASSERT_IF(capacity > 0, mEntryStore);
for (uint32_t i = 0; i < capacity; i++) {
entry = (PLDHashEntryHdr*)entryAddr;
entryAddr += mEntrySize;

View File

@ -147,9 +147,6 @@ typedef size_t (*PLDHashSizeOfEntryExcludingThisFun)(
* on most architectures, and may be allocated on the stack or within another
* structure or class (see below for the Init and Finish functions to use).
*
* No entry storage is allocated until the first element is added. This means
* that empty hash tables are cheap, which is good because they are common.
*
* There used to be a long, math-heavy comment here about the merits of
* double hashing vs. chaining; it was removed in bug 1058335. In short, double
* hashing is more space-efficient unless the element size gets large (in which
@ -178,7 +175,8 @@ private:
uint32_t mEntryCount; /* number of entries in table */
uint32_t mRemovedCount; /* removed entry sentinels in table */
uint32_t mGeneration; /* entry storage generation number */
char* mEntryStore; /* entry storage; allocated lazily */
char* mEntryStore; /* entry storage */
#ifdef PL_DHASHMETER
struct PLDHashStats
{
@ -228,24 +226,25 @@ public:
/*
* Size in entries (gross, not net of free and removed sentinels) for table.
* This can be zero if no elements have been added yet, in which case the
* entry storage will not have yet been allocated.
* We store mHashShift rather than sizeLog2 to optimize the collision-free
* case in SearchTable.
*/
uint32_t Capacity() const
{
return mEntryStore ? CapacityFromHashShift() : 0;
return ((uint32_t)1 << (PL_DHASH_BITS - mHashShift));
}
uint32_t EntrySize() const { return mEntrySize; }
uint32_t EntryCount() const { return mEntryCount; }
uint32_t Generation() const { return mGeneration; }
void Init(const PLDHashTableOps* aOps, uint32_t aEntrySize, uint32_t aLength);
bool Init(const PLDHashTableOps* aOps, uint32_t aEntrySize,
const mozilla::fallible_t&, uint32_t aLength);
void Finish();
PLDHashEntryHdr* Search(const void* aKey);
PLDHashEntryHdr* Add(const void* aKey, const mozilla::fallible_t&);
PLDHashEntryHdr* Add(const void* aKey);
void Remove(const void* aKey);
void RawRemove(PLDHashEntryHdr* aEntry);
@ -298,13 +297,6 @@ public:
private:
static bool EntryIsFree(PLDHashEntryHdr* aEntry);
// We store mHashShift rather than sizeLog2 to optimize the collision-free
// case in SearchTable.
uint32_t CapacityFromHashShift() const
{
return ((uint32_t)1 << (PL_DHASH_BITS - mHashShift));
}
PLDHashNumber ComputeKeyHash(const void* aKey);
enum SearchReason { ForSearchOrRemove, ForAdd };
@ -434,22 +426,45 @@ void PL_DHashFreeStringKey(PLDHashTable* aTable, PLDHashEntryHdr* aEntry);
const PLDHashTableOps* PL_DHashGetStubOps(void);
/*
* Initialize aTable with aOps and aEntrySize. The table's initial capacity
* will be chosen such that |aLength| elements can be inserted without
* rehashing; if |aLength| is a power-of-two, this capacity will be |2*length|.
* However, because entry storage is allocated lazily, this initial capacity
* won't be relevant until the first element is added; prior to that the
* capacity will be zero.
* Dynamically allocate a new PLDHashTable, initialize it using
* PL_DHashTableInit, and return its address. Return null on allocation failure.
*/
PLDHashTable* PL_NewDHashTable(
const PLDHashTableOps* aOps, uint32_t aEntrySize,
uint32_t aLength = PL_DHASH_DEFAULT_INITIAL_LENGTH);
/*
* Free |aTable|'s entry storage and |aTable| itself (both via
* aTable->mOps->freeTable). Use this function to destroy a PLDHashTable that
* was allocated on the heap via PL_NewDHashTable().
*/
void PL_DHashTableDestroy(PLDHashTable* aTable);
/*
* Initialize aTable with aOps, aEntrySize, and aCapacity. The table's initial
* capacity will be chosen such that |aLength| elements can be inserted without
* rehashing. If |aLength| is a power-of-two, this capacity will be |2*length|.
*
* This function will crash if |aEntrySize| and/or |aLength| are too large.
* This function will crash if it can't allocate enough memory, or if
* |aEntrySize| and/or |aLength| are too large.
*/
void PL_DHashTableInit(
PLDHashTable* aTable, const PLDHashTableOps* aOps,
uint32_t aEntrySize, uint32_t aLength = PL_DHASH_DEFAULT_INITIAL_LENGTH);
/*
* Clear |aTable|'s elements (via aTable->mOps->clearEntry) and free its entry
* storage, if has any.
* Initialize aTable. This is the same as PL_DHashTableInit, except that it
* returns a boolean indicating success, rather than crashing on failure.
*/
MOZ_WARN_UNUSED_RESULT bool PL_DHashTableInit(
PLDHashTable* aTable, const PLDHashTableOps* aOps,
uint32_t aEntrySize, const mozilla::fallible_t&,
uint32_t aLength = PL_DHASH_DEFAULT_INITIAL_LENGTH);
/*
* Free |aTable|'s entry storage (via aTable->mOps->freeTable). Use this
* function to destroy a PLDHashTable that is allocated on the stack or in
* static memory and was created via PL_DHashTableInit().
*/
void PL_DHashTableFinish(PLDHashTable* aTable);
@ -467,7 +482,7 @@ PL_DHashTableSearch(PLDHashTable* aTable, const void* aKey);
/*
* To add an entry identified by key to table, call:
*
* entry = PL_DHashTableAdd(table, key, mozilla::fallible);
* entry = PL_DHashTableAdd(table, key);
*
* If entry is null upon return, then either (a) the table is severely
* overloaded and memory can't be allocated for entry storage, or (b)
@ -481,14 +496,6 @@ PL_DHashTableSearch(PLDHashTable* aTable, const void* aKey);
* optional initEntry hook was not used).
*/
PLDHashEntryHdr* PL_DHASH_FASTCALL
PL_DHashTableAdd(PLDHashTable* aTable, const void* aKey,
const mozilla::fallible_t&);
/*
* This is like the other PL_DHashTableAdd() function, but infallible, and so
* never returns null.
*/
PLDHashEntryHdr* PL_DHASH_FASTCALL
PL_DHashTableAdd(PLDHashTable* aTable, const void* aKey);
/*

View File

@ -7,8 +7,10 @@
#include <stdio.h>
#include "pldhash.h"
// This test mostly focuses on edge cases. But more coverage of normal
// operations wouldn't be a bad thing.
// pldhash is very widely used and so any basic bugs in it are likely to be
// exposed through normal usage. Therefore, this test currently focusses on
// extreme cases relating to maximum table capacity and potential overflows,
// which are unlikely to be hit during normal execution.
namespace TestPLDHash {
@ -22,20 +24,12 @@ static bool test_pldhash_Init_capacity_ok()
}
// Try the largest allowed capacity. With PL_DHASH_MAX_CAPACITY==1<<26, this
// would allocate (if we added an element) 0.5GB of entry store on 32-bit
// platforms and 1GB on 64-bit platforms.
//
// Ideally we'd also try (a) a too-large capacity, and (b) a large capacity
// combined with a large entry size that when multipled overflow. But those
// cases would cause the test to abort immediately.
//
// Furthermore, ideally we'd also try a large-but-ok capacity that almost but
// doesn't quite overflow, but that would result in allocating just under 4GB
// of entry storage. That's very likely to fail on 32-bit platforms, so such
// a test wouldn't be reliable.
//
PL_DHashTableInit(&t, PL_DHashGetStubOps(), sizeof(PLDHashEntryStub),
PL_DHASH_MAX_INITIAL_LENGTH);
// will allocate 0.5GB of entry store on 32-bit platforms and 1GB on 64-bit
// platforms.
if (!PL_DHashTableInit(&t, PL_DHashGetStubOps(), sizeof(PLDHashEntryStub),
mozilla::fallible, PL_DHASH_MAX_INITIAL_LENGTH)) {
return false;
}
// Check that Init() sets |ops|.
if (!t.IsInitialized()) {
@ -51,64 +45,64 @@ static bool test_pldhash_Init_capacity_ok()
return true;
}
static bool test_pldhash_lazy_storage()
static bool test_pldhash_Init_capacity_too_large()
{
PLDHashTable t;
PL_DHashTableInit(&t, PL_DHashGetStubOps(), sizeof(PLDHashEntryStub));
// PLDHashTable allocates entry storage lazily. Check that all the non-add
// operations work appropriately when the table is empty and the storage
// hasn't yet been allocated.
if (!t.IsInitialized()) {
// Check that the constructor nulls |ops|.
if (t.IsInitialized()) {
return false;
}
if (t.Capacity() != 0) {
// Try the smallest too-large capacity.
if (PL_DHashTableInit(&t, PL_DHashGetStubOps(),
sizeof(PLDHashEntryStub),
mozilla::fallible,
PL_DHASH_MAX_INITIAL_LENGTH + 1)) {
return false; // it succeeded!?
}
// Don't call PL_DHashTableFinish() here; it's not safe after Init() failure.
// Check that |ops| is still null.
if (t.IsInitialized()) {
return false;
}
if (t.EntrySize() != sizeof(PLDHashEntryStub)) {
return true;
}
static bool test_pldhash_Init_overflow()
{
PLDHashTable t;
// Check that the constructor nulls |ops|.
if (t.IsInitialized()) {
return false;
}
if (t.EntryCount() != 0) {
// Try an acceptable capacity, but one whose byte size overflows uint32_t.
//
// Ideally we'd also try a large-but-ok capacity that almost but doesn't
// quite overflow, but that would result in allocating just under 4GB of
// entry storage. That's very likely to fail on 32-bit platforms, so such a
// test wouldn't be reliable.
struct OneKBEntry {
PLDHashEntryHdr hdr;
char buf[1024 - sizeof(PLDHashEntryHdr)];
};
if (PL_DHashTableInit(&t, PL_DHashGetStubOps(), sizeof(OneKBEntry),
mozilla::fallible, PL_DHASH_MAX_INITIAL_LENGTH)) {
return false; // it succeeded!?
}
// Don't call PL_DHashTableFinish() here; it's not safe after Init() failure.
// Check that |ops| is still null.
if (t.IsInitialized()) {
return false;
}
if (t.Generation() != 0) {
return false;
}
if (PL_DHashTableSearch(&t, (const void*)1)) {
return false; // search succeeded?
}
// No result to check here, but call it to make sure it doesn't crash.
PL_DHashTableRemove(&t, (const void*)2);
// Using a null |enumerator| should be fine because it shouldn't be called
// for an empty table.
PLDHashEnumerator enumerator = nullptr;
if (PL_DHashTableEnumerate(&t, enumerator, nullptr) != 0) {
return false; // enumeration count is non-zero?
}
for (PLDHashTable::Iterator iter = t.Iterate();
iter.HasMoreEntries();
iter.NextEntry()) {
return false; // shouldn't hit this on an empty table
}
// Using a null |mallocSizeOf| should be fine because it shouldn't be called
// for an empty table.
mozilla::MallocSizeOf mallocSizeOf = nullptr;
if (PL_DHashTableSizeOfExcludingThis(&t, nullptr, mallocSizeOf) != 0) {
return false; // size is non-zero?
}
PL_DHashTableFinish(&t);
return true;
}
@ -133,13 +127,18 @@ static bool test_pldhash_grow_to_max_capacity()
nullptr
};
PLDHashTable t;
PL_DHashTableInit(&t, &ops, sizeof(PLDHashEntryStub), 128);
// This is infallible.
PLDHashTable* t = PL_NewDHashTable(&ops, sizeof(PLDHashEntryStub), 128);
// Check that New() sets |t->ops|.
if (!t->IsInitialized()) {
return false;
}
// Keep inserting elements until failure occurs because the table is full.
size_t numInserted = 0;
while (true) {
if (!PL_DHashTableAdd(&t, (const void*)numInserted, mozilla::fallible)) {
if (!PL_DHashTableAdd(t, (const void*)numInserted)) {
break;
}
numInserted++;
@ -151,7 +150,7 @@ static bool test_pldhash_grow_to_max_capacity()
return false;
}
PL_DHashTableFinish(&t);
PL_DHashTableDestroy(t);
return true;
}
@ -167,7 +166,8 @@ static const struct Test {
TestFunc func;
} tests[] = {
DECL_TEST(test_pldhash_Init_capacity_ok),
DECL_TEST(test_pldhash_lazy_storage),
DECL_TEST(test_pldhash_Init_capacity_too_large),
DECL_TEST(test_pldhash_Init_overflow),
// See bug 931062, we skip this test on Android due to OOM.
#ifndef MOZ_WIDGET_ANDROID
DECL_TEST(test_pldhash_grow_to_max_capacity),