Backed out changeset 3deeb3c83c77

Chris Jones 2009-05-02 21:13:57 -07:00
parent d2b61202f4
commit 0023bdaa7d
14 changed files with 587 additions and 2165 deletions

View File

@@ -21,7 +21,6 @@
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Chris Jones <jones.chris.g@gmail.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
@@ -39,170 +38,466 @@
#ifdef DEBUG
#define MOZILLA_INTERNAL_API
#include "nsPrintfCString.h"
#undef MOZILLA_INTERNAL_API
#include "nsAutoPtr.h"
#include "plhash.h"
#include "prprf.h"
#include "prlock.h"
#include "prthread.h"
#include "nsDebug.h"
#include "nsVoidArray.h"
#include "mozilla/BlockingResourceBase.h"
#include "mozilla/CondVar.h"
#include "mozilla/Monitor.h"
#include "mozilla/Mutex.h"
#ifdef NS_TRACE_MALLOC
# include <stdio.h>
# include "nsTraceMalloc.h"
#endif
namespace mozilla {
//
// BlockingResourceBase implementation
//
static PRUintn ResourceAcqnChainFrontTPI = (PRUintn)-1;
static PLHashTable* OrderTable = 0;
static PRLock* OrderTableLock = 0;
static PRLock* ResourceMutex = 0;
// needs to be kept in sync with BlockingResourceType
const char* const BlockingResourceBase::kResourceTypeName[] =
{
// Needs to be kept in sync with BlockingResourceType.
static char const *const kResourceTypeNames[] = {
"Mutex", "Monitor", "CondVar"
};
// static members
PRCallOnceType BlockingResourceBase::sCallOnce;
PRUintn BlockingResourceBase::sResourceAcqnChainFrontTPI = (PRUintn)-1;
BlockingResourceBase::DDT BlockingResourceBase::sDeadlockDetector;
struct nsNamedVector : public nsVoidArray {
const char* mName;
bool
BlockingResourceBase::DeadlockDetectorEntry::Print(
const DDT::ResourceAcquisition& aFirstSeen,
nsACString& out,
bool aPrintFirstSeenCx) const
#ifdef NS_TRACE_MALLOC
// Callsites for the inner locks/monitors stored in our base nsVoidArray.
// This array parallels our base nsVoidArray.
nsVoidArray mInnerSites;
#endif
nsNamedVector(const char* name = 0, PRUint32 initialSize = 0)
: nsVoidArray(initialSize),
mName(name)
{
}
};
static void *
_hash_alloc_table(void *pool, PRSize size)
{
CallStack lastAcquisition = mAcquisitionContext; // RACY, but benign
bool maybeCurrentlyAcquired = (CallStack::kNone != lastAcquisition);
CallStack printAcquisition =
(aPrintFirstSeenCx || !maybeCurrentlyAcquired) ?
aFirstSeen.mCallContext : lastAcquisition;
return operator new(size);
}
fprintf(stderr, "--- %s : %s",
kResourceTypeName[mType], mName);
out += nsPrintfCString(256, "%s : %s",
BlockingResourceBase::kResourceTypeName[mType],
mName);
if (maybeCurrentlyAcquired) {
fputs(" (currently acquired)", stderr);
out += " (currently acquired)\n";
static void
_hash_free_table(void *pool, void *item)
{
operator delete(item);
}
static PLHashEntry *
_hash_alloc_entry(void *pool, const void *key)
{
return new PLHashEntry;
}
/*
* Because monitors and locks may be associated with a mozilla::BlockingResourceBase,
* without having had their associated nsNamedVector created explicitly in
* nsAutoMonitor::NewMonitor/DeleteMonitor, we need to provide a freeEntry
* PLHashTable hook, to avoid leaking nsNamedVectors which are replaced by
* nsAutoMonitor::NewMonitor.
*
* There is still a problem with the OrderTable containing orphaned
* nsNamedVector entries, for manually created locks wrapped by nsAutoLocks.
* (there should be no manually created monitors wrapped by nsAutoMonitors:
* you should use nsAutoMonitor::NewMonitor and nsAutoMonitor::DestroyMonitor
* instead of PR_NewMonitor and PR_DestroyMonitor). These lock vectors don't
* strictly leak, as they are killed on shutdown, but there are unnecessary
* named vectors in the hash table that outlive their associated locks.
*
* XXX so we should have nsLock, nsMonitor, etc. and strongly type their
* XXX nsAutoXXX counterparts to take only the non-auto types as inputs
*/
static void
_hash_free_entry(void *pool, PLHashEntry *entry, PRUintn flag)
{
nsNamedVector* vec = (nsNamedVector*) entry->value;
if (vec) {
entry->value = 0;
delete vec;
}
if (flag == HT_FREE_ENTRY)
delete entry;
}
static const PLHashAllocOps _hash_alloc_ops = {
_hash_alloc_table, _hash_free_table,
_hash_alloc_entry, _hash_free_entry
};
static PRIntn
_purge_one(PLHashEntry* he, PRIntn cnt, void* arg)
{
nsNamedVector* vec = (nsNamedVector*) he->value;
if (he->key == arg)
return HT_ENUMERATE_REMOVE;
vec->RemoveElement(arg);
return HT_ENUMERATE_NEXT;
}
static void
OnResourceRecycle(void* aResource)
{
NS_PRECONDITION(OrderTable, "should be inited!");
PR_Lock(OrderTableLock);
PL_HashTableEnumerateEntries(OrderTable, _purge_one, aResource);
PR_Unlock(OrderTableLock);
}
static PLHashNumber
_hash_pointer(const void* key)
{
return PLHashNumber(NS_PTR_TO_INT32(key)) >> 2;
}
// TODO just included for function below
#include "prcmon.h"
// Must be single-threaded here, early in primordial thread.
static void InitAutoLockStatics()
{
(void) PR_NewThreadPrivateIndex(&ResourceAcqnChainFrontTPI, 0);
OrderTable = PL_NewHashTable(64, _hash_pointer,
PL_CompareValues, PL_CompareValues,
&_hash_alloc_ops, 0);
if (OrderTable && !(OrderTableLock = PR_NewLock())) {
PL_HashTableDestroy(OrderTable);
OrderTable = 0;
}
fputs(" calling context\n", stderr);
printAcquisition.Print(stderr);
if (OrderTable && !(ResourceMutex = PR_NewLock())) {
PL_HashTableDestroy(OrderTable);
OrderTable = 0;
}
return maybeCurrentlyAcquired;
// TODO unnecessary after API changes
PR_CSetOnMonitorRecycle(OnResourceRecycle);
}
BlockingResourceBase::BlockingResourceBase(
const char* aName,
BlockingResourceBase::BlockingResourceType aType)
/* TODO re-enable this after API change. with backwards compatibility
enabled, it conflicts with the impl in nsAutoLock.cpp. Not freeing
this stuff will "leak" memory that is cleaned up when the process
exits. */
#if 0
void _FreeAutoLockStatics()
{
// PR_CallOnce guarantees that InitStatics is called in a
// thread-safe way
if (PR_SUCCESS != PR_CallOnce(&sCallOnce, InitStatics))
NS_RUNTIMEABORT("can't initialize blocking resource static members");
PLHashTable* table = OrderTable;
if (!table) return;
mDDEntry = new BlockingResourceBase::DeadlockDetectorEntry(aName, aType);
if (!mDDEntry)
NS_RUNTIMEABORT("can't allocated deadlock detector entry");
// Called at shutdown, so we don't need to lock.
// TODO unnecessary after API changes
PR_CSetOnMonitorRecycle(0);
PR_DestroyLock(OrderTableLock);
OrderTableLock = 0;
PL_HashTableDestroy(table);
OrderTable = 0;
}
#endif
mChainPrev = 0;
sDeadlockDetector.Add(mDDEntry);
static nsNamedVector* GetVector(PLHashTable* table, const void* key)
{
PLHashNumber hash = _hash_pointer(key);
PLHashEntry** hep = PL_HashTableRawLookup(table, hash, key);
PLHashEntry* he = *hep;
if (he)
return (nsNamedVector*) he->value;
nsNamedVector* vec = new nsNamedVector();
if (vec)
PL_HashTableRawAdd(table, hep, hash, key, vec);
return vec;
}
static void OnResourceCreated(const void* key, const char* name )
{
NS_PRECONDITION(key && OrderTable, "should be inited!");
nsNamedVector* value = new nsNamedVector(name);
if (value) {
PR_Lock(OrderTableLock);
PL_HashTableAdd(OrderTable, key, value);
PR_Unlock(OrderTableLock);
}
}
// We maintain an acyclic graph in OrderTable, so recursion can't diverge.
static PRBool Reachable(PLHashTable* table, const void* goal, const void* start)
{
PR_ASSERT(goal);
PR_ASSERT(start);
nsNamedVector* vec = GetVector(table, start);
for (PRUint32 i = 0, n = vec->Count(); i < n; i++) {
void* aResource = vec->ElementAt(i);
if (aResource == goal || Reachable(table, goal, aResource))
return PR_TRUE;
}
return PR_FALSE;
}
static PRBool WellOrdered(const void* aResource1, const void* aResource2,
const void *aCallsite2, PRUint32* aIndex2p,
nsNamedVector** aVec1p, nsNamedVector** aVec2p)
{
NS_ASSERTION(OrderTable && OrderTableLock, "supposed to be initialized!");
PRBool rv = PR_TRUE;
PLHashTable* table = OrderTable;
PR_Lock(OrderTableLock);
// Check whether we've already asserted (addr1 < addr2).
nsNamedVector* vec1 = GetVector(table, aResource1);
if (vec1) {
PRUint32 i, n;
for (i = 0, n = vec1->Count(); i < n; i++)
if (vec1->ElementAt(i) == aResource2)
break;
if (i == n) {
// Now check for (addr2 < addr1) and return false if so.
nsNamedVector* vec2 = GetVector(table, aResource2);
if (vec2) {
for (i = 0, n = vec2->Count(); i < n; i++) {
void* aResourcei = vec2->ElementAt(i);
PR_ASSERT(aResourcei);
if (aResourcei == aResource1
|| Reachable(table, aResource1, aResourcei)) {
*aIndex2p = i;
*aVec1p = vec1;
*aVec2p = vec2;
rv = PR_FALSE;
break;
}
}
if (rv) {
// Insert (addr1 < addr2) into the order table.
// XXX fix plvector/nsVector to use const void*
vec1->AppendElement((void*) aResource2);
#ifdef NS_TRACE_MALLOC
vec1->mInnerSites.AppendElement((void*) aCallsite2);
#endif
}
}
}
}
// all control flow must pass through this point
//unlock:
PR_Unlock(OrderTableLock);
return rv;
}
//
// BlockingResourceBase implementation
//
// Note that in static/member functions, user code abiding by the
// BlockingResourceBase API's contract gives us the following guarantees:
//
// - mResource is not NULL
// - mResource points to a valid underlying resource
// - mResource is NOT shared with any other mozilla::BlockingResourceBase
//
// If user code violates the API contract, the behavior of the following
// functions is undefined. So no error checking is strictly necessary.
//
// That said, assertions of the above facts are sprinkled throughout the
// following code, to check for errors in user code.
//
namespace mozilla {
void
BlockingResourceBase::Init(void* aResource,
const char* aName,
BlockingResourceBase::BlockingResourceType aType)
{
if (NS_UNLIKELY(ResourceAcqnChainFrontTPI == PRUintn(-1)))
InitAutoLockStatics();
NS_PRECONDITION(aResource, "null resource");
OnResourceCreated(aResource, aName);
mResource = aResource;
mChainPrev = 0;
mType = aType;
}
BlockingResourceBase::~BlockingResourceBase()
{
// we don't check for really obviously bad things like freeing
// Mutexes while they're still locked. it is assumed that the
// base class, or its underlying primitive, will check for such
// stupid mistakes.
mChainPrev = 0; // racy only for stupidly buggy client code
mDDEntry = 0; // owned by deadlock detector
NS_PRECONDITION(mResource, "bad subclass impl, or double free");
// we don't expect to find this resource in the acquisition chain.
// it should have been Release()'n as many times as it has been
// Acquire()ed, before this destructor was called.
// additionally, WE are the only ones who can create underlying
// resources, so there should be one underlying instance per
// BlockingResourceBase. thus we don't need to do any cleanup unless
// client code has done something seriously and obviously wrong.
// that, plus the potential performance impact of full cleanup, mean
// that it has been removed for now.
OnResourceRecycle(mResource);
mResource = 0;
mChainPrev = 0;
}
void
BlockingResourceBase::CheckAcquire(const CallStack& aCallContext)
void BlockingResourceBase::Acquire()
{
if (eCondVar == mDDEntry->mType) {
NS_NOTYETIMPLEMENTED(
"FIXME bug 456272: annots. to allow CheckAcquire()ing condvars");
NS_PRECONDITION(mResource, "bad base class impl or double free");
PR_ASSERT_CURRENT_THREAD_OWNS_LOCK(ResourceMutex);
if (mType == eCondVar) {
NS_NOTYETIMPLEMENTED("TODO: figure out how to Acquire() condvars");
return;
}
BlockingResourceBase* chainFront = ResourceChainFront();
nsAutoPtr<DDT::ResourceAcquisitionArray> cycle(
sDeadlockDetector.CheckAcquisition(
chainFront ? chainFront->mDDEntry : 0, mDDEntry,
aCallContext));
if (!cycle)
return;
BlockingResourceBase* chainFront =
(BlockingResourceBase*)
PR_GetThreadPrivate(ResourceAcqnChainFrontTPI);
fputs("###!!! ERROR: Potential deadlock detected:\n", stderr);
nsCAutoString out("Potential deadlock detected:\n");
bool maybeImminent = PrintCycle(cycle, out);
if (eMonitor == mType) {
// reentering a monitor: the old implementation ensured that this
// was only done immediately after a previous entry. little
// tricky here since we can't rely on the monitor already having
// been entered, as AutoMonitor used to let us do (sort of)
if (maybeImminent) {
fputs("\n###!!! Deadlock may happen NOW!\n\n", stderr);
out.Append("\n###!!! Deadlock may happen NOW!\n\n");
} else {
fputs("\nDeadlock may happen for some other execution\n\n",
stderr);
out.Append("\nDeadlock may happen for some other execution\n\n");
if (this == chainFront) {
// acceptable reentry, and nothing to update.
return;
}
else if (chainFront) {
BlockingResourceBase* br = chainFront->mChainPrev;
while (br) {
if (br == this) {
NS_ASSERTION(br != this,
"reentered monitor after acquiring "
"other resources");
// have to ignore this allocation and return. even if
// we cleaned up the old acquisition of the monitor and
// put the new one at the front of the chain, we'd
// almost certainly set off the deadlock detector.
// this error is interesting enough.
return;
}
br = br->mChainPrev;
}
}
}
// XXX can customize behavior on whether we /think/ deadlock is
// XXX about to happen. for example:
// XXX if (maybeImminent)
// NS_RUNTIMEABORT(out.get());
NS_ERROR(out.get());
const void* callContext =
#ifdef NS_TRACE_MALLOC
(const void*)NS_TraceMallocGetStackTrace();
#else
nsnull
#endif
;
if (eMutex == mType
&& chainFront == this
&& !(chainFront->mChainPrev)) {
// corner case: acquire only a single lock, then try to reacquire
// the lock. there's no entry in the order table for the first
// acquisition, so we might not detect this (immediate and real!)
// deadlock.
//
// XXX need to remove this corner case, and get a stack trace for
// first acquisition, and get the lock's name
char buf[128];
PR_snprintf(buf, sizeof buf,
"Imminent deadlock between Mutex@%p and itself!",
chainFront->mResource);
#ifdef NS_TRACE_MALLOC
fputs("\nDeadlock will occur here:\n", stderr);
NS_TraceMallocPrintStackTrace(
stderr, (nsTMStackTraceIDStruct*)callContext);
putc('\n', stderr);
#endif
NS_ERROR(buf);
return; // what else to do? we're screwed
}
nsNamedVector* vec1;
nsNamedVector* vec2;
PRUint32 i2;
if (!chainFront
|| WellOrdered(chainFront->mResource, mResource,
callContext,
&i2,
&vec1, &vec2)) {
mChainPrev = chainFront;
PR_SetThreadPrivate(ResourceAcqnChainFrontTPI, this);
}
else {
char buf[128];
PR_snprintf(buf, sizeof buf,
"Potential deadlock between %s%s@%p and %s%s@%p",
vec1->mName ? vec1->mName : "",
kResourceTypeNames[chainFront->mType],
chainFront->mResource,
vec2->mName ? vec2->mName : "",
kResourceTypeNames[mType],
mResource);
#ifdef NS_TRACE_MALLOC
fprintf(stderr, "\n*** %s\n\nStack of current acquisition:\n", buf);
NS_TraceMallocPrintStackTrace(
stderr, NS_TraceMallocGetStackTrace());
fputs("\nStack of conflicting acquisition:\n", stderr);
NS_TraceMallocPrintStackTrace(
stderr, (nsTMStackTraceIDStruct *)vec2->mInnerSites.ElementAt(i2));
putc('\n', stderr);
#endif
NS_ERROR(buf);
// because we know the latest acquisition set off the deadlock
// detector, it's debatable whether we should append it to the
// acquisition chain; it might just trigger later errors that
// have already been reported here. I choose not to add it.
}
}
void
BlockingResourceBase::Acquire(const CallStack& aCallContext)
void BlockingResourceBase::Release()
{
if (eCondVar == mDDEntry->mType) {
NS_NOTYETIMPLEMENTED(
"FIXME bug 456272: annots. to allow Acquire()ing condvars");
NS_PRECONDITION(mResource, "bad base class impl or double free");
PR_ASSERT_CURRENT_THREAD_OWNS_LOCK(ResourceMutex);
if (mType == eCondVar) {
NS_NOTYETIMPLEMENTED("TODO: figure out how to Release() condvars");
return;
}
NS_ASSERTION(mDDEntry->mAcquisitionContext == CallStack::kNone,
"reacquiring already acquired resource");
ResourceChainAppend(ResourceChainFront());
mDDEntry->mAcquisitionContext = aCallContext;
}
void
BlockingResourceBase::Release()
{
if (eCondVar == mDDEntry->mType) {
NS_NOTYETIMPLEMENTED(
"FIXME bug 456272: annots. to allow Release()ing condvars");
return;
}
BlockingResourceBase* chainFront = ResourceChainFront();
NS_ASSERTION(chainFront
&& CallStack::kNone != mDDEntry->mAcquisitionContext,
BlockingResourceBase* chainFront =
(BlockingResourceBase*)
PR_GetThreadPrivate(ResourceAcqnChainFrontTPI);
NS_ASSERTION(chainFront,
"Release()ing something that hasn't been Acquire()ed");
if (chainFront == this) {
ResourceChainRemove();
if (NS_LIKELY(chainFront == this)) {
PR_SetThreadPrivate(ResourceAcqnChainFrontTPI, mChainPrev);
}
else {
// not an error, but makes code hard to reason about.
NS_WARNING("Resource acquired at calling context\n");
mDDEntry->mAcquisitionContext.Print(stderr);
NS_WARNING("\nis being released in non-LIFO order; why?");
// remove this resource from wherever it lives in the chain
NS_WARNING("you're releasing a resource in non-LIFO order; why?");
// we walk backwards in order of acquisition:
// (1) ...node<-prev<-curr...
// / /
@@ -214,170 +509,54 @@ BlockingResourceBase::Release()
if (prev == this)
curr->mChainPrev = prev->mChainPrev;
}
mDDEntry->mAcquisitionContext = CallStack::kNone;
}
bool
BlockingResourceBase::PrintCycle(const DDT::ResourceAcquisitionArray* aCycle,
nsACString& out)
{
NS_ASSERTION(aCycle->Length() > 1, "need > 1 element for cycle!");
bool maybeImminent = true;
fputs("=== Cyclical dependency starts at\n", stderr);
out += "Cyclical dependency starts at\n";
const DDT::ResourceAcquisition res = aCycle->ElementAt(0);
maybeImminent &= res.mResource->Print(res, out);
DDT::ResourceAcquisitionArray::index_type i;
DDT::ResourceAcquisitionArray::size_type len = aCycle->Length();
const DDT::ResourceAcquisition* it = 1 + aCycle->Elements();
for (i = 1; i < len - 1; ++i, ++it) {
fputs("\n--- Next dependency:\n", stderr);
out += "Next dependency:\n";
maybeImminent &= it->mResource->Print(*it, out);
}
fputs("\n=== Cycle completed at\n", stderr);
out += "Cycle completed at\n";
it->mResource->Print(*it, out, true);
return maybeImminent;
}
//
// Debug implementation of Mutex
void
Mutex::Lock()
{
CallStack callContext = CallStack();
PR_Lock(ResourceMutex);
Acquire();
PR_Unlock(ResourceMutex);
CheckAcquire(callContext);
PR_Lock(mLock);
Acquire(callContext); // protected by mLock
}
void
Mutex::Unlock()
{
Release(); // protected by mLock
PRStatus status = PR_Unlock(mLock);
NS_ASSERTION(PR_SUCCESS == status, "bad Mutex::Unlock()");
}
NS_ASSERTION(PR_SUCCESS == status, "problem Unlock()ing");
PR_Lock(ResourceMutex);
Release();
PR_Unlock(ResourceMutex);
}
//
// Debug implementation of Monitor
void
Monitor::Enter()
void
Monitor::Enter()
{
BlockingResourceBase* chainFront = ResourceChainFront();
PR_Lock(ResourceMutex);
++mEntryCount;
Acquire();
PR_Unlock(ResourceMutex);
// the code below implements monitor reentrancy semantics
if (this == chainFront) {
// immediately re-entered the monitor: acceptable
PR_EnterMonitor(mMonitor);
++mEntryCount;
return;
}
CallStack callContext = CallStack();
// this is sort of a hack around not recording the thread that
// owns this monitor
if (chainFront) {
for (BlockingResourceBase* br = ResourceChainPrev(chainFront);
br;
br = ResourceChainPrev(br)) {
if (br == this) {
NS_WARNING(
"Re-entering Monitor after acquiring other resources.\n"
"At calling context\n");
GetAcquisitionContext().Print(stderr);
// show the caller why this is potentially bad
CheckAcquire(callContext);
PR_EnterMonitor(mMonitor);
++mEntryCount;
return;
}
}
}
CheckAcquire(callContext);
PR_EnterMonitor(mMonitor);
NS_ASSERTION(0 == mEntryCount, "Monitor isn't free!");
Acquire(callContext); // protected by mMonitor
mEntryCount = 1;
}
void
Monitor::Exit()
{
if (0 == --mEntryCount)
Release(); // protected by mMonitor
PRStatus status = PR_ExitMonitor(mMonitor);
NS_ASSERTION(PR_SUCCESS == status, "bad Monitor::Exit()");
}
NS_ASSERTION(PR_SUCCESS == status, "bad ExitMonitor()");
nsresult
Monitor::Wait(PRIntervalTime interval)
{
AssertCurrentThreadIn();
// save monitor state and reset it to empty
PRInt32 savedEntryCount = mEntryCount;
CallStack savedAcquisitionContext = GetAcquisitionContext();
BlockingResourceBase* savedChainPrev = mChainPrev;
mEntryCount = 0;
SetAcquisitionContext(CallStack::kNone);
mChainPrev = 0;
// give up the monitor until we're back from Wait()
nsresult rv =
PR_Wait(mMonitor, interval) == PR_SUCCESS ?
NS_OK : NS_ERROR_FAILURE;
// restore saved state
mEntryCount = savedEntryCount;
SetAcquisitionContext(savedAcquisitionContext);
mChainPrev = savedChainPrev;
return rv;
}
//
// Debug implementation of CondVar
nsresult
CondVar::Wait(PRIntervalTime interval)
{
AssertCurrentThreadOwnsMutex();
// save mutex state and reset to empty
CallStack savedAcquisitionContext = mLock->GetAcquisitionContext();
BlockingResourceBase* savedChainPrev = mLock->mChainPrev;
mLock->SetAcquisitionContext(CallStack::kNone);
mLock->mChainPrev = 0;
// give up mutex until we're back from Wait()
nsresult rv =
PR_WaitCondVar(mCvar, interval) == PR_SUCCESS ?
NS_OK : NS_ERROR_FAILURE;
// restore saved state
mLock->SetAcquisitionContext(savedAcquisitionContext);
mLock->mChainPrev = savedChainPrev;
return rv;
PR_Lock(ResourceMutex);
if (--mEntryCount == 0)
Release();
PR_Unlock(ResourceMutex);
}
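
For context, here is a minimal sketch of the kind of lock-order inversion that both implementations in this file are meant to flag, assuming a DEBUG build. Only mozilla::Mutex and MutexAutoLock come from this patch; the lock names and the two functions are hypothetical.

#include "mozilla/Mutex.h"

static mozilla::Mutex sCacheLock("hypothetical-cache-lock");
static mozilla::Mutex sIOLock("hypothetical-io-lock");

void ThreadA()
{
    // Establishes the ordering "cache before io".
    mozilla::MutexAutoLock cache(sCacheLock);
    mozilla::MutexAutoLock io(sIOLock);
    // ... use the cache, then do I/O ...
}

void ThreadB()
{
    // Acquires in the opposite order. Once both orders have been observed,
    // a DEBUG build reports a potential deadlock between the two mutexes,
    // even if the threads never actually race at run time.
    mozilla::MutexAutoLock io(sIOLock);
    mozilla::MutexAutoLock cache(sCacheLock);
}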

View File

@@ -21,7 +21,6 @@
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Chris Jones <jones.chris.g@gmail.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
@@ -41,21 +40,10 @@
#ifndef mozilla_BlockingResourceBase_h
#define mozilla_BlockingResourceBase_h
#include "prinit.h"
#include "prlock.h"
#include "prlog.h"
#include "prthread.h"
#include "nscore.h"
#include "nsDebug.h"
#include "prlog.h"
#include "nsError.h"
#define MOZILLA_INTERNAL_API
#include "nsString.h"
#undef MOZILLA_INTERNAL_API
#ifdef DEBUG
#include "mozilla/DeadlockDetector.h" // big header file
#endif
#include "nsDebug.h"
//
// This header is not meant to be included by client code.
@@ -65,291 +53,128 @@ namespace mozilla {
/**
* BlockingResourceBase
* Base class of resources that might block clients trying to acquire them.
* Does debugging and deadlock detection in DEBUG builds.
* Aids in debugging, deadlock detection, etc.
**/
class NS_COM_GLUE BlockingResourceBase
{
public:
protected:
BlockingResourceBase() {
}
// Needs to be kept in sync with kResourceTypeNames.
enum BlockingResourceType { eMutex, eMonitor, eCondVar };
/**
* kResourceTypeName
* Human-readable version of BlockingResourceType enum.
*/
static const char* const kResourceTypeName[];
#ifdef DEBUG
private:
// forward declaration for the following typedef
struct DeadlockDetectorEntry;
// ``DDT'' = ``Deadlock Detector Type''
typedef DeadlockDetector<DeadlockDetectorEntry> DDT;
/**
* DeadlockDetectorEntry
* We free BlockingResources, but we never free entries in the
* deadlock detector. This struct outlives its BlockingResource
* and preserves all the state needed to print subsequent
* error messages.
*
* These objects are owned by the deadlock detector.
*/
struct DeadlockDetectorEntry
{
DeadlockDetectorEntry(const char* aName,
BlockingResourceType aType) :
mName(aName),
mType(aType),
mAcquisitionContext(CallStack::kNone)
{
}
/**
* Print
* Write a description of this blocking resource to |out|. If
* the resource appears to be currently acquired, the current
* acquisition context is printed and true is returned.
* Otherwise, we print the context from |aFirstSeen|, the
* first acquisition from which the code calling |Print()|
* became interested in us, and return false. |Print()| can
* be forced to print the context from |aFirstSeen| regardless
* by passing |aPrintFirstSeenCx=true|.
*
* *NOT* thread safe. Reads |mAcquisitionContext| without
* synchronization, but this will not cause correctness
* problems.
*
* FIXME bug 456272: hack alert: because we can't write call
* contexts into strings, all info is written to stderr, but
* only some info is written into |out|
*/
bool Print(const DDT::ResourceAcquisition& aFirstSeen,
nsACString& out,
bool aPrintFirstSeenCx=false) const;
/**
* mName
* A descriptive name for this resource. Used in error
* messages etc.
*/
const char* mName;
/**
* mType
* The more specific type of this resource. Used to implement
* special semantics (e.g., reentrancy of monitors).
**/
BlockingResourceType mType;
/**
* mAcquisitionContext
* The calling context from which this resource was acquired, or
* |CallStack::kNone| if it is currently free (or freed).
*/
CallStack mAcquisitionContext;
};
protected:
/**
* BlockingResourceBase
* Initialize this blocking resource. Also hooks the resource into
* instrumentation code.
*
* Thread safe.
*
* @param aName A meaningful, unique name that can be used in
* error messages, et al.
* @param aType The specific type of |this|, if any.
**/
BlockingResourceBase(const char* aName, BlockingResourceType aType);
~BlockingResourceBase();
/**
* CheckAcquire
*
* Thread safe.
*
* @param aCallContext the client's calling context from which the
* original acquisition request was made.
* Initialize this blocking resource. Also hooks the resource into
* instrumentation code.
* @param aResource Unique handle for the underlying blocking resource.
* @param aName A meaningful, unique name that can be used in
* error messages, et al.
* @param aType The specific type of |aResource|, if any.
**/
void CheckAcquire(const CallStack& aCallContext);
void Init(void* aResource,
const char* aName,
BlockingResourceType aType);
/*
* The following variables and methods are used by the deadlock detector.
* To understand how they're used, it's best to give a quick overview
* of how the deadlock detector works.
*
* The deadlock detector ensures that all blocking resources are
* acquired according to a partial order P. One type of blocking
* resource is a lock. If a lock l1 is acquired (locked) before l2,
* then we say that $l1 <_P l2$. The detector flags an error if two
* locks l1 and l2 have an inconsistent ordering in P; that is, if both
* $l1 <_P l2$ and $l2 <_P l1$. This is a potential error because a
* thread acquiring l1,l2 according to the first order might simultaneously
* run with a thread acquiring them according to the second order.
* If this happens under the right conditions, then the
* acquisitions will deadlock.
*
* This deadlock detector doesn't know at compile-time what P is. So,
* it tries to discover the order at run time. More precisely, it
* finds <i>some</i> order P, then tries to find chains of resource
* acquisitions that violate P. An example acquisition sequence, and
* the orders they impose, is the following:
* l1.lock() // current chain: [ l1 ]
* // order: { }
*
* l2.lock() // current chain: [ l1, l2 ]
* // order: { l1 <_P l2 }
*
* l3.lock() // current chain: [ l1, l2, l3 ]
* // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3 }
* // (note: <_P is transitive, so also $l1 <_P l3$)
*
* l2.unlock() // current chain: [ l1, l3 ]
* // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3 }
* // (note: it's OK, but weird, that l2 was unlocked out
* // of order. we still have l1 <_P l3).
*
* l2.lock() // current chain: [ l1, l3, l2 ]
* // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3,
* l3 <_P l2 (!!!) }
* BEEP BEEP! Here the detector will flag a potential error, since
* l2 and l3 were used inconsistently (and potentially in ways
* that would deadlock).
*
* In reality, this is somewhat more complicated because each thread
* has its own acquisition chain. In addition, the example above
* elides several important details from the detector implementation.
*/
/**
* Acquire
*
* *NOT* thread safe. Requires ownership of underlying resource.
*
* @param aCallContext the client's calling context from which the
* original acquisition request was made.
* mResource
* Some handle to a resource that may block acquisition. Used to
* identify the resource in acquisition chains.
**/
void Acquire(const CallStack& aCallContext); //NS_NEEDS_RESOURCE(this)
void* mResource;
/**
* Release
* Remove this resource from the current thread's acquisition chain.
* The resource does not have to be at the front of the chain, although
* it is confusing to release resources in a different order than they
* are acquired. This generates a warning.
*
* *NOT* thread safe. Requires ownership of underlying resource.
* mType
* The more specific type of this resource. Used to implement special
* semantics (e.g., reentrancy of monitors.)
**/
void Release(); //NS_NEEDS_RESOURCE(this)
/**
* PrintCycle
* Append to |out| detailed information about the circular
* dependency in |cycle|. Returns true if it *appears* that this
* cycle may represent an imminent deadlock, but this is merely a
* heuristic; the value returned may be a false positive or false
* negative.
*
* *NOT* thread safe. Calls |Print()|.
*
* FIXME bug 456272 hack alert: because we can't write call
* contexts into strings, all info is written to stderr, but only
* some info is written into |out|
*/
static bool PrintCycle(const DDT::ResourceAcquisitionArray* cycle,
nsACString& out);
/**
* ResourceChainFront
*
* Thread safe.
*
* @return the front of the resource acquisition chain, i.e., the last
* resource acquired.
*/
static BlockingResourceBase* ResourceChainFront()
{
return (BlockingResourceBase*)
PR_GetThreadPrivate(sResourceAcqnChainFrontTPI);
}
/**
* ResourceChainPrev
*
* *NOT* thread safe. Requires ownership of underlying resource.
*/
static BlockingResourceBase*
ResourceChainPrev(const BlockingResourceBase* aResource)
{
return aResource->mChainPrev;
} //NS_NEEDS_RESOURCE(this)
/**
* ResourceChainAppend
* Set |this| to the front of the resource acquisition chain, and link
* |this| to |aPrev|.
*
* *NOT* thread safe. Requires ownership of underlying resource.
*/
void ResourceChainAppend(BlockingResourceBase* aPrev)
{
mChainPrev = aPrev;
PR_SetThreadPrivate(sResourceAcqnChainFrontTPI, this);
} //NS_NEEDS_RESOURCE(this)
/**
* ResourceChainRemove
* Remove |this| from the front of the resource acquisition chain.
*
* *NOT* thread safe. Requires ownership of underlying resource.
*/
void ResourceChainRemove()
{
NS_ASSERTION(this == ResourceChainFront(), "not at chain front");
PR_SetThreadPrivate(sResourceAcqnChainFrontTPI, mChainPrev);
} //NS_NEEDS_RESOURCE(this)
/**
* GetAcquisitionContext
* Return the calling context from which this resource was acquired,
* or CallStack::kNone if it's currently free.
*
* *NOT* thread safe. Requires ownership of underlying resource.
*/
CallStack
GetAcquisitionContext()
{
return mDDEntry->mAcquisitionContext;
}
/**
* SetAcquisitionContext
* Set the calling context from which this resource was acquired.
*
* *NOT* thread safe. Requires ownership of underlying resource.
*/
void
SetAcquisitionContext(CallStack aAcquisitionContext)
{
mDDEntry->mAcquisitionContext = aAcquisitionContext;
}
BlockingResourceType mType;
/**
* mChainPrev
* A series of resource acquisitions creates a chain of orders. This
* chain is implemented as a linked list; |mChainPrev| points to the
* resource most recently Acquire()'d before this one.
* resource most recently Show()'n before this one.
**/
BlockingResourceBase* mChainPrev;
private:
/**
* mDDEntry
* The key for this BlockingResourceBase in the deadlock detector.
*/
DeadlockDetectorEntry* mDDEntry;
* Acquire
* Add this resource to the front of the current thread's acquisition
* chain. Should be called by, e.g., Mutex::Lock().
**/
void Acquire();
/**
* Release
* Remove this resource from the current thread's acquisition chain.
* The resource does not have to be at the front of the chain, although
* it is confusing to release resources in a different order than they
* are acquired.
**/
void Release();
/**
* sCallOnce
* Ensures static members are initialized only once, and in a
* thread-safe way.
*/
static PRCallOnceType sCallOnce;
/**
* sResourceAcqnChainFrontTPI
* Thread-private index to the front of each thread's resource
* acquisition chain.
*/
static PRUintn sResourceAcqnChainFrontTPI;
/**
* sDeadlockDetector
* Does as named.
*/
static DDT sDeadlockDetector;
/**
* InitStatics
*
* *NOT* thread safe.
*
* Initialize static members of BlockingResourceBase that can't
* be statically initialized.
*/
static PRStatus InitStatics() {
PR_NewThreadPrivateIndex(&sResourceAcqnChainFrontTPI, 0);
return PR_SUCCESS;
#else
~BlockingResourceBase() {
}
void Init(void* aResource,
const char* aName,
BlockingResourceType aType) { }
#else // non-DEBUG implementation
BlockingResourceBase(const char* aName, BlockingResourceType aType)
{
}
~BlockingResourceBase()
{
}
void Acquire() { }
void Release() { }
#endif
};
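
The l1/l2/l3 walkthrough in the comment above maps onto mozilla::Mutex roughly as follows. This is an illustrative sketch only; the mutexes are hypothetical and the annotations paraphrase what the acquisition-chain bookkeeping records.

#include "mozilla/Mutex.h"

void OrderingWalkthrough()
{
    mozilla::Mutex l1("l1"), l2("l2"), l3("l3");

    l1.Lock();   // chain: [ l1 ]            order: { }
    l2.Lock();   // chain: [ l1, l2 ]        order: { l1 < l2 }
    l3.Lock();   // chain: [ l1, l2, l3 ]    order adds l2 < l3 (and l1 < l3)
    l2.Unlock(); // chain: [ l1, l3 ]        order unchanged
    l2.Lock();   // chain: [ l1, l3, l2 ]    would add l3 < l2: flagged, since
                 // l2 < l3 was already established above
    l2.Unlock();
    l3.Unlock();
    l1.Unlock();
}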

View File

@@ -60,28 +60,26 @@ public:
*
* The CALLER owns |lock|.
*
* @param aLock A Mutex to associate with this condition variable.
* @param aName A name which can reference this monitor
* @param lock A Mutex to associate with this condition variable.
* @param name A name which can reference this monitor
* @returns If failure, nsnull.
* If success, a valid Monitor* which must be destroyed
* by Monitor::DestroyMonitor()
**/
CondVar(Mutex& aLock, const char* aName) :
BlockingResourceBase(aName, eCondVar),
mLock(&aLock)
{
CondVar(Mutex& aLock, const char* name)
: mLock(&aLock) {
// |lock| must necessarily already be known to the deadlock detector
mCvar = PR_NewCondVar(mLock->mLock);
if (!mCvar)
NS_RUNTIMEABORT("Can't allocate mozilla::CondVar");
Init(mCvar, name, eCondVar);
}
/**
* ~CondVar
* Clean up after this CondVar, but NOT its associated Mutex.
**/
~CondVar()
{
~CondVar() {
NS_ASSERTION(mCvar && mLock,
"improperly constructed CondVar or double free");
PR_DestroyCondVar(mCvar);
@@ -89,27 +87,21 @@ public:
mLock = 0;
}
#ifndef DEBUG
/**
* Wait
* @see prcvar.h
**/
nsresult Wait(PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT)
{
nsresult Wait(PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT) {
// NSPR checks for lock ownership
return PR_WaitCondVar(mCvar, interval) == PR_SUCCESS
? NS_OK : NS_ERROR_FAILURE;
}
#else
nsresult Wait(PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT);
#endif // ifndef DEBUG
/**
* Notify
* @see prcvar.h
**/
nsresult Notify()
{
nsresult Notify() {
// NSPR checks for lock ownership
return PR_NotifyCondVar(mCvar) == PR_SUCCESS
? NS_OK : NS_ERROR_FAILURE;
@@ -119,8 +111,7 @@ public:
* NotifyAll
* @see prcvar.h
**/
nsresult NotifyAll()
{
nsresult NotifyAll() {
// NSPR checks for lock ownership
return PR_NotifyAllCondVar(mCvar) == PR_SUCCESS
? NS_OK : NS_ERROR_FAILURE;
@@ -131,8 +122,7 @@ public:
* AssertCurrentThreadOwnsMutex
* @see Mutex::AssertCurrentThreadOwns
**/
void AssertCurrentThreadOwnsMutex()
{
void AssertCurrentThreadOwnsMutex () {
mLock->AssertCurrentThreadOwns();
}
@@ -140,18 +130,13 @@ public:
* AssertNotCurrentThreadOwnsMutex
* @see Mutex::AssertNotCurrentThreadOwns
**/
void AssertNotCurrentThreadOwnsMutex()
{
void AssertNotCurrentThreadOwnsMutex () {
mLock->AssertNotCurrentThreadOwns();
}
#else
void AssertCurrentThreadOwnsMutex()
{
}
void AssertNotCurrentThreadOwnsMutex()
{
}
void AssertCurrentThreadOwnsMutex () { }
void AssertNotCurrentThreadOwnsMutex () { }
#endif // ifdef DEBUG
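
As a usage reminder, a CondVar is always paired with the Mutex handed to its constructor, and Wait()/Notify() must be called with that Mutex held. Below is a small producer/consumer sketch; the WorkQueue class and its members are hypothetical.

#include "mozilla/Mutex.h"
#include "mozilla/CondVar.h"

class WorkQueue
{
public:
    WorkQueue()
        : mLock("WorkQueue.mLock"),
          mCondVar(mLock, "WorkQueue.mCondVar"),
          mHaveWork(PR_FALSE)
    {
    }

    void Signal()
    {
        mozilla::MutexAutoLock lock(mLock);
        mHaveWork = PR_TRUE;
        mCondVar.Notify();          // NSPR checks that mLock is held
    }

    void WaitForWork()
    {
        mozilla::MutexAutoLock lock(mLock);
        while (!mHaveWork)          // re-check the predicate after every wakeup
            mCondVar.Wait();        // releases mLock while blocked, reacquires after
        mHaveWork = PR_FALSE;
    }

private:
    mozilla::Mutex mLock;
    mozilla::CondVar mCondVar;
    PRBool mHaveWork;
};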

View File

@@ -1,44 +0,0 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: sw=4 ts=4 et :
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Chris Jones <jones.chris.g@gmail.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "DeadlockDetector.h"
namespace mozilla {
const CallStack CallStack::kNone((CallStack::callstack_id) -1);
}

View File

@@ -1,577 +0,0 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: sw=4 ts=4 et :
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Chris Jones <jones.chris.g@gmail.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef mozilla_DeadlockDetector_h
#define mozilla_DeadlockDetector_h
#include <stdlib.h>
#include "plhash.h"
#include "prlock.h"
#include "nsTArray.h"
#ifdef NS_TRACE_MALLOC
# include "tracemalloc/nsTraceMalloc.h"
#endif // ifdef NS_TRACE_MALLOC
namespace mozilla {
// FIXME bug 456272: split this off into a convenience API on top of
// nsStackWalk?
class CallStack
{
private:
#ifdef NS_TRACE_MALLOC
typedef nsTMStackTraceID callstack_id;
// needs to be a macro to avoid disturbing the backtrace
# define NS_GET_BACKTRACE() NS_TraceMallocGetStackTrace()
#else
typedef void* callstack_id;
# define NS_GET_BACKTRACE() 0
#endif // ifdef NS_TRACE_MALLOC
callstack_id mCallStack;
public:
/**
* CallStack
* *ALWAYS* *ALWAYS* *ALWAYS* call this with no arguments. This
* constructor takes an argument *ONLY* so that |NS_GET_BACKTRACE()|
* can be evaluated in the stack frame of the caller, rather than
* that of the constructor.
*
* *BEWARE*: this means that calling this constructor with no
* arguments is not the same as a "default, do-nothing"
* constructor: it *will* construct a backtrace. This can cause
* unexpected performance issues.
*/
CallStack(const callstack_id aCallStack = NS_GET_BACKTRACE()) :
mCallStack(aCallStack)
{
}
CallStack(const CallStack& aFrom) :
mCallStack(aFrom.mCallStack)
{
}
CallStack& operator=(const CallStack& aFrom)
{
mCallStack = aFrom.mCallStack;
return *this;
}
bool operator==(const CallStack& aOther) const
{
return mCallStack == aOther.mCallStack;
}
bool operator!=(const CallStack& aOther) const
{
return mCallStack != aOther.mCallStack;
}
// FIXME bug 456272: if this is split off,
// NS_TraceMallocPrintStackTrace should be modified to print into
// an nsACString
void Print(FILE* f) const
{
#ifdef NS_TRACE_MALLOC
if (this != &kNone && mCallStack) {
NS_TraceMallocPrintStackTrace(f, mCallStack);
return;
}
#endif
fputs(" [stack trace unavailable]\n", f);
}
/** The "null" callstack. */
static const CallStack kNone;
};
/**
* DeadlockDetector
*
* The following is an approximate description of how the deadlock detector
* works.
*
* The deadlock detector ensures that all blocking resources are
* acquired according to a partial order P. One type of blocking
* resource is a lock. If a lock l1 is acquired (locked) before l2,
* then we say that |l1 <_P l2|. The detector flags an error if two
* locks l1 and l2 have an inconsistent ordering in P; that is, if
* both |l1 <_P l2| and |l2 <_P l1|. This is a potential error
* because a thread acquiring l1,l2 according to the first order might
* race with a thread acquiring them according to the second order.
* If this happens under the right conditions, then the acquisitions
* will deadlock.
*
* This deadlock detector doesn't know at compile-time what P is. So,
* it tries to discover the order at run time. More precisely, it
* finds <i>some</i> order P, then tries to find chains of resource
* acquisitions that violate P. An example acquisition sequence, and
* the orders they impose, is
* l1.lock() // current chain: [ l1 ]
* // order: { }
*
* l2.lock() // current chain: [ l1, l2 ]
* // order: { l1 <_P l2 }
*
* l3.lock() // current chain: [ l1, l2, l3 ]
* // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3 }
* // (note: <_P is transitive, so also |l1 <_P l3|)
*
* l2.unlock() // current chain: [ l1, l3 ]
* // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3 }
* // (note: it's OK, but weird, that l2 was unlocked out
* // of order. we still have l1 <_P l3).
*
* l2.lock() // current chain: [ l1, l3, l2 ]
* // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3,
* l3 <_P l2 (!!!) }
* BEEP BEEP! Here the detector will flag a potential error, since
* l2 and l3 were used inconsistently (and potentially in ways that
* would deadlock).
*/
template <typename T>
class DeadlockDetector
{
public:
/**
* ResourceAcquisition
* Consists simply of a resource and the calling context from
* which it was acquired. We pack this information together so
* that it can be returned back to the caller when a potential
* deadlock has been found.
*/
struct ResourceAcquisition
{
const T* mResource;
CallStack mCallContext;
ResourceAcquisition(
const T* aResource,
const CallStack aCallContext=CallStack::kNone) :
mResource(aResource),
mCallContext(aCallContext)
{
}
ResourceAcquisition(const ResourceAcquisition& aFrom) :
mResource(aFrom.mResource),
mCallContext(aFrom.mCallContext)
{
}
ResourceAcquisition& operator=(const ResourceAcquisition& aFrom)
{
mResource = aFrom.mResource;
mCallContext = aFrom.mCallContext;
return *this;
}
};
typedef nsTArray<ResourceAcquisition> ResourceAcquisitionArray;
private:
typedef nsTArray<PLHashEntry*> HashEntryArray;
typedef typename HashEntryArray::index_type index_type;
typedef typename HashEntryArray::size_type size_type;
enum {
NoIndex = HashEntryArray::NoIndex
};
/**
* Value type for the ordering table. Contains the other
* resources on which an ordering constraint |key < other|
* exists. The catch is that we also store the calling context at
* which the other resource was acquired; this improves the
* quality of error messages when potential deadlock is detected.
*/
struct OrderingEntry
{
OrderingEntry() :
mFirstSeen(CallStack::kNone),
mOrderedLT() // FIXME bug 456272: set to empirical
{ // dep size?
}
~OrderingEntry()
{
}
CallStack mFirstSeen; // first site from which the resource appeared
HashEntryArray mOrderedLT; // this <_o Other
};
static void* TableAlloc(void* /*pool*/, PRSize size)
{
return operator new(size);
}
static void TableFree(void* /*pool*/, void* item)
{
operator delete(item);
}
static PLHashEntry* EntryAlloc(void* /*pool*/, const void* key)
{
return new PLHashEntry;
}
static void EntryFree(void* /*pool*/, PLHashEntry* entry, PRUintn flag)
{
delete static_cast<T*>(const_cast<void*>(entry->key));
delete static_cast<OrderingEntry*>(entry->value);
entry->value = 0;
if (HT_FREE_ENTRY == flag)
delete entry;
}
static PLHashNumber HashKey(const void* aKey)
{
return NS_PTR_TO_INT32(aKey) >> 2;
}
static const PLHashAllocOps kAllocOps;
// Hash table "interface" the rest of the code should use
PLHashEntry** GetEntry(const T* aKey)
{
return PL_HashTableRawLookup(mOrdering, HashKey(aKey), aKey);
}
void PutEntry(T* aKey)
{
PL_HashTableAdd(mOrdering, aKey, new OrderingEntry());
}
// XXX need these helper methods because OrderingEntry doesn't have
// XXX access to underlying PLHashEntry
/**
* Add the order |aFirst <_o aSecond|.
*
* WARNING: this does not check whether it's sane to add this
* order. In the "best" bad case, when this order already exists,
* adding it anyway may unnecessarily result in O(n^2) space. In
* the "worst" bad case, adding it anyway will cause
* |InTransitiveClosure()| to diverge.
*/
void AddOrder(PLHashEntry* aLT, PLHashEntry* aGT)
{
static_cast<OrderingEntry*>(aLT->value)->mOrderedLT
.InsertElementSorted(aGT);
}
/**
* Return true iff the order |aFirst < aSecond| has been
* *explicitly* added.
*
* Does not consider transitivity.
*/
bool IsOrdered(const PLHashEntry* aFirst, const PLHashEntry* aSecond)
const
{
return NoIndex !=
static_cast<const OrderingEntry*>(aFirst->value)->mOrderedLT
.BinaryIndexOf(aSecond);
}
/**
* Return a pointer to the array of all elements "that" for
* which the order |this < that| has been explicitly added.
*
* NOTE: this does *not* consider transitive orderings.
*/
PLHashEntry* const* GetOrders(const PLHashEntry* aEntry) const
{
return static_cast<const OrderingEntry*>(aEntry->value)->mOrderedLT
.Elements();
}
/**
* Return the number of elements "that" for which the order
* |this < that| has been explicitly added.
*
* NOTE: this does *not* consider transitive orderings.
*/
size_type NumOrders(const PLHashEntry* aEntry) const
{
return static_cast<const OrderingEntry*>(aEntry->value)->mOrderedLT
.Length();
}
/** Make a ResourceAcquisition out of |aEntry|. */
ResourceAcquisition MakeResourceAcquisition(const PLHashEntry* aEntry)
const
{
return ResourceAcquisition(
static_cast<const T*>(aEntry->key),
static_cast<const OrderingEntry*>(aEntry->value)->mFirstSeen);
}
// Throwaway RAII lock to make the following code safer.
struct PRAutoLock
{
PRAutoLock(PRLock* aLock) : mLock(aLock) { PR_Lock(mLock); }
~PRAutoLock() { PR_Unlock(mLock); }
PRLock* mLock;
};
public:
static const PRUint32 kDefaultNumBuckets;
/**
* DeadlockDetector
* Create a new deadlock detector.
*
* @param aNumResourcesGuess Guess at approximate number of resources
* that will be checked.
*/
DeadlockDetector(PRUint32 aNumResourcesGuess = kDefaultNumBuckets)
{
mOrdering = PL_NewHashTable(aNumResourcesGuess,
HashKey,
PL_CompareValues, PL_CompareValues,
&kAllocOps, 0);
if (!mOrdering)
NS_RUNTIMEABORT("couldn't initialize resource ordering table");
mLock = PR_NewLock();
if (!mLock)
NS_RUNTIMEABORT("couldn't allocate deadlock detector lock");
}
/**
* Add
* Make the deadlock detector aware of |aResource|.
*
* WARNING: The deadlock detector owns |aResource|.
*
* Thread safe.
*
* @param aResource Resource to make deadlock detector aware of.
*/
void Add(T* aResource)
{
PRAutoLock _(mLock);
PutEntry(aResource);
}
// Nb: implementing a Remove() method makes the detector "more
// unsound." By removing a resource from the orderings, deadlocks
// may be missed that would otherwise have been found. However,
// removing resources possibly reduces the # of false positives,
// and additionally saves space. So it's a trade off; we have
// chosen to err on the side of caution and not implement Remove().
/**
* CheckAcquisition This method is called after acquiring |aLast|,
* but before trying to acquire |aProposed| from |aCallContext|.
* It determines whether actually trying to acquire |aProposed|
* will create problems. It is OK if |aLast| is NULL; this is
* interpreted as |aProposed| being the thread's first acquisition
* of its current chain.
*
* Iff acquiring |aProposed| may lead to deadlock for some thread
* interleaving (including the current one!), the cyclical
* dependency from which this was deduced is returned. Otherwise,
* 0 is returned.
*
* If a potential deadlock is detected and a resource cycle is
* returned, it is the *caller's* responsibility to free it.
*
* Thread safe.
*
* @param aLast Last resource acquired by calling thread (or 0).
* @param aProposed Resource calling thread proposes to acquire.
* @param aCallContext Calling context whence acquisition request came.
*/
ResourceAcquisitionArray* CheckAcquisition(const T* aLast,
const T* aProposed,
const CallStack& aCallContext)
{
NS_ASSERTION(aProposed, "null resource");
PRAutoLock _(mLock);
PLHashEntry* second = *GetEntry(aProposed);
OrderingEntry* e = static_cast<OrderingEntry*>(second->value);
if (CallStack::kNone == e->mFirstSeen)
e->mFirstSeen = aCallContext;
if (!aLast)
// don't check if |0 < proposed|; just vamoose
return 0;
PLHashEntry* first = *GetEntry(aLast);
// this is the crux of the deadlock detector algorithm
if (first == second) {
// reflexive deadlock. fastpath b/c InTransitiveClosure is
// not applicable here.
ResourceAcquisitionArray* cycle = new ResourceAcquisitionArray();
if (!cycle)
NS_RUNTIMEABORT("can't allocate dep. cycle array");
cycle->AppendElement(MakeResourceAcquisition(first));
cycle->AppendElement(ResourceAcquisition(aProposed,
aCallContext));
return cycle;
}
if (InTransitiveClosure(first, second)) {
// we've already established |last < proposed|. all is well.
return 0;
}
if (InTransitiveClosure(second, first)) {
// the order |proposed < last| has been deduced, perhaps
// transitively. we're attempting to violate that
// constraint by acquiring resources in the order
// |last < proposed|, and thus we may deadlock under the
// right conditions.
ResourceAcquisitionArray* cycle = GetDeductionChain(second, first);
// show how acquiring |proposed| would complete the cycle
cycle->AppendElement(ResourceAcquisition(aProposed,
aCallContext));
return cycle;
}
// |last|, |proposed| are unordered according to our
// poset. this is fine, but we now need to add this
// ordering constraint.
AddOrder(first, second);
return 0;
}
/**
* Return true iff |aTarget| is in the transitive closure of |aStart|
* over the ordering relation `<_this'.
*
* @precondition |aStart != aTarget|
*/
bool InTransitiveClosure(const PLHashEntry* aStart,
const PLHashEntry* aTarget) const
{
if (IsOrdered(aStart, aTarget))
return true;
index_type i = 0;
size_type len = NumOrders(aStart);
for (const PLHashEntry* const* it = GetOrders(aStart);
i < len; ++i, ++it)
if (InTransitiveClosure(*it, aTarget))
return true;
return false;
}
/**
* Return an array of all resource acquisitions
* aStart <_this r1 <_this r2 <_ ... <_ aTarget
* from which |aStart <_this aTarget| was deduced, including
* |aStart| and |aTarget|.
*
* Nb: there may be multiple deductions of |aStart <_this
* aTarget|. This function returns the first ordering found by
* depth-first search.
*
* Nb: |InTransitiveClosure| could be replaced by this function.
* However, this one is more expensive because we record the DFS
* search stack on the heap whereas the other doesn't.
*
* @precondition |aStart != aTarget|
*/
ResourceAcquisitionArray* GetDeductionChain(
const PLHashEntry* aStart,
const PLHashEntry* aTarget)
{
ResourceAcquisitionArray* chain = new ResourceAcquisitionArray();
if (!chain)
NS_RUNTIMEABORT("can't allocate dep. cycle array");
chain->AppendElement(MakeResourceAcquisition(aStart));
NS_ASSERTION(GetDeductionChain_Helper(aStart, aTarget, chain),
"GetDeductionChain called when there's no deadlock");
return chain;
}
// precondition: |aStart != aTarget|
// invariant: |aStart| is the last element in |aChain|
bool GetDeductionChain_Helper(const PLHashEntry* aStart,
const PLHashEntry* aTarget,
ResourceAcquisitionArray* aChain)
{
if (IsOrdered(aStart, aTarget)) {
aChain->AppendElement(MakeResourceAcquisition(aTarget));
return true;
}
index_type i = 0;
size_type len = NumOrders(aStart);
for (const PLHashEntry* const* it = GetOrders(aStart);
i < len; ++i, ++it) {
aChain->AppendElement(MakeResourceAcquisition(*it));
if (GetDeductionChain_Helper(*it, aTarget, aChain))
return true;
aChain->RemoveElementAt(aChain->Length() - 1);
}
return false;
}
/**
* The partial order on resource acquisitions used by the deadlock
* detector.
*/
PLHashTable* mOrdering; // T* -> PLHashEntry<OrderingEntry>
/**
* Protects contentious methods.
* Nb: can't use mozilla::Mutex since we are used as its deadlock
* detector.
*/
PRLock* mLock;
DeadlockDetector(const DeadlockDetector& aDD);
DeadlockDetector& operator=(const DeadlockDetector& aDD);
};
template<typename T>
const PLHashAllocOps DeadlockDetector<T>::kAllocOps = {
DeadlockDetector<T>::TableAlloc, DeadlockDetector<T>::TableFree,
DeadlockDetector<T>::EntryAlloc, DeadlockDetector<T>::EntryFree
};
template<typename T>
// FIXME bug 456272: tune based on average workload
const PRUint32 DeadlockDetector<T>::kDefaultNumBuckets = 64;
} // namespace mozilla
#endif // ifndef mozilla_DeadlockDetector_h
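
To recap how this class was driven before the backout (mirroring CheckAcquire() in BlockingResourceBase.cpp above): the caller passes its previous acquisition and the proposed one, and owns any returned cycle. The Resource type, the Demo() function, and the resource names below are hypothetical; note that CallStack() with no arguments captures the caller's backtrace via the default argument, as the CallStack comment explains.

#include "mozilla/DeadlockDetector.h"
#include "nsAutoPtr.h"
#include "nsDebug.h"

using namespace mozilla;

// Hypothetical resource type; the detector takes ownership of entries Add()ed to it.
struct Resource
{
    const char* mName;
    explicit Resource(const char* aName) : mName(aName) { }
};

static DeadlockDetector<Resource> sDetector;

void Demo()
{
    Resource* a = new Resource("A");
    Resource* b = new Resource("B");
    sDetector.Add(a);    // sDetector now owns |a|
    sDetector.Add(b);

    // The calling thread has already "acquired" |a| and proposes |b|.
    nsAutoPtr<DeadlockDetector<Resource>::ResourceAcquisitionArray> cycle(
        sDetector.CheckAcquisition(a, b, CallStack()));

    // NULL means no inconsistent ordering was found (and |a < b| is now
    // recorded); otherwise |cycle| lists the acquisitions closing the loop,
    // and the caller must free it -- nsAutoPtr does that here.
    NS_ASSERTION(!cycle, "A-then-B merely establishes an order");
}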

View File

@@ -133,7 +133,7 @@ SDK_LIBRARY = \
## TODO temp hack until build system handles this
export:: BlockingResourceBase.h CondVar.h DeadlockDetector.h Monitor.h Mutex.h
export:: BlockingResourceBase.h CondVar.h Monitor.h Mutex.h
$(INSTALL) $^ $(DIST)/include/mozilla

View File

@@ -67,27 +67,26 @@ class NS_COM_GLUE Monitor : BlockingResourceBase
public:
/**
* Monitor
* @param aName A name which can reference this monitor
* @param name A name which can reference this monitor
* @returns If failure, nsnull
* If success, a valid Monitor*, which must be destroyed
* by Monitor::DestroyMonitor()
**/
Monitor(const char* aName) :
BlockingResourceBase(aName, eMonitor)
Monitor(const char* name)
#ifdef DEBUG
, mEntryCount(0)
: mEntryCount(0)
#endif
{
mMonitor = PR_NewMonitor();
if (!mMonitor)
NS_RUNTIMEABORT("Can't allocate mozilla::Monitor");
Init(mMonitor, name, eMonitor);
}
/**
* ~Monitor
**/
~Monitor()
{
~Monitor() {
NS_ASSERTION(mMonitor,
"improperly constructed Monitor or double free");
PR_DestroyMonitor(mMonitor);
@@ -99,8 +98,8 @@ public:
* Enter
* @see prmon.h
**/
void Enter()
{
void Enter() {
// NSPR does consistency checks for us
PR_EnterMonitor(mMonitor);
}
@@ -108,34 +107,30 @@ public:
* Exit
* @see prmon.h
**/
void Exit()
{
void Exit() {
PR_ExitMonitor(mMonitor);
}
/**
* Wait
* @see prmon.h
**/
nsresult Wait(PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT)
{
return PR_Wait(mMonitor, interval) == PR_SUCCESS ?
NS_OK : NS_ERROR_FAILURE;
}
#else
void Enter();
void Exit();
nsresult Wait(PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT);
#endif // ifndef DEBUG
/**
* Wait
* @see prmon.h
**/
nsresult Wait(PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT) {
return PR_Wait(mMonitor, interval) == PR_SUCCESS
? NS_OK : NS_ERROR_FAILURE;
}
/**
* Notify
* @see prmon.h
**/
nsresult Notify()
{
nsresult Notify() {
return PR_Notify(mMonitor) == PR_SUCCESS
? NS_OK : NS_ERROR_FAILURE;
}
@@ -144,8 +139,7 @@ public:
* NotifyAll
* @see prmon.h
**/
nsresult NotifyAll()
{
nsresult NotifyAll() {
return PR_NotifyAll(mMonitor) == PR_SUCCESS
? NS_OK : NS_ERROR_FAILURE;
}
@@ -155,8 +149,7 @@ public:
* AssertCurrentThreadIn
* @see prmon.h
**/
void AssertCurrentThreadIn()
{
void AssertCurrentThreadIn () {
PR_ASSERT_CURRENT_THREAD_IN_MONITOR(mMonitor);
}
@@ -164,18 +157,13 @@ public:
* AssertNotCurrentThreadIn
* @see prmon.h
**/
void AssertNotCurrentThreadIn()
{
// FIXME bug 476536
void AssertNotCurrentThreadIn () {
// TODO
}
#else
void AssertCurrentThreadIn()
{
}
void AssertNotCurrentThreadIn()
{
}
void AssertCurrentThreadIn () { }
void AssertNotCurrentThreadIn () { }
#endif // ifdef DEBUG
@@ -209,30 +197,25 @@ public:
* @param aMonitor A valid mozilla::Monitor* returned by
* mozilla::Monitor::NewMonitor.
**/
MonitorAutoEnter(mozilla::Monitor &aMonitor) :
mMonitor(&aMonitor)
{
MonitorAutoEnter(mozilla::Monitor &aMonitor)
: mMonitor(&aMonitor) {
NS_ASSERTION(mMonitor, "null monitor");
mMonitor->Enter();
}
~MonitorAutoEnter(void)
{
~MonitorAutoEnter(void) {
mMonitor->Exit();
}
nsresult Wait(PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT)
{
nsresult Wait(PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT) {
return mMonitor->Wait(interval);
}
nsresult Notify()
{
nsresult Notify() {
return mMonitor->Notify();
}
nsresult NotifyAll()
{
nsresult NotifyAll() {
return mMonitor->NotifyAll();
}
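
A short reminder of the intended RAII pattern for Monitor; the Channel class and its members are hypothetical, and only Monitor and MonitorAutoEnter come from this header. Monitors are reentrant, so nested Enter()s from the same thread are fine.

#include "mozilla/Monitor.h"

class Channel
{
public:
    Channel() : mMonitor("Channel.mMonitor"), mReady(PR_FALSE) { }

    void WaitUntilReady()
    {
        mozilla::MonitorAutoEnter mon(mMonitor); // Enter() now, Exit() at scope end
        while (!mReady)
            mon.Wait();
    }

    void SetReady()
    {
        mozilla::MonitorAutoEnter mon(mMonitor);
        mReady = PR_TRUE;
        mon.Notify();
    }

    void TouchAndSetReady()
    {
        mozilla::MonitorAutoEnter mon(mMonitor);
        SetReady();                              // re-enters mMonitor; allowed
    }

private:
    mozilla::Monitor mMonitor;
    PRBool mReady;
};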

View File

@@ -73,19 +73,17 @@ public:
* If success, a valid Mutex* which must be destroyed
* by Mutex::DestroyMutex()
**/
Mutex(const char* name) :
BlockingResourceBase(name, eMutex)
{
Mutex(const char* name) {
mLock = PR_NewLock();
if (!mLock)
NS_RUNTIMEABORT("Can't allocate mozilla::Mutex");
Init(mLock, name, eMutex);
}
/**
* ~Mutex
**/
~Mutex()
{
~Mutex() {
NS_ASSERTION(mLock,
"improperly constructed Lock or double free");
// NSPR does consistency checks for us
@ -98,8 +96,7 @@ public:
* Lock
* @see prlock.h
**/
void Lock()
{
void Lock() {
PR_Lock(mLock);
}
@ -107,8 +104,7 @@ public:
* Unlock
* @see prlock.h
**/
void Unlock()
{
void Unlock() {
PR_Unlock(mLock);
}
@ -116,30 +112,24 @@ public:
* AssertCurrentThreadOwns
* @see prlock.h
**/
void AssertCurrentThreadOwns ()
{
}
void AssertCurrentThreadOwns () { }
/**
* AssertNotCurrentThreadOwns
* @see prlock.h
**/
void AssertNotCurrentThreadOwns ()
{
}
void AssertNotCurrentThreadOwns () { }
#else
void Lock();
void Unlock();
void AssertCurrentThreadOwns ()
{
void AssertCurrentThreadOwns () {
PR_ASSERT_CURRENT_THREAD_OWNS_LOCK(mLock);
}
void AssertNotCurrentThreadOwns ()
{
// FIXME bug 476536
void AssertNotCurrentThreadOwns () {
// TODO
}
#endif // ifndef DEBUG
@ -173,9 +163,8 @@ public:
* @param aLock A valid mozilla::Mutex* returned by
* mozilla::Mutex::NewMutex.
**/
MutexAutoLock(mozilla::Mutex& aLock) :
mLock(&aLock)
{
MutexAutoLock(mozilla::Mutex& aLock)
: mLock(&aLock) {
NS_ASSERTION(mLock, "null mutex");
mLock->Lock();
}
@ -205,9 +194,8 @@ private:
class NS_COM_GLUE NS_STACK_CLASS MutexAutoUnlock
{
public:
MutexAutoUnlock(mozilla::Mutex& aLock) :
mLock(&aLock)
{
MutexAutoUnlock(mozilla::Mutex& aLock)
: mLock(&aLock) {
NS_ASSERTION(mLock, "null lock");
mLock->Unlock();
}
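Similarly, a small illustrative sketch of MutexAutoLock guarding hypothetical state; Counter and its members are invented for the example and are not part of the patch.

#include "mozilla/Mutex.h"

// Hypothetical counter whose state is protected by a named Mutex.
class Counter
{
public:
    Counter() : mLock("example.counter"), mValue(0) { }

    void Increment()
    {
        mozilla::MutexAutoLock lock(mLock);   // Lock() now, Unlock() at scope end
        ++mValue;
    }

    int Read()
    {
        mozilla::MutexAutoLock lock(mLock);
        return mValue;
    }

private:
    mozilla::Mutex mLock;
    int mValue;
};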


@ -541,70 +541,6 @@ class nsTArray : public nsTArray_base {
return elem;
}
// This method searches for the least index of the greatest
// element less than or equal to |item|. If |item| is inserted at
// this index, the array will remain sorted. True is returned iff
// this index is also equal to |item|. In this case, the returned
// index may point to the start of multiple copies of |item|.
// @param item The item to search for.
// @param comp The Comparator used.
// @outparam idx The index of greatest element <= to |item|
// @return True iff |item == array[*idx]|.
// @precondition The array is sorted
template<class Item, class Comparator>
PRBool
GreatestIndexLtEq(const Item& item,
const Comparator& comp,
index_type* idx NS_OUTPARAM) const {
// Nb: we could replace all the uses of "BinaryIndexOf" with this
// function, but BinaryIndexOf will be oh-so-slightly faster so
// it's not strictly desirable to do so.
// invariant: low <= [idx] < high
index_type low = 0, high = Length();
while (high > low) {
index_type mid = (high + low) >> 1;
if (comp.Equals(ElementAt(mid), item)) {
// we might have the array [..., 2, 4, 4, 4, 4, 4, 5, ...]
// and be searching for "4". it's arbitrary where mid ends
// up here, so we back it up to the first instance to maintain
// the "least index ..." we promised above.
do {
--mid;
} while (NoIndex != mid && comp.Equals(ElementAt(mid), item));
*idx = ++mid;
return PR_TRUE;
}
if (comp.LessThan(ElementAt(mid), item))
// invariant: low <= idx < high
low = mid + 1;
else
// invariant: low <= idx < high
high = mid;
}
// low <= idx < high, so insert at high ("shifting" high up by
// 1) to maintain invariant.
// (or insert at low, since low==high; just a matter of taste here.)
*idx = high;
return PR_FALSE;
}
// Inserts |item| at such an index to guarantee that if the array
// was previously sorted, it will remain sorted after this
// insertion.
template<class Item, class Comparator>
elem_type *InsertElementSorted(const Item& item, const Comparator& comp) {
index_type index;
GreatestIndexLtEq(item, comp, &index);
return InsertElementAt(index, item);
}
// A variation on the InsertElementSorted method defined above.
template<class Item>
elem_type *InsertElementSorted(const Item& item) {
return InsertElementSorted(item, nsDefaultComparator<elem_type, Item>());
}
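Since this backout removes the sorted-array helpers above, a short illustrative sketch of their contract may be useful; the values and the function name are arbitrary and not part of the patch.

static void
SortedInsertExample()
{
    nsTArray<int> a;
    a.InsertElementSorted(4);
    a.InsertElementSorted(2);
    a.InsertElementSorted(4);      // duplicates are fine; a is now {2, 4, 4}

    nsTArray<int>::index_type idx;
    // Searching for 4 returns PR_TRUE with idx == 1, the first of the two 4s.
    a.GreatestIndexLtEq(4, nsDefaultComparator<int, int>(), &idx);
    // Searching for 3 returns PR_FALSE with idx == 1: inserting 3 at idx
    // would keep the array sorted.
    a.GreatestIndexLtEq(3, nsDefaultComparator<int, int>(), &idx);
}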
// This method appends elements to the end of this array.
// @param array The elements to append to this array.
// @param arrayLen The number of elements to append to this array.
@ -696,27 +632,6 @@ class nsTArray : public nsTArray_base {
return RemoveElement(item, nsDefaultComparator<elem_type, Item>());
}
// This helper function combines GreatestIndexLtEq with
// RemoveElementAt to "search and destroy" the first element that
// is equal to the given element.
// @param item The item to search for.
// @param comp The Comparator used to determine element equality.
// @return PR_TRUE if the element was found
template<class Item, class Comparator>
PRBool RemoveElementSorted(const Item& item, const Comparator& comp) {
index_type index;
PRBool found = GreatestIndexLtEq(item, comp, &index);
if (found)
RemoveElementAt(index);
return found;
}
// A variation on the RemoveElementSorted method defined above.
template<class Item>
PRBool RemoveElementSorted(const Item& item) {
return RemoveElementSorted(item, nsDefaultComparator<elem_type, Item>());
}
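A matching sketch for the removal helper, assuming the {2, 4, 4} array built in the insertion sketch above; again illustrative only.

static void
SortedRemoveExample(nsTArray<int>& a)           // a == {2, 4, 4}
{
    a.RemoveElementSorted(4);                   // binary-searches, removes the first 4: {2, 4}
    PRBool removed = a.RemoveElementSorted(7);  // 7 absent: returns PR_FALSE, a unchanged
    (void) removed;                             // quiet unused-variable warnings in this sketch
}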
// This method causes the elements contained in this array and the given
// array to be swapped.
// NOTE: This method isn't heavily optimized if either array is an


@ -77,7 +77,6 @@ XPCOM_GLUE_SRC_CPPSRCS = $(addprefix $(topsrcdir)/xpcom/glue/, $(XPCOM_GLUE_SRC_
XPCOM_GLUENS_SRC_LCPPSRCS = \
BlockingResourceBase.cpp \
DeadlockDetector.cpp \
nsAutoLock.cpp \
nsGenericFactory.cpp \
nsProxyRelease.cpp \


@ -103,26 +103,19 @@ CPP_UNIT_TESTS += \
TestArray.cpp \
TestAutoLock.cpp \
TestCRT.cpp \
TestDeque.cpp \
TestEncoding.cpp \
TestExpirationTracker.cpp \
TestPipes.cpp \
TestProxies.cpp \
TestStorageStream.cpp \
TestThreads.cpp \
TestThreadPool.cpp \
TestXPIDLString.cpp \
TestDeque.cpp \
TestStrings.cpp \
TestStorageStream.cpp \
TestSynchronization.cpp \
TestTArray.cpp \
TestThreadPool.cpp \
TestThreads.cpp \
TestTimeStamp.cpp \
TestXPIDLString.cpp \
$(NULL)
endif
ifdef MOZ_DEBUG
CPP_UNIT_TESTS += \
TestDeadlockDetector.cpp \
TestDeadlockDetectorScalability.cpp \
$(NULL)
endif


@ -1,583 +0,0 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: sw=4 ts=4 et :
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Chris Jones <jones.chris.g@gmail.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "prerror.h"
#include "prio.h"
#include "prproces.h"
#include "TestHarness.h"
#include "mozilla/CondVar.h"
#include "mozilla/Monitor.h"
#include "mozilla/Mutex.h"
using namespace mozilla;
static PRThread*
spawn(void (*run)(void*), void* arg)
{
return PR_CreateThread(PR_SYSTEM_THREAD,
run,
arg,
PR_PRIORITY_NORMAL,
PR_GLOBAL_THREAD,
PR_JOINABLE_THREAD,
0);
}
#define PASS() \
do { \
passed(__FUNCTION__); \
return NS_OK; \
} while (0);
#define FAIL(why) \
do { \
fail(why); \
return NS_ERROR_FAILURE; \
} while (0);
#define ALEN(arr) (sizeof(arr) / sizeof(arr[0]))
//-----------------------------------------------------------------------------
static const char* sPathToThisBinary;
class Subprocess
{
public:
// not available until process finishes
PRInt32 mExitCode;
nsCString mStdout;
nsCString mStderr;
Subprocess(const char* aTestName) {
// set up stdio redirection
PRFileDesc* readStdin; PRFileDesc* writeStdin;
PRFileDesc* readStdout; PRFileDesc* writeStdout;
PRFileDesc* readStderr; PRFileDesc* writeStderr;
PRProcessAttr* pattr = PR_NewProcessAttr();
NS_ASSERTION(pattr, "couldn't allocate process attrs");
NS_ASSERTION(PR_SUCCESS == PR_CreatePipe(&readStdin, &writeStdin),
"couldn't create child stdin pipe");
NS_ASSERTION(PR_SUCCESS == PR_SetFDInheritable(readStdin, PR_TRUE),
"couldn't set child stdin inheritable");
PR_ProcessAttrSetStdioRedirect(pattr, PR_StandardInput, readStdin);
NS_ASSERTION(PR_SUCCESS == PR_CreatePipe(&readStdout, &writeStdout),
"couldn't create child stdout pipe");
NS_ASSERTION(PR_SUCCESS == PR_SetFDInheritable(writeStdout, PR_TRUE),
"couldn't set child stdout inheritable");
PR_ProcessAttrSetStdioRedirect(pattr, PR_StandardOutput, writeStdout);
NS_ASSERTION(PR_SUCCESS == PR_CreatePipe(&readStderr, &writeStderr),
"couldn't create child stderr pipe");
NS_ASSERTION(PR_SUCCESS == PR_SetFDInheritable(writeStderr, PR_TRUE),
"couldn't set child stderr inheritable");
PR_ProcessAttrSetStdioRedirect(pattr, PR_StandardError, writeStderr);
// set up argv with test name to run
char** newArgv = new char*[3];
if (!newArgv)
NS_RUNTIMEABORT("can't allocate argv");
newArgv[0] = strdup(sPathToThisBinary);
newArgv[1] = strdup(aTestName);
newArgv[2] = 0;
PRProcess* proc;
NS_ASSERTION(proc = PR_CreateProcess(sPathToThisBinary,
newArgv,
0, // inherit environment
pattr),
"couldn't create process");
PR_Close(readStdin);
PR_Close(writeStdout);
PR_Close(writeStderr);
mProc = proc;
mStdinfd = writeStdin;
mStdoutfd = readStdout;
mStderrfd = readStderr;
PR_DestroyProcessAttr(pattr);
delete[] newArgv;
}
void RunToCompletion(PRUint32 aWaitMs)
{
PR_Close(mStdinfd);
PRPollDesc pollfds[2];
PRInt32 nfds;
PRBool stdoutOpen = PR_TRUE, stderrOpen = PR_TRUE;
char buf[4096];
PRInt32 len;
PRIntervalTime now = PR_IntervalNow();
PRIntervalTime deadline = now + PR_MillisecondsToInterval(aWaitMs);
while ((stdoutOpen || stderrOpen) && now < deadline) {
nfds = 0;
if (stdoutOpen) {
pollfds[nfds].fd = mStdoutfd;
pollfds[nfds].in_flags = PR_POLL_READ;
pollfds[nfds].out_flags = 0;
++nfds;
}
if (stderrOpen) {
pollfds[nfds].fd = mStderrfd;
pollfds[nfds].in_flags = PR_POLL_READ;
pollfds[nfds].out_flags = 0;
++nfds;
}
PRInt32 rv = PR_Poll(pollfds, nfds, deadline - now);
NS_ASSERTION(0 <= rv, PR_ErrorToName(PR_GetError()));
if (0 == rv) { // timeout
Finish(PR_FALSE); // abnormal
return;
}
for (PRInt32 i = 0; i < nfds; ++i) {
if (!pollfds[i].out_flags)
continue;
PRBool isStdout = mStdoutfd == pollfds[i].fd;
if (PR_POLL_READ & pollfds[i].out_flags) {
len = PR_Read(pollfds[i].fd, buf, sizeof(buf) - 1);
NS_ASSERTION(0 <= len, PR_ErrorToName(PR_GetError()));
}
else if (PR_POLL_HUP & pollfds[i].out_flags) {
len = 0;
}
else {
NS_ERROR(PR_ErrorToName(PR_GetError()));
}
if (0 < len) {
buf[len] = '\0';
if (isStdout)
mStdout += buf;
else
mStderr += buf;
}
else {
if (isStdout) {
stdoutOpen = PR_FALSE;
PR_Close(mStdoutfd);
}
else {
stderrOpen = PR_FALSE;
PR_Close(mStderrfd);
}
}
}
now = PR_IntervalNow();
}
Finish(!stdoutOpen && !stderrOpen && now <= deadline);
}
private:
void Finish(PRBool normalExit) {
if (!normalExit) {
PR_KillProcess(mProc);
PR_Close(mStdoutfd);
PR_Close(mStderrfd);
mExitCode = -1;
PRInt32 dummy;
PR_WaitProcess(mProc, &dummy);
}
else {
PR_WaitProcess(mProc, &mExitCode); // this had better not block ...
}
}
PRProcess* mProc;
PRFileDesc* mStdinfd; // writeable
PRFileDesc* mStdoutfd; // readable
PRFileDesc* mStderrfd; // readable
};
//-----------------------------------------------------------------------------
// Harness for checking detector errors
bool
CheckForDeadlock(const char* test, const char* const* findTokens)
{
Subprocess proc(test);
proc.RunToCompletion(1000);
if (0 == proc.mExitCode)
return false;
PRInt32 idx = 0;
for (const char* const* tp = findTokens; *tp; ++tp) {
const char* const token = *tp;
idx = proc.mStderr.Find(token, PR_FALSE, idx);
if (kNotFound == idx) {
printf("(missed token '%s' in output)\n", token);
puts("----------------------------------\n");
puts(proc.mStderr.get());
puts("----------------------------------\n");
return false;
}
idx += strlen(token);
}
return true;
}
//-----------------------------------------------------------------------------
// Single-threaded sanity tests
// Stupidest possible deadlock.
nsresult
Sanity_Child()
{
mozilla::Mutex m1("dd.sanity.m1");
m1.Lock();
m1.Lock();
return 0; // not reached
}
nsresult
Sanity()
{
const char* const tokens[] = {
"###!!! ERROR: Potential deadlock detected",
"=== Cyclical dependency starts at\n--- Mutex : dd.sanity.m1",
"=== Cycle completed at\n--- Mutex : dd.sanity.m1",
"###!!! Deadlock may happen NOW!", // better catch these easy cases...
"###!!! ASSERTION: Potential deadlock detected",
0
};
if (CheckForDeadlock("Sanity", tokens)) {
PASS();
} else {
FAIL("deadlock not detected");
}
}
// Slightly less stupid deadlock.
nsresult
Sanity2_Child()
{
mozilla::Mutex m1("dd.sanity2.m1");
mozilla::Mutex m2("dd.sanity2.m2");
m1.Lock();
m2.Lock();
m1.Lock();
return 0; // not reached
}
nsresult
Sanity2()
{
const char* const tokens[] = {
"###!!! ERROR: Potential deadlock detected",
"=== Cyclical dependency starts at\n--- Mutex : dd.sanity2.m1",
"--- Next dependency:\n--- Mutex : dd.sanity2.m2",
"=== Cycle completed at\n--- Mutex : dd.sanity2.m1",
"###!!! Deadlock may happen NOW!", // better catch these easy cases...
"###!!! ASSERTION: Potential deadlock detected",
0
};
if (CheckForDeadlock("Sanity2", tokens)) {
PASS();
} else {
FAIL("deadlock not detected");
}
}
nsresult
Sanity3_Child()
{
mozilla::Mutex m1("dd.sanity3.m1");
mozilla::Mutex m2("dd.sanity3.m2");
mozilla::Mutex m3("dd.sanity3.m3");
mozilla::Mutex m4("dd.sanity3.m4");
m1.Lock();
m2.Lock();
m3.Lock();
m4.Lock();
m4.Unlock();
m3.Unlock();
m2.Unlock();
m1.Unlock();
m4.Lock();
m1.Lock();
return 0;
}
nsresult
Sanity3()
{
const char* const tokens[] = {
"###!!! ERROR: Potential deadlock detected",
"=== Cyclical dependency starts at\n--- Mutex : dd.sanity3.m1",
"--- Next dependency:\n--- Mutex : dd.sanity3.m2",
"--- Next dependency:\n--- Mutex : dd.sanity3.m3",
"--- Next dependency:\n--- Mutex : dd.sanity3.m4",
"=== Cycle completed at\n--- Mutex : dd.sanity3.m1",
"###!!! ASSERTION: Potential deadlock detected",
0
};
if (CheckForDeadlock("Sanity3", tokens)) {
PASS();
} else {
FAIL("deadlock not detected");
}
}
nsresult
Sanity4_Child()
{
mozilla::Monitor m1("dd.sanity4.m1");
mozilla::Mutex m2("dd.sanity4.m2");
m1.Enter();
m2.Lock();
m1.Enter();
return 0;
}
nsresult
Sanity4()
{
const char* const tokens[] = {
"Re-entering Monitor after acquiring other resources",
"###!!! ERROR: Potential deadlock detected",
"=== Cyclical dependency starts at\n--- Monitor : dd.sanity4.m1",
"--- Next dependency:\n--- Mutex : dd.sanity4.m2",
"=== Cycle completed at\n--- Monitor : dd.sanity4.m1",
"###!!! ASSERTION: Potential deadlock detected",
0
};
if (CheckForDeadlock("Sanity4", tokens)) {
PASS();
} else {
FAIL("deadlock not detected");
}
}
//-----------------------------------------------------------------------------
// Multithreaded tests
mozilla::Mutex* ttM1;
mozilla::Mutex* ttM2;
static void
TwoThreads_thread(void* arg)
{
PRInt32 m1First = NS_PTR_TO_INT32(arg);
if (m1First) {
ttM1->Lock();
ttM2->Lock();
ttM2->Unlock();
ttM1->Unlock();
}
else {
ttM2->Lock();
ttM1->Lock();
ttM1->Unlock();
ttM2->Unlock();
}
}
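// Note: the first run above records the order m2 -> m1 and the second then
// acquires m1 -> m2; the detector reports the resulting cycle even though the
// two threads are joined sequentially and never actually contend.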
nsresult
TwoThreads_Child()
{
ttM1 = new mozilla::Mutex("dd.twothreads.m1");
ttM2 = new mozilla::Mutex("dd.twothreads.m2");
if (!ttM1 || !ttM2)
NS_RUNTIMEABORT("couldn't allocate mutexes");
PRThread* t1 = spawn(TwoThreads_thread, (void*) 0);
PR_JoinThread(t1);
PRThread* t2 = spawn(TwoThreads_thread, (void*) 1);
PR_JoinThread(t2);
return 0;
}
nsresult
TwoThreads()
{
const char* const tokens[] = {
"###!!! ERROR: Potential deadlock detected",
"=== Cyclical dependency starts at\n--- Mutex : dd.twothreads.m2",
"--- Next dependency:\n--- Mutex : dd.twothreads.m1",
"=== Cycle completed at\n--- Mutex : dd.twothreads.m2",
"###!!! ASSERTION: Potential deadlock detected",
0
};
if (CheckForDeadlock("TwoThreads", tokens)) {
PASS();
} else {
FAIL("deadlock not detected");
}
}
mozilla::Mutex* cndMs[4];
const PRUint32 K = 100000;
static void
ContentionNoDeadlock_thread(void* arg)
{
PRInt32 starti = NS_PTR_TO_INT32(arg);
for (PRUint32 k = 0; k < K; ++k) {
for (PRInt32 i = starti; i < ALEN(cndMs); ++i)
cndMs[i]->Lock();
// comment out the next two lines for deadlocking fun!
for (PRInt32 i = ALEN(cndMs) - 1; i >= starti; --i)
cndMs[i]->Unlock();
starti = (starti + 1) % 3;
}
}
nsresult
ContentionNoDeadlock_Child()
{
PRThread* threads[3];
for (PRUint32 i = 0; i < ALEN(cndMs); ++i)
cndMs[i] = new mozilla::Mutex("dd.cnd.ms");
for (PRInt32 i = 0; i < ALEN(threads); ++i)
threads[i] = spawn(ContentionNoDeadlock_thread, NS_INT32_TO_PTR(i));
for (PRUint32 i = 0; i < ALEN(threads); ++i)
PR_JoinThread(threads[i]);
for (PRUint32 i = 0; i < ALEN(cndMs); ++i)
delete cndMs[i];
return 0;
}
nsresult
ContentionNoDeadlock()
{
Subprocess proc(__FUNCTION__);
proc.RunToCompletion(10000);
if (0 != proc.mExitCode) {
printf("(expected 0 == return code, got %d)\n", proc.mExitCode);
puts("(output)\n----------------------------------\n");
puts(proc.mStdout.get());
puts("----------------------------------\n");
puts("(error output)\n----------------------------------\n");
puts(proc.mStderr.get());
puts("----------------------------------\n");
FAIL("deadlock");
}
PASS();
}
//-----------------------------------------------------------------------------
int
main(int argc, char** argv)
{
if (1 < argc) {
// XXX can we run w/o scoped XPCOM?
const char* test = argv[1];
ScopedXPCOM xpcom(test);
if (xpcom.failed())
return 1;
// Running in a spawned process; call the specified child function.
if (!strcmp("Sanity", test))
return Sanity_Child();
if (!strcmp("Sanity2", test))
return Sanity2_Child();
if (!strcmp("Sanity3", test))
return Sanity3_Child();
if (!strcmp("Sanity4", test))
return Sanity4_Child();
if (!strcmp("TwoThreads", test))
return TwoThreads_Child();
if (!strcmp("ContentionNoDeadlock", test))
return ContentionNoDeadlock_Child();
FAIL("unknown child test");
}
ScopedXPCOM xpcom("Deadlock detector correctness");
if (xpcom.failed())
return 1;
// In the first invocation of this process, we will be the "driver".
int rv = 0;
sPathToThisBinary = argv[0];
if (NS_FAILED(Sanity()))
rv = 1;
if (NS_FAILED(Sanity2()))
rv = 1;
if (NS_FAILED(Sanity3()))
rv = 1;
if (NS_FAILED(Sanity4()))
rv = 1;
if (NS_FAILED(TwoThreads()))
rv = 1;
if (NS_FAILED(ContentionNoDeadlock()))
rv = 1;
return rv;
}


@ -1,202 +0,0 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: sw=4 ts=4 et :
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Chris Jones <jones.chris.g@gmail.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "TestHarness.h"
//#define OLD_API
#define PASS() \
do { \
passed(__FUNCTION__); \
return NS_OK; \
} while (0);
#define FAIL(why) \
do { \
fail(why); \
return NS_ERROR_FAILURE; \
} while (0);
#ifdef OLD_API
# include "nsAutoLock.h"
typedef PRLock* lock_t;
# define NEWLOCK(n) nsAutoLock::NewLock(n)
# define DELETELOCK(v) nsAutoLock::DestroyLock(v)
# define AUTOLOCK(v, l) nsAutoLock v(l)
#else
# include "mozilla/Mutex.h"
typedef mozilla::Mutex* lock_t;
# define NEWLOCK(n) new mozilla::Mutex(n)
# define DELETELOCK(v) delete (v)
# define AUTOLOCK(v, l) mozilla::MutexAutoLock v(*l)
#endif
//-----------------------------------------------------------------------------
static void
AllocLockRecurseUnlockFree(int i)
{
if (0 == i)
return;
lock_t lock = NEWLOCK("deadlockDetector.scalability.t1");
{
AUTOLOCK(_, lock);
AllocLockRecurseUnlockFree(i - 1);
}
DELETELOCK(lock);
}
// This test creates a resource dependency chain N elements long, then
// frees all the resources in the chain.
static nsresult
LengthNDepChain(int N)
{
AllocLockRecurseUnlockFree(N);
PASS();
}
//-----------------------------------------------------------------------------
// This test creates a single lock that is ordered < N resources, then
// repeatedly exercises this order k times.
static nsresult
OneLockNDeps(const int N, const int K)
{
lock_t lock = NEWLOCK("deadlockDetector.scalability.t2.master");
lock_t* locks = new lock_t[N];
if (!locks)
NS_RUNTIMEABORT("couldn't allocate lock array");
for (int i = 0; i < N; ++i)
locks[i] =
NEWLOCK("deadlockDetector.scalability.t2.dep");
// establish orders
{AUTOLOCK(m, lock);
for (int i = 0; i < N; ++i)
AUTOLOCK(s, locks[i]);
}
// exercise order check
{AUTOLOCK(m, lock);
for (int i = 0; i < K; ++i)
for (int j = 0; j < N; ++j)
AUTOLOCK(s, locks[j]);
}
for (int i = 0; i < N; ++i)
DELETELOCK(locks[i]);
delete[] locks;
PASS();
}
//-----------------------------------------------------------------------------
// This test creates N resources and adds the theoretical maximum number
// of dependencies, O(N^2). It then repeats that sequence of
// acquisitions k times. Finally, all resources are freed.
//
// It's very difficult to perform well on this test. It's put forth as a
// challenge problem.
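// For scale: with N = 1<<10 the establishment pass below records
// N*(N-1)/2 = 1024*1023/2 = 523,776 distinct orderings, and the K = 10
// exercise passes then replay that entire pattern ten more times.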
static nsresult
MaxDepsNsq(const int N, const int K)
{
lock_t* locks = new lock_t[N];
if (!locks)
NS_RUNTIMEABORT("couldn't allocate lock array");
for (int i = 0; i < N; ++i)
locks[i] = NEWLOCK("deadlockDetector.scalability.t3");
for (int i = 0; i < N; ++i) {
AUTOLOCK(al1, locks[i]);
for (int j = i+1; j < N; ++j)
AUTOLOCK(al2, locks[j]);
}
for (int i = 0; i < K; ++i) {
for (int j = 0; j < N; ++j) {
AUTOLOCK(al1, locks[j]);
for (int k = j+1; k < N; ++k)
AUTOLOCK(al2, locks[k]);
}
}
for (int i = 0; i < N; ++i)
DELETELOCK(locks[i]);
delete[] locks;
PASS();
}
//-----------------------------------------------------------------------------
int
main(int argc, char** argv)
{
ScopedXPCOM xpcom("Deadlock detector scalability");
if (xpcom.failed())
return 1;
int rv = 0;
// Uncomment these tests to run them. Not expected to be common.
#if 0
if (NS_FAILED(LengthNDepChain(1 << 14))) // 16K
rv = 1;
#endif
#if 0
if (NS_FAILED(OneLockNDeps(1 << 14, 100))) // 16k
rv = 1;
#endif
#if 0
if (NS_FAILED(MaxDepsNsq(1 << 10, 10))) // 1k
rv = 1;
#endif
return rv;
}


@ -1,42 +1,3 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: sw=4 ts=4 et :
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Chris Jones <jones.chris.g@gmail.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "TestHarness.h"
#include "mozilla/CondVar.h"