Bug 939231 - Stop requiring trace-malloc for the deadlock detector; r=bent
The existing deadlock detector code uses the trace-malloc stack walking facilities, which is problematic for a few reasons:

1. It is only available in builds with --enable-trace-malloc, which is not part of the default build configuration.
2. It tries to capture a symbolicated stack trace every time a lock is acquired or released, which is really slow.

This patch changes the deadlock detector to use the XPCOM stack walking and symbolication facilities, and defers symbolication until the point where we need to print out the call stack. This makes the deadlock detector much faster than it currently is in trace-malloc builds.
commit 2524d4cb55
parent bc8601e98e
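The heart of the change is a capture-now, symbolicate-later split: the hot Acquire()/Release() path only records raw program counters, and symbol lookup is deferred until a report is actually printed. Below is a minimal sketch of that pattern using the XPCOM stack-walk APIs the patch itself uses (NS_StackWalk, NS_DescribeCodeAddress, NS_FormatCodeAddressDetails); the helper names RecordPC, CaptureStack, and PrintStack are illustrative only and do not appear in the patch.

// Sketch only -- helper names are illustrative, not from the patch.
#include <stdio.h>
#include <stdint.h>
#include "nsStackWalk.h"   // NS_StackWalk, NS_DescribeCodeAddress, NS_FormatCodeAddressDetails
#include "nsTArray.h"

// Hot path: append each raw return address; no symbol lookup here.
static void RecordPC(void* aPC, void* aSP, void* aClosure)
{
    static_cast<nsTArray<void*>*>(aClosure)->AppendElement(aPC);
}

// Called when a lock is acquired: cheap, just a stack walk.
static nsTArray<void*> CaptureStack()
{
    nsTArray<void*> pcs;
    pcs.SetCapacity(32);
    NS_StackWalk(RecordPC, /* aSkipFrames */ 2, /* aMaxFrames */ 0,
                 &pcs, /* aThread */ 0, /* aPlatformData */ nullptr);
    // Returning by value relies on the nsTArray move constructor added by this patch.
    return pcs;
}

// Called only when a (potential) deadlock is reported: symbolication
// happens here, once per printed frame.
static void PrintStack(const nsTArray<void*>& aPCs, FILE* aFile)
{
    for (uint32_t i = 0; i < aPCs.Length(); ++i) {
        nsCodeAddressDetails details;
        char buf[1024];
        if (NS_SUCCEEDED(NS_DescribeCodeAddress(aPCs[i], &details))) {
            NS_FormatCodeAddressDetails(aPCs[i], &details, buf, sizeof(buf));
            fputs(buf, aFile);
        }
    }
}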
@@ -39,7 +39,7 @@ BlockingResourceBase::DeadlockDetectorEntry::Print(
                    bool aPrintFirstSeenCx) const
 {
     CallStack lastAcquisition = mAcquisitionContext; // RACY, but benign
-    bool maybeCurrentlyAcquired = (CallStack::kNone != lastAcquisition);
+    bool maybeCurrentlyAcquired = !lastAcquisition.IsEmpty();
     CallStack printAcquisition =
         (aPrintFirstSeenCx || !maybeCurrentlyAcquired) ?
             aFirstSeen.mCallContext : lastAcquisition;
@@ -134,7 +134,7 @@ BlockingResourceBase::Acquire(const CallStack& aCallContext)
                      "FIXME bug 456272: annots. to allow Acquire()ing condvars");
         return;
     }
-    NS_ASSERTION(mDDEntry->mAcquisitionContext == CallStack::kNone,
+    NS_ASSERTION(mDDEntry->mAcquisitionContext.IsEmpty(),
                  "reacquiring already acquired resource");
 
     ResourceChainAppend(ResourceChainFront());
@@ -152,8 +152,7 @@ BlockingResourceBase::Release()
     }
 
     BlockingResourceBase* chainFront = ResourceChainFront();
-    NS_ASSERTION(chainFront
-                 && CallStack::kNone != mDDEntry->mAcquisitionContext,
+    NS_ASSERTION(chainFront && !mDDEntry->mAcquisitionContext.IsEmpty(),
                  "Release()ing something that hasn't been Acquire()ed");
 
     if (chainFront == this) {
@@ -178,7 +177,7 @@ BlockingResourceBase::Release()
         curr->mChainPrev = prev->mChainPrev;
     }
 
-    mDDEntry->mAcquisitionContext = CallStack::kNone;
+    mDDEntry->mAcquisitionContext = CallStack::NullCallStack();
 }
 
 
@@ -301,7 +300,7 @@ ReentrantMonitor::Wait(PRIntervalTime interval)
     CallStack savedAcquisitionContext = GetAcquisitionContext();
     BlockingResourceBase* savedChainPrev = mChainPrev;
     mEntryCount = 0;
-    SetAcquisitionContext(CallStack::kNone);
+    SetAcquisitionContext(CallStack::NullCallStack());
     mChainPrev = 0;
 
     // give up the monitor until we're back from Wait()
@@ -328,7 +327,7 @@ CondVar::Wait(PRIntervalTime interval)
     // save mutex state and reset to empty
     CallStack savedAcquisitionContext = mLock->GetAcquisitionContext();
     BlockingResourceBase* savedChainPrev = mLock->mChainPrev;
-    mLock->SetAcquisitionContext(CallStack::kNone);
+    mLock->SetAcquisitionContext(CallStack::NullCallStack());
     mLock->mChainPrev = 0;
 
     // give up mutex until we're back from Wait()
@@ -74,7 +74,7 @@ private:
                               BlockingResourceType aType) :
             mName(aName),
             mType(aType),
-            mAcquisitionContext(CallStack::kNone)
+            mAcquisitionContext(CallStack::NullCallStack())
         {
             NS_ABORT_IF_FALSE(mName, "Name must be nonnull");
         }
@@ -117,7 +117,7 @@ private:
         /**
          * mAcquisitionContext
          * The calling context from which this resource was acquired, or
-         * |CallStack::kNone| if it is currently free (or freed).
+         * |CallStack::NullCallStack()| if it is currently free (or freed).
          */
         CallStack mAcquisitionContext;
     };
@@ -239,7 +239,7 @@ protected:
     /**
      * GetAcquisitionContext
      * Return the calling context from which this resource was acquired,
-     * or CallStack::kNone if it's currently free.
+     * or CallStack::NullCallStack() if it's currently free.
      *
      * *NOT* thread safe. Requires ownership of underlying resource.
      */
@@ -5,7 +5,46 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "DeadlockDetector.h"
+#include "nsStackWalk.h"
+#include "mozilla/Util.h"
 
 namespace mozilla {
-const CallStack CallStack::kNone((CallStack::callstack_id) -1);
+
+static void
+StackCallback(void* pc, void* sp, void* closure)
+{
+    nsTArray<void*>* stack = static_cast<nsTArray<void*>*>(closure);
+    stack->AppendElement(pc);
+}
+
+nsTArray<void*>
+CallStack::GetBacktrace()
+{
+    nsTArray<void*> callstack;
+    callstack.SetCapacity(32); // Hopefully 32 frames is a good average
+    NS_StackWalk(StackCallback, 2, 0, &callstack, 0, nullptr);
+    return mozilla::Move(callstack);
+}
+
+void
+CallStack::Print(FILE* f) const
+{
+    if (mCallStack.IsEmpty()) {
+        fputs(" [stack trace unavailable]\n", f);
+    } else {
+        nsCodeAddressDetails addr;
+        for (uint32_t i = 0; i < mCallStack.Length(); ++i) {
+            nsresult rv = NS_DescribeCodeAddress(mCallStack[i], &addr);
+            if (NS_SUCCEEDED(rv)) {
+                char buf[1024];
+                NS_FormatCodeAddressDetails(mCallStack[i], &addr, buf, ArrayLength(buf));
+                fputs(buf, f);
+            } else {
+                fprintf(f, "Frame information for %p unavailable", mCallStack[i]);
+            }
+        }
+    }
+    fflush(f);
+}
+
 }
@@ -7,6 +7,7 @@
 #define mozilla_DeadlockDetector_h
 
 #include "mozilla/Attributes.h"
+#include "mozilla/Move.h"
 
 #include <stdlib.h>
 
@@ -15,30 +16,13 @@
 
 #include "nsTArray.h"
 
-#ifdef NS_TRACE_MALLOC
-# include "nsTraceMalloc.h"
-#endif // ifdef NS_TRACE_MALLOC
-
 namespace mozilla {
-
-
 // FIXME bug 456272: split this off into a convenience API on top of
 // nsStackWalk?
 class NS_COM_GLUE CallStack
 {
 private:
-#ifdef NS_TRACE_MALLOC
-    typedef nsTMStackTraceID callstack_id;
-    // needs to be a macro to avoid disturbing the backtrace
-# define NS_GET_BACKTRACE() NS_TraceMallocGetStackTrace()
-# define NS_DEADLOCK_DETECTOR_CONSTEXPR
-#else
-    typedef void* callstack_id;
-# define NS_GET_BACKTRACE() 0
-# define NS_DEADLOCK_DETECTOR_CONSTEXPR MOZ_CONSTEXPR
-#endif // ifdef NS_TRACE_MALLOC
-
-    callstack_id mCallStack;
+    nsTArray<void*> mCallStack;
 
 public:
     /**
@@ -53,12 +37,10 @@ public:
      * constructor: it *will* construct a backtrace. This can cause
      * unexpected performance issues.
      */
-    NS_DEADLOCK_DETECTOR_CONSTEXPR
-    CallStack(const callstack_id aCallStack = NS_GET_BACKTRACE()) :
-        mCallStack(aCallStack)
+    CallStack(const nsTArray<void*>& aCallStack = GetBacktrace()) :
+        mCallStack(mozilla::Move(aCallStack))
     {
     }
-    NS_DEADLOCK_DETECTOR_CONSTEXPR
     CallStack(const CallStack& aFrom) :
         mCallStack(aFrom.mCallStack)
     {
@@ -77,22 +59,19 @@ public:
         return mCallStack != aOther.mCallStack;
     }
 
-    // FIXME bug 456272: if this is split off,
-    // NS_TraceMallocPrintStackTrace should be modified to print into
-    // an nsACString
-    void Print(FILE* f) const
+    bool IsEmpty() const
     {
-#ifdef NS_TRACE_MALLOC
-        if (this != &kNone && mCallStack) {
-            NS_TraceMallocPrintStackTrace(f, mCallStack);
-            return;
-        }
-#endif
-        fputs(" [stack trace unavailable]\n", f);
+        return mCallStack.IsEmpty();
     }
 
-    /** The "null" callstack. */
-    static const CallStack kNone;
+    void Print(FILE* f) const;
+
+    static nsTArray<void*> GetBacktrace();
+
+    static const CallStack NullCallStack()
+    {
+        return CallStack(nsTArray<void*>());
+    }
 };
 
 
@@ -158,7 +137,7 @@ public:
 
     ResourceAcquisition(
         const T* aResource,
-        const CallStack aCallContext=CallStack::kNone) :
+        const CallStack aCallContext=CallStack::NullCallStack()) :
         mResource(aResource),
         mCallContext(aCallContext)
     {
@@ -195,7 +174,7 @@ private:
     struct OrderingEntry
     {
         OrderingEntry() :
-            mFirstSeen(CallStack::kNone),
+            mFirstSeen(CallStack::NullCallStack()),
             mOrderedLT() // FIXME bug 456272: set to empirical
         {            // dep size?
         }
@@ -407,7 +386,7 @@ public:
 
         PLHashEntry* second = *GetEntry(aProposed);
         OrderingEntry* e = static_cast<OrderingEntry*>(second->value);
-        if (CallStack::kNone == e->mFirstSeen)
+        if (e->mFirstSeen.IsEmpty())
            e->mFirstSeen = aCallContext;
 
         if (!aLast)
@@ -1653,6 +1653,18 @@ public:
   nsTArray() {}
   explicit nsTArray(size_type capacity) : base_type(capacity) {}
   explicit nsTArray(const nsTArray& other) : base_type(other) {}
+  nsTArray(nsTArray&& other) {
+    this->SwapElements(other);
+  }
+
+  nsTArray& operator=(const nsTArray& other) {
+    base_type::operator=(other);
+    return *this;
+  }
+  nsTArray& operator=(nsTArray&& other) {
+    this->SwapElements(other);
+    return *this;
+  }
 
   template<class Allocator>
   explicit nsTArray(const nsTArray_Impl<E, Allocator>& other) : base_type(other) {}